commit e50173a671f9c86e8110b9efecdf891d1a185b04 Author: Zhongwei Li Date: Sun Nov 30 08:21:05 2025 +0800 Initial commit diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..9502990 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,15 @@ +{ + "name": "log-analysis-tool", + "description": "Analyze logs for performance insights and issues", + "version": "1.0.0", + "author": { + "name": "Claude Code Plugins", + "email": "[email protected]" + }, + "skills": [ + "./skills" + ], + "commands": [ + "./commands" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..7baf139 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# log-analysis-tool + +Analyze logs for performance insights and issues diff --git a/commands/analyze-logs.md b/commands/analyze-logs.md new file mode 100644 index 0000000..1ea31c5 --- /dev/null +++ b/commands/analyze-logs.md @@ -0,0 +1,38 @@ +--- +description: Analyze logs for performance insights +--- + +# Log Analysis Tool + +Analyze application logs to identify performance issues and optimization opportunities. + +## Analysis Areas + +1. **Slow Requests**: Identify requests exceeding latency thresholds +2. **Error Patterns**: Detect recurring errors and exceptions +3. **Resource Warnings**: Find resource exhaustion warnings +4. **Query Performance**: Extract slow database queries from logs +5. **Traffic Patterns**: Analyze request patterns and spikes +6. **Exception Trends**: Track exception frequency over time + +## Process + +1. Define log aggregation strategy +2. Set up structured logging if not present +3. Configure log parsing and indexing +4. Create performance-focused log queries +5. Build log analysis dashboards +6. Set up log-based alerts + +## Output + +Provide: +- Structured logging implementation guide +- Log aggregation setup (ELK, Loki, CloudWatch, etc.) 
+- Performance analysis queries +- Dashboard configurations showing: + - Slow request distribution + - Error rate trends + - Performance degradation events +- Alert rules based on log patterns +- Log retention and cost optimization recommendations diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..d97e7aa --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,61 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:jeremylongshore/claude-code-plugins-plus:plugins/performance/log-analysis-tool", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "0d4b162bdcdd834860fe34f67bbc713c2ce50b2e", + "treeHash": "352186bf4b449255fdd965c9764647efeaef308e4200fb8fc2f50ceb6b688930", + "generatedAt": "2025-11-28T10:18:32.825324Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "log-analysis-tool", + "description": "Analyze logs for performance insights and issues", + "version": "1.0.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "5b3db301918cc8caa221910febb0fe14dde5e871aa60d1428adfc8989f5b53e8" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "39c257ec9f84ecebb5e3a8df684a91625a10ee262d9e98a0a3bad86f075225ef" + }, + { + "path": "commands/analyze-logs.md", + "sha256": "bbe0877d110ba019db9d9b5a1d987b7e7856df7f7cacbdd616f111a7272feb52" + }, + { + "path": "skills/log-analysis-tool/SKILL.md", + "sha256": "085ad53a5d00705d5b3129be5a66f41305f92b1df566ca8ae6d8747c35d195f4" + }, + { + "path": "skills/log-analysis-tool/references/README.md", + "sha256": "bf5ae9fee16b7f626074e382fcfff2c886125163b1e934022c6cb7d49815ee46" + }, + { + "path": "skills/log-analysis-tool/scripts/README.md", + "sha256": "72d9c78a701b234bb6626f24867ef614bf1e9e0733669e5c5b5823570c0b5790" + }, + { + "path": "skills/log-analysis-tool/assets/README.md", + "sha256": "7439120bd5e5dae0ecaaf6885ecf6f956babaf8b87c431d10f0dee6947cb8113" + } + ], + "dirSha256": "352186bf4b449255fdd965c9764647efeaef308e4200fb8fc2f50ceb6b688930" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/log-analysis-tool/SKILL.md b/skills/log-analysis-tool/SKILL.md new file mode 100644 index 0000000..f8fd88f --- /dev/null +++ b/skills/log-analysis-tool/SKILL.md @@ -0,0 +1,56 @@ +--- +name: analyzing-logs +description: | + This skill enables Claude to analyze logs for performance insights and issue detection. It is triggered when the user requests log analysis, performance troubleshooting, or debugging assistance. The skill identifies slow requests, error patterns, resource warnings, and other key performance indicators within log files. Use this skill when the user mentions "analyze logs", "performance issues", "error patterns in logs", "slow requests", or requests help with "log aggregation". It helps identify performance bottlenecks and improve application stability by analyzing log data. +allowed-tools: Read, Write, Bash, Grep +version: 1.0.0 +--- + +## Overview + +This skill empowers Claude to automatically analyze application logs, pinpoint performance bottlenecks, and identify recurring errors. It streamlines the debugging process and helps optimize application performance by extracting key insights from log data. 
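Both the analyze-logs command above and this skill assume that application logs are structured and machine-parseable. As a minimal sketch of what that could look like on the application side (illustrative only and not shipped with this plugin; the field names `request_id`, `path`, `duration_ms`, and `status` are assumptions, not names the plugin defines), a Python service might emit one JSON object per log line:

```python
# Minimal structured-logging sketch (illustrative, not part of the plugin).
# The extra fields below (request_id, path, duration_ms, status) are assumed
# conventions; adapt them to whatever your application actually records.
import json
import logging
import time


class JsonFormatter(logging.Formatter):
    """Render each log record as a single JSON object per line."""

    EXTRA_FIELDS = ("request_id", "path", "duration_ms", "status")

    def format(self, record: logging.LogRecord) -> str:
        payload = {
            "timestamp": self.formatTime(record),
            "level": record.levelname,
            "message": record.getMessage(),
        }
        # Fields passed via `extra=` become attributes on the record.
        for key in self.EXTRA_FIELDS:
            if hasattr(record, key):
                payload[key] = getattr(record, key)
        return json.dumps(payload)


handler = logging.StreamHandler()  # stderr here; a FileHandler would feed log aggregation
handler.setFormatter(JsonFormatter())
logger = logging.getLogger("app")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

start = time.monotonic()
# ... handle a request ...
logger.info(
    "request completed",
    extra={
        "request_id": "abc123",
        "path": "/api/orders",
        "duration_ms": round((time.monotonic() - start) * 1000, 2),
        "status": 200,
    },
)
```

Logs in this shape let the slow-request and error-rate queries described above filter on fields such as `duration_ms` and `level` directly, instead of regex-matching free-form messages.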
+
+## How It Works
+
+1. **Initiate Analysis**: Claude activates the log analysis tool upon detecting relevant trigger phrases.
+2. **Log Data Extraction**: The tool extracts relevant data, including timestamps, request durations, error messages, and resource usage metrics.
+3. **Pattern Identification**: The tool identifies patterns such as slow requests, frequent errors, and resource exhaustion warnings.
+4. **Report Generation**: Claude presents a summary of findings, highlighting potential performance issues and optimization opportunities.
+
+## When to Use This Skill
+
+This skill activates when you need to:
+- Identify performance bottlenecks in an application.
+- Debug recurring errors and exceptions.
+- Analyze log data for trends and anomalies.
+- Set up structured logging or log aggregation.
+
+## Examples
+
+### Example 1: Identifying Slow Requests
+
+User request: "Analyze logs for slow requests."
+
+The skill will:
+1. Activate the log analysis tool.
+2. Identify requests exceeding predefined latency thresholds.
+3. Present a list of slow requests with corresponding timestamps and durations.
+
+### Example 2: Detecting Error Patterns
+
+User request: "Find error patterns in the application logs."
+
+The skill will:
+1. Activate the log analysis tool.
+2. Scan logs for recurring error messages and exceptions.
+3. Group similar errors and present a summary of error frequencies.
+
+## Best Practices
+
+- **Log Level**: Ensure appropriate log levels (e.g., INFO, WARN, ERROR) are used to capture relevant information.
+- **Structured Logging**: Implement structured logging (e.g., JSON format) to facilitate efficient analysis.
+- **Log Rotation**: Configure log rotation policies to prevent log files from growing excessively.
+
+## Integration
+
+This skill can be integrated with other tools for monitoring and alerting. For example, it can be used in conjunction with a monitoring plugin to automatically trigger alerts based on log analysis results. It can also work with deployment tools to roll back deployments when critical errors are detected in the logs.
\ No newline at end of file
diff --git a/skills/log-analysis-tool/assets/README.md b/skills/log-analysis-tool/assets/README.md
new file mode 100644
index 0000000..921dc74
--- /dev/null
+++ b/skills/log-analysis-tool/assets/README.md
@@ -0,0 +1,7 @@
+# Assets
+
+Bundled resources for log-analysis-tool skill
+
+- [ ] kpi_dashboard_template.json: A template for creating a performance dashboard in a visualization tool like Grafana or Kibana, pre-configured with key performance indicators extracted from logs.
+- [ ] alerting_rules_template.yaml: A template for defining alerting rules based on log data, which can be used to trigger notifications when certain performance thresholds are exceeded.
+- [ ] example_log_file.log: A sample log file that can be used for testing and demonstration purposes.
diff --git a/skills/log-analysis-tool/references/README.md b/skills/log-analysis-tool/references/README.md
new file mode 100644
index 0000000..6541b53
--- /dev/null
+++ b/skills/log-analysis-tool/references/README.md
@@ -0,0 +1,7 @@
+# References
+
+Bundled resources for log-analysis-tool skill
+
+- [ ] log_format_reference.md: Documentation of common log formats (e.g., Apache access logs, Nginx error logs, systemd journal logs) and how to parse them.
+- [ ] performance_metrics.md: Explanation of key performance metrics that can be extracted from logs, such as request latency, error rates, CPU usage, and memory consumption.
+- [ ] troubleshooting_guide.md: A guide to troubleshooting common performance issues based on log analysis, including steps to identify the root cause and potential solutions. diff --git a/skills/log-analysis-tool/scripts/README.md b/skills/log-analysis-tool/scripts/README.md new file mode 100644 index 0000000..424f85d --- /dev/null +++ b/skills/log-analysis-tool/scripts/README.md @@ -0,0 +1,7 @@ +# Scripts + +Bundled resources for log-analysis-tool skill + +- [ ] analyze_log_file.py: Script to parse a log file and extract key performance indicators (KPIs) such as slow requests, error rates, and resource usage. It should accept the log file path as an argument and output a structured summary. +- [ ] aggregate_logs.sh: Script to aggregate logs from multiple sources (e.g., different servers or containers) into a single file for analysis. It should support various log formats and filtering options. +- [ ] detect_anomalies.py: Script to detect anomalies in log data using statistical methods or machine learning algorithms. It should identify unusual patterns or deviations from the norm that may indicate performance issues or security threats.
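
None of the scripts listed above ship with this initial commit; they are planned resources. As a rough starting point for `analyze_log_file.py` (a sketch under stated assumptions, not the plugin's implementation: it presumes JSON-lines logs with optional `level`, `message`, and `duration_ms` fields, and the default 500 ms slow-request threshold is arbitrary), the summary step might look like this:

```python
#!/usr/bin/env python3
"""Possible starting point for analyze_log_file.py (sketch, not part of this commit).

Assumes one JSON object per line with optional "level", "message", and
"duration_ms" fields; the log format and the default 500 ms threshold are
assumptions, not plugin requirements.
"""
import argparse
import json
from collections import Counter


def analyze(path: str, slow_ms: float = 500.0) -> dict:
    total = slow = errors = 0
    durations = []
    error_messages = Counter()
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            try:
                entry = json.loads(line)
            except json.JSONDecodeError:
                continue  # tolerate non-JSON lines instead of aborting the run
            total += 1
            duration = entry.get("duration_ms")
            if isinstance(duration, (int, float)):
                durations.append(float(duration))
                if duration >= slow_ms:
                    slow += 1
            if entry.get("level") in ("ERROR", "CRITICAL"):
                errors += 1
                error_messages[str(entry.get("message", "<no message>"))] += 1
    durations.sort()
    return {
        "total_entries": total,
        "slow_requests": slow,
        "error_count": errors,
        "error_rate": round(errors / total, 4) if total else 0.0,
        "p95_duration_ms": durations[int(len(durations) * 0.95)] if durations else None,
        "top_errors": error_messages.most_common(5),
    }


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Summarize KPIs from a JSON-lines log file")
    parser.add_argument("log_file", help="path to the log file to analyze")
    parser.add_argument("--slow-ms", type=float, default=500.0,
                        help="slow-request threshold in milliseconds")
    args = parser.parse_args()
    print(json.dumps(analyze(args.log_file, args.slow_ms), indent=2))
```

Invoked as, for example, `python analyze_log_file.py app.log --slow-ms 250`, it prints a JSON summary that could feed the dashboards and alert rules described in `commands/analyze-logs.md`.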