gh-cskiro-claudex-analysis-…/skills/codebase-auditor/scripts/report_generator.py
#!/usr/bin/env python3
"""
Report Generator
Generates audit reports in multiple formats:
- Markdown (default, human-readable)
- JSON (machine-readable, CI/CD integration)
- HTML (interactive dashboard)
"""
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List
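
# The three generators below consume the same inputs. The exact schema is set
# by the auditor scripts that produce these dictionaries; the shape sketched
# here is only what this module actually reads, shown for orientation:
#
#   metadata: {'path', 'tech_stack' (dict keyed by language), 'total_files', 'total_lines'}
#   summary:  {'overall_score', 'category_scores' (dict of category -> score),
#              'critical_issues', 'high_issues', 'total_issues'}
#   findings: {category: [{'severity', 'title', 'subcategory', 'file', 'line',
#                          'description', 'code_snippet', 'impact',
#                          'remediation', 'effort'}, ...], ...}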

def generate_markdown_report(summary: Dict, findings: Dict[str, List[Dict]], metadata: Dict) -> str:
    """
    Generate a Markdown-formatted audit report.

    Args:
        summary: Executive summary data
        findings: All findings organized by category
        metadata: Project metadata

    Returns:
        Markdown report as string
    """
    report = []

    # Header
    report.append("# Codebase Audit Report")
    report.append(f"\n**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    report.append(f"**Codebase**: `{metadata.get('path', 'Unknown')}`")
    report.append(f"**Tech Stack**: {', '.join(metadata.get('tech_stack', {}).keys())}")
    report.append(f"**Total Files**: {metadata.get('total_files', 0):,}")
    report.append(f"**Lines of Code**: {metadata.get('total_lines', 0):,}")
    report.append("\n---\n")

    # Executive Summary
    report.append("## Executive Summary")
    report.append(f"\n### Overall Health Score: **{summary.get('overall_score', 0)}/100**\n")

    # Score breakdown
    report.append("#### Category Scores\n")
    for category, score in summary.get('category_scores', {}).items():
        emoji = score_to_emoji(score)
        report.append(f"- **{category.replace('_', ' ').title()}**: {score}/100 {emoji}")

    # Issue summary
    report.append("\n#### Issue Summary\n")
    report.append(f"- **Critical Issues**: {summary.get('critical_issues', 0)}")
    report.append(f"- **High Issues**: {summary.get('high_issues', 0)}")
    report.append(f"- **Total Issues**: {summary.get('total_issues', 0)}")
    report.append("\n---\n")

    # Detailed Findings
    report.append("## Detailed Findings\n")
    severity_order = ['critical', 'high', 'medium', 'low']
    for severity in severity_order:
        severity_findings = []
        for category, category_findings in findings.items():
            for finding in category_findings:
                if finding.get('severity') == severity:
                    severity_findings.append((category, finding))

        if severity_findings:
            severity_emoji = severity_to_emoji(severity)
            report.append(f"### {severity_emoji} {severity.upper()} ({len(severity_findings)} issues)\n")
            for category, finding in severity_findings:
                report.append(f"#### {finding.get('title', 'Untitled Issue')}")
                report.append(f"\n**Category**: {category.replace('_', ' ').title()}")
                report.append(f"**Subcategory**: {finding.get('subcategory', 'N/A')}")
                if finding.get('file'):
                    file_ref = f"{finding['file']}"
                    if finding.get('line'):
                        file_ref += f":{finding['line']}"
                    report.append(f"**Location**: `{file_ref}`")
                report.append(f"\n{finding.get('description', 'No description')}")
                if finding.get('code_snippet'):
                    report.append(f"\n```\n{finding['code_snippet']}\n```")
                report.append(f"\n**Impact**: {finding.get('impact', 'Unknown impact')}")
                report.append(f"\n**Remediation**: {finding.get('remediation', 'No remediation suggested')}")
                report.append(f"\n**Effort**: {finding.get('effort', 'Unknown').upper()}\n")
                report.append("---\n")

    # Recommendations
    report.append("## Recommendations\n")
    report.append(generate_recommendations(summary, findings))

    # Footer
    report.append("\n---\n")
    report.append("*Report generated by Codebase Auditor Skill (2024-25 Standards)*")

    return '\n'.join(report)

def generate_json_report(summary: Dict, findings: Dict[str, List[Dict]], metadata: Dict) -> str:
    """
    Generate a JSON-formatted audit report.

    Args:
        summary: Executive summary data
        findings: All findings organized by category
        metadata: Project metadata

    Returns:
        JSON report as string
    """
    report = {
        'generated_at': datetime.now().isoformat(),
        'metadata': metadata,
        'summary': summary,
        'findings': findings,
        'schema_version': '1.0.0',
    }
    return json.dumps(report, indent=2)

def generate_html_report(summary: Dict, findings: Dict[str, List[Dict]], metadata: Dict) -> str:
    """
    Generate an HTML dashboard report.

    Args:
        summary: Executive summary data
        findings: All findings organized by category
        metadata: Project metadata

    Returns:
        HTML report as string
    """
    # Simplified HTML template
    html = f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Codebase Audit Report</title>
    <style>
        body {{
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
            line-height: 1.6;
            max-width: 1200px;
            margin: 0 auto;
            padding: 20px;
            background: #f5f5f5;
        }}
        .header {{
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 30px;
            border-radius: 10px;
            margin-bottom: 20px;
        }}
        .score {{
            font-size: 48px;
            font-weight: bold;
            margin: 20px 0;
        }}
        .metrics {{
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px;
            margin: 20px 0;
        }}
        .metric {{
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }}
        .metric-title {{
            font-size: 14px;
            color: #666;
            text-transform: uppercase;
        }}
        .metric-value {{
            font-size: 32px;
            font-weight: bold;
            margin: 10px 0;
        }}
        .finding {{
            background: white;
            padding: 20px;
            margin: 10px 0;
            border-radius: 8px;
            border-left: 4px solid #ddd;
        }}
        .finding.critical {{ border-left-color: #e53e3e; }}
        .finding.high {{ border-left-color: #dd6b20; }}
        .finding.medium {{ border-left-color: #d69e2e; }}
        .finding.low {{ border-left-color: #38a169; }}
        .badge {{
            display: inline-block;
            padding: 4px 12px;
            border-radius: 12px;
            font-size: 12px;
            font-weight: bold;
            text-transform: uppercase;
        }}
        .badge.critical {{ background: #fed7d7; color: #742a2a; }}
        .badge.high {{ background: #feebc8; color: #7c2d12; }}
        .badge.medium {{ background: #fefcbf; color: #744210; }}
        .badge.low {{ background: #c6f6d5; color: #22543d; }}
        code {{
            background: #f7fafc;
            padding: 2px 6px;
            border-radius: 3px;
            font-family: 'Courier New', monospace;
        }}
        pre {{
            background: #2d3748;
            color: #e2e8f0;
            padding: 15px;
            border-radius: 5px;
            overflow-x: auto;
        }}
    </style>
</head>
<body>
    <div class="header">
        <h1>🔍 Codebase Audit Report</h1>
        <p><strong>Generated:</strong> {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
        <p><strong>Codebase:</strong> {metadata.get('path', 'Unknown')}</p>
        <div class="score">Overall Score: {summary.get('overall_score', 0)}/100</div>
    </div>

    <div class="metrics">
        <div class="metric">
            <div class="metric-title">Critical Issues</div>
            <div class="metric-value" style="color: #e53e3e;">{summary.get('critical_issues', 0)}</div>
        </div>
        <div class="metric">
            <div class="metric-title">High Issues</div>
            <div class="metric-value" style="color: #dd6b20;">{summary.get('high_issues', 0)}</div>
        </div>
        <div class="metric">
            <div class="metric-title">Total Issues</div>
            <div class="metric-value">{summary.get('total_issues', 0)}</div>
        </div>
        <div class="metric">
            <div class="metric-title">Lines of Code</div>
            <div class="metric-value">{metadata.get('total_lines', 0):,}</div>
        </div>
    </div>

    <h2>Findings</h2>
"""

    # Add findings
    severity_order = ['critical', 'high', 'medium', 'low']
    for severity in severity_order:
        for category, category_findings in findings.items():
            for finding in category_findings:
                if finding.get('severity') == severity:
                    html += f"""
    <div class="finding {severity}">
        <div>
            <span class="badge {severity}">{severity}</span>
            <strong>{finding.get('title', 'Untitled')}</strong>
        </div>
        <p>{finding.get('description', 'No description')}</p>
"""
                    if finding.get('file'):
                        html += f"<p><strong>Location:</strong> <code>{finding['file']}"
                        if finding.get('line'):
                            html += f":{finding['line']}"
                        html += "</code></p>"
                    if finding.get('code_snippet'):
                        html += f"<pre><code>{finding['code_snippet']}</code></pre>"
                    html += f"""
        <p><strong>Impact:</strong> {finding.get('impact', 'Unknown')}</p>
        <p><strong>Remediation:</strong> {finding.get('remediation', 'No suggestion')}</p>
    </div>
"""

    html += """
</body>
</html>
"""
    return html

def score_to_emoji(score: float) -> str:
    """Convert score to emoji."""
    if score >= 90:
        return "✅"  # assumed glyph
    elif score >= 70:
        return "⚠️"
    else:
        return "❌"  # assumed glyph


def severity_to_emoji(severity: str) -> str:
    """Convert severity to emoji."""
    severity_map = {
        'critical': '🚨',
        'high': '⚠️',
        'medium': '🟡',  # assumed glyph
        'low': '🟢',  # assumed glyph
    }
    return severity_map.get(severity, '')

def generate_recommendations(summary: Dict, findings: Dict) -> str:
    """Generate recommendations based on findings."""
    recommendations = []
    critical_count = summary.get('critical_issues', 0)
    high_count = summary.get('high_issues', 0)
    overall_score = summary.get('overall_score', 0)

    if critical_count > 0:
        recommendations.append(f"**Immediate Action Required**: Address all {critical_count} critical security and quality issues before deploying to production.")
    if high_count > 5:
        recommendations.append(f"**Sprint Focus**: Prioritize fixing the {high_count} high-severity issues in the next sprint. These significantly impact code quality and maintainability.")
    if overall_score < 70:
        recommendations.append("**Technical Debt Sprint**: Schedule a dedicated sprint to address accumulated technical debt and improve code quality metrics.")
    if 'testing' in findings and len(findings['testing']) > 0:
        recommendations.append("**Testing Improvements**: Increase test coverage to meet the 80% minimum threshold. Focus on critical paths first (authentication, payment, data processing).")
    if 'security' in findings and len(findings['security']) > 0:
        recommendations.append("**Security Review**: Conduct a thorough security review and penetration testing given the security issues found.")

    if not recommendations:
        recommendations.append("**Maintain Standards**: Continue following best practices and maintain current quality levels.")
        recommendations.append("**Continuous Improvement**: Consider implementing automated code quality checks in the CI/CD pipeline.")

    # Number the items at join time so the Markdown list stays sequential
    # even when only some recommendations apply.
    return '\n'.join(f"{i}. {text}" for i, text in enumerate(recommendations, start=1))
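

# Illustrative usage sketch (not part of the original auditor pipeline): the
# dictionaries below are hypothetical stand-ins for the data that the other
# codebase-auditor scripts would normally produce, using only the keys this
# module reads.
def _example_usage() -> None:
    metadata = {
        'path': '/path/to/project',  # hypothetical project path
        'tech_stack': {'python': {}},
        'total_files': 120,
        'total_lines': 15430,
    }
    summary = {
        'overall_score': 82,
        'category_scores': {'security': 75, 'testing': 88},
        'critical_issues': 0,
        'high_issues': 2,
        'total_issues': 14,
    }
    findings = {
        'security': [{
            'severity': 'high',
            'title': 'Hard-coded credential',
            'subcategory': 'secrets',
            'file': 'app/config.py',  # hypothetical finding location
            'line': 12,
            'description': 'A credential appears to be committed to the repository.',
            'impact': 'Credential exposure',
            'remediation': 'Move the secret to an environment variable.',
            'effort': 'low',
        }],
    }

    # Render one report per supported format.
    Path('audit_report.md').write_text(generate_markdown_report(summary, findings, metadata))
    Path('audit_report.json').write_text(generate_json_report(summary, findings, metadata))
    Path('audit_report.html').write_text(generate_html_report(summary, findings, metadata))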