#!/usr/bin/env python3 # ============================================================================ # Quality Report Generator # ============================================================================ # Purpose: Generate comprehensive quality reports in multiple formats # Version: 1.0.0 # Usage: ./report-generator.py --path --format [options] # Returns: 0=success, 1=error # Dependencies: Python 3.6+ # ============================================================================ import sys import argparse import json from datetime import datetime from pathlib import Path from typing import Dict, List, Any, Optional class ReportGenerator: """Generate quality reports in multiple formats.""" def __init__(self, path: str, context: Optional[Dict] = None): """ Initialize report generator. Args: path: Target path being analyzed context: Validation context with results """ self.path = path self.context = context or {} self.timestamp = datetime.now().isoformat() def generate(self, format_type: str = "markdown") -> str: """ Generate report in specified format. Args: format_type: Report format (markdown, json, html) Returns: Formatted report string """ if format_type == "json": return self._generate_json() elif format_type == "html": return self._generate_html() else: return self._generate_markdown() def _generate_markdown(self) -> str: """Generate markdown format report.""" score = self.context.get("score", 0) rating = self.context.get("rating", "Unknown") stars = self.context.get("stars", "") readiness = self.context.get("publication_ready", "Unknown") p0_count = len(self.context.get("issues", {}).get("p0", [])) p1_count = len(self.context.get("issues", {}).get("p1", [])) p2_count = len(self.context.get("issues", {}).get("p2", [])) total_issues = p0_count + p1_count + p2_count target_type = self.context.get("target_type", "plugin") report = f"""# Quality Assessment Report **Generated**: {self.timestamp} **Target**: {self.path} **Type**: Claude Code {target_type.capitalize()} ## Executive Summary **Quality Score**: {score}/100 {stars} ({rating}) **Publication Ready**: {readiness} **Critical Issues**: {p0_count} **Total Issues**: {total_issues} """ if score >= 90: report += "🎉 Excellent! Your plugin is publication-ready.\n\n" elif score >= 75: report += "👍 Nearly ready! Address a few important issues to reach excellent status.\n\n" elif score >= 60: report += "⚠️ Needs work. Several issues should be addressed before publication.\n\n" else: report += "❌ Substantial improvements needed before this is ready for publication.\n\n" # Validation layers report += "## Validation Results\n\n" layers = self.context.get("validation_layers", {}) for layer_name, layer_data in layers.items(): status = layer_data.get("status", "unknown") issue_count = len(layer_data.get("issues", [])) if status == "pass": status_icon = "✅ PASS" elif status == "warnings": status_icon = f"⚠️ WARNINGS ({issue_count} issues)" else: status_icon = f"❌ FAIL ({issue_count} issues)" report += f"### {layer_name.replace('_', ' ').title()} {status_icon}\n" if issue_count == 0: report += "- No issues found\n\n" else: for issue in layer_data.get("issues", [])[:3]: # Show top 3 report += f"- {issue.get('message', 'Unknown issue')}\n" if issue_count > 3: report += f"- ... and {issue_count - 3} more\n" report += "\n" # Issues breakdown report += "## Issues Breakdown\n\n" report += f"### Priority 0 (Critical): {p0_count} issues\n\n" if p0_count == 0: report += "None - excellent!\n\n" else: for idx, issue in enumerate(self.context.get("issues", {}).get("p0", []), 1): report += self._format_issue_markdown(idx, issue) report += f"### Priority 1 (Important): {p1_count} issues\n\n" if p1_count == 0: report += "None - great!\n\n" else: for idx, issue in enumerate(self.context.get("issues", {}).get("p1", []), 1): report += self._format_issue_markdown(idx, issue) report += f"### Priority 2 (Recommended): {p2_count} issues\n\n" if p2_count == 0: report += "No recommendations.\n\n" else: for idx, issue in enumerate(self.context.get("issues", {}).get("p2", [])[:5], 1): report += self._format_issue_markdown(idx, issue) if p2_count > 5: report += f"... and {p2_count - 5} more recommendations\n\n" # Improvement roadmap roadmap = self.context.get("improvement_roadmap", {}) if roadmap: report += "## Improvement Roadmap\n\n" report += f"### Path to Excellent (90+)\n\n" report += f"**Current**: {roadmap.get('current_score', score)}/100\n" report += f"**Target**: {roadmap.get('target_score', 90)}/100\n" report += f"**Gap**: {roadmap.get('gap', 0)} points\n\n" recommendations = roadmap.get("recommendations", []) if recommendations: report += "**Top Recommendations**:\n\n" for idx, rec in enumerate(recommendations[:5], 1): report += f"{idx}. [{rec.get('score_impact', 0):+d} pts] {rec.get('title', 'Unknown')}\n" report += f" - Priority: {rec.get('priority', 'Medium')}\n" report += f" - Effort: {rec.get('effort', 'Unknown')}\n" report += f" - Impact: {rec.get('impact', 'Unknown')}\n\n" # Footer report += "\n---\n" report += "Report generated by marketplace-validator-plugin v1.0.0\n" return report def _format_issue_markdown(self, idx: int, issue: Dict) -> str: """Format a single issue in markdown.""" message = issue.get("message", "Unknown issue") impact = issue.get("impact", "Unknown impact") effort = issue.get("effort", "unknown") fix = issue.get("fix", "No fix available") score_impact = issue.get("score_impact", 0) return f"""#### {idx}. {message} [{score_impact:+d} pts] **Impact**: {impact} **Effort**: {effort.capitalize()} **Fix**: {fix} """ def _generate_json(self) -> str: """Generate JSON format report.""" score = self.context.get("score", 0) rating = self.context.get("rating", "Unknown") stars = self.context.get("stars", "") readiness = self.context.get("publication_ready", "Unknown") p0_issues = self.context.get("issues", {}).get("p0", []) p1_issues = self.context.get("issues", {}).get("p1", []) p2_issues = self.context.get("issues", {}).get("p2", []) report = { "metadata": { "generated": self.timestamp, "target": self.path, "type": self.context.get("target_type", "plugin"), "validator_version": "1.0.0" }, "executive_summary": { "score": score, "rating": rating, "stars": stars, "publication_ready": readiness, "critical_issues": len(p0_issues), "total_issues": len(p0_issues) + len(p1_issues) + len(p2_issues) }, "validation_layers": self.context.get("validation_layers", {}), "issues": { "p0": p0_issues, "p1": p1_issues, "p2": p2_issues }, "improvement_roadmap": self.context.get("improvement_roadmap", {}) } return json.dumps(report, indent=2) def _generate_html(self) -> str: """Generate HTML format report.""" score = self.context.get("score", 0) rating = self.context.get("rating", "Unknown") stars = self.context.get("stars", "") readiness = self.context.get("publication_ready", "Unknown") p0_count = len(self.context.get("issues", {}).get("p0", [])) p1_count = len(self.context.get("issues", {}).get("p1", [])) p2_count = len(self.context.get("issues", {}).get("p2", [])) total_issues = p0_count + p1_count + p2_count # Determine score color if score >= 90: score_color = "#10b981" # green elif score >= 75: score_color = "#3b82f6" # blue elif score >= 60: score_color = "#f59e0b" # orange else: score_color = "#ef4444" # red html = f""" Quality Assessment Report

Quality Assessment Report

Generated: {self.timestamp}
Target: {self.path}
Type: Claude Code Plugin
{score}
{stars} {rating}
{readiness}
Critical Issues
{p0_count}
Important Issues
{p1_count}
Recommendations
{p2_count}
Total Issues
{total_issues}

Validation Layers

""" # Validation layers layers = self.context.get("validation_layers", {}) for layer_name, layer_data in layers.items(): status = layer_data.get("status", "unknown") badge_class = "pass" if status == "pass" else ("warning" if status == "warnings" else "fail") html += f' {layer_name.replace("_", " ").title()}: {status.upper()}\n' html += """

Issues Breakdown

""" # Issues for priority, priority_name in [("p0", "Critical"), ("p1", "Important"), ("p2", "Recommended")]: issues = self.context.get("issues", {}).get(priority, []) html += f'

Priority {priority[1]}: {priority_name} ({len(issues)} issues)

\n' for issue in issues[:5]: # Show top 5 per priority message = issue.get("message", "Unknown issue") impact = issue.get("impact", "Unknown") effort = issue.get("effort", "unknown") fix = issue.get("fix", "No fix available") html += f"""
{message}
Impact: {impact}
Effort: {effort.capitalize()}
Fix: {fix}
""" html += """
""" return html def main(): """Main CLI interface.""" parser = argparse.ArgumentParser( description="Generate comprehensive quality reports", formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( "--path", required=True, help="Target path being analyzed" ) parser.add_argument( "--format", choices=["markdown", "json", "html"], default="markdown", help="Output format (default: markdown)" ) parser.add_argument( "--output", help="Output file path (optional, defaults to stdout)" ) parser.add_argument( "--context", help="Path to JSON file with validation context" ) args = parser.parse_args() # Load context if provided context = {} if args.context: try: with open(args.context, 'r') as f: context = json.load(f) except FileNotFoundError: print(f"Warning: Context file not found: {args.context}", file=sys.stderr) except json.JSONDecodeError as e: print(f"Error: Invalid JSON in context file: {e}", file=sys.stderr) return 1 # Generate report generator = ReportGenerator(args.path, context) report = generator.generate(args.format) # Output report if args.output: try: with open(args.output, 'w') as f: f.write(report) print(f"Report generated: {args.output}") except IOError as e: print(f"Error writing to file: {e}", file=sys.stderr) return 1 else: print(report) return 0 if __name__ == "__main__": sys.exit(main())