Initial commit
305
commands/quality-analysis/.scripts/issue-prioritizer.sh
Executable file
@@ -0,0 +1,305 @@
#!/usr/bin/env bash

# ============================================================================
# Issue Prioritization Script
# ============================================================================
# Purpose: Categorize and prioritize validation issues into P0/P1/P2 tiers
# Version: 1.0.0
# Usage: ./issue-prioritizer.sh <issues-json-file> [criteria]
# Returns: 0=success, 1=error
# Dependencies: jq, bash 4.0+
# ============================================================================

set -euo pipefail

# Configuration
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Priority definitions
declare -A PRIORITY_NAMES=(
    [0]="Critical - Must Fix"
    [1]="Important - Should Fix"
    [2]="Recommended - Nice to Have"
)

declare -A PRIORITY_ICONS=(
    [0]="❌"
    [1]="⚠️ "
    [2]="💡"
)

# Effort labels
declare -A EFFORT_LABELS=(
    [low]="Low"
    [medium]="Medium"
    [high]="High"
)

# Effort time estimates
declare -A EFFORT_TIMES=(
    [low]="5-15 minutes"
    [medium]="30-60 minutes"
    [high]="2+ hours"
)

# ============================================================================
# Functions
# ============================================================================

usage() {
    cat <<EOF
Usage: $0 <issues-json-file> [criteria]

Arguments:
  issues-json-file  Path to JSON file with validation issues
  criteria          Prioritization criteria: severity|impact|effort (default: severity)

Examples:
  $0 validation-results.json
  $0 results.json impact
  $0 results.json severity

JSON Structure:
  {
    "errors": [{"type": "...", "severity": "critical", ...}],
    "warnings": [{"type": "...", "severity": "important", ...}],
    "recommendations": [{"type": "...", "severity": "recommended", ...}]
  }
EOF
    exit 1
}

check_dependencies() {
    local missing_deps=()

    if ! command -v jq &> /dev/null; then
        missing_deps+=("jq")
    fi

    if [ ${#missing_deps[@]} -gt 0 ]; then
        echo "Error: Missing dependencies: ${missing_deps[*]}" >&2
        echo "Install with: sudo apt-get install ${missing_deps[*]}" >&2
        return 1
    fi

    return 0
}

determine_priority() {
    local severity="$1"
    local type="$2"

    # P0 (Critical) - Blocking issues
    if [[ "$severity" == "critical" ]] || \
       [[ "$type" =~ ^(missing_required|invalid_json|security_vulnerability|format_violation)$ ]]; then
        echo "0"
        return
    fi

    # P1 (Important) - Should fix
    if [[ "$severity" == "important" ]] || \
       [[ "$type" =~ ^(missing_recommended|documentation_gap|convention_violation|performance)$ ]]; then
        echo "1"
        return
    fi

    # P2 (Recommended) - Nice to have
    echo "2"
}

get_effort_estimate() {
    local type="$1"

    # High effort
    if [[ "$type" =~ ^(security_vulnerability|performance|architecture)$ ]]; then
        echo "high"
        return
    fi

    # Medium effort
    if [[ "$type" =~ ^(documentation_gap|convention_violation|missing_recommended)$ ]]; then
        echo "medium"
        return
    fi

    # Low effort (default)
    echo "low"
}

format_issue() {
    local priority="$1"
    local message="$2"
    local impact="${3:-Unknown impact}"
    local effort="${4:-low}"
    local fix="${5:-No fix suggestion available}"

    local icon="${PRIORITY_ICONS[$priority]}"
    local effort_label="${EFFORT_LABELS[$effort]}"
    local effort_time="${EFFORT_TIMES[$effort]}"

    cat <<EOF
${icon} ${message}
   Impact: ${impact}
   Effort: ${effort_label} (${effort_time})
   Fix: ${fix}

EOF
}

process_issues() {
    local json_file="$1"
    local criteria="${2:-severity}"

    # Validate JSON file exists
    if [[ ! -f "$json_file" ]]; then
        echo "Error: File not found: $json_file" >&2
        return 1
    fi

    # Validate JSON syntax
    if ! jq empty "$json_file" 2>/dev/null; then
        echo "Error: Invalid JSON in $json_file" >&2
        return 1
    fi

    # Count total issues
    local total_errors=$(jq '.errors // [] | length' "$json_file")
    local total_warnings=$(jq '.warnings // [] | length' "$json_file")
    local total_recommendations=$(jq '.recommendations // [] | length' "$json_file")
    local total_issues=$((total_errors + total_warnings + total_recommendations))

    if [[ $total_issues -eq 0 ]]; then
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        echo "ISSUE PRIORITIZATION"
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        echo ""
        echo "No issues found! Quality score is perfect."
        return 0
    fi

    # Initialize priority counters
    declare -A priority_counts=([0]=0 [1]=0 [2]=0)
    declare -A priority_issues=([0]="" [1]="" [2]="")

    # Process errors
    while IFS= read -r issue; do
        local type=$(echo "$issue" | jq -r '.type // "unknown"')
        local severity=$(echo "$issue" | jq -r '.severity // "critical"')
        local message=$(echo "$issue" | jq -r '.message // "Unknown error"')
        local impact=$(echo "$issue" | jq -r '.impact // "Unknown impact"')
        local fix=$(echo "$issue" | jq -r '.fix // "No fix available"')
        local score_impact=$(echo "$issue" | jq -r '.score_impact // 0')

        local priority=$(determine_priority "$severity" "$type")
        local effort=$(get_effort_estimate "$type")

        priority_counts[$priority]=$((priority_counts[$priority] + 1))

        local formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
        priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c '.errors // [] | .[]' "$json_file")

    # Process warnings
    while IFS= read -r issue; do
        local type=$(echo "$issue" | jq -r '.type // "unknown"')
        local severity=$(echo "$issue" | jq -r '.severity // "important"')
        local message=$(echo "$issue" | jq -r '.message // "Unknown warning"')
        local impact=$(echo "$issue" | jq -r '.impact // "Unknown impact"')
        local fix=$(echo "$issue" | jq -r '.fix // "No fix available"')

        local priority=$(determine_priority "$severity" "$type")
        local effort=$(get_effort_estimate "$type")

        priority_counts[$priority]=$((priority_counts[$priority] + 1))

        local formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
        priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c '.warnings // [] | .[]' "$json_file")

    # Process recommendations
    while IFS= read -r issue; do
        local type=$(echo "$issue" | jq -r '.type // "unknown"')
        local severity=$(echo "$issue" | jq -r '.severity // "recommended"')
        local message=$(echo "$issue" | jq -r '.message // "Recommendation"')
        local impact=$(echo "$issue" | jq -r '.impact // "Minor quality improvement"')
        local fix=$(echo "$issue" | jq -r '.fix // "No fix available"')

        local priority=$(determine_priority "$severity" "$type")
        local effort=$(get_effort_estimate "$type")

        priority_counts[$priority]=$((priority_counts[$priority] + 1))

        local formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
        priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c '.recommendations // [] | .[]' "$json_file")

    # Display results
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "ISSUE PRIORITIZATION"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    echo "Total Issues: $total_issues"
    echo ""

    # Display each priority tier
    for priority in 0 1 2; do
        local count=${priority_counts[$priority]}
        local name="${PRIORITY_NAMES[$priority]}"

        if [[ $count -gt 0 ]]; then
            echo "Priority $priority ($name): $count"
            echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            echo -e "${priority_issues[$priority]}"
        fi
    done

    # Summary
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "Summary:"
    echo "- Fix P0 issues first (blocking publication)"
    echo "- Address P1 issues for quality improvement"
    echo "- Consider P2 improvements for excellence"

    if [[ ${priority_counts[0]} -gt 0 ]]; then
        echo ""
        echo "⚠️ WARNING: ${priority_counts[0]} blocking issue(s) must be fixed before publication"
    fi

    return 0
}

# ============================================================================
# Main
# ============================================================================

main() {
    # Check arguments
    if [[ $# -lt 1 ]]; then
        usage
    fi

    local json_file="$1"
    local criteria="${2:-severity}"

    # Check dependencies
    if ! check_dependencies; then
        return 1
    fi

    # Validate criteria
    if [[ ! "$criteria" =~ ^(severity|impact|effort)$ ]]; then
        echo "Error: Invalid criteria '$criteria'. Use: severity|impact|effort" >&2
        return 1
    fi

    # Process issues
    process_issues "$json_file" "$criteria"

    return 0
}

main "$@"
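A quick smoke test for the script above, as a sketch: the sample path and issue values are invented for illustration, but the field names mirror the jq queries in the script and the severity/type values map to P0 and P1 via determine_priority.

# Build a minimal two-issue input file and run the prioritizer on it.
cat > /tmp/sample-issues.json <<'EOF'
{
  "errors": [
    {"type": "missing_required", "severity": "critical",
     "message": "plugin.json is missing the name field",
     "impact": "Plugin cannot be installed", "fix": "Add a name field"}
  ],
  "warnings": [
    {"type": "documentation_gap", "severity": "important",
     "message": "README has no usage section",
     "impact": "Users must read the source", "fix": "Document basic usage"}
  ]
}
EOF

# Expect one P0 issue (critical severity) and one P1 issue (important severity).
./issue-prioritizer.sh /tmp/sample-issues.json severity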
541
commands/quality-analysis/.scripts/report-generator.py
Executable file
@@ -0,0 +1,541 @@
#!/usr/bin/env python3

# ============================================================================
# Quality Report Generator
# ============================================================================
# Purpose: Generate comprehensive quality reports in multiple formats
# Version: 1.0.0
# Usage: ./report-generator.py --path <path> --format <format> [options]
# Returns: 0=success, 1=error
# Dependencies: Python 3.6+
# ============================================================================

import sys
import argparse
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional


class ReportGenerator:
    """Generate quality reports in multiple formats."""

    def __init__(self, path: str, context: Optional[Dict] = None):
        """
        Initialize report generator.

        Args:
            path: Target path being analyzed
            context: Validation context with results
        """
        self.path = path
        self.context = context or {}
        self.timestamp = datetime.now().isoformat()

    def generate(self, format_type: str = "markdown") -> str:
        """
        Generate report in specified format.

        Args:
            format_type: Report format (markdown, json, html)

        Returns:
            Formatted report string
        """
        if format_type == "json":
            return self._generate_json()
        elif format_type == "html":
            return self._generate_html()
        else:
            return self._generate_markdown()

    def _generate_markdown(self) -> str:
        """Generate markdown format report."""
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")

        p0_count = len(self.context.get("issues", {}).get("p0", []))
        p1_count = len(self.context.get("issues", {}).get("p1", []))
        p2_count = len(self.context.get("issues", {}).get("p2", []))
        total_issues = p0_count + p1_count + p2_count

        target_type = self.context.get("target_type", "plugin")

        report = f"""# Quality Assessment Report

**Generated**: {self.timestamp}
**Target**: {self.path}
**Type**: Claude Code {target_type.capitalize()}

## Executive Summary

**Quality Score**: {score}/100 {stars} ({rating})
**Publication Ready**: {readiness}
**Critical Issues**: {p0_count}
**Total Issues**: {total_issues}

"""

        if score >= 90:
            report += "🎉 Excellent! Your plugin is publication-ready.\n\n"
        elif score >= 75:
            report += "👍 Nearly ready! Address a few important issues to reach excellent status.\n\n"
        elif score >= 60:
            report += "⚠️ Needs work. Several issues should be addressed before publication.\n\n"
        else:
            report += "❌ Substantial improvements needed before this is ready for publication.\n\n"

        # Validation layers
        report += "## Validation Results\n\n"
        layers = self.context.get("validation_layers", {})

        for layer_name, layer_data in layers.items():
            status = layer_data.get("status", "unknown")
            issue_count = len(layer_data.get("issues", []))

            if status == "pass":
                status_icon = "✅ PASS"
            elif status == "warnings":
                status_icon = f"⚠️ WARNINGS ({issue_count} issues)"
            else:
                status_icon = f"❌ FAIL ({issue_count} issues)"

            report += f"### {layer_name.replace('_', ' ').title()} {status_icon}\n"

            if issue_count == 0:
                report += "- No issues found\n\n"
            else:
                for issue in layer_data.get("issues", [])[:3]:  # Show top 3
                    report += f"- {issue.get('message', 'Unknown issue')}\n"
                if issue_count > 3:
                    report += f"- ... and {issue_count - 3} more\n"
                report += "\n"

        # Issues breakdown
        report += "## Issues Breakdown\n\n"

        report += f"### Priority 0 (Critical): {p0_count} issues\n\n"
        if p0_count == 0:
            report += "None - excellent!\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p0", []), 1):
                report += self._format_issue_markdown(idx, issue)

        report += f"### Priority 1 (Important): {p1_count} issues\n\n"
        if p1_count == 0:
            report += "None - great!\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p1", []), 1):
                report += self._format_issue_markdown(idx, issue)

        report += f"### Priority 2 (Recommended): {p2_count} issues\n\n"
        if p2_count == 0:
            report += "No recommendations.\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p2", [])[:5], 1):
                report += self._format_issue_markdown(idx, issue)
            if p2_count > 5:
                report += f"... and {p2_count - 5} more recommendations\n\n"

        # Improvement roadmap
        roadmap = self.context.get("improvement_roadmap", {})
        if roadmap:
            report += "## Improvement Roadmap\n\n"
            report += "### Path to Excellent (90+)\n\n"
            report += f"**Current**: {roadmap.get('current_score', score)}/100\n"
            report += f"**Target**: {roadmap.get('target_score', 90)}/100\n"
            report += f"**Gap**: {roadmap.get('gap', 0)} points\n\n"

            recommendations = roadmap.get("recommendations", [])
            if recommendations:
                report += "**Top Recommendations**:\n\n"
                for idx, rec in enumerate(recommendations[:5], 1):
                    report += f"{idx}. [{rec.get('score_impact', 0):+d} pts] {rec.get('title', 'Unknown')}\n"
                    report += f"   - Priority: {rec.get('priority', 'Medium')}\n"
                    report += f"   - Effort: {rec.get('effort', 'Unknown')}\n"
                    report += f"   - Impact: {rec.get('impact', 'Unknown')}\n\n"

        # Footer
        report += "\n---\n"
        report += "Report generated by marketplace-validator-plugin v1.0.0\n"

        return report

    def _format_issue_markdown(self, idx: int, issue: Dict) -> str:
        """Format a single issue in markdown."""
        message = issue.get("message", "Unknown issue")
        impact = issue.get("impact", "Unknown impact")
        effort = issue.get("effort", "unknown")
        fix = issue.get("fix", "No fix available")
        score_impact = issue.get("score_impact", 0)

        return f"""#### {idx}. {message} [{score_impact:+d} pts]

**Impact**: {impact}
**Effort**: {effort.capitalize()}
**Fix**: {fix}

"""

    def _generate_json(self) -> str:
        """Generate JSON format report."""
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")

        p0_issues = self.context.get("issues", {}).get("p0", [])
        p1_issues = self.context.get("issues", {}).get("p1", [])
        p2_issues = self.context.get("issues", {}).get("p2", [])

        report = {
            "metadata": {
                "generated": self.timestamp,
                "target": self.path,
                "type": self.context.get("target_type", "plugin"),
                "validator_version": "1.0.0"
            },
            "executive_summary": {
                "score": score,
                "rating": rating,
                "stars": stars,
                "publication_ready": readiness,
                "critical_issues": len(p0_issues),
                "total_issues": len(p0_issues) + len(p1_issues) + len(p2_issues)
            },
            "validation_layers": self.context.get("validation_layers", {}),
            "issues": {
                "p0": p0_issues,
                "p1": p1_issues,
                "p2": p2_issues
            },
            "improvement_roadmap": self.context.get("improvement_roadmap", {})
        }

        return json.dumps(report, indent=2)

    def _generate_html(self) -> str:
        """Generate HTML format report."""
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")

        p0_count = len(self.context.get("issues", {}).get("p0", []))
        p1_count = len(self.context.get("issues", {}).get("p1", []))
        p2_count = len(self.context.get("issues", {}).get("p2", []))
        total_issues = p0_count + p1_count + p2_count

        # Determine score color
        if score >= 90:
            score_color = "#10b981"  # green
        elif score >= 75:
            score_color = "#3b82f6"  # blue
        elif score >= 60:
            score_color = "#f59e0b"  # orange
        else:
            score_color = "#ef4444"  # red

        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Quality Assessment Report</title>
    <style>
        * {{
            margin: 0;
            padding: 0;
            box-sizing: border-box;
        }}
        body {{
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
            line-height: 1.6;
            color: #333;
            background: #f5f5f5;
            padding: 20px;
        }}
        .container {{
            max-width: 1200px;
            margin: 0 auto;
            background: white;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
            padding: 40px;
        }}
        h1 {{
            font-size: 32px;
            margin-bottom: 10px;
            color: #1f2937;
        }}
        .meta {{
            color: #6b7280;
            margin-bottom: 30px;
            padding-bottom: 20px;
            border-bottom: 2px solid #e5e7eb;
        }}
        .score-card {{
            background: linear-gradient(135deg, {score_color} 0%, {score_color}dd 100%);
            color: white;
            padding: 30px;
            border-radius: 8px;
            margin-bottom: 30px;
            text-align: center;
        }}
        .score-number {{
            font-size: 72px;
            font-weight: bold;
            line-height: 1;
        }}
        .score-label {{
            font-size: 18px;
            margin-top: 10px;
            opacity: 0.9;
        }}
        .stats {{
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px;
            margin-bottom: 30px;
        }}
        .stat-card {{
            background: #f9fafb;
            padding: 20px;
            border-radius: 6px;
            border-left: 4px solid #3b82f6;
        }}
        .stat-label {{
            font-size: 14px;
            color: #6b7280;
            margin-bottom: 5px;
        }}
        .stat-value {{
            font-size: 24px;
            font-weight: bold;
            color: #1f2937;
        }}
        .section {{
            margin-bottom: 40px;
        }}
        h2 {{
            font-size: 24px;
            margin-bottom: 20px;
            color: #1f2937;
            border-bottom: 2px solid #e5e7eb;
            padding-bottom: 10px;
        }}
        h3 {{
            font-size: 18px;
            margin-bottom: 15px;
            color: #374151;
        }}
        .issue {{
            background: #f9fafb;
            padding: 20px;
            border-radius: 6px;
            margin-bottom: 15px;
            border-left: 4px solid #6b7280;
        }}
        .issue.p0 {{
            border-left-color: #ef4444;
            background: #fef2f2;
        }}
        .issue.p1 {{
            border-left-color: #f59e0b;
            background: #fffbeb;
        }}
        .issue.p2 {{
            border-left-color: #3b82f6;
            background: #eff6ff;
        }}
        .issue-title {{
            font-weight: bold;
            margin-bottom: 10px;
            font-size: 16px;
        }}
        .issue-detail {{
            font-size: 14px;
            color: #6b7280;
            margin: 5px 0;
        }}
        .badge {{
            display: inline-block;
            padding: 4px 12px;
            border-radius: 12px;
            font-size: 12px;
            font-weight: 600;
            margin-right: 8px;
        }}
        .badge.pass {{
            background: #d1fae5;
            color: #065f46;
        }}
        .badge.warning {{
            background: #fef3c7;
            color: #92400e;
        }}
        .badge.fail {{
            background: #fee2e2;
            color: #991b1b;
        }}
        .footer {{
            margin-top: 40px;
            padding-top: 20px;
            border-top: 2px solid #e5e7eb;
            color: #6b7280;
            font-size: 14px;
            text-align: center;
        }}
    </style>
</head>
<body>
    <div class="container">
        <h1>Quality Assessment Report</h1>
        <div class="meta">
            <strong>Generated:</strong> {self.timestamp}<br>
            <strong>Target:</strong> {self.path}<br>
            <strong>Type:</strong> Claude Code Plugin
        </div>

        <div class="score-card">
            <div class="score-number">{score}</div>
            <div class="score-label">{stars} {rating}</div>
            <div class="score-label">{readiness}</div>
        </div>

        <div class="stats">
            <div class="stat-card">
                <div class="stat-label">Critical Issues</div>
                <div class="stat-value">{p0_count}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Important Issues</div>
                <div class="stat-value">{p1_count}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Recommendations</div>
                <div class="stat-value">{p2_count}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Total Issues</div>
                <div class="stat-value">{total_issues}</div>
            </div>
        </div>

        <div class="section">
            <h2>Validation Layers</h2>
"""

        # Validation layers
        layers = self.context.get("validation_layers", {})
        for layer_name, layer_data in layers.items():
            status = layer_data.get("status", "unknown")
            badge_class = "pass" if status == "pass" else ("warning" if status == "warnings" else "fail")
            html += f'            <span class="badge {badge_class}">{layer_name.replace("_", " ").title()}: {status.upper()}</span>\n'

        html += """        </div>

        <div class="section">
            <h2>Issues Breakdown</h2>
"""

        # Issues
        for priority, priority_name in [("p0", "Critical"), ("p1", "Important"), ("p2", "Recommended")]:
            issues = self.context.get("issues", {}).get(priority, [])
            html += f'            <h3>Priority {priority[1]}: {priority_name} ({len(issues)} issues)</h3>\n'

            for issue in issues[:5]:  # Show top 5 per priority
                message = issue.get("message", "Unknown issue")
                impact = issue.get("impact", "Unknown")
                effort = issue.get("effort", "unknown")
                fix = issue.get("fix", "No fix available")

                html += f"""            <div class="issue {priority}">
                <div class="issue-title">{message}</div>
                <div class="issue-detail"><strong>Impact:</strong> {impact}</div>
                <div class="issue-detail"><strong>Effort:</strong> {effort.capitalize()}</div>
                <div class="issue-detail"><strong>Fix:</strong> {fix}</div>
            </div>
"""

        html += """        </div>

        <div class="footer">
            Report generated by marketplace-validator-plugin v1.0.0
        </div>
    </div>
</body>
</html>
"""

        return html


def main():
    """Main CLI interface."""
    parser = argparse.ArgumentParser(
        description="Generate comprehensive quality reports",
        formatter_class=argparse.RawDescriptionHelpFormatter
    )

    parser.add_argument(
        "--path",
        required=True,
        help="Target path being analyzed"
    )

    parser.add_argument(
        "--format",
        choices=["markdown", "json", "html"],
        default="markdown",
        help="Output format (default: markdown)"
    )

    parser.add_argument(
        "--output",
        help="Output file path (optional, defaults to stdout)"
    )

    parser.add_argument(
        "--context",
        help="Path to JSON file with validation context"
    )

    args = parser.parse_args()

    # Load context if provided
    context = {}
    if args.context:
        try:
            with open(args.context, 'r') as f:
                context = json.load(f)
        except FileNotFoundError:
            print(f"Warning: Context file not found: {args.context}", file=sys.stderr)
        except json.JSONDecodeError as e:
            print(f"Error: Invalid JSON in context file: {e}", file=sys.stderr)
            return 1

    # Generate report
    generator = ReportGenerator(args.path, context)
    report = generator.generate(args.format)

    # Output report
    if args.output:
        try:
            with open(args.output, 'w') as f:
                f.write(report)
            print(f"Report generated: {args.output}")
        except IOError as e:
            print(f"Error writing to file: {e}", file=sys.stderr)
            return 1
    else:
        print(report)

    return 0


if __name__ == "__main__":
    sys.exit(main())
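A sketch of how the generator might be driven from the shell: the paths and scores below are invented, but the CLI flags come from the argparse definitions above and the context keys mirror the context.get() lookups in ReportGenerator.

# Hand-write a small validation context, then render it in two formats.
cat > /tmp/context.json <<'EOF'
{
  "score": 82,
  "rating": "Good",
  "stars": "⭐⭐⭐⭐",
  "publication_ready": "With Minor Changes - Nearly ready",
  "target_type": "plugin",
  "issues": {"p0": [], "p1": [], "p2": []},
  "validation_layers": {"manifest": {"status": "pass", "issues": []}}
}
EOF

# Markdown report to stdout (the default format)...
./report-generator.py --path ./my-plugin --context /tmp/context.json

# ...and a standalone HTML report written to a file.
./report-generator.py --path ./my-plugin --context /tmp/context.json \
    --format html --output /tmp/report.html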
239
commands/quality-analysis/.scripts/scoring-algorithm.py
Executable file
@@ -0,0 +1,239 @@
#!/usr/bin/env python3

# ============================================================================
# Quality Scoring Algorithm
# ============================================================================
# Purpose: Calculate quality score (0-100) based on validation results
# Version: 1.0.0
# Usage: ./scoring-algorithm.py --errors N --warnings N --missing N
# Returns: 0=success, 1=error
# Dependencies: Python 3.6+
# ============================================================================

import sys
import argparse
import json


def calculate_quality_score(errors: int, warnings: int, missing_recommended: int) -> int:
    """
    Calculate quality score based on validation issues.

    Algorithm:
        score = 100
        score -= errors * 20              # Critical errors: -20 each
        score -= warnings * 10            # Warnings: -10 each
        score -= missing_recommended * 5  # Missing fields: -5 each
        return max(0, score)

    Args:
        errors: Number of critical errors
        warnings: Number of warnings
        missing_recommended: Number of missing recommended fields

    Returns:
        Quality score (0-100)
    """
    score = 100
    score -= errors * 20
    score -= warnings * 10
    score -= missing_recommended * 5
    return max(0, score)


def get_rating(score: int) -> str:
    """
    Get quality rating based on score.

    Args:
        score: Quality score (0-100)

    Returns:
        Rating string
    """
    if score >= 90:
        return "Excellent"
    elif score >= 75:
        return "Good"
    elif score >= 60:
        return "Fair"
    elif score >= 40:
        return "Needs Improvement"
    else:
        return "Poor"


def get_stars(score: int) -> str:
    """
    Get star rating based on score.

    Args:
        score: Quality score (0-100)

    Returns:
        Star rating string
    """
    if score >= 90:
        return "⭐⭐⭐⭐⭐"
    elif score >= 75:
        return "⭐⭐⭐⭐"
    elif score >= 60:
        return "⭐⭐⭐"
    elif score >= 40:
        return "⭐⭐"
    else:
        return "⭐"


def get_publication_readiness(score: int) -> str:
    """
    Determine publication readiness based on score.

    Args:
        score: Quality score (0-100)

    Returns:
        Publication readiness status
    """
    if score >= 90:
        return "Yes - Ready to publish"
    elif score >= 75:
        return "With Minor Changes - Nearly ready"
    elif score >= 60:
        return "Needs Work - Significant improvements needed"
    else:
        return "Not Ready - Major overhaul required"


def format_output(score: int, errors: int, warnings: int, missing: int,
                  output_format: str = "text") -> str:
    """
    Format score output in requested format.

    Args:
        score: Quality score
        errors: Error count
        warnings: Warning count
        missing: Missing field count
        output_format: Output format (text, json, compact)

    Returns:
        Formatted output string
    """
    rating = get_rating(score)
    stars = get_stars(score)
    readiness = get_publication_readiness(score)

    if output_format == "json":
        return json.dumps({
            "score": score,
            "rating": rating,
            "stars": stars,
            "publication_ready": readiness,
            "breakdown": {
                "base_score": 100,
                "errors_penalty": errors * 20,
                "warnings_penalty": warnings * 10,
                "missing_penalty": missing * 5
            },
            "counts": {
                "errors": errors,
                "warnings": warnings,
                "missing": missing
            }
        }, indent=2)

    elif output_format == "compact":
        return f"{score}/100 {stars} ({rating})"

    else:  # text format
        error_penalty = errors * 20
        warning_penalty = warnings * 10
        missing_penalty = missing * 5

        return f"""━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE CALCULATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Score: {score}/100
Rating: {rating}
Stars: {stars}

Breakdown:
  Base Score:      100
  Critical Errors: -{error_penalty} ({errors} × 20)
  Warnings:        -{warning_penalty} ({warnings} × 10)
  Missing Fields:  -{missing_penalty} ({missing} × 5)
  ─────────────────────
  Final Score:     {score}/100

Publication Ready: {readiness}
"""


def main():
    """Main CLI interface."""
    parser = argparse.ArgumentParser(
        description="Calculate quality score based on validation results",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --errors 2 --warnings 5 --missing 3
  %(prog)s --errors 0 --warnings 0 --missing 0
  %(prog)s --errors 1 --format json
"""
    )

    parser.add_argument(
        "--errors",
        type=int,
        default=0,
        help="Number of critical errors (default: 0)"
    )

    parser.add_argument(
        "--warnings",
        type=int,
        default=0,
        help="Number of warnings (default: 0)"
    )

    parser.add_argument(
        "--missing",
        type=int,
        default=0,
        help="Number of missing recommended fields (default: 0)"
    )

    parser.add_argument(
        "--format",
        choices=["text", "json", "compact"],
        default="text",
        help="Output format (default: text)"
    )

    args = parser.parse_args()

    # Validate inputs
    if args.errors < 0 or args.warnings < 0 or args.missing < 0:
        print("Error: Counts cannot be negative", file=sys.stderr)
        return 1

    # Calculate score
    score = calculate_quality_score(args.errors, args.warnings, args.missing)

    # Format and print output
    output = format_output(
        score,
        args.errors,
        args.warnings,
        args.missing,
        args.format
    )
    print(output)

    return 0


if __name__ == "__main__":
    sys.exit(main())
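To make the penalty arithmetic concrete: 1 error, 2 warnings, and 3 missing fields give 100 - 20 - 20 - 15 = 45, which lands in the 40-59 band, so the rating is "Needs Improvement" with two stars.

# Worked example: 1 error (-20), 2 warnings (-20), 3 missing fields (-15) => 45.
./scoring-algorithm.py --errors 1 --warnings 2 --missing 3 --format compact
# Expected output, per the scoring rules above: 45/100 ⭐⭐ (Needs Improvement)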