Initial commit
305
commands/quality-analysis/.scripts/issue-prioritizer.sh
Executable file
@@ -0,0 +1,305 @@
#!/usr/bin/env bash

# ============================================================================
# Issue Prioritization Script
# ============================================================================
# Purpose: Categorize and prioritize validation issues into P0/P1/P2 tiers
# Version: 1.0.0
# Usage: ./issue-prioritizer.sh <issues-json-file> [criteria]
# Returns: 0=success, 1=error
# Dependencies: jq, bash 4.0+
# ============================================================================

set -euo pipefail

# Configuration
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Priority definitions
declare -A PRIORITY_NAMES=(
    [0]="Critical - Must Fix"
    [1]="Important - Should Fix"
    [2]="Recommended - Nice to Have"
)

declare -A PRIORITY_ICONS=(
    [0]="❌"
    [1]="⚠️ "
    [2]="💡"
)

# Effort labels
declare -A EFFORT_LABELS=(
    [low]="Low"
    [medium]="Medium"
    [high]="High"
)

# Effort time estimates
declare -A EFFORT_TIMES=(
    [low]="5-15 minutes"
    [medium]="30-60 minutes"
    [high]="2+ hours"
)

# ============================================================================
# Functions
# ============================================================================

usage() {
    cat <<EOF
Usage: $0 <issues-json-file> [criteria]

Arguments:
  issues-json-file  Path to JSON file with validation issues
  criteria          Prioritization criteria: severity|impact|effort (default: severity)

Examples:
  $0 validation-results.json
  $0 results.json impact
  $0 results.json severity

JSON Structure:
  {
    "errors": [{"type": "...", "severity": "critical", ...}],
    "warnings": [{"type": "...", "severity": "important", ...}],
    "recommendations": [{"type": "...", "severity": "recommended", ...}]
  }
EOF
    exit 1
}

check_dependencies() {
    local missing_deps=()

    if ! command -v jq &> /dev/null; then
        missing_deps+=("jq")
    fi

    if [ ${#missing_deps[@]} -gt 0 ]; then
        echo "Error: Missing dependencies: ${missing_deps[*]}" >&2
        echo "Install with: sudo apt-get install ${missing_deps[*]}" >&2
        return 1
    fi

    return 0
}

determine_priority() {
    local severity="$1"
    local type="$2"

    # P0 (Critical) - Blocking issues
    if [[ "$severity" == "critical" ]] || \
       [[ "$type" =~ ^(missing_required|invalid_json|security_vulnerability|format_violation)$ ]]; then
        echo "0"
        return
    fi

    # P1 (Important) - Should fix
    if [[ "$severity" == "important" ]] || \
       [[ "$type" =~ ^(missing_recommended|documentation_gap|convention_violation|performance)$ ]]; then
        echo "1"
        return
    fi

    # P2 (Recommended) - Nice to have
    echo "2"
}

get_effort_estimate() {
    local type="$1"

    # High effort
    if [[ "$type" =~ ^(security_vulnerability|performance|architecture)$ ]]; then
        echo "high"
        return
    fi

    # Medium effort
    if [[ "$type" =~ ^(documentation_gap|convention_violation|missing_recommended)$ ]]; then
        echo "medium"
        return
    fi

    # Low effort (default)
    echo "low"
}

format_issue() {
    local priority="$1"
    local message="$2"
    local impact="${3:-Unknown impact}"
    local effort="${4:-low}"
    local fix="${5:-No fix suggestion available}"

    local icon="${PRIORITY_ICONS[$priority]}"
    local effort_label="${EFFORT_LABELS[$effort]}"
    local effort_time="${EFFORT_TIMES[$effort]}"

    cat <<EOF
${icon} ${message}
   Impact: ${impact}
   Effort: ${effort_label} (${effort_time})
   Fix: ${fix}

EOF
}

process_issues() {
    local json_file="$1"
    local criteria="${2:-severity}"

    # Validate JSON file exists
    if [[ ! -f "$json_file" ]]; then
        echo "Error: File not found: $json_file" >&2
        return 1
    fi

    # Validate JSON syntax
    if ! jq empty "$json_file" 2>/dev/null; then
        echo "Error: Invalid JSON in $json_file" >&2
        return 1
    fi

    # Count total issues
    local total_errors=$(jq '.errors // [] | length' "$json_file")
    local total_warnings=$(jq '.warnings // [] | length' "$json_file")
    local total_recommendations=$(jq '.recommendations // [] | length' "$json_file")
    local total_issues=$((total_errors + total_warnings + total_recommendations))

    if [[ $total_issues -eq 0 ]]; then
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        echo "ISSUE PRIORITIZATION"
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        echo ""
        echo "No issues found! Quality score is perfect."
        return 0
    fi

    # Initialize priority counters
    declare -A priority_counts=([0]=0 [1]=0 [2]=0)
    declare -A priority_issues=([0]="" [1]="" [2]="")

    # Process errors
    while IFS= read -r issue; do
        local type=$(echo "$issue" | jq -r '.type // "unknown"')
        local severity=$(echo "$issue" | jq -r '.severity // "critical"')
        local message=$(echo "$issue" | jq -r '.message // "Unknown error"')
        local impact=$(echo "$issue" | jq -r '.impact // "Unknown impact"')
        local fix=$(echo "$issue" | jq -r '.fix // "No fix available"')
        local score_impact=$(echo "$issue" | jq -r '.score_impact // 0')

        local priority=$(determine_priority "$severity" "$type")
        local effort=$(get_effort_estimate "$type")

        priority_counts[$priority]=$((priority_counts[$priority] + 1))

        local formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
        priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c '.errors // [] | .[]' "$json_file")

    # Process warnings
    while IFS= read -r issue; do
        local type=$(echo "$issue" | jq -r '.type // "unknown"')
        local severity=$(echo "$issue" | jq -r '.severity // "important"')
        local message=$(echo "$issue" | jq -r '.message // "Unknown warning"')
        local impact=$(echo "$issue" | jq -r '.impact // "Unknown impact"')
        local fix=$(echo "$issue" | jq -r '.fix // "No fix available"')

        local priority=$(determine_priority "$severity" "$type")
        local effort=$(get_effort_estimate "$type")

        priority_counts[$priority]=$((priority_counts[$priority] + 1))

        local formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
        priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c '.warnings // [] | .[]' "$json_file")

    # Process recommendations
    while IFS= read -r issue; do
        local type=$(echo "$issue" | jq -r '.type // "unknown"')
        local severity=$(echo "$issue" | jq -r '.severity // "recommended"')
        local message=$(echo "$issue" | jq -r '.message // "Recommendation"')
        local impact=$(echo "$issue" | jq -r '.impact // "Minor quality improvement"')
        local fix=$(echo "$issue" | jq -r '.fix // "No fix available"')

        local priority=$(determine_priority "$severity" "$type")
        local effort=$(get_effort_estimate "$type")

        priority_counts[$priority]=$((priority_counts[$priority] + 1))

        local formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
        priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c '.recommendations // [] | .[]' "$json_file")

    # Display results
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "ISSUE PRIORITIZATION"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    echo "Total Issues: $total_issues"
    echo ""

    # Display each priority tier
    for priority in 0 1 2; do
        local count=${priority_counts[$priority]}
        local name="${PRIORITY_NAMES[$priority]}"

        if [[ $count -gt 0 ]]; then
            echo "Priority $priority ($name): $count"
            echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            echo -e "${priority_issues[$priority]}"
        fi
    done

    # Summary
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "Summary:"
    echo "- Fix P0 issues first (blocking publication)"
    echo "- Address P1 issues for quality improvement"
    echo "- Consider P2 improvements for excellence"

    if [[ ${priority_counts[0]} -gt 0 ]]; then
        echo ""
        echo "⚠️ WARNING: ${priority_counts[0]} blocking issue(s) must be fixed before publication"
    fi

    return 0
}

# ============================================================================
# Main
# ============================================================================

main() {
    # Check arguments
    if [[ $# -lt 1 ]]; then
        usage
    fi

    local json_file="$1"
    local criteria="${2:-severity}"

    # Check dependencies
    if ! check_dependencies; then
        return 1
    fi

    # Validate criteria
    if [[ ! "$criteria" =~ ^(severity|impact|effort)$ ]]; then
        echo "Error: Invalid criteria '$criteria'. Use: severity|impact|effort" >&2
        return 1
    fi

    # Process issues
    process_issues "$json_file" "$criteria"

    return 0
}

main "$@"
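
A minimal sketch of how a caller might feed this script, assuming it is run from the repository root with `jq` installed; the issue fields mirror the JSON structure documented in `usage()`, and the script path is an assumption about the checkout layout:

```python
#!/usr/bin/env python3
"""Sketch only: build an issues file and run issue-prioritizer.sh."""
import json
import subprocess
import tempfile

# Issue objects follow the structure shown in the script's usage() text.
issues = {
    "errors": [
        {"type": "missing_required", "severity": "critical",
         "message": "plugin.json is missing the 'license' field",
         "impact": "Blocks publication", "fix": "Add a 'license' field"}
    ],
    "warnings": [
        {"type": "documentation_gap", "severity": "important",
         "message": "CHANGELOG.md not found",
         "impact": "Version history is untracked", "fix": "Add CHANGELOG.md"}
    ],
    "recommendations": []
}

# Write the issues to a temporary JSON file the script can read.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(issues, tmp)
    issues_path = tmp.name

# Assumed path; adjust to wherever the plugin is checked out.
subprocess.run(
    ["bash", "commands/quality-analysis/.scripts/issue-prioritizer.sh",
     issues_path, "severity"],
    check=True,
)
```
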
541
commands/quality-analysis/.scripts/report-generator.py
Executable file
@@ -0,0 +1,541 @@
#!/usr/bin/env python3

# ============================================================================
# Quality Report Generator
# ============================================================================
# Purpose: Generate comprehensive quality reports in multiple formats
# Version: 1.0.0
# Usage: ./report-generator.py --path <path> --format <format> [options]
# Returns: 0=success, 1=error
# Dependencies: Python 3.6+
# ============================================================================

import sys
import argparse
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional


class ReportGenerator:
    """Generate quality reports in multiple formats."""

    def __init__(self, path: str, context: Optional[Dict] = None):
        """
        Initialize report generator.

        Args:
            path: Target path being analyzed
            context: Validation context with results
        """
        self.path = path
        self.context = context or {}
        self.timestamp = datetime.now().isoformat()

    def generate(self, format_type: str = "markdown") -> str:
        """
        Generate report in specified format.

        Args:
            format_type: Report format (markdown, json, html)

        Returns:
            Formatted report string
        """
        if format_type == "json":
            return self._generate_json()
        elif format_type == "html":
            return self._generate_html()
        else:
            return self._generate_markdown()

    def _generate_markdown(self) -> str:
        """Generate markdown format report."""
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")

        p0_count = len(self.context.get("issues", {}).get("p0", []))
        p1_count = len(self.context.get("issues", {}).get("p1", []))
        p2_count = len(self.context.get("issues", {}).get("p2", []))
        total_issues = p0_count + p1_count + p2_count

        target_type = self.context.get("target_type", "plugin")

        report = f"""# Quality Assessment Report

**Generated**: {self.timestamp}
**Target**: {self.path}
**Type**: Claude Code {target_type.capitalize()}

## Executive Summary

**Quality Score**: {score}/100 {stars} ({rating})
**Publication Ready**: {readiness}
**Critical Issues**: {p0_count}
**Total Issues**: {total_issues}

"""

        if score >= 90:
            report += "🎉 Excellent! Your plugin is publication-ready.\n\n"
        elif score >= 75:
            report += "👍 Nearly ready! Address a few important issues to reach excellent status.\n\n"
        elif score >= 60:
            report += "⚠️ Needs work. Several issues should be addressed before publication.\n\n"
        else:
            report += "❌ Substantial improvements needed before this is ready for publication.\n\n"

        # Validation layers
        report += "## Validation Results\n\n"
        layers = self.context.get("validation_layers", {})

        for layer_name, layer_data in layers.items():
            status = layer_data.get("status", "unknown")
            issue_count = len(layer_data.get("issues", []))

            if status == "pass":
                status_icon = "✅ PASS"
            elif status == "warnings":
                status_icon = f"⚠️ WARNINGS ({issue_count} issues)"
            else:
                status_icon = f"❌ FAIL ({issue_count} issues)"

            report += f"### {layer_name.replace('_', ' ').title()} {status_icon}\n"

            if issue_count == 0:
                report += "- No issues found\n\n"
            else:
                for issue in layer_data.get("issues", [])[:3]:  # Show top 3
                    report += f"- {issue.get('message', 'Unknown issue')}\n"
                if issue_count > 3:
                    report += f"- ... and {issue_count - 3} more\n"
                report += "\n"

        # Issues breakdown
        report += "## Issues Breakdown\n\n"

        report += f"### Priority 0 (Critical): {p0_count} issues\n\n"
        if p0_count == 0:
            report += "None - excellent!\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p0", []), 1):
                report += self._format_issue_markdown(idx, issue)

        report += f"### Priority 1 (Important): {p1_count} issues\n\n"
        if p1_count == 0:
            report += "None - great!\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p1", []), 1):
                report += self._format_issue_markdown(idx, issue)

        report += f"### Priority 2 (Recommended): {p2_count} issues\n\n"
        if p2_count == 0:
            report += "No recommendations.\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p2", [])[:5], 1):
                report += self._format_issue_markdown(idx, issue)
            if p2_count > 5:
                report += f"... and {p2_count - 5} more recommendations\n\n"

        # Improvement roadmap
        roadmap = self.context.get("improvement_roadmap", {})
        if roadmap:
            report += "## Improvement Roadmap\n\n"
            report += "### Path to Excellent (90+)\n\n"
            report += f"**Current**: {roadmap.get('current_score', score)}/100\n"
            report += f"**Target**: {roadmap.get('target_score', 90)}/100\n"
            report += f"**Gap**: {roadmap.get('gap', 0)} points\n\n"

            recommendations = roadmap.get("recommendations", [])
            if recommendations:
                report += "**Top Recommendations**:\n\n"
                for idx, rec in enumerate(recommendations[:5], 1):
                    report += f"{idx}. [{rec.get('score_impact', 0):+d} pts] {rec.get('title', 'Unknown')}\n"
                    report += f"   - Priority: {rec.get('priority', 'Medium')}\n"
                    report += f"   - Effort: {rec.get('effort', 'Unknown')}\n"
                    report += f"   - Impact: {rec.get('impact', 'Unknown')}\n\n"

        # Footer
        report += "\n---\n"
        report += "Report generated by marketplace-validator-plugin v1.0.0\n"

        return report

    def _format_issue_markdown(self, idx: int, issue: Dict) -> str:
        """Format a single issue in markdown."""
        message = issue.get("message", "Unknown issue")
        impact = issue.get("impact", "Unknown impact")
        effort = issue.get("effort", "unknown")
        fix = issue.get("fix", "No fix available")
        score_impact = issue.get("score_impact", 0)

        return f"""#### {idx}. {message} [{score_impact:+d} pts]

**Impact**: {impact}
**Effort**: {effort.capitalize()}
**Fix**: {fix}

"""

    def _generate_json(self) -> str:
        """Generate JSON format report."""
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")

        p0_issues = self.context.get("issues", {}).get("p0", [])
        p1_issues = self.context.get("issues", {}).get("p1", [])
        p2_issues = self.context.get("issues", {}).get("p2", [])

        report = {
            "metadata": {
                "generated": self.timestamp,
                "target": self.path,
                "type": self.context.get("target_type", "plugin"),
                "validator_version": "1.0.0"
            },
            "executive_summary": {
                "score": score,
                "rating": rating,
                "stars": stars,
                "publication_ready": readiness,
                "critical_issues": len(p0_issues),
                "total_issues": len(p0_issues) + len(p1_issues) + len(p2_issues)
            },
            "validation_layers": self.context.get("validation_layers", {}),
            "issues": {
                "p0": p0_issues,
                "p1": p1_issues,
                "p2": p2_issues
            },
            "improvement_roadmap": self.context.get("improvement_roadmap", {})
        }

        return json.dumps(report, indent=2)

    def _generate_html(self) -> str:
        """Generate HTML format report."""
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")

        p0_count = len(self.context.get("issues", {}).get("p0", []))
        p1_count = len(self.context.get("issues", {}).get("p1", []))
        p2_count = len(self.context.get("issues", {}).get("p2", []))
        total_issues = p0_count + p1_count + p2_count

        # Determine score color
        if score >= 90:
            score_color = "#10b981"  # green
        elif score >= 75:
            score_color = "#3b82f6"  # blue
        elif score >= 60:
            score_color = "#f59e0b"  # orange
        else:
            score_color = "#ef4444"  # red

        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Quality Assessment Report</title>
    <style>
        * {{
            margin: 0;
            padding: 0;
            box-sizing: border-box;
        }}
        body {{
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
            line-height: 1.6;
            color: #333;
            background: #f5f5f5;
            padding: 20px;
        }}
        .container {{
            max-width: 1200px;
            margin: 0 auto;
            background: white;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
            padding: 40px;
        }}
        h1 {{
            font-size: 32px;
            margin-bottom: 10px;
            color: #1f2937;
        }}
        .meta {{
            color: #6b7280;
            margin-bottom: 30px;
            padding-bottom: 20px;
            border-bottom: 2px solid #e5e7eb;
        }}
        .score-card {{
            background: linear-gradient(135deg, {score_color} 0%, {score_color}dd 100%);
            color: white;
            padding: 30px;
            border-radius: 8px;
            margin-bottom: 30px;
            text-align: center;
        }}
        .score-number {{
            font-size: 72px;
            font-weight: bold;
            line-height: 1;
        }}
        .score-label {{
            font-size: 18px;
            margin-top: 10px;
            opacity: 0.9;
        }}
        .stats {{
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px;
            margin-bottom: 30px;
        }}
        .stat-card {{
            background: #f9fafb;
            padding: 20px;
            border-radius: 6px;
            border-left: 4px solid #3b82f6;
        }}
        .stat-label {{
            font-size: 14px;
            color: #6b7280;
            margin-bottom: 5px;
        }}
        .stat-value {{
            font-size: 24px;
            font-weight: bold;
            color: #1f2937;
        }}
        .section {{
            margin-bottom: 40px;
        }}
        h2 {{
            font-size: 24px;
            margin-bottom: 20px;
            color: #1f2937;
            border-bottom: 2px solid #e5e7eb;
            padding-bottom: 10px;
        }}
        h3 {{
            font-size: 18px;
            margin-bottom: 15px;
            color: #374151;
        }}
        .issue {{
            background: #f9fafb;
            padding: 20px;
            border-radius: 6px;
            margin-bottom: 15px;
            border-left: 4px solid #6b7280;
        }}
        .issue.p0 {{
            border-left-color: #ef4444;
            background: #fef2f2;
        }}
        .issue.p1 {{
            border-left-color: #f59e0b;
            background: #fffbeb;
        }}
        .issue.p2 {{
            border-left-color: #3b82f6;
            background: #eff6ff;
        }}
        .issue-title {{
            font-weight: bold;
            margin-bottom: 10px;
            font-size: 16px;
        }}
        .issue-detail {{
            font-size: 14px;
            color: #6b7280;
            margin: 5px 0;
        }}
        .badge {{
            display: inline-block;
            padding: 4px 12px;
            border-radius: 12px;
            font-size: 12px;
            font-weight: 600;
            margin-right: 8px;
        }}
        .badge.pass {{
            background: #d1fae5;
            color: #065f46;
        }}
        .badge.warning {{
            background: #fef3c7;
            color: #92400e;
        }}
        .badge.fail {{
            background: #fee2e2;
            color: #991b1b;
        }}
        .footer {{
            margin-top: 40px;
            padding-top: 20px;
            border-top: 2px solid #e5e7eb;
            color: #6b7280;
            font-size: 14px;
            text-align: center;
        }}
    </style>
</head>
<body>
    <div class="container">
        <h1>Quality Assessment Report</h1>
        <div class="meta">
            <strong>Generated:</strong> {self.timestamp}<br>
            <strong>Target:</strong> {self.path}<br>
            <strong>Type:</strong> Claude Code Plugin
        </div>

        <div class="score-card">
            <div class="score-number">{score}</div>
            <div class="score-label">{stars} {rating}</div>
            <div class="score-label">{readiness}</div>
        </div>

        <div class="stats">
            <div class="stat-card">
                <div class="stat-label">Critical Issues</div>
                <div class="stat-value">{p0_count}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Important Issues</div>
                <div class="stat-value">{p1_count}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Recommendations</div>
                <div class="stat-value">{p2_count}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Total Issues</div>
                <div class="stat-value">{total_issues}</div>
            </div>
        </div>

        <div class="section">
            <h2>Validation Layers</h2>
"""

        # Validation layers
        layers = self.context.get("validation_layers", {})
        for layer_name, layer_data in layers.items():
            status = layer_data.get("status", "unknown")
            badge_class = "pass" if status == "pass" else ("warning" if status == "warnings" else "fail")
            html += f'            <span class="badge {badge_class}">{layer_name.replace("_", " ").title()}: {status.upper()}</span>\n'

        html += """        </div>

        <div class="section">
            <h2>Issues Breakdown</h2>
"""

        # Issues
        for priority, priority_name in [("p0", "Critical"), ("p1", "Important"), ("p2", "Recommended")]:
            issues = self.context.get("issues", {}).get(priority, [])
            html += f'            <h3>Priority {priority[1]}: {priority_name} ({len(issues)} issues)</h3>\n'

            for issue in issues[:5]:  # Show top 5 per priority
                message = issue.get("message", "Unknown issue")
                impact = issue.get("impact", "Unknown")
                effort = issue.get("effort", "unknown")
                fix = issue.get("fix", "No fix available")

                html += f"""            <div class="issue {priority}">
                <div class="issue-title">{message}</div>
                <div class="issue-detail"><strong>Impact:</strong> {impact}</div>
                <div class="issue-detail"><strong>Effort:</strong> {effort.capitalize()}</div>
                <div class="issue-detail"><strong>Fix:</strong> {fix}</div>
            </div>
"""

        html += """        </div>

        <div class="footer">
            Report generated by marketplace-validator-plugin v1.0.0
        </div>
    </div>
</body>
</html>
"""

        return html


def main():
    """Main CLI interface."""
    parser = argparse.ArgumentParser(
        description="Generate comprehensive quality reports",
        formatter_class=argparse.RawDescriptionHelpFormatter
    )

    parser.add_argument(
        "--path",
        required=True,
        help="Target path being analyzed"
    )

    parser.add_argument(
        "--format",
        choices=["markdown", "json", "html"],
        default="markdown",
        help="Output format (default: markdown)"
    )

    parser.add_argument(
        "--output",
        help="Output file path (optional, defaults to stdout)"
    )

    parser.add_argument(
        "--context",
        help="Path to JSON file with validation context"
    )

    args = parser.parse_args()

    # Load context if provided
    context = {}
    if args.context:
        try:
            with open(args.context, 'r') as f:
                context = json.load(f)
        except FileNotFoundError:
            print(f"Warning: Context file not found: {args.context}", file=sys.stderr)
        except json.JSONDecodeError as e:
            print(f"Error: Invalid JSON in context file: {e}", file=sys.stderr)
            return 1

    # Generate report
    generator = ReportGenerator(args.path, context)
    report = generator.generate(args.format)

    # Output report
    if args.output:
        try:
            with open(args.output, 'w') as f:
                f.write(report)
            print(f"Report generated: {args.output}")
        except IOError as e:
            print(f"Error writing to file: {e}", file=sys.stderr)
            return 1
    else:
        print(report)

    return 0


if __name__ == "__main__":
    sys.exit(main())
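
A hedged sketch of how the class above could be exercised in isolation. The file path and the `importlib` workaround for the hyphenated filename are assumptions, not part of this commit:

```python
# Sketch: quick sanity check of ReportGenerator (illustrative only).
import importlib.util
import json

spec = importlib.util.spec_from_file_location(
    "report_generator",
    "commands/quality-analysis/.scripts/report-generator.py",  # assumed path
)
report_generator = importlib.util.module_from_spec(spec)
spec.loader.exec_module(report_generator)

context = {"score": 85, "rating": "Good", "stars": "⭐⭐⭐⭐",
           "publication_ready": "With Minor Changes",
           "issues": {"p0": [], "p1": [], "p2": []}}
gen = report_generator.ReportGenerator("./my-plugin", context)

# The JSON report should round-trip and echo the context values.
parsed = json.loads(gen.generate("json"))
assert parsed["executive_summary"]["score"] == 85

# The markdown report starts with the executive summary header.
print(gen.generate("markdown")[:200])
```
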
239
commands/quality-analysis/.scripts/scoring-algorithm.py
Executable file
@@ -0,0 +1,239 @@
#!/usr/bin/env python3

# ============================================================================
# Quality Scoring Algorithm
# ============================================================================
# Purpose: Calculate quality score (0-100) based on validation results
# Version: 1.0.0
# Usage: ./scoring-algorithm.py --errors N --warnings N --missing N
# Returns: 0=success, 1=error
# Dependencies: Python 3.6+
# ============================================================================

import sys
import argparse
import json


def calculate_quality_score(errors: int, warnings: int, missing_recommended: int) -> int:
    """
    Calculate quality score based on validation issues.

    Algorithm:
        score = 100
        score -= errors * 20              # Critical errors: -20 each
        score -= warnings * 10            # Warnings: -10 each
        score -= missing_recommended * 5  # Missing fields: -5 each
        return max(0, score)

    Args:
        errors: Number of critical errors
        warnings: Number of warnings
        missing_recommended: Number of missing recommended fields

    Returns:
        Quality score (0-100)
    """
    score = 100
    score -= errors * 20
    score -= warnings * 10
    score -= missing_recommended * 5
    return max(0, score)


def get_rating(score: int) -> str:
    """
    Get quality rating based on score.

    Args:
        score: Quality score (0-100)

    Returns:
        Rating string
    """
    if score >= 90:
        return "Excellent"
    elif score >= 75:
        return "Good"
    elif score >= 60:
        return "Fair"
    elif score >= 40:
        return "Needs Improvement"
    else:
        return "Poor"


def get_stars(score: int) -> str:
    """
    Get star rating based on score.

    Args:
        score: Quality score (0-100)

    Returns:
        Star rating string
    """
    if score >= 90:
        return "⭐⭐⭐⭐⭐"
    elif score >= 75:
        return "⭐⭐⭐⭐"
    elif score >= 60:
        return "⭐⭐⭐"
    elif score >= 40:
        return "⭐⭐"
    else:
        return "⭐"


def get_publication_readiness(score: int) -> str:
    """
    Determine publication readiness based on score.

    Args:
        score: Quality score (0-100)

    Returns:
        Publication readiness status
    """
    if score >= 90:
        return "Yes - Ready to publish"
    elif score >= 75:
        return "With Minor Changes - Nearly ready"
    elif score >= 60:
        return "Needs Work - Significant improvements needed"
    else:
        return "Not Ready - Major overhaul required"


def format_output(score: int, errors: int, warnings: int, missing: int,
                  output_format: str = "text") -> str:
    """
    Format score output in requested format.

    Args:
        score: Quality score
        errors: Error count
        warnings: Warning count
        missing: Missing field count
        output_format: Output format (text, json, compact)

    Returns:
        Formatted output string
    """
    rating = get_rating(score)
    stars = get_stars(score)
    readiness = get_publication_readiness(score)

    if output_format == "json":
        return json.dumps({
            "score": score,
            "rating": rating,
            "stars": stars,
            "publication_ready": readiness,
            "breakdown": {
                "base_score": 100,
                "errors_penalty": errors * 20,
                "warnings_penalty": warnings * 10,
                "missing_penalty": missing * 5
            },
            "counts": {
                "errors": errors,
                "warnings": warnings,
                "missing": missing
            }
        }, indent=2)

    elif output_format == "compact":
        return f"{score}/100 {stars} ({rating})"

    else:  # text format
        error_penalty = errors * 20
        warning_penalty = warnings * 10
        missing_penalty = missing * 5

        return f"""━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE CALCULATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Score: {score}/100
Rating: {rating}
Stars: {stars}

Breakdown:
  Base Score:        100
  Critical Errors:   -{error_penalty} ({errors} × 20)
  Warnings:          -{warning_penalty} ({warnings} × 10)
  Missing Fields:    -{missing_penalty} ({missing} × 5)
  ─────────────────────
  Final Score:       {score}/100

Publication Ready: {readiness}
"""


def main():
    """Main CLI interface."""
    parser = argparse.ArgumentParser(
        description="Calculate quality score based on validation results",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --errors 2 --warnings 5 --missing 3
  %(prog)s --errors 0 --warnings 0 --missing 0
  %(prog)s --errors 1 --format json
"""
    )

    parser.add_argument(
        "--errors",
        type=int,
        default=0,
        help="Number of critical errors (default: 0)"
    )

    parser.add_argument(
        "--warnings",
        type=int,
        default=0,
        help="Number of warnings (default: 0)"
    )

    parser.add_argument(
        "--missing",
        type=int,
        default=0,
        help="Number of missing recommended fields (default: 0)"
    )

    parser.add_argument(
        "--format",
        choices=["text", "json", "compact"],
        default="text",
        help="Output format (default: text)"
    )

    args = parser.parse_args()

    # Validate inputs
    if args.errors < 0 or args.warnings < 0 or args.missing < 0:
        print("Error: Counts cannot be negative", file=sys.stderr)
        return 1

    # Calculate score
    score = calculate_quality_score(args.errors, args.warnings, args.missing)

    # Format and print output
    output = format_output(
        score,
        args.errors,
        args.warnings,
        args.missing,
        args.format
    )
    print(output)

    return 0


if __name__ == "__main__":
    sys.exit(main())
112
commands/quality-analysis/calculate-score.md
Normal file
@@ -0,0 +1,112 @@
## Operation: Calculate Quality Score

Calculate comprehensive quality score (0-100) based on validation results with star rating.

### Parameters from $ARGUMENTS

Extract these parameters from `$ARGUMENTS`:

- **path**: Target path to analyze (required)
- **errors**: Critical error count (default: 0)
- **warnings**: Warning count (default: 0)
- **missing**: Missing recommended fields count (default: 0)

### Scoring Algorithm

Execute the quality scoring algorithm using `.scripts/scoring-algorithm.py`:

**Algorithm**:
```
score = 100
score -= (errors × 20)    # Critical errors: -20 points each
score -= (warnings × 10)  # Warnings: -10 points each
score -= (missing × 5)    # Missing recommended: -5 points each
score = max(0, score)     # Floor at 0
```

**Rating Thresholds**:
- **90-100**: Excellent ⭐⭐⭐⭐⭐ (publication-ready)
- **75-89**: Good ⭐⭐⭐⭐ (ready with minor improvements)
- **60-74**: Fair ⭐⭐⭐ (needs work)
- **40-59**: Needs Improvement ⭐⭐ (substantial work needed)
- **0-39**: Poor ⭐ (major overhaul required)

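For reference, the formula and thresholds above can be mirrored in a few lines of Python. This is only a sketch for sanity-checking expected scores, not a replacement for `scoring-algorithm.py`:

```python
def expected_score(errors: int, warnings: int, missing: int) -> int:
    """Mirror of the documented formula: -20/-10/-5 penalties, floored at 0."""
    return max(0, 100 - errors * 20 - warnings * 10 - missing * 5)

def expected_rating(score: int) -> str:
    """Mirror of the documented rating thresholds."""
    for floor, rating in [(90, "Excellent"), (75, "Good"), (60, "Fair"),
                          (40, "Needs Improvement")]:
        if score >= floor:
            return rating
    return "Poor"

# First example below (errors:2 warnings:5 missing:3) floors at 0 -> "Poor".
print(expected_score(2, 5, 3), expected_rating(expected_score(2, 5, 3)))
```
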
### Workflow

1. **Parse Arguments**
   ```
   Extract path, errors, warnings, missing from $ARGUMENTS
   Validate that path exists
   Set defaults for missing parameters
   ```

2. **Calculate Score**
   ```bash
   Invoke Bash tool to execute:
   python3 .claude/commands/quality-analysis/.scripts/scoring-algorithm.py \
     --errors $errors \
     --warnings $warnings \
     --missing $missing
   ```

3. **Format Output**
   ```
   Display results in user-friendly format with:
   - Numeric score (0-100)
   - Rating (Excellent/Good/Fair/Needs Improvement/Poor)
   - Star rating (⭐⭐⭐⭐⭐)
   - Publication readiness status
   ```

### Examples

```bash
# Calculate score with validation results
/quality-analysis score path:. errors:2 warnings:5 missing:3

# Calculate perfect score
/quality-analysis score path:. errors:0 warnings:0 missing:0

# Calculate score with only errors
/quality-analysis score path:. errors:3
```

### Error Handling

- **Missing path**: Request path parameter
- **Invalid counts**: Negative numbers default to 0
- **Script not found**: Provide clear error message with remediation
- **Python not available**: Fall back to a bash calculation

### Output Format

```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE CALCULATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Target: <path>

Score: <0-100>/100
Rating: <Excellent|Good|Fair|Needs Improvement|Poor>
Stars: <⭐⭐⭐⭐⭐>

Breakdown:
  Base Score:        100
  Critical Errors:   -<errors × 20>
  Warnings:          -<warnings × 10>
  Missing Fields:    -<missing × 5>
  ─────────────────────
  Final Score:       <score>/100

Publication Ready: <Yes|With Minor Changes|Needs Work|Not Ready>
```

### Integration Notes

This operation is typically invoked by:
- `full-analysis.md` as the first step
- `validation-orchestrator` after comprehensive validation
- Direct user invocation for score-only calculation

**Request**: $ARGUMENTS

330
commands/quality-analysis/full-analysis.md
Normal file
@@ -0,0 +1,330 @@
## Operation: Full Quality Analysis

Execute comprehensive quality analysis orchestrating all sub-operations to generate a complete assessment.

### Parameters from $ARGUMENTS

Extract these parameters from `$ARGUMENTS`:

- **path**: Target path to analyze (required)
- **context**: Path to validation context JSON file with prior results (optional)
- **format**: Report output format - markdown|json|html (default: markdown)
- **output**: Output file path for report (optional)

### Full Analysis Workflow

This operation orchestrates all quality-analysis sub-operations to provide a complete quality assessment.

**1. Load Validation Context**
```
IF context parameter provided:
  Read validation results from JSON file
  Extract:
  - Errors count
  - Warnings count
  - Missing fields count
  - Validation layer results
  - Detailed issue list
ELSE:
  Use default values:
  - errors: 0
  - warnings: 0
  - missing: 0
```

**2. Calculate Base Score**
```
Read calculate-score.md operation instructions
Execute scoring with validation results:

python3 .scripts/scoring-algorithm.py \
  --errors $errors \
  --warnings $warnings \
  --missing $missing \
  --format json

Capture:
- Quality score (0-100)
- Rating (Excellent/Good/Fair/Needs Improvement/Poor)
- Star rating (⭐⭐⭐⭐⭐)
- Publication readiness status
```

**3. Prioritize All Issues**
```
Read prioritize-issues.md operation instructions

IF context has issues:
  Write issues to temporary JSON file
  Execute issue prioritization:

  bash .scripts/issue-prioritizer.sh $temp_issues_file

  Capture:
  - P0 (Critical) issues with details
  - P1 (Important) issues with details
  - P2 (Recommended) issues with details
ELSE:
  Skip (no issues to prioritize)
```

**4. Generate Improvement Suggestions**
```
Read suggest-improvements.md operation instructions
Generate actionable recommendations:

Target score: 90 (publication-ready)
Current score: $calculated_score

Generate suggestions for:
- Quick wins (< 30 min, high impact)
- This week improvements (< 2 hours)
- Long-term enhancements

Include:
- Score impact per suggestion
- Effort estimates
- Priority assignment
- Detailed fix instructions
```

**5. Generate Comprehensive Report**
```
Read generate-report.md operation instructions
Execute report generation:

python3 .scripts/report-generator.py \
  --path $path \
  --format $format \
  --context $aggregated_context \
  --output $output

Report includes:
- Executive summary
- Quality score and rating
- Validation layer breakdown
- Prioritized issues (P0/P1/P2)
- Improvement recommendations
- Detailed findings
```

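Steps 2 and 5 can be glued together with a short script. The sketch below shows one plausible way to build the aggregated context and invoke the report generator; the script locations and any fields beyond those documented above are assumptions:

```python
import json
import subprocess
import tempfile

SCRIPTS = ".claude/commands/quality-analysis/.scripts"  # assumed location

# Step 2: score the validation results and capture the JSON output.
scoring = json.loads(subprocess.run(
    ["python3", f"{SCRIPTS}/scoring-algorithm.py",
     "--errors", "2", "--warnings", "5", "--missing", "3", "--format", "json"],
    capture_output=True, text=True, check=True).stdout)

# Step 5: merge into the aggregated context consumed by report-generator.py.
context = {
    "score": scoring["score"],
    "rating": scoring["rating"],
    "stars": scoring["stars"],
    "publication_ready": scoring["publication_ready"],
    "issues": {"p0": [], "p1": [], "p2": []},  # would come from step 3
    "improvement_roadmap": {},                 # would come from step 4
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(context, tmp)
    context_file = tmp.name

subprocess.run(
    ["python3", f"{SCRIPTS}/report-generator.py",
     "--path", ".", "--format", "markdown",
     "--context", context_file, "--output", "quality-report.md"],
    check=True,
)
```
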
**6. Aggregate and Display Results**
```
Combine all outputs into unified assessment:

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPREHENSIVE QUALITY ANALYSIS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Target: <path>
Type: <marketplace|plugin>
Analyzed: <timestamp>

QUALITY SCORE: <0-100>/100 <⭐⭐⭐⭐⭐>
Rating: <rating>
Publication Ready: <Yes|No|With Changes>

CRITICAL ISSUES: <P0 count>
IMPORTANT ISSUES: <P1 count>
RECOMMENDATIONS: <P2 count>

[Executive Summary - 2-3 sentences on readiness]

[If not publication-ready, show top 3 quick wins]

[Report file location if output specified]
```

### Workflow Steps

1. **Initialize Analysis**
   ```
   Validate path exists
   Load validation context if provided
   Set up temporary files for intermediate results
   ```

2. **Execute Operations Sequentially**
   ```
   Step 1: Calculate Score
     └─→ Invoke scoring-algorithm.py
     └─→ Store result in context

   Step 2: Prioritize Issues (if issues exist)
     └─→ Invoke issue-prioritizer.sh
     └─→ Store categorized issues in context

   Step 3: Generate Suggestions
     └─→ Analyze score gap
     └─→ Create actionable recommendations
     └─→ Store in context

   Step 4: Generate Report
     └─→ Invoke report-generator.py
     └─→ Aggregate all context data
     └─→ Format in requested format
     └─→ Output to file or stdout
   ```

3. **Present Summary**
   ```
   Display high-level results
   Show publication readiness
   Highlight critical blockers (if any)
   Show top quick wins
   Provide next steps
   ```

### Examples

```bash
# Full analysis with validation context
/quality-analysis full-analysis path:. context:"@validation-results.json"

# Full analysis generating HTML report
/quality-analysis full-analysis path:. format:html output:quality-report.html

# Full analysis with JSON output
/quality-analysis full-analysis path:. context:"@results.json" format:json output:analysis.json

# Basic full analysis (no prior context)
/quality-analysis full-analysis path:.
```

### Error Handling

- **Missing path**: Request target path parameter
- **Invalid context file**: Continue with limited data, show warning
- **Script execution failures**: Show which operation failed, provide fallback
- **Output write errors**: Fall back to stdout with warning
- **No issues found**: Congratulate on perfect quality, skip issue operations

### Output Format

**Terminal Output**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPREHENSIVE QUALITY ANALYSIS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Target: /path/to/plugin
Type: Claude Code Plugin
Analyzed: 2025-10-13 14:30:00

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

85/100 ⭐⭐⭐⭐ (Good)
Publication Ready: With Minor Changes

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ISSUES SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Critical (P0):    0 ✅
Important (P1):   3 ⚠️
Recommended (P2): 5 💡

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EXECUTIVE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Your plugin is nearly ready for publication! No critical blockers
found. Address 3 important issues to reach excellent status (90+).
Quality foundation is solid with good documentation and security.

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
TOP QUICK WINS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

1. [+10 pts] Add CHANGELOG.md (15 minutes)
   Impact: Improves version tracking
   Fix: Create CHANGELOG.md with version history

2. [+3 pts] Add 2 more keywords (5 minutes)
   Impact: Better discoverability
   Fix: Add relevant keywords to plugin.json

3. [+2 pts] Add repository URL (2 minutes)
   Impact: Professional appearance
   Fix: Add repository field to plugin.json

After Quick Wins: 100/100 ⭐⭐⭐⭐⭐ (Excellent)

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
DETAILED REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Full report saved to: quality-report.md

Next Steps:
1. Review detailed report for all findings
2. Implement quick wins (22 minutes total)
3. Re-run validation to verify improvements
4. Submit to OpenPlugins marketplace

Questions? Consult: docs.claude.com/plugins
```

### Integration Notes

This operation is the **primary entry point** for complete quality assessment.

**Invoked by**:
- `validation-orchestrator` after comprehensive validation
- `marketplace-validator` agent for submission readiness
- Direct user invocation for full assessment

**Orchestrates**:
- `calculate-score.md` - Quality scoring
- `prioritize-issues.md` - Issue categorization
- `suggest-improvements.md` - Actionable recommendations
- `generate-report.md` - Comprehensive reporting

**Data Flow**:
```
Validation Results
    ↓
Calculate Score → score, rating, stars
    ↓
Prioritize Issues → P0/P1/P2 categorization
    ↓
Suggest Improvements → actionable recommendations
    ↓
Generate Report → formatted comprehensive report
    ↓
Display Summary → user-friendly terminal output
```

### Performance

- **Execution Time**: 2-5 seconds (depending on issue count)
- **I/O Operations**: Minimal (uses temporary files for large datasets)
- **Memory Usage**: Low (streaming JSON processing)
- **Parallelization**: Sequential (each step depends on the previous)

### Quality Assurance

**Validation Steps**:
1. Verify all scripts are executable
2. Check Python 3.6+ availability
3. Validate JSON context format
4. Verify write permissions for output
5. Ensure scoring algorithm consistency

**Testing**:
```bash
# Test with perfect plugin
/quality-analysis full-analysis path:./test-fixtures/perfect-plugin

# Test with issues
/quality-analysis full-analysis path:./test-fixtures/needs-work

# Test report formats
/quality-analysis full-analysis path:. format:json
/quality-analysis full-analysis path:. format:html
/quality-analysis full-analysis path:. format:markdown
```

**Request**: $ARGUMENTS

293
commands/quality-analysis/generate-report.md
Normal file
@@ -0,0 +1,293 @@
## Operation: Generate Quality Report

Generate comprehensive quality report in multiple formats (markdown, JSON, HTML) with detailed findings and recommendations.

### Parameters from $ARGUMENTS

Extract these parameters from `$ARGUMENTS`:

- **path**: Target path to analyze (required)
- **format**: Output format - markdown|json|html (default: markdown)
- **output**: Output file path (optional, defaults to stdout)
- **context**: Path to validation context JSON file with prior results (optional)

### Report Structure

**1. Executive Summary**
- Overall quality score and star rating
- Publication readiness determination
- Key findings at-a-glance
- Critical blockers (if any)

**2. Validation Layers**
- Schema validation results (pass/fail with details)
- Security scan results (vulnerabilities found)
- Documentation quality assessment
- Best practices compliance check

**3. Issues Breakdown**
- Priority 0 (Critical): Must fix before publication
- Priority 1 (Important): Should fix for quality
- Priority 2 (Recommended): Nice to have improvements

**4. Improvement Roadmap**
- Prioritized action items with effort estimates
- Expected score improvement per fix
- Timeline to reach publication-ready (90+ score)

**5. Detailed Findings**
- Full validation output from each layer
- Code examples and fix suggestions
- References to best practices documentation

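The sections above are filled in from a validation-context JSON file. A minimal context that `report-generator.py` can consume might look like the following sketch; only the key names are taken from the script, the values and the output filename are illustrative:

```python
# Illustrative minimal validation context for report-generator.py.
import json

context = {
    "target_type": "plugin",
    "score": 85,
    "rating": "Good",
    "stars": "⭐⭐⭐⭐",
    "publication_ready": "With Minor Changes",
    "validation_layers": {
        "schema": {"status": "pass", "issues": []},
        "documentation": {"status": "warnings",
                          "issues": [{"message": "Missing CHANGELOG.md"}]},
    },
    "issues": {
        "p0": [],
        "p1": [{"message": "Add CHANGELOG.md", "impact": "-10 quality score",
                "effort": "low", "fix": "Create CHANGELOG.md",
                "score_impact": 10}],
        "p2": [],
    },
    "improvement_roadmap": {"current_score": 85, "target_score": 90,
                            "gap": 5, "recommendations": []},
}

# Written to disk so it can be passed via --context.
with open("validation-results.json", "w") as f:
    json.dump(context, f, indent=2)
```
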
### Workflow

1. **Load Validation Context**
   ```
   IF context parameter provided:
     Read validation results from context file
   ELSE:
     Use current validation state

   Extract:
   - Quality score
   - Validation layer results
   - Issue lists
   - Target metadata
   ```

2. **Generate Report Sections**
   ```
   Execute .scripts/report-generator.py with:
   - Path to target
   - Format (markdown|json|html)
   - Validation context data
   - Output destination

   Script generates:
   - Executive summary
   - Validation layer breakdown
   - Prioritized issues
   - Improvement suggestions
   - Detailed findings
   ```

3. **Format Output**
   ```
   IF output parameter specified:
     Write report to file
     Display confirmation with file path
   ELSE:
     Print report to stdout
   ```

4. **Display Summary**
   ```
   Show brief summary:
   - Report generated successfully
   - Format used
   - Output location (if file)
   - Key metrics (score, issues)
   ```

### Examples

```bash
# Generate markdown report to stdout
/quality-analysis report path:. format:markdown

# Generate JSON report to file
/quality-analysis report path:. format:json output:quality-report.json

# Generate HTML report with context
/quality-analysis report path:. format:html context:"@validation-results.json" output:report.html

# Quick markdown report from validation results
/quality-analysis report path:. context:"@comprehensive-validation.json"
```

### Error Handling

- **Missing path**: Request target path
- **Invalid format**: List supported formats (markdown, json, html)
- **Context file not found**: Continue with limited data, warn user
- **Invalid JSON context**: Show parsing error, suggest validation
- **Write permission denied**: Show error, suggest alternative output location
- **Python not available**: Fall back to basic text report

### Output Format

**Markdown Report**:
```markdown
# Quality Assessment Report

Generated: 2025-10-13 14:30:00
Target: /path/to/plugin
Type: Claude Code Plugin

## Executive Summary

**Quality Score**: 85/100 ⭐⭐⭐⭐ (Good)
**Publication Ready**: With Minor Changes
**Critical Issues**: 0
**Total Issues**: 8

Your plugin is nearly ready for publication! Address 3 important issues to reach excellent status.

## Validation Results

### Schema Validation ✅ PASS
- All required fields present
- Valid JSON syntax
- Correct semver format

### Security Scan ✅ PASS
- No secrets exposed
- All URLs use HTTPS
- File permissions correct

### Documentation ⚠️ WARNINGS (3 issues)
- Missing CHANGELOG.md (-10 pts)
- README could use 2 more examples (-5 pts)
- No architecture documentation

### Best Practices ✅ PASS
- Naming convention correct
- Keywords appropriate (5/7)
- Category properly set

## Issues Breakdown

### Priority 0 (Critical): 0 issues
None - excellent!

### Priority 1 (Important): 3 issues

#### 1. Add CHANGELOG.md [+10 pts]
Missing version history and change documentation.

**Impact**: -10 quality score
**Effort**: Low (15 minutes)
**Fix**: Create CHANGELOG.md following Keep a Changelog format
```bash
# Create changelog
cat > CHANGELOG.md <<EOF
# Changelog
## [1.0.0] - 2025-10-13
### Added
- Initial release
EOF
```

#### 2. Expand README examples [+5 pts]
README has only 1 example; 3-5 examples are recommended.

**Impact**: Poor user onboarding, -5 score
**Effort**: Medium (30 minutes)
**Fix**: Add 2-4 more usage examples showing different scenarios

#### 3. Add 2 more keywords [+3 pts]
Current: 5 keywords. Optimal: 7 keywords.

**Impact**: Reduced discoverability
**Effort**: Low (5 minutes)
**Fix**: Add relevant keywords to plugin.json

### Priority 2 (Recommended): 5 issues
[Details of nice-to-have improvements...]

## Improvement Roadmap

### Path to Excellent (90+)

Current: 85/100
Target: 90/100
Gap: 5 points

**Quick Wins** (Total: +13 pts, 20 minutes)
1. Add CHANGELOG.md → +10 pts (15 min)
2. Add 2 keywords → +3 pts (5 min)

**This Week** (Total: +5 pts, 30 minutes)
3. Expand README examples → +5 pts (30 min)

**After completion**: 100/100 ⭐⭐⭐⭐⭐ (Excellent)

|
||||
## Detailed Findings
|
||||
|
||||
[Complete validation output from all layers...]
|
||||
|
||||
---
|
||||
Report generated by marketplace-validator-plugin v1.0.0
|
||||
```
|
||||
|
||||
**JSON Report**:
|
||||
```json
|
||||
{
|
||||
"metadata": {
|
||||
"generated": "2025-10-13T14:30:00Z",
|
||||
"target": "/path/to/plugin",
|
||||
"type": "plugin",
|
||||
"validator_version": "1.0.0"
|
||||
},
|
||||
"executive_summary": {
|
||||
"score": 85,
|
||||
"rating": "Good",
|
||||
"stars": "⭐⭐⭐⭐",
|
||||
"publication_ready": "With Minor Changes",
|
||||
"critical_issues": 0,
|
||||
"total_issues": 8
|
||||
},
|
||||
"validation_layers": {
|
||||
"schema": {"status": "pass", "issues": []},
|
||||
"security": {"status": "pass", "issues": []},
|
||||
"documentation": {"status": "warnings", "issues": [...]},
|
||||
"best_practices": {"status": "pass", "issues": []}
|
||||
},
|
||||
"issues": {
|
||||
"p0": [],
|
||||
"p1": [...],
|
||||
"p2": [...]
|
||||
},
|
||||
"improvement_roadmap": {
|
||||
"current_score": 85,
|
||||
"target_score": 90,
|
||||
"gap": 5,
|
||||
"recommendations": [...]
|
||||
}
|
||||
}
|
||||
```

**HTML Report**:
```html
<!DOCTYPE html>
<html>
<head>
  <title>Quality Assessment Report</title>
  <style>
    /* Styled, responsive HTML report */
  </style>
</head>
<body>
  <!-- Executive summary card -->
  <!-- Validation layer status badges -->
  <!-- Interactive issue accordion -->
  <!-- Improvement roadmap timeline -->
</body>
</html>
```
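
Downstream tooling can consume the JSON report directly. A minimal sketch of a CI quality gate, assuming `jq` is available and using the field names from the JSON example above (the report path and the threshold of 75 are illustrative):

```bash
report="quality-report.json"   # assumed output path

score=$(jq -r '.executive_summary.score' "$report")
critical=$(jq -r '.executive_summary.critical_issues' "$report")

# Fail the build on any critical issue or a score below the "Good" band.
if [ "$critical" -gt 0 ] || [ "$score" -lt 75 ]; then
  echo "Quality gate failed: score=$score, critical_issues=$critical" >&2
  exit 1
fi
echo "Quality gate passed: score=$score"
```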

### Integration Notes

This operation is invoked by:
- `full-analysis.md` as the final step to consolidate results
- `validation-orchestrator` for comprehensive reporting
- Direct user invocation for custom reports

The report aggregates data from:
- `calculate-score.md` output
- `prioritize-issues.md` categorization
- `suggest-improvements.md` recommendations
- All validation layer results

**Request**: $ARGUMENTS

178
commands/quality-analysis/prioritize-issues.md
Normal file
@@ -0,0 +1,178 @@
## Operation: Prioritize Issues

Categorize and prioritize validation issues by severity and impact using the P0/P1/P2 tier system.

### Parameters from $ARGUMENTS

Extract these parameters from `$ARGUMENTS`:

- **issues**: Path to JSON file with issues or inline JSON string (required)
- **criteria**: Prioritization criteria - severity|impact|effort (default: severity)

### Prioritization Tiers

**Priority 0 (P0) - Critical - Must Fix**
- Invalid JSON syntax (blocks parsing)
- Missing required fields (name, version, description, author, license)
- Security vulnerabilities (exposed secrets, dangerous patterns)
- Format violations (invalid semver, malformed URLs)
- Blocks: Publication and installation

**Priority 1 (P1) - Important - Should Fix**
- Missing recommended fields (repository, homepage, keywords)
- Documentation gaps (incomplete README, missing CHANGELOG)
- Convention violations (naming, structure)
- Performance issues (slow scripts, inefficient patterns)
- Impact: Reduces quality score significantly

**Priority 2 (P2) - Recommended - Nice to Have**
- Additional keywords for discoverability
- Enhanced examples and documentation
- Expanded test coverage
- Quality improvements and polish
- Impact: Minor quality score boost

### Workflow

1. **Parse Issue Data**
   ```
   IF issues parameter starts with "@":
       Read JSON from file (remove @ prefix)
   ELSE IF issues is valid JSON:
       Parse inline JSON
   ELSE:
       Error: Invalid issues format
   ```

2. **Categorize Issues** (see the sketch after this list)
   ```
   Execute .scripts/issue-prioritizer.sh with issues data
   Categorize each issue based on:
   - Severity (critical, important, recommended)
   - Impact on publication readiness
   - Blocking status
   - Effort to fix
   ```

3. **Sort and Format**
   ```
   Group issues by priority (P0, P1, P2)
   Sort within each priority by impact
   Format with appropriate icons:
   - P0: ❌ (red X - blocking)
   - P1: ⚠️ (warning - should fix)
   - P2: 💡 (lightbulb - suggestion)
   ```

4. **Generate Summary**
   ```
   Count issues per priority
   Calculate total fix effort
   Estimate score improvement potential
   ```
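
A minimal sketch of the categorization in step 2, assuming `jq` is available and the input follows the structure under "Issue Data Schema" below (the severity-to-tier mapping mirrors the tiers listed above; the real work is done by `.scripts/issue-prioritizer.sh`):

```bash
# Bucket issues into P0/P1/P2 by severity and report counts per tier.
issues_file="validation-results.json"

jq -r '
  [.errors[]?, .warnings[]?, .recommendations[]?]
  | map(. + {priority: (if .severity == "critical" then 0
                        elif .severity == "important" then 1
                        else 2 end)})
  | group_by(.priority)[]
  | "P\(.[0].priority): \(length) issue(s)"
' "$issues_file"
```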

### Examples

```bash
# Prioritize from validation results file
/quality-analysis prioritize issues:"@validation-results.json"

# Prioritize inline JSON
/quality-analysis prioritize issues:'{"errors": [{"type": "missing_field", "field": "license"}]}'

# Prioritize with impact criteria
/quality-analysis prioritize issues:"@results.json" criteria:impact
```

### Error Handling

- **Missing issues parameter**: Request issues data
- **Invalid JSON format**: Show JSON parsing error with line number
- **Empty issues array**: Return "No issues found" message
- **File not found**: Show file path and suggest correct path
- **Script execution error**: Fall back to basic categorization

### Output Format

```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ISSUE PRIORITIZATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Total Issues: <count>
Estimated Fix Time: <time>

Priority 0 (Critical - Must Fix): <count>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
❌ Missing required field: license
   Impact: Blocks publication
   Effort: Low (5 minutes)
   Fix: Add "license": "MIT" to plugin.json

❌ Invalid JSON syntax at line 23
   Impact: Blocks parsing
   Effort: Low (2 minutes)
   Fix: Remove trailing comma

Priority 1 (Important - Should Fix): <count>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⚠️ Missing CHANGELOG.md
   Impact: -10 quality score
   Effort: Low (15 minutes)
   Fix: Create CHANGELOG.md following Keep a Changelog format

⚠️ README missing usage examples
   Impact: Poor user experience, -5 score
   Effort: Medium (30 minutes)
   Fix: Add 3-5 usage examples to README

Priority 2 (Recommended - Nice to Have): <count>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
💡 Add 2 more keywords for discoverability
   Impact: +3 quality score
   Effort: Low (5 minutes)
   Fix: Add relevant keywords to plugin.json

💡 Expand documentation with architecture diagram
   Impact: Better understanding, +2 score
   Effort: Medium (45 minutes)
   Fix: Create docs/ARCHITECTURE.md with diagram

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Summary:
- Fix P0 issues first (blocking)
- Address P1 issues for quality (30-60 min)
- Consider P2 improvements for excellence
- Total potential score gain: +20 points
```

### Issue Data Schema

Expected JSON structure:
```json
{
  "errors": [
    {
      "type": "missing_field|invalid_format|security",
      "severity": "critical|important|recommended",
      "field": "field_name",
      "message": "Description",
      "location": "file:line",
      "fix": "How to fix",
      "effort": "low|medium|high",
      "score_impact": 20
    }
  ],
  "warnings": [...],
  "recommendations": [...]
}
```
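
For a rough overall effort figure (the "Estimated Fix Time" line in the output above), the effort labels can be mapped to minutes. A sketch, using illustrative midpoints rather than fixed values:

```bash
# Sum a rough fix-time estimate from the per-issue effort labels
# (10/45/120 minutes are assumed midpoints for low/medium/high).
jq -r '
  [.errors[]?, .warnings[]?, .recommendations[]?]
  | map(if .effort == "low" then 10
        elif .effort == "medium" then 45
        else 120 end)
  | "Estimated fix time: \(add // 0) minutes"
' validation-results.json
```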

### Integration Notes

This operation is invoked by:
- `full-analysis.md` after score calculation
- `validation-orchestrator` for issue triage
- Direct user invocation for issue planning

**Request**: $ARGUMENTS

73
commands/quality-analysis/skill.md
Normal file
@@ -0,0 +1,73 @@
---
description: Deep quality analysis with scoring, recommendations, and actionable reports
---

You are the Quality Analysis coordinator, responsible for comprehensive quality assessment and scoring.

## Your Mission

Parse `$ARGUMENTS` to determine the requested quality analysis operation and route to the appropriate sub-command.

## Available Operations

Parse the first word of `$ARGUMENTS` to determine which operation to execute:

- **score** → Read `.claude/commands/quality-analysis/calculate-score.md`
- **report** → Read `.claude/commands/quality-analysis/generate-report.md`
- **prioritize** → Read `.claude/commands/quality-analysis/prioritize-issues.md`
- **improve** → Read `.claude/commands/quality-analysis/suggest-improvements.md`
- **full-analysis** → Read `.claude/commands/quality-analysis/full-analysis.md`

## Argument Format

```
/quality-analysis <operation> [parameters]
```
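
Conceptually, the first-word routing above is equivalent to the following shell sketch (illustrative only; the skill performs this dispatch itself rather than running a script):

```bash
args="$*"                      # stands in for $ARGUMENTS
op="${args%% *}"               # first word selects the operation

case "$op" in
  score)         file="calculate-score.md" ;;
  report)        file="generate-report.md" ;;
  prioritize)    file="prioritize-issues.md" ;;
  improve)       file="suggest-improvements.md" ;;
  full-analysis) file="full-analysis.md" ;;
  *)             echo "Unknown operation: $op" >&2; exit 1 ;;
esac

echo "Reading .claude/commands/quality-analysis/$file"
```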

### Examples

```bash
# Calculate quality score
/quality-analysis score path:. errors:2 warnings:5 missing:3

# Generate comprehensive report
/quality-analysis report path:. format:markdown

# Prioritize issues by severity
/quality-analysis prioritize issues:"@validation-results.json"

# Get improvement suggestions
/quality-analysis improve path:. score:65

# Run full quality analysis
/quality-analysis full-analysis path:. context:"@validation-context.json"
```

## Quality Scoring System

This skill implements the OpenPlugins quality scoring system:
- **90-100**: Excellent ⭐⭐⭐⭐⭐ (publication-ready)
- **75-89**: Good ⭐⭐⭐⭐ (ready with minor improvements)
- **60-74**: Fair ⭐⭐⭐ (needs work)
- **40-59**: Needs Improvement ⭐⭐
- **0-39**: Poor ⭐ (substantial work needed)
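
The band boundaries translate directly into a lookup. A small sketch of the mapping (bands exactly as listed above):

```bash
score=85   # example value

if   (( score >= 90 )); then rating="Excellent";          stars="⭐⭐⭐⭐⭐"
elif (( score >= 75 )); then rating="Good";                stars="⭐⭐⭐⭐"
elif (( score >= 60 )); then rating="Fair";                stars="⭐⭐⭐"
elif (( score >= 40 )); then rating="Needs Improvement";   stars="⭐⭐"
else                         rating="Poor";                stars="⭐"
fi

echo "$score/100 $stars ($rating)"
```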

## Error Handling

If the operation is not recognized:
1. List all available operations
2. Show example usage
3. Suggest closest match

## Base Directory

Base directory for this skill: `.claude/commands/quality-analysis/`

## Your Task

1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute quality analysis with provided parameters
4. Return actionable results with clear recommendations

**Current Request**: $ARGUMENTS

317
commands/quality-analysis/suggest-improvements.md
Normal file
@@ -0,0 +1,317 @@
## Operation: Suggest Improvements

Generate actionable improvement suggestions based on the current quality score, with effort estimates and expected impact.

### Parameters from $ARGUMENTS

Extract these parameters from `$ARGUMENTS`:

- **path**: Target path to analyze (required)
- **score**: Current quality score (required)
- **target**: Target score to achieve (default: 90)
- **context**: Path to validation context JSON file (optional)

### Improvement Suggestion Algorithm

```
gap = target_score - current_score
improvements_needed = ceiling(gap / 5)  # Approximate improvements needed

FOR each validation layer:
    IF layer has issues:
        Generate specific, actionable improvements
        Estimate score impact (+points)
        Assign priority based on blocking status and impact
        Estimate effort (low/medium/high)

SORT by:
    1. Priority (P0 first)
    2. Score impact (highest first)
    3. Effort (lowest first - quick wins)

LIMIT to top 10 most impactful improvements
```
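
The sort step can be expressed as a ratio of points gained to hours spent. A sketch with `jq`, assuming `suggestions.json` holds an array of candidate suggestions, each carrying a numeric `score_impact` and an `effort_hours` estimate (the latter field name is hypothetical):

```bash
# Rank suggestions by effectiveness = score_impact / effort_hours
# and keep the ten most effective ones.
jq 'map(. + {effectiveness: (.score_impact / .effort_hours)})
    | sort_by(-.effectiveness)
    | .[:10]' suggestions.json
```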

### Workflow

1. **Calculate Score Gap**
   ```
   gap = target - current_score

   IF gap <= 0:
       Return "Already at or above target!"

   IF gap <= 5:
       Focus on quick wins (low effort, high impact)

   IF gap > 20:
       Focus on critical issues first
   ```

2. **Analyze Validation Context**
   ```
   IF context provided:
       Load validation results from JSON file
       Extract issues from each layer:
       - Schema validation issues
       - Security scan findings
       - Documentation gaps
       - Best practices violations

       Categorize by:
       - Severity (P0/P1/P2)
       - Score impact
       - Effort required
   ```

3. **Generate Improvement Suggestions**
   ```
   For each issue, create suggestion:
   - Title (brief, actionable)
   - Score impact (+X points)
   - Priority (High/Medium/Low)
   - Effort estimate with time
   - Detailed fix instructions
   - Expected outcome

   Sort by effectiveness:
       effectiveness = score_impact / effort_hours
   ```

4. **Create Improvement Roadmap**
   ```
   Group suggestions into phases:
   - Quick Wins (< 30 min, +5-15 pts)
   - This Week (< 2 hours, +10-20 pts)
   - This Sprint (< 1 day, +20+ pts)

   Calculate cumulative score after each phase
   ```
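
The cumulative projection at the end of step 4 is a running sum capped at 100. A minimal sketch with illustrative phase gains:

```bash
current=65
declare -A phase_gain=( [quick_wins]=15 [this_week]=12 [this_sprint]=20 )

for phase in quick_wins this_week this_sprint; do
  current=$(( current + phase_gain[$phase] ))
  (( current > 100 )) && current=100   # scores are capped at 100
  echo "After $phase: projected score $current/100"
done
```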

### Examples

```bash
# Get improvements for low score
/quality-analysis improve path:. score:65

# Target excellent status
/quality-analysis improve path:. score:78 target:95

# Use validation context for detailed suggestions
/quality-analysis improve path:. score:70 context:"@validation-results.json"
```

### Error Handling

- **Missing score**: Request current score or run calculate-score first
- **Invalid score range**: Score must be 0-100
- **Invalid target**: Target must be higher than current score
- **Context file not found**: Continue with basic suggestions
- **No improvements possible**: Congratulate on perfect score

### Output Format

````
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
IMPROVEMENT RECOMMENDATIONS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Current Score: 65/100 ⭐⭐⭐ (Fair)
Target Score: 90/100 ⭐⭐⭐⭐⭐ (Excellent)
Gap: 25 points

To reach your target, implement these improvements:

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUICK WINS (Total: +15 pts, 45 minutes)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

1. [+10 pts] Add CHANGELOG.md with version history
Priority: High
Effort: Low (15 minutes)
Impact: Improves version tracking and transparency

HOW TO FIX:
```bash
cat > CHANGELOG.md <<'EOF'
# Changelog

All notable changes to this project will be documented in this file.

## [1.0.0] - 2025-10-13
### Added
- Initial release
- Core functionality
EOF
```

WHY IT MATTERS:
Users need to track changes between versions. CHANGELOG.md is a
best practice for professional plugins.

2. [+3 pts] Add 2 more relevant keywords to plugin.json
Priority: Medium
Effort: Low (5 minutes)
Impact: Improved discoverability in marketplace

HOW TO FIX:
```json
{
  "keywords": ["existing", "keywords", "automation", "workflow"]
}
```

SUGGESTION: Based on your plugin's functionality, consider:
- "automation" (if you automate tasks)
- "productivity" (if you improve efficiency)
- "validation" (if you validate data)

3. [+2 pts] Add repository URL to plugin.json
Priority: Medium
Effort: Low (2 minutes)
Impact: Users can view source and report issues

HOW TO FIX:
```json
{
  "repository": {
    "type": "git",
    "url": "https://github.com/username/plugin-name"
  }
}
```

After Quick Wins: 80/100 ⭐⭐⭐⭐ (Good)

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
THIS WEEK (Total: +12 pts, 90 minutes)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

4. [+5 pts] Expand README with 3 more usage examples
Priority: Medium
Effort: Medium (30 minutes)
Impact: Better user onboarding and adoption

HOW TO FIX:
Add examples showing:
- Basic usage (simple case)
- Advanced usage (complex scenario)
- Common workflows (real-world use)
- Error handling (what to do when things fail)

TEMPLATE:
```markdown
## Examples

### Basic Usage
/your-command simple-task

### Advanced Usage
/your-command complex-task param:value

### Common Workflow
1. /your-command init
2. /your-command process
3. /your-command finalize
```

5. [+5 pts] Add homepage URL to plugin.json
Priority: Low
Effort: Low (5 minutes)
Impact: Professional appearance, marketing

HOW TO FIX:
```json
{
  "homepage": "https://your-plugin-docs.com"
}
```

6. [+2 pts] Improve description in plugin.json
Priority: Low
Effort: Medium (10 minutes)
Impact: Better first impression in marketplace

HOW TO FIX:
Make description:
- Concise (1-2 sentences)
- Action-oriented (starts with verb)
- Benefit-focused (what user gains)

BEFORE: "A plugin for validation"
AFTER: "Automatically validate your code quality with comprehensive
checks for security, performance, and best practices"

After This Week: 92/100 ⭐⭐⭐⭐⭐ (Excellent)

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Path to Excellence:
- Start with Quick Wins (45 min) → 80/100 ⭐⭐⭐⭐
- Complete This Week items (90 min) → 92/100 ⭐⭐⭐⭐⭐
- Total effort: 2 hours 15 minutes
- Total improvement: +27 points

Priority Order:
1. Fix P0 blockers (none currently)
2. Implement quick wins for fast progress
3. Address documentation improvements
4. Polish with recommended enhancements

Your plugin will be publication-ready after Quick Wins!
Excellence status achievable within one week.
````

### Improvement Categories

**Documentation**
- Add/expand README
- Create CHANGELOG.md
- Add LICENSE file
- Include usage examples
- Add architecture documentation

**Metadata**
- Add repository URL
- Add homepage URL
- Expand keywords (3-7 recommended)
- Improve description
- Add author details

**Code Quality**
- Fix naming conventions
- Improve error handling
- Add input validation
- Optimize performance
- Remove code smells

**Security**
- Remove exposed secrets
- Validate user input
- Use HTTPS for all URLs
- Set correct file permissions
- Add security documentation

**Best Practices**
- Follow semantic versioning
- Use lowercase-hyphen naming
- Select appropriate category
- Include test coverage
- Add CI/CD configuration

### Integration Notes

This operation is invoked by:
- `full-analysis.md` to provide actionable next steps
- `validation-orchestrator` after comprehensive validation
- Direct user invocation for improvement planning

Suggestions are based on:
- Current quality score and target
- Validation layer findings
- Industry best practices
- Effort vs impact analysis

**Request**: $ARGUMENTS