Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:20:28 +08:00
commit b727790a9e
65 changed files with 16412 additions and 0 deletions

View File

@@ -0,0 +1,254 @@
#!/usr/bin/env bash
# ============================================================================
# Category Validator
# ============================================================================
# Purpose: Validate category against OpenPlugins approved category list
# Version: 1.0.0
# Usage: ./category-validator.sh <category> [--suggest]
# Returns: 0=valid, 1=invalid, 2=missing params
# ============================================================================
# Fail fast: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# OpenPlugins approved categories (exactly 10)
# NOTE: keep this list in sync with CATEGORY_DESCRIPTIONS below — every
# entry here is used as a lookup key into that associative array.
APPROVED_CATEGORIES=(
  "development"
  "testing"
  "deployment"
  "documentation"
  "security"
  "database"
  "monitoring"
  "productivity"
  "quality"
  "collaboration"
)
# Category descriptions, keyed by the entries of APPROVED_CATEGORIES.
declare -A CATEGORY_DESCRIPTIONS=(
  ["development"]="Code generation, scaffolding, refactoring"
  ["testing"]="Test generation, coverage, quality assurance"
  ["deployment"]="CI/CD, infrastructure, release automation"
  ["documentation"]="Docs generation, API documentation"
  ["security"]="Vulnerability scanning, secret detection"
  ["database"]="Schema design, migrations, queries"
  ["monitoring"]="Performance analysis, logging"
  ["productivity"]="Workflow automation, task management"
  ["quality"]="Linting, formatting, code review"
  ["collaboration"]="Team tools, communication"
)
# ============================================================================
# Functions
# ============================================================================
# Print help text (the unquoted EOF heredoc intentionally expands $0) and
# exit with status 2, the "missing required parameters" code documented in
# the file header.
usage() {
  cat <<EOF
Usage: $0 <category> [--suggest]
Validate category against OpenPlugins approved category list.
Arguments:
category Category name to validate (required)
--suggest Show similar categories if invalid
Approved Categories (exactly 10):
1. development - Code generation, scaffolding
2. testing - Test generation, coverage
3. deployment - CI/CD, infrastructure
4. documentation - Docs generation, API docs
5. security - Vulnerability scanning
6. database - Schema design, migrations
7. monitoring - Performance analysis
8. productivity - Workflow automation
9. quality - Linting, formatting
10. collaboration - Team tools, communication
Exit codes:
0 - Valid category
1 - Invalid category
2 - Missing required parameters
EOF
  exit 2
}
# Calculate Levenshtein distance for similarity
# Compute the true Levenshtein (edit) distance between two strings using
# the Wagner-Fischer dynamic-programming algorithm with a rolling row.
# The previous implementation was a positional character diff, which is
# not an edit distance at all (e.g. it scored "tests" vs "testing" far
# worse than a single insertion pair), and its `((diff++))` from zero
# returned status 1, killing the command-substitution subshell under
# `set -e` before the result was echoed.
# Arguments: $1 - first string, $2 - second string
# Outputs:   the distance (non-negative integer) on stdout
levenshtein_distance() {
  local s1="$1"
  local s2="$2"
  local len1=${#s1}
  local len2=${#s2}

  # Trivial cases
  if [ "$s1" = "$s2" ]; then
    echo 0
    return
  fi
  if [ "$len1" -eq 0 ]; then
    echo "$len2"
    return
  fi
  if [ "$len2" -eq 0 ]; then
    echo "$len1"
    return
  fi

  # prev[j] = distance between s1[0..i-1] and s2[0..j-1].
  # Plain assignments (var=$((...))) are used instead of ((var++)) so a
  # zero-valued arithmetic result can never trip `set -e`.
  local -a prev curr
  local i j cost del ins sub best
  for ((j = 0; j <= len2; j++)); do
    prev[j]=$j
  done
  for ((i = 1; i <= len1; i++)); do
    curr[0]=$i
    for ((j = 1; j <= len2; j++)); do
      if [ "${s1:i-1:1}" = "${s2:j-1:1}" ]; then cost=0; else cost=1; fi
      del=$((prev[j] + 1))
      ins=$((curr[j-1] + 1))
      sub=$((prev[j-1] + cost))
      best=$del
      [ "$ins" -lt "$best" ] && best=$ins
      [ "$sub" -lt "$best" ] && best=$sub
      curr[j]=$best
    done
    prev=("${curr[@]}")
  done
  echo "${prev[len2]}"
}
# Find similar categories
# Suggest approved categories that resemble the (invalid) input category.
# Keyword heuristics run first; if none match, fall back to ranking the
# approved list by edit distance.
# Globals:   APPROVED_CATEGORIES (read), CATEGORY_DESCRIPTIONS (read),
#            levenshtein_distance (called)
# Arguments: $1 - category string supplied by the user
# Outputs:   a numbered "Did you mean?" list on stdout
find_similar() {
  local category="$1"
  local suggestions=()
  # Check for common misspellings and variations
  case "${category,,}" in
    *develop*|*dev*)
      suggestions+=("development")
      ;;
    *test*)
      suggestions+=("testing")
      ;;
    *deploy*|*devops*|*ci*|*cd*)
      suggestions+=("deployment")
      ;;
    *doc*|*docs*)
      suggestions+=("documentation")
      ;;
    *secur*|*safe*)
      suggestions+=("security")
      ;;
    *data*|*db*|*sql*)
      suggestions+=("database")
      ;;
    *monitor*|*observ*|*log*)
      suggestions+=("monitoring")
      ;;
    *product*|*work*|*auto*)
      suggestions+=("productivity")
      ;;
    *qual*|*lint*|*format*)
      suggestions+=("quality")
      ;;
    *collab*|*team*|*comm*)
      suggestions+=("collaboration")
      ;;
  esac
  # If no keyword matches, use similarity
  if [ ${#suggestions[@]} -eq 0 ]; then
    # Find categories with lowest distance.  Declaration is split from
    # assignment so a failed command substitution is not masked by
    # `local` always succeeding (SC2155).
    local best_dist=999
    local cat dist
    for cat in "${APPROVED_CATEGORIES[@]}"; do
      dist=$(levenshtein_distance "${category,,}" "$cat")
      if [ "$dist" -lt "$best_dist" ]; then
        best_dist=$dist
        suggestions=("$cat")
      elif [ "$dist" -eq "$best_dist" ]; then
        suggestions+=("$cat")
      fi
    done
  fi
  # Remove duplicates without unquoted word-splitting (SC2207)
  local unique_suggestions=()
  if [ ${#suggestions[@]} -gt 0 ]; then
    mapfile -t unique_suggestions < <(printf "%s\n" "${suggestions[@]}" | sort -u)
  fi
  # Print suggestions
  if [ ${#unique_suggestions[@]} -gt 0 ]; then
    echo "Did you mean?"
    local count=1
    local suggestion
    for suggestion in "${unique_suggestions[@]}"; do
      echo " $count. $suggestion - ${CATEGORY_DESCRIPTIONS[$suggestion]}"
      ((count++))
    done
  fi
}
# List all approved categories
# Print the numbered list of every approved category with its description.
# Globals: APPROVED_CATEGORIES (read), CATEGORY_DESCRIPTIONS (read)
list_all_categories() {
  printf '%s\n' "All Approved Categories:"
  printf '%s\n' "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  local idx name
  for idx in "${!APPROVED_CATEGORIES[@]}"; do
    name="${APPROVED_CATEGORIES[$idx]}"
    printf "%-2d. %-15s - %s\n" "$((idx + 1))" "$name" "${CATEGORY_DESCRIPTIONS[$name]}"
  done
}
# ============================================================================
# Main
# ============================================================================
# Validate $1 against the approved category list (case-insensitively) and
# print a PASS/FAIL report.
# Globals:   APPROVED_CATEGORIES, CATEGORY_DESCRIPTIONS (read)
# Arguments: $1 - category name, $2 - optional "--suggest" flag
# Exits:     0 valid, 1 invalid, 2 missing/empty parameter
main() {
  # Check for help flag
  if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    usage
  fi
  local category="$1"
  local suggest=false
  if [ $# -gt 1 ] && [ "$2" = "--suggest" ]; then
    suggest=true
  fi
  # Check if category is provided (catches an explicit empty-string arg;
  # the no-argument case was already routed to usage above)
  if [ -z "$category" ]; then
    echo "ERROR: Category cannot be empty"
    echo ""
    list_all_categories
    exit 2
  fi
  # Normalize to lowercase for comparison
  # NOTE(review): category_lower is not declared 'local', so it leaks
  # into the global scope — harmless in this script, but worth confirming.
  category_lower="${category,,}"
  # Check if category is in approved list
  for approved in "${APPROVED_CATEGORIES[@]}"; do
    if [ "$category_lower" = "$approved" ]; then
      echo "✅ PASS: Valid OpenPlugins category"
      echo ""
      echo "Category: $approved"
      echo "Valid: Yes"
      echo ""
      echo "Description: ${CATEGORY_DESCRIPTIONS[$approved]}"
      echo ""
      echo "Quality Score Impact: +5 points"
      echo ""
      echo "The category is approved for OpenPlugins marketplace."
      exit 0
    fi
  done
  # Category not found: failure report, optional suggestions, full list
  echo "❌ FAIL: Invalid category"
  echo ""
  echo "Category: $category"
  echo "Valid: No"
  echo ""
  echo "This category is not in the OpenPlugins approved list."
  echo ""
  if [ "$suggest" = true ]; then
    find_similar "$category"
    echo ""
  fi
  list_all_categories
  echo ""
  echo "Quality Score Impact: 0 points (fix to gain +5)"
  echo ""
  echo "Choose the most appropriate category from the approved list."
  exit 1
}
main "$@"

View File

@@ -0,0 +1,392 @@
#!/usr/bin/env python3
"""
============================================================================
Keyword Quality Analyzer
============================================================================
Purpose: Analyze keyword quality, count, and relevance for OpenPlugins
Version: 1.0.0
Usage: ./keyword-analyzer.py <keywords> [--min N] [--max N]
Returns: 0=valid, 1=count violation, 2=quality issues, 3=missing params
============================================================================
"""
import sys
import re  # NOTE(review): currently unused in this module — confirm before removing
from typing import List, Tuple, Dict

# Default constraints: keyword counts outside [min, max] fail validation.
DEFAULT_MIN_KEYWORDS = 3
DEFAULT_MAX_KEYWORDS = 7

# Generic terms to avoid: vague nouns plus marketing fluff.  The marketing
# subset is re-listed inside check_generic_terms() — keep the two in sync.
GENERIC_BLOCKLIST = [
    'plugin', 'tool', 'utility', 'helper', 'app',
    'code', 'software', 'program', 'system',
    'awesome', 'best', 'perfect', 'great', 'super',
    'amazing', 'cool', 'nice', 'good', 'excellent'
]
# OpenPlugins categories (should not be duplicated as keywords)
CATEGORIES = [
    'development', 'testing', 'deployment', 'documentation',
    'security', 'database', 'monitoring', 'productivity',
    'quality', 'collaboration'
]
# Common keyword types for balance checking (used by analyze_balance)
FUNCTIONALITY_KEYWORDS = [
    'testing', 'deployment', 'formatting', 'linting', 'migration',
    'generation', 'automation', 'analysis', 'monitoring', 'scanning',
    'refactoring', 'debugging', 'profiling', 'optimization'
]
TECHNOLOGY_KEYWORDS = [
    'python', 'javascript', 'typescript', 'docker', 'kubernetes',
    'react', 'vue', 'angular', 'node', 'bash', 'terraform',
    'postgresql', 'mysql', 'redis', 'aws', 'azure', 'gcp'
]
def usage():
    """Print usage/help text and exit with status 3 (missing params)."""
    # User-facing help text — keep verbatim.
    print("""Usage: keyword-analyzer.py <keywords> [--min N] [--max N]
Analyze keyword quality and relevance for OpenPlugins standards.
Arguments:
keywords Comma-separated list of keywords (required)
--min N Minimum keyword count (default: 3)
--max N Maximum keyword count (default: 7)
Requirements:
- Count: 3-7 keywords (optimal: 5-6)
- No generic terms (plugin, tool, awesome)
- No marketing fluff (best, perfect, amazing)
- Mix of functionality and technology
- No redundant variations
Good examples:
"testing,pytest,automation,tdd,python"
"deployment,kubernetes,ci-cd,docker"
"linting,javascript,code-quality"
Bad examples:
"plugin,tool,awesome" (generic)
"test,testing,tests" (redundant)
"development" (only one, too generic)
Exit codes:
0 - Valid keyword set
1 - Count violation (too few or too many)
2 - Quality issues (generic terms, duplicates)
3 - Missing required parameters
""")
    sys.exit(3)
def parse_keywords(keyword_string: str) -> List[str]:
    """Split a comma-separated string into normalized, de-duplicated keywords.

    Each entry is stripped of surrounding whitespace and lowercased; empty
    entries are dropped, and first-seen order is preserved.
    """
    if not keyword_string:
        return []
    normalized = (part.strip().lower() for part in keyword_string.split(','))
    non_empty = [part for part in normalized if part]
    # dict preserves insertion order (3.7+), giving an order-stable dedupe
    return list(dict.fromkeys(non_empty))
def check_generic_terms(keywords: List[str]) -> Tuple[List[str], List[str]]:
    """Partition blocklisted keywords into generic vs. marketing terms.

    Returns:
        (generic_terms, marketing_terms) — keywords present in
        GENERIC_BLOCKLIST, split by whether they read as marketing fluff.
        Input order is preserved within each list.
    """
    marketing_vocab = {
        'awesome', 'best', 'perfect', 'great', 'super',
        'amazing', 'cool', 'nice', 'good', 'excellent',
    }
    flagged = [kw for kw in keywords if kw in GENERIC_BLOCKLIST]
    marketing_terms = [kw for kw in flagged if kw in marketing_vocab]
    generic_terms = [kw for kw in flagged if kw not in marketing_vocab]
    return generic_terms, marketing_terms
def check_redundant_variations(keywords: List[str]) -> List[Tuple[str, str]]:
    """Find pairs of keywords that are redundant variations of each other.

    Two keywords are redundant when one is a substring of the other
    (e.g. "test"/"testing") or when they differ only by a trailing "s"
    (e.g. "report"/"reports").

    Bug fix: the original used str.rstrip('s'), which strips *all* trailing
    's' characters ("boss" -> "bo"), not a single plural suffix.  The plural
    check now removes exactly one trailing 's'.

    Returns:
        List of (keyword1, keyword2) pairs flagged as redundant, in input
        order.
    """
    def _singular(word: str) -> str:
        # Drop exactly one trailing 's' (plural suffix), never more.
        return word[:-1] if word.endswith('s') else word

    redundant = []
    for i, kw1 in enumerate(keywords):
        for kw2 in keywords[i + 1:]:
            # Substring in either direction covers most variations
            if kw1 in kw2 or kw2 in kw1:
                redundant.append((kw1, kw2))
            # Plural/singular pair that is not a plain substring match
            elif _singular(kw1) == kw2 or _singular(kw2) == kw1:
                redundant.append((kw1, kw2))
    return redundant
def check_category_duplication(keywords: List[str]) -> List[str]:
    """Return keywords that exactly duplicate an approved category name.

    Category names should not be repeated as keywords; input order is kept.
    """
    return [kw for kw in keywords if kw in CATEGORIES]
def analyze_balance(keywords: List[str]) -> Dict[str, int]:
    """Count keywords by type to gauge functionality/technology balance.

    Returns:
        Dict mapping 'functionality', 'technology' and 'other' to counts.
    """
    tally = {'functionality': 0, 'technology': 0, 'other': 0}
    for kw in keywords:
        if kw in FUNCTIONALITY_KEYWORDS:
            bucket = 'functionality'
        elif kw in TECHNOLOGY_KEYWORDS:
            bucket = 'technology'
        else:
            bucket = 'other'
        tally[bucket] += 1
    return tally
def calculate_quality_score(
    keywords: List[str],
    generic_terms: List[str],
    marketing_terms: List[str],
    redundant: List[Tuple[str, str]],
    category_dups: List[str],
    min_count: int,
    max_count: int
) -> Tuple[int, List[str]]:
    """Score the keyword set out of 10 and collect human-readable issues.

    Deductions: count violations (-5 too few / -3 too many), generic and
    marketing terms (-2 each), redundant pairs (-2 each), category
    duplicates (-1 each), single-character keywords (-2 each), and -2 when
    no functional or technical keyword is present.  Clamped at 0.

    Returns:
        (score out of 10, list of issue descriptions)
    """
    penalty = 0
    issues = []

    count = len(keywords)
    if count < min_count:
        penalty += 5
        issues.append(f"Too few keywords ({count} < {min_count} minimum)")
    elif count > max_count:
        penalty += 3
        issues.append(f"Too many keywords ({count} > {max_count} maximum)")

    if generic_terms:
        penalty += 2 * len(generic_terms)
        issues.append(f"Generic terms detected: {', '.join(generic_terms)}")

    if marketing_terms:
        penalty += 2 * len(marketing_terms)
        issues.append(f"Marketing terms detected: {', '.join(marketing_terms)}")

    if redundant:
        penalty += 2 * len(redundant)
        pairs = ', '.join(f"{a}/{b}" for a, b in redundant)
        issues.append(f"Redundant variations: {pairs}")

    if category_dups:
        penalty += len(category_dups)
        issues.append(f"Category name duplication: {', '.join(category_dups)}")

    single_char = [kw for kw in keywords if len(kw) == 1]
    if single_char:
        penalty += 2 * len(single_char)
        issues.append(f"Single-character keywords: {', '.join(single_char)}")

    balance = analyze_balance(keywords)
    if balance['functionality'] == 0 and balance['technology'] == 0:
        penalty += 2
        issues.append("No functional or technical keywords")

    # Clamp at zero — heavy penalties must not go negative
    return max(0, 10 - penalty), issues
def suggest_improvements(
    keywords: List[str],
    generic_terms: List[str],
    marketing_terms: List[str],
    redundant: List[Tuple[str, str]],
    min_count: int,
    max_count: int
) -> List[str]:
    """Build a list of actionable suggestions for fixing flagged keywords.

    Indented entries (leading space) are sub-points of the preceding line.
    """
    suggestions: List[str] = []

    # Blocklisted terms should be swapped for concrete functionality
    if generic_terms or marketing_terms:
        suggestions.append("Remove generic/marketing terms")
        suggestions.append(" Replace with specific functionality (e.g., testing, deployment, formatting)")

    # Each redundant pair needs exactly one survivor
    if redundant:
        suggestions.append("Consolidate redundant variations")
        suggestions.extend(f" Keep one of: {kw1}, {kw2}" for kw1, kw2 in redundant)

    total = len(keywords)
    if total < min_count:
        suggestions.append(f"Add {min_count - total} more relevant keyword(s)")
        suggestions.append(" Consider: specific technologies, use-cases, or functionalities")
    elif total > max_count:
        suggestions.append(f"Remove {total - max_count} least relevant keyword(s)")

    # Nudge toward a functionality/technology mix
    mix = analyze_balance(keywords)
    if mix['functionality'] == 0:
        suggestions.append("Add functionality keywords (e.g., testing, automation, deployment)")
    if mix['technology'] == 0:
        suggestions.append("Add technology keywords (e.g., python, docker, kubernetes)")

    return suggestions
def main():
    """Main entry point: parse argv, analyze keywords, print report, exit.

    Exit codes: 0 pass/warning, 1 count violation, 2 quality failure,
    3 missing/empty keywords (also used by usage()).
    """
    if len(sys.argv) < 2 or sys.argv[1] in ['-h', '--help']:
        usage()
    keyword_string = sys.argv[1]
    # Parse optional arguments
    # NOTE(review): int() will raise an uncaught ValueError on a
    # non-numeric --min/--max value — confirm whether a friendly error
    # message is wanted here.
    min_count = DEFAULT_MIN_KEYWORDS
    max_count = DEFAULT_MAX_KEYWORDS
    for i, arg in enumerate(sys.argv[2:], start=2):
        if arg == '--min' and i + 1 < len(sys.argv):
            min_count = int(sys.argv[i + 1])
        elif arg == '--max' and i + 1 < len(sys.argv):
            max_count = int(sys.argv[i + 1])
    # Parse keywords
    keywords = parse_keywords(keyword_string)
    if not keywords:
        print("ERROR: Keywords cannot be empty\n")
        print("Provide 3-7 relevant keywords describing your plugin.\n")
        print("Examples:")
        print(' "testing,pytest,automation"')
        print(' "deployment,kubernetes,ci-cd"')
        sys.exit(3)
    # Analyze keywords
    count = len(keywords)
    generic_terms, marketing_terms = check_generic_terms(keywords)
    redundant = check_redundant_variations(keywords)
    category_dups = check_category_duplication(keywords)
    balance = analyze_balance(keywords)
    # Calculate quality score
    score, issues = calculate_quality_score(
        keywords, generic_terms, marketing_terms,
        redundant, category_dups, min_count, max_count
    )
    # Determine status: count violations outrank quality issues; a score
    # of 7-8 is a warning but still exits 0.
    if score >= 9 and min_count <= count <= max_count:
        status = "✅ PASS"
        exit_code = 0
    elif count < min_count or count > max_count:
        status = "❌ FAIL"
        exit_code = 1
    elif score < 7:
        status = "❌ FAIL"
        exit_code = 2
    else:
        status = "⚠️ WARNING"
        exit_code = 0
    # Print results
    print(f"{status}: Keyword validation\n")
    print(f"Keywords: {', '.join(keywords)}")
    print(f"Count: {count} (valid range: {min_count}-{max_count})")
    print(f"Quality Score: {score}/10\n")
    if issues:
        print("Issues Found:")
        for issue in issues:
            print(f" - {issue}")
        print()
    # Balance breakdown
    print("Breakdown:")
    print(f" - Functionality: {balance['functionality']} keywords")
    print(f" - Technology: {balance['technology']} keywords")
    print(f" - Other: {balance['other']} keywords")
    print()
    # Score impact
    if score >= 9:
        print("Quality Score Impact: +10 points (excellent)\n")
        if exit_code == 0:
            print("Excellent keyword selection for discoverability!")
    elif score >= 7:
        print("Quality Score Impact: +7 points (good)\n")
        print("Good keywords, but could be improved.")
    else:
        print("Quality Score Impact: 0 points (fix to gain +10)\n")
        print("Keywords need significant improvement.")
    # Suggestions
    if issues:
        suggestions = suggest_improvements(
            keywords, generic_terms, marketing_terms,
            redundant, min_count, max_count
        )
        if suggestions:
            print("\nSuggestions:")
            for suggestion in suggestions:
                print(f" {suggestion}")
    sys.exit(exit_code)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,224 @@
#!/usr/bin/env bash
# ============================================================================
# Naming Convention Validator
# ============================================================================
# Purpose: Validate plugin names against OpenPlugins lowercase-hyphen convention
# Version: 1.0.0
# Usage: ./naming-validator.sh <name> [--suggest]
# Returns: 0=valid, 1=invalid, 2=missing params
# ============================================================================
# Fail fast: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# OpenPlugins naming pattern: lowercase alphanumeric words joined by single
# hyphens; no leading/trailing hyphen, no underscores, no uppercase.
NAMING_PATTERN='^[a-z0-9]+(-[a-z0-9]+)*$'
# Generic terms to avoid (these trigger a warning, not a failure)
GENERIC_TERMS=("plugin" "tool" "utility" "helper" "app" "code" "software")
# ============================================================================
# Functions
# ============================================================================
# Print help text (the unquoted EOF heredoc intentionally expands $0) and
# exit with status 2, the "missing required parameters" code.
usage() {
  cat <<EOF
Usage: $0 <name> [--suggest]
Validate plugin name against OpenPlugins naming convention.
Arguments:
name Plugin name to validate (required)
--suggest Auto-suggest corrected name if invalid
Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$
Valid examples:
- code-formatter
- test-runner
- api-client
Invalid examples:
- Code-Formatter (uppercase)
- test_runner (underscore)
- -helper (leading hyphen)
Exit codes:
0 - Valid naming convention
1 - Invalid naming convention
2 - Missing required parameters
EOF
  exit 2
}
# Convert to lowercase-hyphen format
# Derive a lowercase-hyphen correction from an arbitrary plugin name.
# Fixes vs the original:
#   - 's/--*/-/g' replaces the GNU-only 's/-\+/-/g' (\+ is a GNU BRE
#     extension and fails on BSD/macOS sed);
#   - printf is used instead of echo so intermediate values that look
#     like echo options (e.g. "-n") are not silently swallowed;
#   - one sed invocation replaces three subshell pipelines.
# Arguments: $1 - raw name
# Outputs:   corrected name on stdout (may be empty if nothing salvageable)
suggest_correction() {
  local name="$1"
  local corrected="${name,,}"      # lowercase (bash 4+)
  corrected="${corrected//_/-}"    # underscores -> hyphens
  corrected="${corrected// /-}"    # spaces -> hyphens
  # Drop illegal characters, collapse hyphen runs, trim edge hyphens.
  # After collapsing, at most one hyphen can remain at each edge.
  corrected="$(printf '%s\n' "$corrected" \
    | sed -e 's/[^a-z0-9-]//g' -e 's/--*/-/g' -e 's/^-//' -e 's/-$//')"
  printf '%s\n' "$corrected"
}
# Check for generic terms
# Warn when the name embeds a blocklisted generic term as a complete
# hyphen-delimited word (whole name, prefix, suffix, or interior).
# Globals:   GENERIC_TERMS (read)
# Arguments: $1 - plugin name
# Outputs:   warning line on stdout when a term is found
# Returns:   0 when clean, 1 when at least one generic term was found
check_generic_terms() {
  local name="$1"
  local hits=()
  local term
  for term in "${GENERIC_TERMS[@]}"; do
    # Wrapping both sides in hyphens turns the four positional cases
    # (exact / prefix / suffix / interior) into a single substring test.
    if [[ "-$name-" == *"-$term-"* ]]; then
      hits+=("$term")
    fi
  done
  if [ ${#hits[@]} -gt 0 ]; then
    echo "Warning: Contains generic term(s): ${hits[*]}"
    return 1
  fi
  return 0
}
# Find specific issues in the name
# Report every naming-convention violation in a plugin name, one per line.
# Arguments: $1 - plugin name
# Outputs:   " - <issue>" lines on stdout (nothing when clean)
# Returns:   0 when no issues, 1 otherwise
find_issues() {
  local candidate="$1"
  local problems=()

  # Uppercase letters (listed individually, comma-separated)
  if [[ "$candidate" =~ [A-Z] ]]; then
    local caps
    caps=$(echo "$candidate" | grep -o '[A-Z]' | tr '\n' ',' | sed 's/,$//')
    problems+=("Contains uppercase characters: $caps")
  fi
  # Separator and placement problems (glob tests instead of regexes)
  [[ "$candidate" == *_* ]] && problems+=("Contains underscores instead of hyphens")
  [[ "$candidate" == *" "* ]] && problems+=("Contains spaces")
  [[ "$candidate" == -* ]] && problems+=("Starts with hyphen")
  [[ "$candidate" == *- ]] && problems+=("Ends with hyphen")
  [[ "$candidate" == *--* ]] && problems+=("Contains consecutive hyphens")
  # Anything outside letters, digits, underscore, space, hyphen
  if [[ "$candidate" =~ [^a-zA-Z0-9_\ -] ]]; then
    problems+=("Contains special characters")
  fi
  # Length checks
  if [ ${#candidate} -eq 0 ]; then
    problems+=("Name is empty")
  elif [ ${#candidate} -eq 1 ]; then
    problems+=("Name is too short (single character)")
  fi

  if [ ${#problems[@]} -gt 0 ]; then
    local problem
    for problem in "${problems[@]}"; do
      echo " - $problem"
    done
    return 1
  fi
  return 0
}
# ============================================================================
# Main
# ============================================================================
# Validate $1 against NAMING_PATTERN and print a PASS/FAIL report.
# Fixes vs the original:
#   - find_issues returns 1 when issues exist; the bare call aborted the
#     script there under `set -e`, truncating the report before the
#     suggestion, required pattern, and examples were printed.  '|| true'
#     makes the non-zero status deliberate and ignored.
#   - `local correction=$(...)` masked the command substitution's exit
#     status (SC2155); declaration and assignment are now separate.
# Globals:   NAMING_PATTERN (read)
# Arguments: $1 - plugin name, $2 - optional "--suggest" flag
# Exits:     0 valid, 1 invalid, 2 missing/empty parameter
main() {
  # Check for help flag
  if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    usage
  fi
  local name="$1"
  local suggest=false
  if [ $# -gt 1 ] && [ "$2" = "--suggest" ]; then
    suggest=true
  fi
  # Check if name is provided (explicit empty string argument)
  if [ -z "$name" ]; then
    echo "ERROR: Name cannot be empty"
    exit 2
  fi
  # Validate against pattern
  if [[ "$name" =~ $NAMING_PATTERN ]]; then
    echo "✅ PASS: Valid naming convention"
    echo "Name: $name"
    echo "Format: lowercase-hyphen"
    # Check for generic terms (warning only)
    if ! check_generic_terms "$name"; then
      echo ""
      echo "Recommendation: Use more descriptive, functionality-specific names"
    fi
    exit 0
  else
    echo "❌ FAIL: Invalid naming convention"
    echo "Name: $name"
    echo ""
    echo "Issues Found:"
    # find_issues prints the issue list and returns 1; do not let that
    # status abort the report under `set -e`.
    find_issues "$name" || true
    if [ "$suggest" = true ]; then
      local correction
      correction=$(suggest_correction "$name")
      echo ""
      echo "Suggested Correction: $correction"
      # Validate the suggestion
      if [[ "$correction" =~ $NAMING_PATTERN ]]; then
        echo "✓ Suggestion is valid"
      else
        echo "⚠ Manual correction may be needed"
      fi
    fi
    echo ""
    echo "Required Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$"
    echo ""
    echo "Valid Examples:"
    echo " - code-formatter"
    echo " - test-runner"
    echo " - api-client"
    exit 1
  fi
}
main "$@"

View File

@@ -0,0 +1,234 @@
#!/usr/bin/env python3
"""
============================================================================
Semantic Version Validator
============================================================================
Purpose: Validate version strings against Semantic Versioning 2.0.0
Version: 1.0.0
Usage: ./semver-checker.py <version> [--strict]
Returns: 0=valid, 1=invalid, 2=missing params, 3=strict mode violation
============================================================================
"""
import re
import sys
from typing import Tuple, Optional, Dict, List

# Semantic versioning patterns.
# Strict: bare MAJOR.MINOR.PATCH, numeric components with no leading zeros.
STRICT_SEMVER_PATTERN = r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)$'
# Full: semver.org's recommended regex — groups 1-3 are MAJOR/MINOR/PATCH,
# group 4 is the optional -PRERELEASE, group 5 the optional +BUILD metadata.
FULL_SEMVER_PATTERN = r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
def usage():
    """Print usage/help text and exit with status 2 (missing params)."""
    # User-facing help text — keep verbatim.
    print("""Usage: semver-checker.py <version> [--strict]
Validate version string against Semantic Versioning 2.0.0 specification.
Arguments:
version Version string to validate (required)
--strict Enforce strict MAJOR.MINOR.PATCH format (no pre-release/build)
Pattern (strict): MAJOR.MINOR.PATCH (e.g., 1.2.3)
Pattern (full): MAJOR.MINOR.PATCH[-PRERELEASE][+BUILD]
Valid examples:
- 1.0.0 (strict)
- 1.2.3 (strict)
- 1.0.0-alpha.1 (full)
- 1.2.3+build.20241013 (full)
Invalid examples:
- 1.0 (missing PATCH)
- v1.0.0 (has prefix)
- 1.2.x (placeholder)
Exit codes:
0 - Valid semantic version
1 - Invalid format
2 - Missing required parameters
3 - Strict mode violation (valid semver, but has pre-release/build)
Reference: https://semver.org/
""")
    sys.exit(2)
def parse_semver(version: str) -> Optional[Dict[str, object]]:
    """Parse a semantic version string into its components.

    Bug fix: the return annotation was ``Dict[str, any]`` — ``any`` is the
    builtin function, not a type.  ``object`` expresses the intended
    heterogeneous value type without needing a new import.

    Args:
        version: Version string, e.g. "1.2.3-beta.1+build.5".

    Returns:
        Dict with keys 'major', 'minor', 'patch' (ints), 'prerelease' and
        'build' (str or None), and 'is_strict' (True when neither
        pre-release nor build metadata is present); None if the string is
        not valid semver.
    """
    match = re.match(FULL_SEMVER_PATTERN, version)
    if not match:
        return None
    major, minor, patch, prerelease, build = match.groups()
    return {
        'major': int(major),
        'minor': int(minor),
        'patch': int(patch),
        # `or None` normalizes an empty capture to None
        'prerelease': prerelease or None,
        'build': build or None,
        'is_strict': prerelease is None and build is None
    }
def find_issues(version: str) -> List[str]:
    """Diagnose why a version string failed semver parsing.

    Bug fix: the placeholder check used ``'x' in version.lower()``, which
    flagged any version merely *containing* the letter x (e.g. "1.extra.0").
    A dotted component is now a placeholder only when it is exactly
    'x'/'X' (a '*' anywhere still counts, since '*' is never legal).

    Args:
        version: The raw version string that failed to parse.

    Returns:
        Human-readable issue descriptions (possibly empty if nothing
        specific could be diagnosed).
    """
    issues = []
    # Common mistake: "v1.2.3" style prefix
    if version.startswith(('v', 'V')):
        issues.append("Starts with 'v' prefix (remove it)")
    # Component count
    parts = version.split('.')
    if len(parts) < 3:
        issues.append(f"Missing components (has {len(parts)}, needs 3: MAJOR.MINOR.PATCH)")
    elif len(parts) > 3:
        # Extra dots are legitimate inside pre-release/build metadata
        if '-' not in version and '+' not in version:
            issues.append(f"Too many components (has {len(parts)}, expected 3)")
    # Placeholder components (e.g. "1.2.x", "1.*")
    if '*' in version or any(p.lower() == 'x' for p in parts):
        issues.append("Contains placeholder values (x or *)")
    # Base version = everything before pre-release/build metadata
    base_version = version.split('-')[0].split('+')[0]
    base_parts = base_version.split('.')
    for i, part in enumerate(base_parts):
        if not part.isdigit():
            component = ['MAJOR', 'MINOR', 'PATCH'][i] if i < 3 else 'component'
            issues.append(f"{component} is not numeric: '{part}'")
    # Semver forbids leading zeros in numeric components
    for i, part in enumerate(base_parts[:3]):
        if len(part) > 1 and part.startswith('0'):
            component = ['MAJOR', 'MINOR', 'PATCH'][i]
            issues.append(f"{component} has leading zero: '{part}'")
    # Channel names are not versions
    if version in ['latest', 'stable', 'dev', 'master', 'main']:
        issues.append("Using non-numeric identifier (not a version)")
    return issues
def validate_version(version: str, strict: bool = False) -> Tuple[bool, int, str]:
    """
    Validate semantic version

    Args:
        version: Version string to check (e.g. "1.2.3-beta.1+build").
        strict: When True, a valid semver that carries pre-release or
            build metadata is reported with exit code 3 instead of 0.

    Returns:
        (is_valid, exit_code, message) — exit_code follows the module
        convention: 0 valid, 1 invalid, 2 empty input, 3 strict violation.
    """
    if not version or version.strip() == '':
        return False, 2, "ERROR: Version cannot be empty"
    # Parse the version
    parsed = parse_semver(version)
    if parsed is None:
        # Invalid format: explain why with targeted diagnostics
        issues = find_issues(version)
        message = "❌ FAIL: Invalid semantic version format\n\n"
        message += f"Version: {version}\n"
        message += "Valid: No\n\n"
        message += "Issues Found:\n"
        if issues:
            for issue in issues:
                message += f" - {issue}\n"
        else:
            # Generic fallback when no specific diagnostic matched
            message += " - Does not match semantic versioning pattern\n"
        message += "\nRequired Format: MAJOR.MINOR.PATCH\n"
        message += "\nExamples:\n"
        message += " - 1.0.0 (initial release)\n"
        message += " - 1.2.3 (standard version)\n"
        message += " - 2.0.0-beta.1 (pre-release)\n"
        message += "\nReference: https://semver.org/"
        return False, 1, message
    # Check strict mode: valid semver but carrying pre-release/build data
    if strict and not parsed['is_strict']:
        message = "⚠️ WARNING: Valid semver, but not strict format\n\n"
        message += f"Version: {version}\n"
        message += "Format: Valid semver with "
        if parsed['prerelease']:
            message += "pre-release"
        if parsed['build']:
            # Join with "and" only when both kinds of metadata are present
            message += " and " if parsed['prerelease'] else ""
            message += "build metadata"
        message += "\n\n"
        message += "Note: OpenPlugins recommends strict MAJOR.MINOR.PATCH format\n"
        message += "without pre-release or build metadata for marketplace submissions.\n\n"
        message += f"Recommended: {parsed['major']}.{parsed['minor']}.{parsed['patch']} (for stable release)\n\n"
        message += "Quality Score Impact: +5 points (valid, but consider strict format)"
        return True, 3, message
    # Valid version: build the success report
    message = "✅ PASS: Valid semantic version\n\n"
    message += f"Version: {version}\n"
    message += "Format: "
    if parsed['is_strict']:
        message += "MAJOR.MINOR.PATCH (strict)\n"
    else:
        message += "MAJOR.MINOR.PATCH"
        if parsed['prerelease']:
            message += "-PRERELEASE"
        if parsed['build']:
            message += "+BUILD"
        message += "\n"
    message += "Valid: Yes\n\n"
    message += "Components:\n"
    message += f" - MAJOR: {parsed['major']}"
    if parsed['major'] > 0:
        message += " (breaking changes)"
    message += "\n"
    message += f" - MINOR: {parsed['minor']}"
    if parsed['minor'] > 0:
        message += " (new features)"
    message += "\n"
    message += f" - PATCH: {parsed['patch']}"
    if parsed['patch'] > 0:
        message += " (bug fixes)"
    message += "\n"
    if parsed['prerelease']:
        message += f" - Pre-release: {parsed['prerelease']}\n"
    if parsed['build']:
        message += f" - Build: {parsed['build']}\n"
    message += "\n"
    if parsed['prerelease']:
        message += "Note: Pre-release versions indicate unstable releases.\n"
        message += "Remove pre-release identifier for stable marketplace submission.\n\n"
    message += "Quality Score Impact: +5 points\n\n"
    message += "The version follows Semantic Versioning 2.0.0 specification."
    return True, 0, message
def main():
    """CLI entry point: validate argv[1] and exit with the validator's code."""
    argv = sys.argv
    if len(argv) < 2 or argv[1] in ('-h', '--help'):
        usage()
    strict_mode = '--strict' in argv
    _, exit_code, message = validate_version(argv[1], strict_mode)
    print(message)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,325 @@
## Operation: Check Categories
Validate category assignment against OpenPlugins standard category list.
### Parameters from $ARGUMENTS
- **category**: Category name to validate (required)
- **suggest**: Show similar categories if invalid (optional, default: true)
### OpenPlugins Standard Categories
OpenPlugins defines **exactly 10 approved categories**:
1. **development** - Code generation, scaffolding, refactoring
2. **testing** - Test generation, coverage, quality assurance
3. **deployment** - CI/CD, infrastructure, release automation
4. **documentation** - Docs generation, API documentation
5. **security** - Vulnerability scanning, secret detection
6. **database** - Schema design, migrations, queries
7. **monitoring** - Performance analysis, logging
8. **productivity** - Workflow automation, task management
9. **quality** - Linting, formatting, code review
10. **collaboration** - Team tools, communication
### Category Selection Guidance
**development**:
- Code generators
- Project scaffolding
- Refactoring tools
- Boilerplate generation
**testing**:
- Test generators
- Test runners
- Coverage tools
- QA automation
**deployment**:
- CI/CD pipelines
- Infrastructure as code
- Release automation
- Environment management
**documentation**:
- README generators
- API doc generation
- Changelog automation
- Architecture diagrams
**security**:
- Secret scanning
- Vulnerability detection
- Security audits
- Compliance checking
**database**:
- Schema design
- Migration tools
- Query builders
- Database testing
**monitoring**:
- Performance profiling
- Log analysis
- Metrics collection
- Alert systems
**productivity**:
- Task automation
- Workflow orchestration
- Time management
- Note-taking
**quality**:
- Linters
- Code formatters
- Code review tools
- Complexity analysis
**collaboration**:
- Team communication
- Code review coordination (workflow and approvals; tools that *analyze* code during review belong under **quality**)
- Knowledge sharing
- Project management
### Workflow
1. **Extract Category from Arguments**
```
Parse $ARGUMENTS to extract category parameter
If category not provided, return error
Normalize to lowercase
```
2. **Execute Category Validator**
```bash
Execute .scripts/category-validator.sh "$category"
Exit codes:
- 0: Valid category
- 1: Invalid category
- 2: Missing required parameters
```
3. **Check Against Approved List**
```
Compare category against 10 approved categories
Use exact string matching (case-insensitive)
```
4. **Suggest Alternatives (if invalid)**
```
IF category invalid AND suggest:true:
Calculate similarity scores
Suggest closest matching categories
Show category descriptions
```
5. **Return Validation Report**
```
Format results:
- Status: PASS/FAIL
- Category: <provided-category>
- Valid: yes/no
- Description: <category-description> (if valid)
- Suggestions: <list> (if invalid)
- Score impact: +5 points (if valid)
```
### Examples
```bash
# Valid category
/best-practices categories category:development
# Result: PASS - Valid OpenPlugins category
# Invalid category (typo)
/best-practices categories category:developement
# Result: FAIL - Did you mean: development?
# Invalid category (plural)
/best-practices categories category:tests
# Result: FAIL - Did you mean: testing?
# Invalid category (custom)
/best-practices categories category:utilities
# Result: FAIL - Not in approved list
# Suggestions: productivity, quality, development
# Case insensitive
/best-practices categories category:TESTING
# Result: PASS - Valid (normalized to: testing)
```
### Error Handling
**Missing category parameter**:
```
ERROR: Missing required parameter 'category'
Usage: /best-practices categories category:<category-name>
Example: /best-practices categories category:development
```
**Empty category**:
```
ERROR: Category cannot be empty
Choose from 10 approved OpenPlugins categories:
development, testing, deployment, documentation, security,
database, monitoring, productivity, quality, collaboration
```
### Output Format
**Success (Valid Category)**:
```
✅ Category Validation: PASS
Category: development
Valid: Yes
Description: Code generation, scaffolding, refactoring
Use Cases:
- Code generators
- Project scaffolding tools
- Refactoring utilities
- Boilerplate generation
Quality Score Impact: +5 points
The category is approved for OpenPlugins marketplace.
```
**Failure (Invalid Category)**:
```
❌ Category Validation: FAIL
Category: developement
Valid: No
This category is not in the OpenPlugins approved list.
Did you mean?
1. development - Code generation, scaffolding, refactoring
2. deployment - CI/CD, infrastructure, release automation
All Approved Categories:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. development - Code generation, scaffolding
2. testing - Test generation, coverage
3. deployment - CI/CD, infrastructure
4. documentation - Docs generation, API docs
5. security - Vulnerability scanning
6. database - Schema design, migrations
7. monitoring - Performance analysis
8. productivity - Workflow automation
9. quality - Linting, formatting
10. collaboration - Team tools, communication
Quality Score Impact: 0 points (fix to gain +5)
Choose the most appropriate category from the approved list.
```
**Failure (Multiple Matches)**:
```
❌ Category Validation: FAIL
Category: code-tools
Valid: No
This category is not approved. Consider these alternatives:
Best Matches:
1. development - Code generation, scaffolding, refactoring
2. quality - Linting, formatting, code review
3. productivity - Workflow automation, task management
Which fits your plugin best?
- If generating/scaffolding code → development
- If analyzing/formatting code → quality
- If automating workflows → productivity
Quality Score Impact: 0 points (fix to gain +5)
```
### Category Decision Tree
Use this to select the right category:
```
Does your plugin...
Generate or scaffold code?
→ development
Run tests or check quality?
→ testing (if running tests)
→ quality (if analyzing/formatting code)
Deploy or manage infrastructure?
→ deployment
Generate documentation?
→ documentation
Scan for security issues?
→ security
Work with databases?
→ database
Monitor performance or logs?
→ monitoring
Automate workflows or tasks?
→ productivity
Improve code quality?
→ quality
Facilitate team collaboration?
→ collaboration
```
### Common Mistakes
**Using plural forms**:
- ❌ `tests` → ✅ `testing`
- ❌ `deployments` → ✅ `deployment`
- ❌ `databases` → ✅ `database`
**Using generic terms**:
- ❌ `tools` → Choose specific category
- ❌ `utilities` → Choose specific category
- ❌ `helpers` → Choose specific category
**Using multiple categories**:
- ❌ `development,testing` → Choose ONE primary category
- Use keywords for additional topics
**Using custom categories**:
- ❌ `api-tools` → ✅ `development` or `productivity`
- ❌ `devops` → ✅ `deployment`
- ❌ `ci-cd` → ✅ `deployment`
### Compliance Criteria
**PASS Requirements**:
- Exact match with one of 10 approved categories
- Case-insensitive matching accepted
- Single category only (not multiple)
**FAIL Indicators**:
- Not in approved list
- Plural forms
- Custom categories
- Multiple categories
- Empty or missing
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,174 @@
## Operation: Check Naming Conventions
Validate plugin names against OpenPlugins lowercase-hyphen naming convention.
### Parameters from $ARGUMENTS
- **name**: Plugin name to validate (required)
- **fix**: Auto-suggest corrected name (optional, default: true)
### OpenPlugins Naming Convention
**Pattern**: `^[a-z0-9]+(-[a-z0-9]+)*$`
**Valid Examples**:
- `code-formatter`
- `test-runner`
- `deploy-automation`
- `api-client`
- `database-migration`
**Invalid Examples**:
- `Code-Formatter` (uppercase)
- `test_runner` (underscore)
- `Deploy Automation` (space)
- `APIClient` (camelCase)
- `-helper` (leading hyphen)
- `tool-` (trailing hyphen)
### Workflow
1. **Extract Name from Arguments**
```
Parse $ARGUMENTS to extract name parameter
If name not provided, return error
```
2. **Execute Naming Validator**
```bash
Execute .scripts/naming-validator.sh "$name"
Exit codes:
- 0: Valid naming convention
- 1: Invalid naming convention
- 2: Missing required parameters
```
3. **Process Results**
```
IF valid:
Return success with confirmation
ELSE:
Return failure with specific violations
Suggest corrected name if fix:true
Provide examples
```
4. **Return Compliance Report**
```
Format results:
- Status: PASS/FAIL
- Name: <provided-name>
- Valid: yes/no
- Issues: <list of violations>
- Suggestion: <corrected-name>
- Score impact: +5 points (if valid)
```
### Examples
```bash
# Valid name
/best-practices naming name:my-awesome-plugin
# Result: PASS - Valid lowercase-hyphen format
# Invalid name with uppercase
/best-practices naming name:MyPlugin
# Result: FAIL - Contains uppercase (M, P)
# Suggestion: my-plugin
# Invalid name with underscore
/best-practices naming name:test_runner
# Result: FAIL - Contains underscore (_)
# Suggestion: test-runner
# Invalid name with space
/best-practices naming name:"Test Runner"
# Result: FAIL - Contains space
# Suggestion: test-runner
```
### Error Handling
**Missing name parameter**:
```
ERROR: Missing required parameter 'name'
Usage: /best-practices naming name:<plugin-name>
Example: /best-practices naming name:my-plugin
```
**Empty name**:
```
ERROR: Name cannot be empty
Provide a valid plugin name following lowercase-hyphen convention.
```
### Output Format
**Success (Valid Name)**:
```
✅ Naming Convention: PASS
Name: code-formatter
Format: lowercase-hyphen
Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$
Valid: Yes
Quality Score Impact: +5 points
The name follows OpenPlugins naming conventions perfectly.
```
**Failure (Invalid Name)**:
```
❌ Naming Convention: FAIL
Name: Code_Formatter
Format: Invalid
Valid: No
Issues Found:
1. Contains uppercase characters: C, F
2. Contains underscores instead of hyphens
Suggested Correction: code-formatter
Quality Score Impact: 0 points (fix to gain +5)
Fix these issues to comply with OpenPlugins standards.
```
### Compliance Criteria
**PASS Requirements**:
- All lowercase letters (a-z)
- Numbers allowed (0-9)
- Hyphens for word separation
- No leading or trailing hyphens
- No consecutive hyphens
- No other special characters
- Descriptive (not generic like "plugin" or "tool")
**FAIL Indicators**:
- Uppercase letters
- Underscores, spaces, or special characters
- Leading/trailing hyphens
- Empty or single character names
- Generic non-descriptive names
### Best Practices Guidance
**Good Names**:
- Describe functionality: `code-formatter`, `test-runner`
- Include technology: `python-linter`, `docker-manager`
- Indicate purpose: `api-client`, `database-migrator`
**Avoid**:
- Generic: `plugin`, `tool`, `helper`, `utility`
- Abbreviations only: `fmt`, `tst`, `db`
- Version numbers: `plugin-v2`, `tool-2024`
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,514 @@
## Operation: Full Standards Compliance
Execute comprehensive OpenPlugins and Claude Code best practices validation with complete compliance reporting.
### Parameters from $ARGUMENTS
- **path**: Path to plugin or marketplace directory (required)
- **fix**: Auto-suggest corrections for all issues (optional, default: true)
- **format**: Output format (text|json|markdown) (optional, default: text)
### Complete Standards Check
This operation validates all four best practice categories:
1. **Naming Convention** - Lowercase-hyphen format
2. **Semantic Versioning** - MAJOR.MINOR.PATCH format
3. **Category Assignment** - One of 10 approved categories
4. **Keyword Quality** - 3-7 relevant, non-generic keywords
### Workflow
1. **Detect Target Type**
```
Parse $ARGUMENTS to extract path parameter
Detect if path is plugin or marketplace:
- Plugin: Has plugin.json
- Marketplace: Has .claude-plugin/marketplace.json
```
2. **Load Metadata**
```
IF plugin:
Read plugin.json
Extract: name, version, keywords, category
ELSE IF marketplace:
Read .claude-plugin/marketplace.json
Extract marketplace metadata
Validate each plugin entry
ELSE:
Return error: Invalid target
```
3. **Execute All Validations**
```
Run in parallel or sequence:
A. Naming Validation
Execute check-naming.md with name parameter
Store result
B. Version Validation
Execute validate-versioning.md with version parameter
Store result
C. Category Validation
Execute check-categories.md with category parameter
Store result
D. Keyword Validation
Execute validate-keywords.md with keywords parameter
Store result
```
4. **Aggregate Results**
```
Collect all validation results:
- Individual pass/fail status
- Specific issues found
- Suggested corrections
- Score impact for each
Calculate overall compliance:
- Total score: Sum of individual scores
- Pass count: Number of passing validations
- Fail count: Number of failing validations
- Compliance percentage: (pass / total) × 100
```
5. **Generate Compliance Report**
```
Create comprehensive report:
- Executive summary
- Individual validation details
- Issue prioritization
- Suggested fixes
- Compliance score
- Publication readiness
```
6. **Return Results**
```
Format according to output format:
- text: Human-readable console output
- json: Machine-parseable JSON
- markdown: Documentation-ready markdown
```
### Examples
```bash
# Full compliance check on current directory
/best-practices full-standards path:.
# Check specific plugin with JSON output
/best-practices full-standards path:./my-plugin format:json
# Check with auto-fix suggestions
/best-practices full-standards path:. fix:true
# Marketplace validation
/best-practices full-standards path:./marketplace
```
### Error Handling
**Missing path parameter**:
```
ERROR: Missing required parameter 'path'
Usage: /best-practices full-standards path:<directory>
Examples:
/best-practices full-standards path:.
/best-practices full-standards path:./my-plugin
```
**Invalid path**:
```
ERROR: Invalid path or not a plugin/marketplace
Path: <provided-path>
The path must contain either:
- plugin.json (for plugins)
- .claude-plugin/marketplace.json (for marketplaces)
Check the path and try again.
```
**Missing metadata file**:
```
ERROR: Metadata file not found
Expected one of:
- plugin.json
- .claude-plugin/marketplace.json
This does not appear to be a valid Claude Code plugin or marketplace.
```
### Output Format
**Text Format (Complete Compliance)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
OPENPLUGINS BEST PRACTICES COMPLIANCE REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: code-formatter-plugin
Type: Plugin
Date: 2024-10-13
Overall Compliance: 100% ✅
Status: PUBLICATION READY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. Naming Convention: ✅ PASS
Name: code-formatter
Format: lowercase-hyphen
Score: +5 points
The name follows OpenPlugins naming conventions perfectly.
2. Semantic Versioning: ✅ PASS
Version: 1.2.3
Format: MAJOR.MINOR.PATCH
Score: +5 points
Valid semantic version compliant with semver 2.0.0.
3. Category Assignment: ✅ PASS
Category: quality
Description: Linting, formatting, code review
Score: +5 points
Category is approved and appropriate for this plugin.
4. Keyword Quality: ✅ PASS
Keywords: formatting, javascript, eslint, code-quality, automation
Count: 5 (optimal)
Quality: 10/10
Score: +10 points
Excellent keyword selection with balanced mix.
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPLIANCE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Validations Passed: 4/4 (100%)
Quality Score: 25/25 points
Scoring Breakdown:
✅ Naming Convention: +5 points
✅ Semantic Versioning: +5 points
✅ Category Assignment: +5 points
✅ Keyword Quality: +10 points
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Score: 25/25 points
Publication Status: ✅ READY FOR SUBMISSION
This plugin meets all OpenPlugins best practice standards
and is ready for marketplace submission!
Next Steps:
1. Submit to OpenPlugins marketplace
2. Follow contribution guidelines in CONTRIBUTING.md
3. Open pull request with plugin entry
```
**Text Format (Partial Compliance)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
OPENPLUGINS BEST PRACTICES COMPLIANCE REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: Test_Runner
Type: Plugin
Date: 2024-10-13
Overall Compliance: 25% ⚠️
Status: NEEDS IMPROVEMENT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. Naming Convention: ❌ FAIL
Name: Test_Runner
Format: Invalid
Score: 0 points
Issues Found:
- Contains uppercase characters: T, R
- Contains underscore instead of hyphen
✏️ Suggested Fix: test-runner
Impact: +5 points (if fixed)
2. Semantic Versioning: ✅ PASS
Version: 1.0.0
Format: MAJOR.MINOR.PATCH
Score: +5 points
Valid semantic version compliant with semver 2.0.0.
3. Category Assignment: ❌ FAIL
Category: test-tools
Valid: No
Score: 0 points
This category is not in the approved list.
✏️ Suggested Fix: testing
Description: Test generation, coverage, quality assurance
Impact: +5 points (if fixed)
4. Keyword Quality: ⚠️ WARNING
Keywords: plugin, tool, awesome
Count: 3 (minimum met)
Quality: 2/10
Score: 2 points
Issues Found:
- Generic terms: plugin, tool
- Marketing terms: awesome
- No functional keywords
✏️ Suggested Fix: testing, automation, pytest, unit-testing, tdd
Impact: +8 points (if improved to excellent)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPLIANCE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Validations Passed: 1/4 (25%)
Quality Score: 7/25 points
Scoring Breakdown:
❌ Naming Convention: 0/5 points
✅ Semantic Versioning: 5/5 points
❌ Category Assignment: 0/5 points
⚠️ Keyword Quality: 2/10 points
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Score: 7/25 points
Publication Status: ⚠️ NOT READY - NEEDS FIXES
Priority Fixes Required:
1. [P0] Fix naming convention: Test_Runner → test-runner
2. [P0] Fix category: test-tools → testing
3. [P1] Improve keywords: Remove generic terms, add functional keywords
After Fixes (Estimated Score):
✅ Naming Convention: +5 points
✅ Semantic Versioning: +5 points
✅ Category Assignment: +5 points
✅ Keyword Quality: +10 points
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Potential Score: 25/25 points
Next Steps:
1. Apply suggested fixes above
2. Re-run validation: /best-practices full-standards path:.
3. Ensure score reaches 25/25 before submission
```
**JSON Format**:
```json
{
"target": "code-formatter",
"type": "plugin",
"timestamp": "2024-10-13T10:00:00Z",
"compliance": {
"overall": 100,
"status": "READY",
"passed": 4,
"failed": 0,
"warnings": 0
},
"validations": {
"naming": {
"status": "pass",
"name": "code-formatter",
"format": "lowercase-hyphen",
"score": 5,
"issues": []
},
"versioning": {
"status": "pass",
"version": "1.2.3",
"format": "MAJOR.MINOR.PATCH",
"score": 5,
"issues": []
},
"category": {
"status": "pass",
"category": "quality",
"valid": true,
"score": 5,
"issues": []
},
"keywords": {
"status": "pass",
"keywords": ["formatting", "javascript", "eslint", "code-quality", "automation"],
"count": 5,
"quality": 10,
"score": 10,
"issues": []
}
},
"score": {
"total": 25,
"maximum": 25,
"percentage": 100,
"breakdown": {
"naming": 5,
"versioning": 5,
"category": 5,
"keywords": 10
}
},
"publication_ready": true,
"next_steps": [
"Submit to OpenPlugins marketplace",
"Follow contribution guidelines",
"Open pull request"
]
}
```
**Markdown Format** (for documentation):
```markdown
# OpenPlugins Best Practices Compliance Report
**Target**: code-formatter
**Type**: Plugin
**Date**: 2024-10-13
**Status**: ✅ PUBLICATION READY
## Overall Compliance
- **Score**: 25/25 points (100%)
- **Validations Passed**: 4/4
- **Publication Ready**: Yes
## Validation Results
### 1. Naming Convention ✅
- **Status**: PASS
- **Name**: code-formatter
- **Format**: lowercase-hyphen
- **Score**: +5 points
The name follows OpenPlugins naming conventions perfectly.
### 2. Semantic Versioning ✅
- **Status**: PASS
- **Version**: 1.2.3
- **Format**: MAJOR.MINOR.PATCH
- **Score**: +5 points
Valid semantic version compliant with semver 2.0.0.
### 3. Category Assignment ✅
- **Status**: PASS
- **Category**: quality
- **Description**: Linting, formatting, code review
- **Score**: +5 points
Category is approved and appropriate for this plugin.
### 4. Keyword Quality ✅
- **Status**: PASS
- **Keywords**: formatting, javascript, eslint, code-quality, automation
- **Count**: 5 (optimal)
- **Quality**: 10/10
- **Score**: +10 points
Excellent keyword selection with balanced mix.
## Score Breakdown
| Validation | Score | Status |
|------------|-------|--------|
| Naming Convention | 5/5 | ✅ Pass |
| Semantic Versioning | 5/5 | ✅ Pass |
| Category Assignment | 5/5 | ✅ Pass |
| Keyword Quality | 10/10 | ✅ Pass |
| **Total** | **25/25** | **✅ Ready** |
## Next Steps
1. Submit to OpenPlugins marketplace
2. Follow contribution guidelines in CONTRIBUTING.md
3. Open pull request with plugin entry
---
*Report generated by marketplace-validator-plugin v1.0.0*
```
### Compliance Scoring
**Total Score Breakdown**:
- Naming Convention: 5 points
- Semantic Versioning: 5 points
- Category Assignment: 5 points
- Keyword Quality: 10 points
- **Maximum Total**: 25 points
**Publication Readiness**:
- **25/25 points (100%)**: ✅ READY - Perfect compliance
- **20-24 points (80-96%)**: ✅ READY - Minor improvements optional
- **15-19 points (60-76%)**: ⚠️ NEEDS WORK - Address issues before submission
- **10-14 points (40-56%)**: ❌ NOT READY - Significant fixes required
- **0-9 points (0-36%)**: ❌ NOT READY - Major compliance issues
### Integration with Quality Analysis
This operation feeds into the overall quality scoring system:
```
Best Practices Score (25 points max)
Quality Analysis (calculate-score)
Overall Quality Score (100 points total)
Publication Readiness Determination
```
### Best Practices Workflow
For complete plugin validation:
```bash
# 1. Run full standards compliance
/best-practices full-standards path:.
# 2. If issues found, fix them, then re-run
# ... apply fixes ...
/best-practices full-standards path:.
# 3. Once compliant, run comprehensive validation
/validation-orchestrator comprehensive path:.
# 4. Review quality report
# Quality score includes best practices (25 points)
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,105 @@
---
description: Enforce OpenPlugins and Claude Code best practices for naming, versioning, and standards compliance
---
You are the Best Practices coordinator, ensuring adherence to OpenPlugins and Claude Code standards.
## Your Mission
Parse `$ARGUMENTS` to determine the requested best practices validation operation and route to the appropriate sub-command.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **naming** → Read `.claude/commands/best-practices/check-naming.md`
- **versioning** → Read `.claude/commands/best-practices/validate-versioning.md`
- **categories** → Read `.claude/commands/best-practices/check-categories.md`
- **keywords** → Read `.claude/commands/best-practices/validate-keywords.md`
- **full-standards** → Read `.claude/commands/best-practices/full-compliance.md`
## Argument Format
```
/best-practices <operation> [parameters]
```
### Examples
```bash
# Check naming conventions
/best-practices naming name:my-plugin-name
# Validate semantic versioning
/best-practices versioning version:1.2.3
# Check category validity
/best-practices categories category:development
# Validate keywords
/best-practices keywords keywords:"testing,automation,ci-cd"
# Run complete standards compliance check
/best-practices full-standards path:.
```
## OpenPlugins Standards
**Naming Convention**:
- Format: lowercase-hyphen (e.g., `code-formatter`, `test-runner`)
- Pattern: `^[a-z0-9]+(-[a-z0-9]+)*$`
- No underscores, spaces, or uppercase
- Descriptive, not generic (avoid: "plugin", "tool", "helper")
**Semantic Versioning**:
- Format: MAJOR.MINOR.PATCH (e.g., 1.2.3)
- Pattern: `^[0-9]+\.[0-9]+\.[0-9]+$`
- Optional pre-release: `-alpha.1`, `-beta.2`
- Optional build metadata: `+20241013`
**Categories** (choose ONE):
1. **development** - Code generation, scaffolding, refactoring
2. **testing** - Test generation, coverage, quality assurance
3. **deployment** - CI/CD, infrastructure, release automation
4. **documentation** - Docs generation, API documentation
5. **security** - Vulnerability scanning, secret detection
6. **database** - Schema design, migrations, queries
7. **monitoring** - Performance analysis, logging
8. **productivity** - Workflow automation, task management
9. **quality** - Linting, formatting, code review
10. **collaboration** - Team tools, communication
**Keywords**:
- Count: 3-7 keywords
- Relevance: Functionality, technology, or use-case based
- Avoid: Generic terms (plugin, tool, utility), category duplication
- Good: `testing`, `automation`, `python`, `ci-cd`, `docker`
- Bad: `best`, `awesome`, `perfect`, `plugin`
## Compliance Scoring
Best practices contribute to quality score:
- Valid naming: +5 points
- Semantic versioning: +5 points
- Valid category: +5 points
- Quality keywords (3-7): +10 points
## Error Handling
If the operation is not recognized:
1. List all available operations
2. Show OpenPlugins standards
3. Provide compliance guidance
## Base Directory
Base directory for this skill: `.claude/commands/best-practices/`
## Your Task
1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute best practices validation
4. Return compliance results with specific corrections
**Current Request**: $ARGUMENTS

View File

@@ -0,0 +1,337 @@
## Operation: Validate Keywords
Validate keyword selection for relevance, count, and quality against OpenPlugins standards.
### Parameters from $ARGUMENTS
- **keywords**: Comma-separated keyword list (required)
- **min**: Minimum keyword count (optional, default: 3)
- **max**: Maximum keyword count (optional, default: 7)
- **context**: Plugin context for relevance checking (optional, JSON or description)
### OpenPlugins Keyword Standards
**Count Requirements**:
- Minimum: 3 keywords
- Maximum: 7 keywords
- Optimal: 5-6 keywords
**Quality Requirements**:
- Relevant to plugin functionality
- Searchable terms users would use
- Mix of functionality, technology, and use-case
- No generic marketing terms
- No duplicate category names
### Keyword Categories
**Functionality Keywords** (what it does):
- `testing`, `deployment`, `formatting`, `linting`, `migration`
- `generation`, `automation`, `analysis`, `monitoring`, `scanning`
**Technology Keywords** (what it works with):
- `python`, `javascript`, `docker`, `kubernetes`, `postgresql`
- `react`, `vue`, `typescript`, `bash`, `terraform`
**Use-Case Keywords** (how it's used):
- `ci-cd`, `code-review`, `api-testing`, `performance`
- `tdd`, `bdd`, `refactoring`, `debugging`, `profiling`
### Good Keywords Examples
**Well-balanced sets**:
- `["testing", "pytest", "automation", "tdd", "python"]`
- `["deployment", "kubernetes", "ci-cd", "docker", "helm"]`
- `["linting", "javascript", "eslint", "code-quality", "automation"]`
- `["database", "postgresql", "migration", "schema", "sql"]`
**Poor keyword sets**:
- `["plugin", "tool", "awesome"]` - Generic/marketing terms
- `["test", "testing", "tester", "tests"]` - Redundant variations
- `["development"]` - Only category name, too few
- `["a", "b", "c", "d", "e", "f", "g", "h"]` - Too many, non-descriptive
### Workflow
1. **Extract Keywords from Arguments**
```
Parse $ARGUMENTS to extract keywords parameter
Split by comma, trim whitespace
Normalize to lowercase
Remove duplicates
```
2. **Execute Keyword Analyzer**
```bash
Execute .scripts/keyword-analyzer.py "$keywords" "$min" "$max" "$context"
Exit codes:
- 0: Valid keyword set
- 1: Count violation (too few or too many)
- 2: Quality issues (generic terms, duplicates)
- 3: Missing required parameters
```
3. **Validate Count**
```
count = number of keywords
IF count < min: FAIL (too few)
IF count > max: FAIL (too many)
```
4. **Check for Generic Terms**
```
Generic blocklist:
- plugin, tool, utility, helper, awesome
- best, perfect, great, super, amazing
- code, software, app, program
Flag any generic terms found
```
5. **Analyze Quality**
```
Check for:
- Duplicate category names
- Redundant variations (test, testing, tests)
- Single-character keywords
- Non-descriptive terms
```
6. **Calculate Relevance Score**
```
Base score: 10 points
Deductions:
- Generic term: -2 per term
- Too few keywords: -5
- Too many keywords: -3
- Redundant variations: -2 per redundancy
- Non-descriptive: -1 per term
Final score: max(0, base - deductions)
```
7. **Return Analysis Report**
```
Format results:
- Status: PASS/FAIL/WARNING
- Count: <number> (valid range: min-max)
- Quality: <score>/10
- Issues: <list of problems>
- Suggestions: <improved keyword set>
- Score impact: +10 points (if excellent), +5 (if good)
```
### Examples
```bash
# Valid keyword set
/best-practices keywords keywords:"testing,pytest,automation,tdd,python"
# Result: PASS - 5 keywords, well-balanced, relevant
# Too few keywords
/best-practices keywords keywords:"testing,python"
# Result: FAIL - Only 2 keywords (minimum: 3)
# Too many keywords
/best-practices keywords keywords:"a,b,c,d,e,f,g,h,i,j"
# Result: FAIL - 10 keywords (maximum: 7)
# Generic terms
/best-practices keywords keywords:"plugin,tool,awesome,best"
# Result: FAIL - Contains generic/marketing terms
# With custom range
/best-practices keywords keywords:"ci,cd,docker" min:2 max:5
# Result: PASS - 3 keywords within custom range
```
### Error Handling
**Missing keywords parameter**:
```
ERROR: Missing required parameter 'keywords'
Usage: /best-practices keywords keywords:"keyword1,keyword2,keyword3"
Example: /best-practices keywords keywords:"testing,automation,python"
```
**Empty keywords**:
```
ERROR: Keywords cannot be empty
Provide 3-7 relevant keywords describing your plugin.
Good examples:
- "testing,pytest,automation"
- "deployment,kubernetes,ci-cd"
- "linting,javascript,code-quality"
```
### Output Format
**Success (Excellent Keywords)**:
```
✅ Keyword Validation: PASS
Keywords: testing, pytest, automation, tdd, python
Count: 5 (optimal range: 3-7)
Quality Score: 10/10
Analysis:
✅ Balanced mix of functionality, technology, and use-case
✅ All keywords relevant and searchable
✅ No generic or marketing terms
✅ Good variety without redundancy
Breakdown:
- Functionality: testing, automation, tdd
- Technology: pytest, python
- Use-case: tdd
Quality Score Impact: +10 points
Excellent keyword selection for discoverability!
```
**Failure (Count Violation)**:
```
❌ Keyword Validation: FAIL
Keywords: testing, python
Count: 2 (required: 3-7)
Quality Score: 5/10
Issues Found:
1. Too few keywords (2 < 3 minimum)
2. Missing technology or use-case keywords
Suggestions to improve:
Add 1-3 more relevant keywords such as:
- Functionality: automation, unit-testing
- Use-case: tdd, ci-cd
- Specific tools: pytest, unittest
Recommended: testing, python, pytest, automation, tdd
Quality Score Impact: 0 points (fix to gain +10)
```
**Failure (Generic Terms)**:
```
❌ Keyword Validation: FAIL
Keywords: plugin, tool, awesome, best, helper
Count: 5 (valid range)
Quality Score: 2/10
Issues Found:
1. Generic terms detected: plugin, tool, helper
2. Marketing terms detected: awesome, best
3. No functional or technical keywords
These keywords don't help users find your plugin.
Better alternatives:
Instead of generic terms, describe WHAT it does:
- Replace "plugin" → testing, deployment, formatting
- Replace "tool" → specific functionality
- Replace "awesome/best" → actual features
Suggested keywords based on common patterns:
- testing, automation, ci-cd, docker, python
- deployment, kubernetes, infrastructure, terraform
- linting, formatting, code-quality, javascript
Quality Score Impact: 0 points (fix to gain +10)
```
**Warning (Minor Issues)**:
```
⚠️ Keyword Validation: WARNING
Keywords: testing, tests, test, automation, ci-cd
Count: 5 (valid range)
Quality Score: 7/10
Issues Found:
1. Redundant variations: testing, tests, test
2. Consider consolidating to single term
Suggestions:
- Keep: testing, automation, ci-cd
- Remove: tests, test (redundant)
- Add: 2 more specific keywords (e.g., pytest, junit)
Recommended: testing, automation, ci-cd, pytest, unit-testing
Quality Score Impact: +7 points (good, but could be better)
Your keywords are functional but could be more diverse.
```
### Keyword Quality Checklist
**PASS Requirements**:
- 3-7 keywords total
- No generic terms (plugin, tool, utility, helper)
- No marketing terms (awesome, best, perfect)
- No redundant variations
- Mix of functionality and technology
- Relevant to plugin purpose
- Searchable by target users
**FAIL Indicators**:
- < 3 or > 7 keywords
- Contains generic terms
- Contains marketing fluff
- All keywords same type (only technologies, only functionality)
- Single-character keywords
- Category name duplication
### Best Practices
**Do**:
- Use specific functionality terms
- Include primary technologies
- Add relevant use-cases
- Think about user search intent
- Balance breadth and specificity
**Don't**:
- Use generic words (plugin, tool, utility)
- Add marketing terms (best, awesome, perfect)
- Duplicate category names exactly
- Use redundant variations
- Add irrelevant technologies
- Use abbreviations without context
### Quality Scoring Matrix
**10/10 - Excellent**:
- 5-6 keywords
- Perfect mix of functionality/technology/use-case
- All highly relevant
- Great search discoverability
**7-9/10 - Good**:
- 3-7 keywords
- Good mix with minor issues
- Mostly relevant
- Decent discoverability
**4-6/10 - Fair**:
- Count issues OR some generic terms
- Imbalanced mix
- Partial relevance
- Limited discoverability
**0-3/10 - Poor**:
- Severe count violations OR mostly generic
- No functional keywords
- Poor relevance
- Very poor discoverability
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,254 @@
## Operation: Validate Versioning
Validate version strings against Semantic Versioning 2.0.0 specification.
### Parameters from $ARGUMENTS
- **version**: Version string to validate (required)
- **strict**: Enforce strict semver (no pre-release/build metadata) (optional, default: false)
### Semantic Versioning Standard
**Base Pattern**: `MAJOR.MINOR.PATCH` (e.g., `1.2.3`)
**Strict Format**: `^[0-9]+\.[0-9]+\.[0-9]+$`
**Extended Format** (with pre-release and build metadata):
- Pre-release: `1.2.3-alpha.1`, `2.0.0-beta.2`, `1.0.0-rc.1`
- Build metadata: `1.2.3+20241013`, `1.0.0+build.1`
- Combined: `1.2.3-alpha.1+build.20241013`
### Valid Examples
**Strict Semver** (OpenPlugins recommended):
- `1.0.0` - Initial release
- `1.2.3` - Standard version
- `2.5.13` - Double-digit components
- `0.1.0` - Pre-1.0 development
**Extended Semver** (allowed):
- `1.0.0-alpha` - Alpha release
- `1.0.0-beta.2` - Beta release
- `1.0.0-rc.1` - Release candidate
- `1.2.3+20241013` - With build metadata
### Invalid Examples
- `1.0` - Missing PATCH
- `v1.0.0` - Leading 'v' prefix
- `1.0.0.0` - Too many components
- `1.2.x` - Placeholder values
- `latest` - Non-numeric
- `1.0.0-SNAPSHOT` - Maven-style identifier (allowed by semver grammar, but rejected here as non-standard)
### Workflow
1. **Extract Version from Arguments**
```
Parse $ARGUMENTS to extract version parameter
If version not provided, return error
```
2. **Execute Semantic Version Checker**
```bash
Execute .scripts/semver-checker.py "$version" "$strict"
Exit codes:
- 0: Valid semantic version
- 1: Invalid format
- 2: Missing required parameters
- 3: Strict mode violation (valid semver, but has pre-release/build)
```
3. **Parse Version Components**
```
Extract components:
- MAJOR: Breaking changes
- MINOR: Backward-compatible features
- PATCH: Backward-compatible fixes
- Pre-release: Optional identifier (alpha, beta, rc)
- Build metadata: Optional metadata
```
4. **Return Validation Report**
```
Format results:
- Status: PASS/FAIL/WARNING
- Version: <provided-version>
- Valid: yes/no
- Components: MAJOR.MINOR.PATCH breakdown
- Pre-release: <identifier> (if present)
- Build: <metadata> (if present)
- Score impact: +5 points (if valid)
```
### Examples
```bash
# Valid strict semver
/best-practices versioning version:1.2.3
# Result: PASS - Valid semantic version (1.2.3)
# Valid with pre-release
/best-practices versioning version:1.0.0-alpha.1
# Result: PASS - Valid semantic version with pre-release
# Invalid format
/best-practices versioning version:1.0
# Result: FAIL - Missing PATCH component
# Strict mode with pre-release
/best-practices versioning version:1.0.0-beta strict:true
# Result: WARNING - Valid semver but not strict format
# Invalid prefix
/best-practices versioning version:v1.2.3
# Result: FAIL - Contains 'v' prefix (use 1.2.3)
```
### Error Handling
**Missing version parameter**:
```
ERROR: Missing required parameter 'version'
Usage: /best-practices versioning version:<semver>
Example: /best-practices versioning version:1.2.3
```
**Invalid format**:
```
ERROR: Invalid semantic version format
The version must follow MAJOR.MINOR.PATCH format.
Examples:
- 1.0.0 (initial release)
- 1.2.3 (standard version)
- 2.0.0-beta.1 (pre-release)
```
### Output Format
**Success (Valid Semver)**:
```
✅ Semantic Versioning: PASS
Version: 1.2.3
Format: MAJOR.MINOR.PATCH
Valid: Yes
Components:
- MAJOR: 1 (breaking changes)
- MINOR: 2 (new features)
- PATCH: 3 (bug fixes)
Quality Score Impact: +5 points
The version follows Semantic Versioning 2.0.0 specification.
```
**Success with Pre-release**:
```
✅ Semantic Versioning: PASS
Version: 1.0.0-beta.2
Format: MAJOR.MINOR.PATCH-PRERELEASE
Valid: Yes
Components:
- MAJOR: 1
- MINOR: 0
- PATCH: 0
- Pre-release: beta.2
Quality Score Impact: +5 points
Note: Pre-release versions indicate unstable releases.
```
**Failure (Invalid Format)**:
```
❌ Semantic Versioning: FAIL
Version: 1.0
Format: Invalid
Valid: No
Issues Found:
1. Missing PATCH component
2. Expected format: MAJOR.MINOR.PATCH
Suggested Correction: 1.0.0
Quality Score Impact: 0 points (fix to gain +5)
Fix to comply with Semantic Versioning 2.0.0 specification.
Reference: https://semver.org/
```
**Warning (Strict Mode)**:
```
⚠️ Semantic Versioning: WARNING
Version: 1.0.0-alpha.1
Format: Valid semver, but not strict
Valid: Yes (with pre-release)
Note: OpenPlugins recommends strict MAJOR.MINOR.PATCH format
without pre-release or build metadata for marketplace submissions.
Recommended: 1.0.0 (for stable release)
Quality Score Impact: +5 points (valid, but consider strict format)
```
### Versioning Guidelines
**When to increment**:
**MAJOR** (X.0.0):
- Breaking API changes
- Incompatible changes
- Major rewrites
**MINOR** (x.Y.0):
- New features (backward-compatible)
- Deprecations
- Significant improvements
**PATCH** (x.y.Z):
- Bug fixes
- Security patches
- Minor improvements
**Initial Development**:
- Start with `0.1.0`
- Increment MINOR for features
- First stable release: `1.0.0`
**Pre-release Identifiers**:
- `alpha` - Early testing
- `beta` - Feature complete, testing
- `rc` - Release candidate
### Compliance Criteria
**PASS Requirements**:
- Three numeric components (MAJOR.MINOR.PATCH)
- Each component is non-negative integer
- Components separated by dots
- Optional pre-release identifier (hyphen-separated)
- Optional build metadata (plus-separated)
- No leading zeros (except single 0)
**FAIL Indicators**:
- Missing components (1.0)
- Too many components (1.0.0.0)
- Non-numeric components (1.x.0)
- Leading 'v' prefix
- Invalid separators
- Leading zeros (01.02.03)
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,254 @@
#!/usr/bin/env bash
# ============================================================================
# CHANGELOG Validator
# ============================================================================
# Purpose: Validate CHANGELOG.md format compliance (Keep a Changelog)
# Version: 1.0.0
# Usage: ./changelog-validator.sh <changelog-path> [--strict] [--json]
# Returns: 0=success, 1=error, JSON output to stdout if --json
# ============================================================================
set -euo pipefail

# Flag defaults; the command-line options below may override them.
STRICT_MODE=false
JSON_OUTPUT=false
REQUIRE_UNRELEASED=true

# Change-entry categories permitted by the Keep a Changelog convention.
VALID_CATEGORIES=("Added" "Changed" "Deprecated" "Removed" "Fixed" "Security")

# First positional argument is the changelog path; everything after it is a flag.
CHANGELOG_PATH="${1:-CHANGELOG.md}"
shift || true

# All remaining options are boolean flags, so a simple for-loop suffices;
# unrecognized arguments are silently ignored.
for opt in "$@"; do
  case "$opt" in
    --strict) STRICT_MODE=true ;;
    --json) JSON_OUTPUT=true ;;
    --no-unreleased) REQUIRE_UNRELEASED=false ;;
    *) : ;;
  esac
done
# ----------------------------------------------------------------------------
# Result accumulators for this validation run.
#   issues          - human-readable problem descriptions
#   version_entries - "X.Y.Z|YYYY-MM-DD" for each well-formed release header
#   categories_used - Keep-a-Changelog categories actually seen in the file
# compliance_score starts at 100 and is decremented per problem found.
# ----------------------------------------------------------------------------
# Initialize results
declare -a issues=()
declare -a version_entries=()
declare -a categories_used=()
has_title=false
has_unreleased=false
compliance_score=100
# Check if file exists
# NOTE(review): the human-readable branch calls a missing CHANGELOG a warning
# ("recommended but not required"), yet the script still exits 1 — callers
# treating a non-zero exit as a hard failure will reject such plugins;
# confirm that is intended.
if [[ ! -f "$CHANGELOG_PATH" ]]; then
if $JSON_OUTPUT; then
cat <<EOF
{
"error": "CHANGELOG not found",
"path": "$CHANGELOG_PATH",
"present": false,
"score": 0,
"status": "warning",
"issues": ["CHANGELOG.md not found"]
}
EOF
else
echo "⚠️ WARNING: CHANGELOG not found at $CHANGELOG_PATH"
echo "CHANGELOG is recommended but not required for initial submission."
fi
exit 1
fi
# Read content
# Slurp the entire changelog; all checks below operate on this buffer.
content=$(<"$CHANGELOG_PATH")
# Check for title
# NOTE(review): '\s' inside an ERE is a GNU grep extension; [[:space:]] is
# the portable spelling — confirm the oldest grep this must support.
if echo "$content" | grep -qiE "^#\s*(changelog|change.?log)"; then
has_title=true
else
issues+=("Missing title 'Changelog' or 'Change Log'")
((compliance_score-=10)) || true
fi
# Check for Unreleased section (required unless --no-unreleased was passed).
if echo "$content" | grep -qE "^##\s*\[Unreleased\]"; then
has_unreleased=true
else
if $REQUIRE_UNRELEASED; then
issues+=("Missing [Unreleased] section")
((compliance_score-=15)) || true
fi
fi
# Extract version headers
# A well-formed header is "## [X.Y.Z] - YYYY-MM-DD" and is recorded as
# "version|date"; any other "##" header that still contains an X.Y.Z version
# is reported as malformed.  The "|| true" keeps 'set -e' from aborting when
# the arithmetic result is 0.
while IFS= read -r line; do
if [[ $line =~ ^##[[:space:]]*\[([0-9]+\.[0-9]+\.[0-9]+)\][[:space:]]*-[[:space:]]*([0-9]{4}-[0-9]{2}-[0-9]{2}) ]]; then
version="${BASH_REMATCH[1]}"
date="${BASH_REMATCH[2]}"
version_entries+=("$version|$date")
elif [[ $line =~ ^##[[:space:]]*\[?([0-9]+\.[0-9]+\.[0-9]+)\]? ]] && [[ ! $line =~ \[Unreleased\] ]]; then
# Invalid format detected
issues+=("Invalid version header format: '$line' (should be '## [X.Y.Z] - YYYY-MM-DD')")
((compliance_score-=10)) || true
fi
done <<< "$content"
# Record which Keep-a-Changelog categories appear as "### <Category>" headers.
for category in "${VALID_CATEGORIES[@]}"; do
  if echo "$content" | grep -qE "^###[[:space:]]*$category"; then
    categories_used+=("$category")
  fi
done

# Flag any "### ..." header whose name is not an approved category.
while IFS= read -r line; do
  if [[ $line =~ ^###[[:space:]]+(.*) ]]; then
    cat_name="${BASH_REMATCH[1]}"
    # Strip trailing whitespace (including CR from CRLF files) so that
    # "### Added " or a Windows line ending is not misreported as a
    # non-standard category.
    cat_name="${cat_name%"${cat_name##*[![:space:]]}"}"
    is_valid=false
    for valid_cat in "${VALID_CATEGORIES[@]}"; do
      if [[ "$cat_name" == "$valid_cat" ]]; then
        is_valid=true
        break
      fi
    done
    if ! $is_valid; then
      issues+=("Non-standard category: '### $cat_name' (should be one of: ${VALID_CATEGORIES[*]})")
      ((compliance_score-=5)) || true
    fi
  fi
done <<< "$content"

# Re-check the date half of every recorded "version|date" entry.  Entries are
# only recorded when the date already matched YYYY-MM-DD, so this is purely
# defensive; the ${arr[@]+...} guard keeps an empty array safe under
# 'set -u' on bash < 4.4.
for entry in ${version_entries[@]+"${version_entries[@]}"}; do
  date_part="${entry#*|}"
  if [[ ! $date_part =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then
    issues+=("Invalid date format in version entry: $date_part (should be YYYY-MM-DD)")
    ((compliance_score-=5)) || true
  fi
done

# Clamp the score at zero and derive the overall status bucket.
if ((compliance_score < 0)); then
  compliance_score=0
fi

status="pass"
if ((compliance_score < 60)); then
  status="fail"
elif ((compliance_score < 80)); then
  status="warning"
fi
# ----------------------------------------------------------------------------
# Emit results: a JSON document when --json was given, otherwise a
# human-readable report.  Both branches only read variables computed above.
# ----------------------------------------------------------------------------
# Output results
if $JSON_OUTPUT; then
# Build JSON output
# NOTE(review): the inline `IFS=,` assignments have no effect — joining is
# done by `paste -sd,` — and issue strings containing newlines or other JSON
# control characters would still yield invalid JSON (only double quotes are
# escaped).  Also, "${arr[@]}" / "${arr[@]:0:N}" expansions of empty arrays
# error under 'set -u' on bash < 4.4; verify the minimum supported bash.
cat <<EOF
{
"present": true,
"path": "$CHANGELOG_PATH",
"has_title": $has_title,
"has_unreleased": $has_unreleased,
"version_count": ${#version_entries[@]},
"version_entries": [
$(IFS=,; for entry in "${version_entries[@]}"; do
version="${entry%|*}"
date="${entry#*|}"
echo " {\"version\": \"$version\", \"date\": \"$date\"}"
done | paste -sd, -)
],
"categories_used": [
$(IFS=,; for cat in "${categories_used[@]}"; do
echo " \"$cat\""
done | paste -sd, -)
],
"compliance_score": $compliance_score,
"status": "$status",
"issues": [
$(IFS=,; for issue in "${issues[@]}"; do
# Escape quotes in issue text
escaped_issue="${issue//\"/\\\"}"
echo " \"$escaped_issue\""
done | paste -sd, -)
]
}
EOF
else
# Human-readable output
echo ""
echo "CHANGELOG Validation Results"
echo "========================================"
echo "File: $CHANGELOG_PATH"
echo "Compliance Score: $compliance_score/100"
echo ""
# Presence of the two structural anchors: title and [Unreleased].
if $has_title; then
echo "✓ Title present"
else
echo "✗ Title missing"
fi
if $has_unreleased; then
echo "✓ [Unreleased] section present"
else
if $REQUIRE_UNRELEASED; then
echo "✗ [Unreleased] section missing"
else
echo "⚠ [Unreleased] section missing (not required)"
fi
fi
echo ""
# List every well-formed release entry that was recorded during the scan.
echo "Version Entries: ${#version_entries[@]}"
for entry in "${version_entries[@]}"; do
version="${entry%|*}"
date="${entry#*|}"
echo " • [$version] - $date"
done
if [[ ${#categories_used[@]} -gt 0 ]]; then
echo ""
echo "Change Categories Used:"
for cat in "${categories_used[@]}"; do
echo "$cat"
done
fi
if [[ ${#issues[@]} -gt 0 ]]; then
echo ""
echo "Issues Found: ${#issues[@]}"
for issue in "${issues[@]}"; do
echo "$issue"
done
fi
echo ""
if [[ "$status" == "pass" ]]; then
echo "Overall: ✓ PASS"
elif [[ "$status" == "warning" ]]; then
echo "Overall: ⚠ WARNINGS"
else
echo "Overall: ✗ FAIL"
fi
echo ""
fi
# Exit with appropriate code
# Only a "fail" status produces a non-zero exit; warnings still exit 0.
if [[ "$status" == "fail" ]]; then
exit 1
else
exit 0
fi

View File

@@ -0,0 +1,325 @@
#!/usr/bin/env bash
# ============================================================================
# Example Validator
# ============================================================================
# Purpose: Validate example quality and detect placeholder patterns
# Version: 1.0.0
# Usage: ./example-validator.sh <path> [--no-placeholders] [--recursive] [--json]
# Returns: 0=success, 1=warning, JSON output to stdout if --json
# ============================================================================
set -euo pipefail

# Option defaults; flags below may override them.
NO_PLACEHOLDERS=true
RECURSIVE=true
JSON_OUTPUT=false
EXTENSIONS="md,txt,json,sh,py,js,ts,yaml,yml"

# First positional argument is the directory (or file) to scan.
TARGET_PATH="${1:-.}"
shift || true

# Consume remaining options one at a time; only --extensions takes a value.
while (($#)); do
  opt=$1
  shift
  case "$opt" in
    --no-placeholders) NO_PLACEHOLDERS=true ;;
    --allow-placeholders) NO_PLACEHOLDERS=false ;;
    --recursive) RECURSIVE=true ;;
    --non-recursive) RECURSIVE=false ;;
    --json) JSON_OUTPUT=true ;;
    --extensions)
      EXTENSIONS=$1
      shift
      ;;
    *) : ;; # unknown arguments are ignored
  esac
done
# Aggregate counters populated by the scan below.
files_checked=0
example_count=0
placeholder_count=0
todo_count=0
declare -a issues=()
declare -a files_with_issues=()

# Build the find(1) invocation as argument arrays instead of eval-ing a
# string: this survives paths and extensions containing spaces or shell
# metacharacters, which the previous eval-based construction did not.
declare -a depth_args=()
if ! $RECURSIVE; then
  depth_args=(-maxdepth 1)
fi

declare -a name_args=()
IFS=',' read -ra EXT_ARRAY <<< "$EXTENSIONS"
for ext in ${EXT_ARRAY[@]+"${EXT_ARRAY[@]}"}; do
  if ((${#name_args[@]} == 0)); then
    name_args=(-name "*.${ext}")
  else
    name_args+=(-o -name "*.${ext}")
  fi
done

# Find files to check.  The ${arr[@]+...} guards keep empty arrays safe under
# 'set -u' on bash < 4.4; find errors (bad path, empty pattern list) are
# suppressed, leaving an empty result set — same as the original behavior.
mapfile -t files < <(find "$TARGET_PATH" ${depth_args[@]+"${depth_args[@]}"} -type f \( ${name_args[@]+"${name_args[@]}"} \) 2>/dev/null || true)
# Patterns that mark unfinished or placeholder content.
declare -a PLACEHOLDER_PATTERNS=(
  'TODO[:\)]'
  'FIXME[:\)]'
  'XXX[:\)]'
  'HACK[:\)]'
  'placeholder'
  'PLACEHOLDER'
  'your-.*-here'
  '<your-'
  'INSERT.?HERE'
  'YOUR_[A-Z_]+'
)

# Generic dummy identifiers that suggest copy-pasted sample code.
declare -a GENERIC_PATTERNS=(
  '\bfoo\b'
  '\bbar\b'
  '\bbaz\b'
  '\bdummy\b'
)

# Constructs that look like placeholders but are legitimate template or
# environment-variable syntax; lines matching these are never flagged.
declare -a ACCEPTABLE_PATTERNS=(
  '\{\{[^}]+\}\}' # {{variable}} template syntax
  '\$\{[^}]+\}' # ${variable} template syntax
  '\$[A-Z_]+' # $VARIABLE environment variables
)

# Return 0 when the given line matches one of the ACCEPTABLE_PATTERNS above,
# i.e. the apparent "placeholder" is really template/env-var syntax.
is_acceptable_pattern() {
  local candidate="$1"
  local pat
  for pat in "${ACCEPTABLE_PATTERNS[@]}"; do
    if echo "$candidate" | grep -qE "$pat"; then
      return 0 # Legitimate template syntax
    fi
  done
  return 1 # Genuine placeholder suspect
}
# Count fenced code blocks (``` pairs) in a markdown file.
# Prints the number of complete examples on stdout; non-markdown files and
# unreadable files count as 0.
count_code_examples() {
  local file="$1"
  # Non-markdown files never contribute examples.  Emit an explicit 0 so
  # callers capturing the output always receive a number (the previous
  # version returned silently, yielding an empty string).
  if [[ ! "$file" =~ \.md$ ]]; then
    echo 0
    return 0
  fi
  # grep -c prints the number of lines containing a fence; on a missing or
  # unreadable file it prints nothing and fails, which the fallback maps to
  # 0 (the old "|| echo 0" form emitted two lines on a zero-match file and
  # relied on the numeric guard to recover).
  local count
  count=$(grep -c '```' "$file" 2>/dev/null) || count=0
  if [[ "$count" =~ ^[0-9]+$ ]]; then
    # Each example consists of an opening and a closing fence.
    count=$((count / 2))
  else
    count=0
  fi
  echo "$count"
}
# ----------------------------------------------------------------------------
# Main scan: examine every candidate file for placeholder markers, counting
# markdown code examples along the way.  grep emits "lineno:content" pairs
# which the while-loops split on ':'.
# ----------------------------------------------------------------------------
# Check each file
for file in "${files[@]}"; do
((files_checked++)) || true
# Count examples in markdown files
if [[ "$file" =~ \.md$ ]]; then
file_examples=$(count_code_examples "$file")
((example_count += file_examples)) || true
fi
file_issues=0
# Check for placeholder patterns
for pattern in "${PLACEHOLDER_PATTERNS[@]}"; do
while IFS=: read -r line_num line_content; do
# Skip if it's an acceptable pattern
if is_acceptable_pattern "$line_content"; then
continue
fi
((placeholder_count++)) || true
((file_issues++)) || true
issue="$file:$line_num: Placeholder pattern detected"
issues+=("$issue")
# Track TODO/FIXME separately
# NOTE(review): this inspects the *pattern* text rather than the matched
# line, so every hit of a TODO-ish pattern also increments todo_count.
if echo "$pattern" | grep -qE 'TODO|FIXME|XXX'; then
((todo_count++)) || true
fi
done < <(grep -inE "$pattern" "$file" 2>/dev/null || true)
done
# Check for generic dummy values (only in non-test files)
# Files whose *paths* mention test/example/spec are exempt from foo/bar/baz
# checks, since dummy identifiers are expected there.
if [[ ! "$file" =~ test ]] && [[ ! "$file" =~ example ]] && [[ ! "$file" =~ spec ]]; then
for pattern in "${GENERIC_PATTERNS[@]}"; do
while IFS=: read -r line_num line_content; do
# Skip code comments explaining these terms
if echo "$line_content" | grep -qE '(#|//|/\*).*'"$pattern"; then
continue
fi
# Skip if in an acceptable context
if is_acceptable_pattern "$line_content"; then
continue
fi
((file_issues++)) || true
issue="$file:$line_num: Generic placeholder value detected"
issues+=("$issue")
done < <(grep -inE "$pattern" "$file" 2>/dev/null || true)
done
fi
# Track files with issues
if ((file_issues > 0)); then
files_with_issues+=("$file:$file_issues")
fi
done
# Derive the 0-100 quality score: each placeholder costs 10 points, each
# TODO/FIXME marker a further 5, and having fewer than two code examples
# costs a flat 20.
quality_score=$(( 100 - placeholder_count * 10 - todo_count * 5 ))
if ((example_count < 2)); then
  quality_score=$((quality_score - 20))
fi

# Floor the score at zero.
(( quality_score >= 0 )) || quality_score=0

# Map the numeric score onto a coarse status bucket.
if ((quality_score >= 80)); then
  status="pass"
elif ((quality_score >= 60)); then
  status="warning"
else
  status="fail"
fi
# ----------------------------------------------------------------------------
# Emit results: JSON when --json was given, otherwise a human-readable
# report.  Issue and file lists are truncated to keep output bounded.
# ----------------------------------------------------------------------------
# Output results
if $JSON_OUTPUT; then
# Build JSON output
# NOTE(review): slicing "${issues[@]:0:20}" on an *empty* array errors under
# 'set -u' on bash < 4.4, and issue text with embedded newlines or quotes
# would break the hand-assembled JSON; verify the minimum bash version and
# consider jq if inputs are untrusted.  The trailing-comma cleanup is done
# by the final `sed '$ s/,$//'`.
cat <<EOF
{
"files_checked": $files_checked,
"example_count": $example_count,
"placeholder_count": $placeholder_count,
"todo_count": $todo_count,
"files_with_issues": ${#files_with_issues[@]},
"quality_score": $quality_score,
"status": "$status",
"issues": [
$(IFS=; for issue in "${issues[@]:0:20}"; do # Limit to first 20 issues
# Escape quotes in issue text
escaped_issue="${issue//\"/\\\"}"
echo " \"$escaped_issue\","
done | sed '$ s/,$//')
],
"files_with_issues_list": [
$(IFS=; for file_info in "${files_with_issues[@]:0:10}"; do # Limit to first 10 files
file_path="${file_info%:*}"
file_count="${file_info#*:}"
echo " {\"file\": \"$file_path\", \"issue_count\": $file_count},"
done | sed '$ s/,$//')
]
}
EOF
else
# Human-readable output
echo ""
echo "Example Quality Validation"
echo "========================================"
echo "Files Checked: $files_checked"
echo "Code Examples Found: $example_count"
echo "Quality Score: $quality_score/100"
echo ""
if ((placeholder_count > 0)) || ((todo_count > 0)); then
echo "Issues Detected:"
echo " • Placeholder patterns: $placeholder_count"
echo " • TODO/FIXME markers: $todo_count"
echo " • Files with issues: ${#files_with_issues[@]}"
echo ""
# Show at most five offending files; files_with_issues entries are
# "path:count" pairs built during the scan.
if ((${#files_with_issues[@]} > 0)); then
echo "Files with issues:"
for file_info in "${files_with_issues[@]:0:5}"; do # Show first 5
file_path="${file_info%:*}"
file_count="${file_info#*:}"
echo "$file_path ($file_count issues)"
done
if ((${#files_with_issues[@]} > 5)); then
echo " ... and $((${#files_with_issues[@]} - 5)) more files"
fi
fi
echo ""
echo "Sample Issues:"
for issue in "${issues[@]:0:5}"; do # Show first 5
echo "$issue"
done
if ((${#issues[@]} > 5)); then
echo " ... and $((${#issues[@]} - 5)) more issues"
fi
else
echo "✓ No placeholder patterns detected"
fi
if ((example_count < 2)); then
echo ""
echo "⚠ Recommendation: Add more code examples (found: $example_count, recommended: 3+)"
fi
echo ""
if [[ "$status" == "pass" ]]; then
echo "Overall: ✓ PASS"
elif [[ "$status" == "warning" ]]; then
echo "Overall: ⚠ WARNINGS"
else
echo "Overall: ✗ FAIL"
fi
echo ""
fi
# Exit with appropriate code
# Per the header contract, warnings still exit 0; only "fail" exits 1.
if [[ "$status" == "fail" ]]; then
exit 1
elif [[ "$status" == "warning" ]]; then
exit 0 # Warning is not a failure
else
exit 0
fi

View File

@@ -0,0 +1,344 @@
#!/usr/bin/env python3
# ============================================================================
# License Detector
# ============================================================================
# Purpose: Detect and validate LICENSE file content
# Version: 1.0.0
# Usage: ./license-detector.py <path> [--expected LICENSE] [--json]
# Returns: 0=success, 1=error, JSON output to stdout
# ============================================================================
import sys
import os
import re
import json
import argparse
from pathlib import Path
from typing import Dict, Optional, Tuple
# OSI-approved license patterns
# Maps an SPDX-style identifier to the regex that recognizes the license's
# characteristic text, a 0-100 confidence weight for that match, the
# OSI-approval flag, and a display name.  detect_license() keeps the
# highest-confidence hit, so the more specific BSD-3-Clause pattern (85)
# outranks the broader BSD-2-Clause prefix pattern (80) when both match.
LICENSE_PATTERNS = {
    "MIT": {
        "pattern": r"Permission is hereby granted, free of charge",
        "confidence": 95,
        "osi_approved": True,
        "full_name": "MIT License"
    },
    "Apache-2.0": {
        "pattern": r"Licensed under the Apache License, Version 2\.0",
        "confidence": 95,
        "osi_approved": True,
        "full_name": "Apache License 2.0"
    },
    "GPL-3.0": {
        "pattern": r"GNU GENERAL PUBLIC LICENSE.*Version 3",
        "confidence": 95,
        "osi_approved": True,
        "full_name": "GNU General Public License v3.0"
    },
    "GPL-2.0": {
        "pattern": r"GNU GENERAL PUBLIC LICENSE.*Version 2",
        "confidence": 95,
        "osi_approved": True,
        "full_name": "GNU General Public License v2.0"
    },
    "BSD-3-Clause": {
        "pattern": r"Redistribution and use in source and binary forms.*3\.",
        "confidence": 85,
        "osi_approved": True,
        "full_name": "BSD 3-Clause License"
    },
    "BSD-2-Clause": {
        "pattern": r"Redistribution and use in source and binary forms",
        "confidence": 80,
        "osi_approved": True,
        "full_name": "BSD 2-Clause License"
    },
    "ISC": {
        "pattern": r"Permission to use, copy, modify, and/or distribute",
        "confidence": 90,
        "osi_approved": True,
        "full_name": "ISC License"
    },
    "MPL-2.0": {
        "pattern": r"Mozilla Public License Version 2\.0",
        "confidence": 95,
        "osi_approved": True,
        "full_name": "Mozilla Public License 2.0"
    }
}

# License name variations/aliases
# Informal spellings mapped to canonical identifiers; used for name-only
# detection (low confidence) and for normalizing manifest license values.
LICENSE_ALIASES = {
    "MIT License": "MIT",
    "MIT license": "MIT",
    "Apache License 2.0": "Apache-2.0",
    "Apache 2.0": "Apache-2.0",
    "Apache-2": "Apache-2.0",
    "GNU GPL v3": "GPL-3.0",
    "GPLv3": "GPL-3.0",
    "GNU GPL v2": "GPL-2.0",
    "GPLv2": "GPL-2.0",
    "BSD 3-Clause": "BSD-3-Clause",
    "BSD 2-Clause": "BSD-2-Clause",
}
def find_license_file(path: str) -> Optional[str]:
    """Locate a license file at or below ``path``.

    If ``path`` is itself a file whose name contains "license", it is
    returned directly.  If it is a directory, well-known license file
    names are probed in priority order (LICENSE preferred).  Returns the
    path as a string, or None when nothing is found.
    """
    candidate = Path(path)
    if candidate.is_file() and 'license' in candidate.name.lower():
        return str(candidate)
    if candidate.is_dir():
        names = ('LICENSE', 'LICENSE.txt', 'LICENSE.md',
                 'COPYING', 'COPYING.txt', 'LICENCE')
        for name in names:
            probe = candidate / name
            if probe.exists():
                return str(probe)
    return None
def read_plugin_manifest(path: str) -> Optional[str]:
    """Return the ``license`` field from ``.claude-plugin/plugin.json``.

    ``path`` may be the plugin directory or any file inside it (in which
    case the containing directory is used).  Returns None when the
    manifest is absent, malformed, or lacks a license field.
    """
    base = Path(path)
    if base.is_file():
        base = base.parent
    manifest_file = base / '.claude-plugin' / 'plugin.json'
    if not manifest_file.exists():
        return None
    try:
        data = json.loads(manifest_file.read_text(encoding='utf-8'))
        return data.get('license')
    except Exception:
        # Unreadable or malformed manifest — treat as "no declared license".
        return None
def detect_license(content: str) -> Tuple[Optional[str], int, bool]:
    """Identify the license contained in ``content``.

    Returns ``(license_id, confidence, is_complete)``: an SPDX-style
    identifier (or None), a 0-100 confidence value, and whether the full
    license text — rather than just its name — appears to be present.
    """
    best_match = None
    best_confidence = 0
    # Full-text pattern match: keep the highest-confidence hit.
    for license_id, info in LICENSE_PATTERNS.items():
        if re.search(info["pattern"], content, re.IGNORECASE | re.DOTALL):
            if info["confidence"] > best_confidence:
                best_match, best_confidence = license_id, info["confidence"]
    # A very short file cannot hold the complete license text.
    is_complete = not (best_match and len(content.strip()) < 200)
    # Fall back to bare license names (e.g. "MIT License") at low confidence.
    if best_match is None:
        for alias, license_id in LICENSE_ALIASES.items():
            if re.search(r'\b' + re.escape(alias) + r'\b', content, re.IGNORECASE):
                best_match, best_confidence, is_complete = license_id, 50, False
                break
    return best_match, best_confidence, is_complete
def normalize_license_name(license_name: str) -> str:
    """Map a license name or alias to its canonical identifier.

    Returns the input unchanged when no mapping applies; an empty or
    falsy input normalizes to "".
    """
    if not license_name:
        return ""
    # Already canonical — nothing to do.
    if license_name in LICENSE_PATTERNS:
        return license_name
    # Exact alias hit.
    if license_name in LICENSE_ALIASES:
        return LICENSE_ALIASES[license_name]
    # Collapse whitespace, then retry the aliases case-insensitively.
    collapsed = re.sub(r'\s+', ' ', license_name.strip())
    for alias, license_id in LICENSE_ALIASES.items():
        if collapsed.lower() == alias.lower():
            return license_id
    return license_name
def licenses_match(detected: str, expected: str) -> Tuple[bool, str]:
    """Compare two license identifiers, tolerating aliases and formatting.

    Returns ``(matches, match_type)`` with match_type one of "exact",
    "alias", "fuzzy", or "mismatch".
    """
    lhs = normalize_license_name(detected)
    rhs = normalize_license_name(expected)
    if lhs == rhs:
        return True, "exact"
    # Distinct identifiers may still name the same license.
    if lhs in LICENSE_PATTERNS and rhs in LICENSE_PATTERNS:
        if LICENSE_PATTERNS[lhs]["full_name"] == LICENSE_PATTERNS[rhs]["full_name"]:
            return True, "alias"

    # Last resort: compare with case and separators stripped out.
    def squash(name: str) -> str:
        return name.lower().replace('-', '').replace(' ', '')

    if squash(lhs) == squash(rhs):
        return True, "fuzzy"
    return False, "mismatch"
def main():
    """CLI entry point: locate, read, and classify the LICENSE file,
    compare it with the manifest's declared license, then print a score
    and status.  Returns 0 unless the overall status is "fail".
    """
    parser = argparse.ArgumentParser(description='Detect and validate LICENSE file')
    parser.add_argument('path', help='Path to LICENSE file or directory containing it')
    parser.add_argument('--expected', help='Expected license type (from plugin.json)', default=None)
    parser.add_argument('--strict', action='store_true', help='Strict validation (requires full text)')
    parser.add_argument('--json', action='store_true', help='Output JSON format')
    args = parser.parse_args()
    # Find LICENSE file; a missing file is a hard failure.
    license_path = find_license_file(args.path)
    if not license_path:
        result = {
            "error": "LICENSE file not found",
            "path": args.path,
            "present": False,
            "score": 0,
            "status": "fail",
            "issues": ["LICENSE file not found in specified path"]
        }
        if args.json:
            print(json.dumps(result, indent=2))
        else:
            print("❌ CRITICAL: LICENSE file not found")
            print(f"Path: {args.path}")
            print("LICENSE file is required for plugin submission.")
        return 1
    # Read LICENSE content
    try:
        with open(license_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        result = {
            "error": f"Failed to read LICENSE: {str(e)}",
            "path": license_path,
            "present": True,
            "score": 0,
            "status": "fail"
        }
        if args.json:
            print(json.dumps(result, indent=2))
        else:
            print(f"❌ ERROR: Failed to read LICENSE: {e}")
        return 1
    # Detect license type, match confidence, and text completeness.
    detected_license, confidence, is_complete = detect_license(content)
    # Read expected license from plugin.json if not provided on the CLI.
    if not args.expected:
        args.expected = read_plugin_manifest(args.path)
    # Check consistency; defaults to True when there is nothing to compare.
    matches_manifest = True
    match_type = None
    if args.expected:
        matches_manifest, match_type = licenses_match(detected_license or "", args.expected)
    # Determine if OSI approved
    is_osi_approved = False
    if detected_license and detected_license in LICENSE_PATTERNS:
        is_osi_approved = LICENSE_PATTERNS[detected_license]["osi_approved"]
    # Build issues list.  Scoring: unknown license -50; name-only text -20
    # (strict) or -10; non-OSI -30; manifest mismatch -20; floored at 0.
    issues = []
    score = 100
    if not detected_license:
        issues.append("Unable to identify license type")
        score -= 50
    elif not is_complete:
        issues.append("LICENSE contains only license name, not full text")
        score -= 20 if args.strict else 10
    if not is_osi_approved and detected_license:
        issues.append("License is not OSI-approved")
        score -= 30
    if args.expected and not matches_manifest:
        issues.append(f"LICENSE ({detected_license or 'unknown'}) does not match plugin.json ({args.expected})")
        score -= 20
    score = max(0, score)
    # Determine status buckets: >=80 pass, >=60 warning, otherwise fail.
    if score >= 80:
        status = "pass"
    elif score >= 60:
        status = "warning"
    else:
        status = "fail"
    # Build result
    result = {
        "present": True,
        "path": license_path,
        "detected_license": detected_license,
        "confidence": confidence,
        "is_complete": is_complete,
        "is_osi_approved": is_osi_approved,
        "manifest_license": args.expected,
        "matches_manifest": matches_manifest,
        "match_type": match_type,
        "score": score,
        "status": status,
        "issues": issues
    }
    # Output: JSON when requested, otherwise a human-readable report.
    if args.json:
        print(json.dumps(result, indent=2))
    else:
        # Human-readable output
        print(f"\nLICENSE Validation Results")
        print("=" * 50)
        print(f"File: {license_path}")
        print(f"Detected: {detected_license or 'Unknown'} (confidence: {confidence}%)")
        print(f"Score: {score}/100")
        print(f"\nOSI Approved: {'✓ Yes' if is_osi_approved else '✗ No'}")
        print(f"Complete Text: {'✓ Yes' if is_complete else '⚠ No (name only)'}")
        if args.expected:
            print(f"\nConsistency Check:")
            print(f" plugin.json: {args.expected}")
            print(f" LICENSE file: {detected_license or 'Unknown'}")
            print(f" Match: {'✓ Yes' if matches_manifest else '✗ No'}")
        if issues:
            print(f"\nIssues Found: {len(issues)}")
            for issue in issues:
                print(f"{issue}")
        print(f"\nOverall: {'✓ PASS' if status == 'pass' else '⚠ WARNING' if status == 'warning' else '✗ FAIL'}")
        print()
    return 0 if status != "fail" else 1


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,311 @@
#!/usr/bin/env python3
# ============================================================================
# README Checker
# ============================================================================
# Purpose: Validate README.md completeness and quality
# Version: 1.0.0
# Usage: ./readme-checker.py <readme-path> [options]
# Returns: 0=success, 1=error, JSON output to stdout
# ============================================================================
import argparse
import json
import os
import re
import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple
# Required sections (case-insensitive patterns)
# Maps a canonical section key to a regex matched line-by-line; headings of
# levels 1-3 ("#" through "###") are accepted, with common synonyms.
REQUIRED_SECTIONS = {
    "overview": r"(?i)^#{1,3}\s*(overview|description|about)",
    "installation": r"(?i)^#{1,3}\s*installation",
    "usage": r"(?i)^#{1,3}\s*usage",
    "examples": r"(?i)^#{1,3}\s*(examples?|demonstrations?)",
    "license": r"(?i)^#{1,3}\s*licen[cs]e"
}

# Optional but recommended sections
# NOTE(review): not referenced anywhere in this script's visible code —
# presumably reserved for future scoring; confirm before removing.
RECOMMENDED_SECTIONS = {
    "configuration": r"(?i)^#{1,3}\s*(configuration|setup|config)",
    "troubleshooting": r"(?i)^#{1,3}\s*(troubleshooting|faq|common.?issues)",
    "contributing": r"(?i)^#{1,3}\s*contribut",
    "changelog": r"(?i)^#{1,3}\s*(changelog|version.?history|releases)"
}
def find_readme(path: str) -> Optional[str]:
    """Locate a README file at or below ``path``.

    If ``path`` is itself a README-like file it is returned directly; if
    it is a directory, common README file names are probed in order.

    Returns:
        The README path as a string, or None when none is found.  (The
        previous annotation claimed ``str``, but the fall-through case
        has always returned None.)
    """
    path_obj = Path(path)
    # Direct hit: the caller pointed straight at a README file.
    if path_obj.is_file() and path_obj.name.lower().startswith('readme'):
        return str(path_obj)
    # Otherwise probe conventional names inside the directory.
    if path_obj.is_dir():
        for filename in ['README.md', 'readme.md', 'README.txt', 'README']:
            readme_path = path_obj / filename
            if readme_path.exists():
                return str(readme_path)
    return None
def analyze_sections(content: str) -> Tuple[List[str], List[str]]:
    """Classify the required README sections as present or missing.

    Returns ``(found, missing)`` lists of section keys from
    REQUIRED_SECTIONS, matched against the heading lines of ``content``.
    """
    lines = content.split('\n')
    found_sections = []
    missing_sections = []
    for section_name, pattern in REQUIRED_SECTIONS.items():
        # A section counts as found if any line matches its heading regex.
        if any(re.match(pattern, line.strip()) for line in lines):
            found_sections.append(section_name)
        else:
            missing_sections.append(section_name)
    return found_sections, missing_sections
def count_examples(content: str) -> int:
    """Count fenced code blocks (```...``` pairs) in the README text."""
    return sum(1 for _ in re.finditer(r'```[\s\S]*?```', content))
def check_quality_issues(content: str) -> List[str]:
    """Detect quality problems: placeholder spam and too-thin key sections."""
    issues = []
    # Placeholder markers are tolerated in moderation; only flag a pattern
    # when it occurs more than five times.
    for pattern in [r'TODO', r'FIXME', r'XXX', r'placeholder',
                    r'your-.*-here', r'<your-']:
        hits = re.findall(pattern, content, re.IGNORECASE)
        if len(hits) > 5:
            issues.append(f"Excessive placeholder patterns: {len(hits)} instances of '{pattern}'")
    # Accumulate the amount of body text under each level-1..3 heading.
    section_lengths = {}
    current_section = None
    for line in content.split('\n'):
        if re.match(r'^#{1,3}\s+', line):
            current_section = line.strip()
            section_lengths[current_section] = 0
        elif current_section and line.strip():
            section_lengths[current_section] += len(line)
    # Key sections (installation/usage/examples) should not be skeletal.
    for section, length in section_lengths.items():
        if length < 100 and any(keyword in section.lower()
                                for keyword in ('installation', 'usage', 'example')):
            issues.append(f"Section '{section}' is very short ({length} chars), consider expanding")
    return issues
def calculate_score(found_sections: List[str], missing_sections: List[str],
                    length: int, example_count: int, quality_issues: List[str]) -> int:
    """Compute the 0-100 README quality score from the analysis results.

    Penalties: 15 per missing required section; 30 for a critically short
    README (<200 chars) or 10 when merely short (<500); 15 for zero code
    examples or 5 for only one; 5 per quality issue, capped at 20.
    """
    penalty = 15 * len(missing_sections)
    if length < 200:
        penalty += 30
    elif length < 500:
        penalty += 10
    if example_count == 0:
        penalty += 15
    elif example_count < 2:
        penalty += 5
    penalty += min(5 * len(quality_issues), 20)
    return max(0, 100 - penalty)
def generate_recommendations(found_sections: List[str], missing_sections: List[str],
                             length: int, example_count: int, quality_issues: List[str]) -> List[Dict]:
    """Build a prioritized list of actionable improvement recommendations.

    The list is sorted by priority (critical, important, recommended) and,
    within a priority, by descending score impact.
    """
    recs = []
    # One recommendation per missing required section.
    for section in missing_sections:
        core = section in ["overview", "installation", "usage"]
        recs.append({
            "priority": "critical" if core else "important",
            "action": f"Add {section.title()} section",
            "impact": 15,
            "effort": "medium" if section == "examples" else "low",
            "description": f"Include a comprehensive {section} section with clear explanations"
        })
    # Too-short README.
    if length < 500:
        recs.append({
            "priority": "important" if length >= 200 else "critical",
            "action": f"Expand README by {500 - length} characters",
            "impact": 10 if length >= 200 else 30,
            "effort": "medium",
            "description": "Add more detail to existing sections or include additional sections"
        })
    # Not enough worked examples.
    if example_count < 3:
        needed = 3 - example_count
        recs.append({
            "priority": "important",
            "action": f"Add {needed} more code example{'s' if needed > 1 else ''}",
            "impact": 15 if example_count == 0 else 5,
            "effort": "medium",
            "description": "Include concrete, copy-pasteable usage examples"
        })
    # One generic entry per detected quality issue.
    for issue in quality_issues:
        recs.append({
            "priority": "recommended",
            "action": "Address quality issue",
            "impact": 5,
            "effort": "low",
            "description": issue
        })
    rank = {"critical": 0, "important": 1, "recommended": 2}
    return sorted(recs, key=lambda rec: (rank[rec["priority"]], -rec["impact"]))
def main():
    """CLI entry point: analyze a README and print a quality report.

    Returns 0 when the quality score is at least 60, else 1.
    """
    parser = argparse.ArgumentParser(description='Validate README.md quality')
    parser.add_argument('path', help='Path to README.md or directory containing it')
    # NOTE(review): --sections and --strict are parsed but never used below.
    parser.add_argument('--sections', help='Comma-separated required sections', default=None)
    parser.add_argument('--min-length', type=int, default=500, help='Minimum character count')
    parser.add_argument('--strict', action='store_true', help='Enable strict validation')
    parser.add_argument('--json', action='store_true', help='Output JSON format')
    args = parser.parse_args()
    # Find README file; missing README is a hard failure.
    readme_path = find_readme(args.path)
    if not readme_path:
        result = {
            "error": "README.md not found",
            "path": args.path,
            "present": False,
            "score": 0,
            "issues": ["README.md file not found in specified path"]
        }
        # Fix: honor the --json flag here; previously JSON was printed
        # unconditionally even in human-readable mode.
        if args.json:
            print(json.dumps(result, indent=2))
        else:
            print("❌ ERROR: README.md not found")
            print(f"Path: {args.path}")
        return 1
    # Read README content
    try:
        with open(readme_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        result = {
            "error": f"Failed to read README: {str(e)}",
            "path": readme_path,
            "present": True,
            "score": 0
        }
        # Fix: honor the --json flag here as well (see above).
        if args.json:
            print(json.dumps(result, indent=2))
        else:
            print(f"❌ ERROR: Failed to read README: {e}")
        return 1
    # Analyze README: size, sections, examples, and quality issues.
    length = len(content)
    found_sections, missing_sections = analyze_sections(content)
    example_count = count_examples(content)
    quality_issues = check_quality_issues(content)
    # Calculate score
    score = calculate_score(found_sections, missing_sections, length, example_count, quality_issues)
    # Generate recommendations
    recommendations = generate_recommendations(found_sections, missing_sections, length, example_count, quality_issues)
    # Build result
    result = {
        "present": True,
        "path": readme_path,
        "length": length,
        "min_length": args.min_length,
        "meets_min_length": length >= args.min_length,
        "sections": {
            "found": found_sections,
            "missing": missing_sections,
            "required_count": len(REQUIRED_SECTIONS),
            "found_count": len(found_sections)
        },
        "examples": {
            "count": example_count,
            "sufficient": example_count >= 2
        },
        "quality_issues": quality_issues,
        "score": score,
        "rating": (
            "excellent" if score >= 90 else
            "good" if score >= 75 else
            "fair" if score >= 60 else
            "needs_improvement" if score >= 40 else
            "poor"
        ),
        "recommendations": recommendations[:10],  # Top 10
        "status": "pass" if score >= 60 and not missing_sections else "warning" if score >= 40 else "fail"
    }
    # Output: JSON when requested, otherwise a human-readable report.
    if args.json:
        print(json.dumps(result, indent=2))
    else:
        # Human-readable output
        print(f"\nREADME Validation Results")
        print("=" * 50)
        print(f"File: {readme_path}")
        print(f"Length: {length} characters (min: {args.min_length})")
        print(f"Score: {score}/100 ({result['rating'].title()})")
        print(f"\nSections Found: {len(found_sections)}/{len(REQUIRED_SECTIONS)}")
        for section in found_sections:
            print(f"{section.title()}")
        if missing_sections:
            print(f"\nMissing Sections: {len(missing_sections)}")
            for section in missing_sections:
                print(f"{section.title()}")
        print(f"\nCode Examples: {example_count}")
        if quality_issues:
            print(f"\nQuality Issues: {len(quality_issues)}")
            for issue in quality_issues[:5]:  # Top 5
                print(f"{issue}")
        if recommendations:
            print(f"\nTop Recommendations:")
            for i, rec in enumerate(recommendations[:5], 1):
                print(f" {i}. [{rec['priority'].upper()}] {rec['action']} (+{rec['impact']} pts)")
        print()
    return 0 if score >= 60 else 1


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,308 @@
## Operation: Check LICENSE File
Validate LICENSE file presence, format, and consistency with plugin metadata.
### Parameters from $ARGUMENTS
- **path**: Target plugin/marketplace path (required)
- **expected**: Expected license type (optional, reads from plugin.json if not provided)
- **strict**: Enable strict validation mode (optional, default: false)
- **check-consistency**: Verify consistency with plugin.json (optional, default: true)
### LICENSE Requirements
**File Presence**:
- LICENSE or LICENSE.txt in plugin root
- Also accept: LICENSE.md, COPYING, COPYING.txt
**OSI-Approved Licenses** (recommended):
- MIT License
- Apache License 2.0
- GNU General Public License (GPL) v2/v3
- BSD 2-Clause or 3-Clause License
- Mozilla Public License 2.0
- ISC License
- Creative Commons (for documentation only; note: CC licenses are not OSI-approved)
**Validation Checks**:
1. **File exists**: LICENSE file present in root
2. **Valid content**: Contains recognized license text
3. **Complete**: Full license text, not just license name
4. **Consistency**: Matches license field in plugin.json
5. **OSI-approved**: Recognized open-source license
### Workflow
1. **Locate LICENSE File**
```
Check for files in plugin root (case-insensitive):
- LICENSE
- LICENSE.txt
- LICENSE.md
- COPYING
- COPYING.txt
- LICENCE (UK spelling)
If multiple found, prefer LICENSE over others
```
2. **Read Plugin Metadata**
```
Read plugin.json
Extract license field value
Store expected license type for comparison
```
3. **Execute License Detector**
```bash
Execute .scripts/license-detector.py with parameters:
- License file path
- Expected license type (from plugin.json)
- Strict mode flag
Script returns:
- detected_license: Identified license type
- confidence: 0-100 (match confidence)
- is_osi_approved: Boolean
- is_complete: Boolean (full text vs just name)
- matches_manifest: Boolean
- issues: Array of problems
```
4. **Validate License Content**
```
Check for license text patterns:
- MIT: "Permission is hereby granted, free of charge..."
- Apache 2.0: "Licensed under the Apache License, Version 2.0"
- GPL-3.0: "GNU GENERAL PUBLIC LICENSE Version 3"
- BSD-2-Clause: "Redistribution and use in source and binary forms"
Detect incomplete licenses:
- Just "MIT" or "MIT License" (missing full text)
- Just "Apache 2.0" (missing full text)
- Links to license without including text
```
5. **Check Consistency**
```
Compare detected license with plugin.json:
- Exact match: ✅ PASS
- Close match (e.g., "MIT" vs "MIT License"): ⚠️ WARNING
- Mismatch: ❌ ERROR
- Not specified in plugin.json: ⚠️ WARNING
Normalize license names for comparison:
- "MIT License" == "MIT"
- "Apache-2.0" == "Apache License 2.0"
- "GPL-3.0" == "GNU GPL v3"
```
6. **Verify OSI Approval**
```
Check against OSI-approved license list:
- MIT: ✅ Approved
- Apache-2.0: ✅ Approved
- GPL-2.0, GPL-3.0: ✅ Approved
- BSD-2-Clause, BSD-3-Clause: ✅ Approved
- Proprietary: ❌ Not approved
- Custom/Unknown: ⚠️ Review required
```
7. **Format Output**
```
Display:
- ✅/❌ File presence
- Detected license type
- OSI approval status
- Consistency with plugin.json
- Completeness (full text vs name only)
- Issues and recommendations
```
### Examples
```bash
# Check LICENSE with defaults (reads expected from plugin.json)
/documentation-validation license path:.
# Check with explicit expected license
/documentation-validation license path:. expected:MIT
# Strict validation (requires full license text)
/documentation-validation license path:. strict:true
# Skip consistency check (only validate file)
/documentation-validation license path:. check-consistency:false
# Check specific plugin
/documentation-validation license path:/path/to/plugin expected:Apache-2.0
```
### Error Handling
**Error: LICENSE file not found**
```
❌ CRITICAL: LICENSE file not found in <path>
Remediation:
1. Create LICENSE file in plugin root directory
2. Include full license text (not just the name)
3. Use an OSI-approved open-source license (MIT recommended)
4. Ensure license field in plugin.json matches LICENSE file
Recommended licenses for plugins:
- MIT: Simple, permissive (most common)
- Apache 2.0: Permissive with patent grant
- GPL-3.0: Copyleft (requires derivatives to use same license)
- BSD-3-Clause: Permissive, similar to MIT
Full license texts available at: https://choosealicense.com/
This is a BLOCKING issue - plugin cannot be submitted without a LICENSE.
```
**Error: Incomplete license text**
```
⚠️ WARNING: LICENSE file contains only license name, not full text
Current content: "MIT License"
Required: Full MIT License text
The LICENSE file should contain the complete license text, not just the name.
For MIT License, include:
MIT License
Copyright (c) [year] [fullname]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction...
[full license text]
Get full text: https://opensource.org/licenses/MIT
```
**Error: License mismatch with plugin.json**
```
❌ ERROR: LICENSE file does not match plugin.json declaration
plugin.json declares: "Apache-2.0"
LICENSE file contains: "MIT License"
Remediation:
1. Update plugin.json to declare "MIT" license, OR
2. Replace LICENSE file with Apache 2.0 license text
Consistency is required - both files must specify the same license.
```
**Error: Non-OSI-approved license**
```
❌ ERROR: License is not OSI-approved
Detected license: "Proprietary" or "Custom License"
OpenPlugins marketplace requires OSI-approved open-source licenses.
Recommended licenses:
- MIT License (most permissive)
- Apache License 2.0
- GNU GPL v3
- BSD 3-Clause
Choose a license: https://choosealicense.com/
OSI-approved list: https://opensource.org/licenses
This is a BLOCKING issue - plugin cannot be submitted with proprietary license.
```
**Error: Unrecognized license**
```
⚠️ WARNING: Unable to identify license type
The LICENSE file content does not match known license patterns.
Possible issues:
- Custom or modified license (not allowed)
- Corrupted or incomplete license text
- Non-standard format
Remediation:
1. Use standard, unmodified license text from official source
2. Choose from OSI-approved licenses
3. Do not modify standard license text (except copyright holder)
4. Get standard text from https://choosealicense.com/
If using a valid OSI license, ensure text matches standard format exactly.
```
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
LICENSE VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
File: ✅ LICENSE found
License Type: <detected-license>
Confidence: <0-100>% ✅
OSI Approved: ✅ Yes
Complete Text: ✅ Yes (full license included)
Consistency Check:
plugin.json declares: "<license>"
LICENSE file contains: "<detected-license>"
Match: ✅ Consistent
Validation: ✅ PASS
Recommendations:
- License is valid and properly formatted
- Meets OpenPlugins requirements
- Ready for submission
Overall: <PASS|WARNINGS|FAIL>
```
### Integration
This operation is invoked by:
- `/documentation-validation license path:.` (direct)
- `/documentation-validation full-docs path:.` (as part of complete validation)
- `/validation-orchestrator comprehensive path:.` (via orchestrator)
Results contribute to documentation quality score:
- Present, valid, consistent: +5 points
- Present but issues: 0 points (with warnings)
- Missing: BLOCKING issue (-20 points)
### Common License Patterns
**MIT License Detection**:
```
Pattern: "Permission is hereby granted, free of charge"
Confidence: 95%+
```
**Apache 2.0 Detection**:
```
Pattern: "Licensed under the Apache License, Version 2.0"
Confidence: 95%+
```
**GPL-3.0 Detection**:
```
Pattern: "GNU GENERAL PUBLIC LICENSE" + "Version 3"
Confidence: 95%+
```
**BSD Detection**:
```
Pattern: "Redistribution and use in source and binary forms"
Confidence: 90%+
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,193 @@
## Operation: Check README Completeness
Validate README.md completeness, structure, and quality against OpenPlugins standards.
### Parameters from $ARGUMENTS
- **path**: Target plugin/marketplace path (required)
- **sections**: Comma-separated required sections (optional, defaults to standard set)
- **min-length**: Minimum character count (optional, default: 500)
- **strict**: Enable strict validation mode (optional, default: false)
### README Requirements
**Required Sections** (case-insensitive matching):
1. **Overview/Description**: Plugin purpose and functionality
2. **Installation**: How to install and configure
3. **Usage**: How to use the plugin with examples
4. **Examples**: At least 2-3 concrete usage examples
5. **License**: License information or reference
**Quality Criteria**:
- Minimum 500 characters (configurable)
- No excessive placeholder text
- Proper markdown formatting
- Working links (if present)
- Code blocks properly formatted
### Workflow
1. **Locate README File**
```
Check for README.md in plugin root
If not found, check for README.txt or readme.md
If still not found, report critical error
```
2. **Execute README Checker Script**
```bash
Execute .scripts/readme-checker.py with parameters:
- File path to README.md
- Required sections list
- Minimum length threshold
- Strict mode flag
Script returns JSON with:
- sections_found: Array of detected sections
- sections_missing: Array of missing sections
- length: Character count
- quality_score: 0-100
- issues: Array of specific problems
```
3. **Analyze Results**
```
CRITICAL (blocking):
- README.md file missing
- Length < 200 characters
- Missing 3+ required sections
WARNING (should fix):
- Length < 500 characters
- Missing 1-2 required sections
- Missing examples section
RECOMMENDATION (nice to have):
- Add troubleshooting section
- Expand examples
- Add badges or visual elements
```
4. **Calculate Section Score**
```
score = 100
score -= (missing_required_sections × 15)
score -= (length < 500) ? 10 : 0
score -= (no_examples) ? 15 : 0
score = max(0, score)
```
5. **Format Output**
```
Display:
- ✅/❌ File presence
- ✅/⚠️/❌ Each required section
- Length statistics
- Quality score
- Specific improvement recommendations
```
### Examples
```bash
# Check README with defaults
/documentation-validation readme path:.
# Check with custom sections
/documentation-validation readme path:./my-plugin sections:"overview,installation,usage,examples,contributing,license"
# Strict validation with higher standards
/documentation-validation readme path:. min-length:1000 strict:true
# Check specific plugin
/documentation-validation readme path:/path/to/plugin sections:"overview,usage,license"
```
### Error Handling
**Error: README.md not found**
```
❌ CRITICAL: README.md file not found in <path>
Remediation:
1. Create README.md in plugin root directory
2. Include required sections: Overview, Installation, Usage, Examples, License
3. Ensure minimum 500 characters of meaningful content
4. See https://github.com/dhofheinz/open-plugins/blob/main/README.md for an example
This is a BLOCKING issue - plugin cannot be submitted without README.
```
**Error: README too short**
```
⚠️ WARNING: README.md is only <X> characters (minimum: 500)
Current length: <X> characters
Required: 500 characters minimum
Gap: <500-X> characters
Remediation:
- Expand installation instructions with examples
- Add 2-3 usage examples with code blocks
- Include configuration options
- Add troubleshooting section
```
**Error: Missing required sections**
```
❌ ERROR: Missing <N> required sections
Missing sections:
- Installation: How to install the plugin
- Examples: At least 2 concrete usage examples
- License: License information or reference to LICENSE file
Remediation:
Add each missing section with meaningful content.
See CONTRIBUTING.md for section requirements.
```
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
README VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
File: ✅ README.md found
Required Sections:
✅ Overview/Description
✅ Installation
✅ Usage
⚠️ Examples (found 1, recommended: 3+)
✅ License
Length: <X> characters (minimum: 500) ✅
Quality Score: <0-100>/100
Issues Found: <N>
Critical (blocking): <count>
Warnings (should fix): <count>
Recommendations: <count>
Top Recommendations:
1. Add 2 more usage examples with code blocks [+15 pts]
2. Expand installation section with configuration options [+5 pts]
3. Include troubleshooting section [+5 pts]
Overall: <PASS|WARNINGS|FAIL>
```
### Integration
This operation is invoked by:
- `/documentation-validation readme path:.` (direct)
- `/documentation-validation full-docs path:.` (as part of complete validation)
- `/validation-orchestrator comprehensive path:.` (via orchestrator)
Results feed into quality-analysis scoring system.
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,533 @@
## Operation: Full Documentation Validation
Execute comprehensive documentation validation workflow covering all documentation aspects.
### Parameters from $ARGUMENTS
- **path**: Target plugin/marketplace path (required)
- **detailed**: Include detailed sub-reports (optional, default: true)
- **fix-suggestions**: Generate actionable improvement suggestions (optional, default: true)
- **format**: Output format (text|json|markdown) (optional, default: text)
### Full Documentation Workflow
This operation orchestrates all documentation validation sub-operations to provide
a complete documentation quality assessment.
### Workflow
1. **Initialize Validation Context**
```
Create validation context:
- Target path
- Timestamp
- Validation mode: comprehensive
- Results storage structure
Prepare for aggregating results from:
- README validation
- CHANGELOG validation
- LICENSE validation
- Examples validation
```
2. **Execute README Validation**
```
Invoke: check-readme.md operation
Parameters:
- path: <target-path>
- sections: default required sections
- min-length: 500
Capture results:
- README present: Boolean
- Sections found: Array
- Sections missing: Array
- Length: Integer
- Score: 0-100
- Issues: Array
```
3. **Execute CHANGELOG Validation**
```
Invoke: validate-changelog.md operation
Parameters:
- file: CHANGELOG.md
- format: keepachangelog
- require-unreleased: true
Capture results:
- CHANGELOG present: Boolean
- Format compliance: 0-100%
- Version entries: Array
- Issues: Array
- Score: 0-100
```
4. **Execute LICENSE Validation**
```
Invoke: check-license.md operation
Parameters:
- path: <target-path>
- check-consistency: true
Capture results:
- LICENSE present: Boolean
- License type: String
- OSI approved: Boolean
- Consistent with manifest: Boolean
- Issues: Array
- Score: 0-100
```
5. **Execute Examples Validation**
```
Invoke: validate-examples.md operation
Parameters:
- path: <target-path>
- no-placeholders: true
- recursive: true
Capture results:
- Files checked: Integer
- Examples found: Integer
- Placeholders detected: Integer
- Quality score: 0-100
- Issues: Array
```
6. **Aggregate Results**
```
Calculate overall documentation score:
weights = {
readme: 40%, # Most important
examples: 30%, # Critical for usability
license: 20%, # Required for submission
changelog: 10% # Recommended but not critical
}
overall_score = (
readme_score × 0.40 +
examples_score × 0.30 +
license_score × 0.20 +
changelog_score × 0.10
)
Round to integer: 0-100
```
7. **Categorize Issues by Priority**
```
CRITICAL (P0 - Blocking):
- README.md missing
- LICENSE file missing
- README < 200 characters
- Non-OSI-approved license
- License mismatch with manifest
IMPORTANT (P1 - Should Fix):
- README missing 2+ required sections
- README < 500 characters
- No examples in README
- 5+ placeholder patterns
- CHANGELOG has format errors
RECOMMENDED (P2 - Nice to Have):
- CHANGELOG missing
- README missing optional sections
- < 3 examples
- Minor placeholder patterns
```
8. **Generate Improvement Roadmap**
```
Create prioritized action plan:
For each issue:
- Identify impact on overall score
- Estimate effort (Low/Medium/High)
- Calculate score improvement
- Generate specific remediation steps
Sort by: Priority → Score Impact → Effort
Example:
1. [P0] Add LICENSE file → +20 pts → 15 min
2. [P1] Expand README to 500+ chars → +10 pts → 30 min
3. [P1] Add 2 usage examples → +15 pts → 20 min
4. [P2] Create CHANGELOG.md → +10 pts → 15 min
```
9. **Determine Publication Readiness**
```
Publication readiness determination:
READY (90-100):
- All critical requirements met
- High-quality documentation
- No blocking issues
- Immediate submission recommended
READY WITH MINOR IMPROVEMENTS (75-89):
- Critical requirements met
- Some recommended improvements
- Can submit, but improvements increase quality
- Suggested: Address P1 issues before submission
NEEDS WORK (60-74):
- Critical requirements met
- Several important issues
- Should address P1 issues before submission
- Documentation needs expansion
NOT READY (<60):
- Critical issues present
- Insufficient documentation quality
- Must address P0 and P1 issues
- Submission will be rejected
```
10. **Format Output**
```
Based on format parameter:
- text: Human-readable report
- json: Structured JSON for automation
- markdown: Formatted markdown report
```
### Examples
```bash
# Full documentation validation with defaults
/documentation-validation full-docs path:.
# With detailed sub-reports
/documentation-validation full-docs path:. detailed:true
# JSON output for automation
/documentation-validation full-docs path:. format:json
# Without fix suggestions (faster)
/documentation-validation full-docs path:. fix-suggestions:false
# Validate specific plugin
/documentation-validation full-docs path:/path/to/plugin
```
### Error Handling
**Error: Multiple critical issues**
```
❌ CRITICAL: Multiple blocking documentation issues
Documentation Score: <score>/100 ⚠️
BLOCKING ISSUES (<count>):
1. README.md not found
→ Create README.md with required sections
→ Minimum 500 characters
→ Include Overview, Installation, Usage, Examples, License
2. LICENSE file not found
→ Create LICENSE file with OSI-approved license
→ MIT License recommended
→ Must match plugin.json license field
3. License mismatch
→ plugin.json declares "Apache-2.0"
→ LICENSE file contains "MIT"
→ Update one to match the other
IMPORTANT ISSUES (<count>):
- README missing Examples section
- No code examples found
- CHANGELOG.md recommended
YOUR NEXT STEPS:
1. Add LICENSE file (CRITICAL - 15 minutes)
2. Create comprehensive README.md (CRITICAL - 30 minutes)
3. Add 3 usage examples (IMPORTANT - 20 minutes)
After addressing critical issues, revalidate with:
/documentation-validation full-docs path:.
```
**Error: Documentation too sparse**
```
⚠️ WARNING: Documentation exists but is too sparse
Documentation Score: 65/100 ⚠️
Your documentation meets minimum requirements but needs expansion
for professional quality.
AREAS NEEDING IMPROVEMENT:
1. README is only 342 characters (minimum: 500)
→ Expand installation instructions
→ Add more detailed usage examples
→ Include troubleshooting section
2. Only 1 example found (recommended: 3+)
→ Add basic usage example
→ Add advanced example
→ Add configuration example
3. CHANGELOG missing
→ Create CHANGELOG.md
→ Use Keep a Changelog format
→ Document version 1.0.0 features
IMPACT:
Current: 65/100 (Fair)
After improvements: ~85/100 (Good)
Time investment: ~45 minutes
Quality improvement: +20 points
```
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPREHENSIVE DOCUMENTATION VALIDATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: <path>
Type: <marketplace|plugin>
Timestamp: <YYYY-MM-DD HH:MM:SS>
OVERALL DOCUMENTATION SCORE: <0-100>/100 <⭐⭐⭐⭐⭐>
Rating: <Excellent|Good|Fair|Needs Improvement|Poor>
Publication Ready: <Yes|Yes with improvements|Needs work|No>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPONENT SCORES
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
README (Weight: 40%)
Score: <0-100>/100 ✅
Status: ✅ Complete and comprehensive
Sections: <N>/5 required sections found
Length: <N> characters (minimum: 500) ✅
Issues: None
EXAMPLES (Weight: 30%)
Score: <0-100>/100 ⚠️
Status: ⚠️ Could be improved
Examples found: <N> (recommended: 3+)
Placeholders: <N> detected
Issues: <N> placeholder patterns found
LICENSE (Weight: 20%)
Score: <0-100>/100 ✅
Status: ✅ Valid and consistent
Type: MIT License
OSI Approved: ✅ Yes
Consistency: ✅ Matches plugin.json
Issues: None
CHANGELOG (Weight: 10%)
Score: <0-100>/100 ⚠️
Status: ⚠️ Missing (recommended but not required)
Format: N/A
Versions: 0
Issues: CHANGELOG.md not found
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ISSUES SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Critical (P0 - Blocking): <count>
Important (P1 - Should Fix): <count>
Recommended (P2 - Nice to Have): <count>
CRITICAL ISSUES:
[None - Ready for submission] ✅
IMPORTANT ISSUES:
⚠️ 1. Add 2 more usage examples to README
Impact: +15 points
Effort: Low (20 minutes)
⚠️ 2. Replace 3 placeholder patterns in examples
Impact: +10 points
Effort: Low (10 minutes)
RECOMMENDATIONS:
💡 1. Create CHANGELOG.md for version tracking
Impact: +10 points
Effort: Low (15 minutes)
💡 2. Add troubleshooting section to README
Impact: +5 points
Effort: Low (15 minutes)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
IMPROVEMENT ROADMAP
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Current Score: <score>/100
Target Score: 90/100 (Excellent - Publication Ready)
Gap: <gap> points
RECOMMENDED ACTIONS (to reach 90+):
1. [+15 pts] Add usage examples
Priority: High
Effort: 20 minutes
Description:
- Add 2 more concrete usage examples to README
- Include basic, intermediate, and advanced scenarios
- Use real plugin commands and parameters
2. [+10 pts] Clean up placeholder patterns
Priority: Medium
Effort: 10 minutes
Description:
- Replace "YOUR_VALUE" patterns with concrete examples
- Complete or remove TODO markers
- Use template syntax (${VAR}) for user-provided values
3. [+10 pts] Create CHANGELOG.md
Priority: Medium
Effort: 15 minutes
Description:
- Use Keep a Changelog format
- Document version 1.0.0 initial release
- Add [Unreleased] section for future changes
AFTER IMPROVEMENTS:
Projected Score: ~90/100 ⭐⭐⭐⭐⭐
Time Investment: ~45 minutes
Status: Excellent - Ready for submission
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
PUBLICATION READINESS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Status: ✅ READY WITH MINOR IMPROVEMENTS
Your plugin documentation meets all critical requirements and is ready
for submission to OpenPlugins marketplace. The recommended improvements
above will increase quality score and provide better user experience.
✅ Strengths:
- Comprehensive README with all required sections
- Valid OSI-approved license (MIT)
- License consistent with plugin.json
- Good documentation structure
⚠️ Improvement Opportunities:
- Add more usage examples for better user onboarding
- Create CHANGELOG for version tracking
- Clean up minor placeholder patterns
NEXT STEPS:
1. (Optional) Address recommended improvements (~45 min)
2. Run validation again to verify improvements
3. Submit to OpenPlugins marketplace
Command to revalidate:
/documentation-validation full-docs path:.
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
```
### Integration
This operation is the primary entry point for complete documentation validation:
**Invoked by**:
- `/documentation-validation full-docs path:.` (direct invocation)
- `/validation-orchestrator comprehensive path:.` (as part of full plugin validation)
- marketplace-validator agent (automatic documentation assessment)
**Invokes sub-operations**:
- `/documentation-validation readme path:.`
- `/documentation-validation changelog file:CHANGELOG.md`
- `/documentation-validation license path:.`
- `/documentation-validation examples path:.`
**Feeds results to**:
- `/quality-analysis full-analysis` (for overall quality scoring)
- `/quality-analysis generate-report` (for report generation)
### JSON Output Format
When `format:json` is specified:
```json
{
"validation_type": "full-documentation",
"target_path": "/path/to/plugin",
"timestamp": "2025-01-15T10:30:00Z",
"overall_score": 85,
"rating": "Good",
"publication_ready": "yes_with_improvements",
"components": {
"readme": {
"score": 90,
"status": "pass",
"present": true,
"sections_found": 5,
"sections_missing": 0,
"length": 1234,
"issues": []
},
"changelog": {
"score": 70,
"status": "warning",
"present": true,
"compliance": 70,
"issues": ["Invalid version header format"]
},
"license": {
"score": 100,
"status": "pass",
"present": true,
"type": "MIT",
"osi_approved": true,
"consistent": true,
"issues": []
},
"examples": {
"score": 75,
"status": "warning",
"examples_found": 2,
"placeholders_detected": 3,
"issues": ["Placeholder patterns detected"]
}
},
"issues": {
"critical": [],
"important": [
{
"component": "examples",
"message": "Add 2 more usage examples",
"impact": 15,
"effort": "low"
}
],
"recommended": [
{
"component": "readme",
"message": "Add troubleshooting section",
"impact": 5,
"effort": "low"
}
]
},
"improvement_roadmap": [
{
"action": "Add usage examples",
"points": 15,
"priority": "high",
"effort": "20 minutes"
}
],
"projected_score_after_improvements": 95
}
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,99 @@
---
description: Validate documentation completeness, format, and quality for plugins and marketplaces
---
You are the Documentation Validation coordinator, ensuring comprehensive and high-quality documentation.
## Your Mission
Parse `$ARGUMENTS` to determine the requested documentation validation operation and route to the appropriate sub-command.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **readme** → Read `.claude/commands/documentation-validation/check-readme.md`
- **changelog** → Read `.claude/commands/documentation-validation/validate-changelog.md`
- **license** → Read `.claude/commands/documentation-validation/check-license.md`
- **examples** → Read `.claude/commands/documentation-validation/validate-examples.md`
- **full-docs** → Read `.claude/commands/documentation-validation/full-documentation.md`
## Argument Format
```
/documentation-validation <operation> [parameters]
```
### Examples
```bash
# Check README completeness
/documentation-validation readme path:. sections:"overview,installation,usage,examples"
# Validate CHANGELOG format
/documentation-validation changelog file:CHANGELOG.md format:keepachangelog
# Check LICENSE file
/documentation-validation license path:. expected:MIT
# Validate example quality
/documentation-validation examples path:. no-placeholders:true
# Run complete documentation validation
/documentation-validation full-docs path:.
```
## Documentation Standards
**README.md Requirements**:
- Overview/Description section
- Installation instructions
- Usage examples (minimum 2)
- Configuration options (if applicable)
- License information
- Length: Minimum 500 characters
**CHANGELOG.md Requirements**:
- Keep a Changelog format
- Version headers ([X.Y.Z] - YYYY-MM-DD)
- Change categories: Added, Changed, Deprecated, Removed, Fixed, Security
- Unreleased section for upcoming changes
**LICENSE Requirements**:
- LICENSE or LICENSE.txt file present
- Valid OSI-approved license
- License matches plugin.json declaration
**Examples Requirements**:
- No placeholder text (TODO, FIXME, XXX, placeholder)
- Complete, runnable examples
- Real values, not dummy data
- Proper formatting and syntax
## Quality Scoring
Documentation contributes to overall quality score:
- Complete README: +15 points
- CHANGELOG present: +10 points
- LICENSE valid: +5 points
- Quality examples: +10 points
## Error Handling
If the operation is not recognized:
1. List all available documentation operations
2. Show documentation standards
3. Provide improvement suggestions
## Base Directory
Base directory for this skill: `.claude/commands/documentation-validation/`
## Your Task
1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute documentation validation checks
4. Return detailed findings with specific improvement guidance
**Current Request**: $ARGUMENTS

View File

@@ -0,0 +1,286 @@
## Operation: Validate CHANGELOG Format
Validate CHANGELOG.md format compliance with "Keep a Changelog" standard.
### Parameters from $ARGUMENTS
- **file**: Path to CHANGELOG file (optional, default: CHANGELOG.md)
- **format**: Expected format (optional, default: keepachangelog)
- **strict**: Enable strict validation (optional, default: false)
- **require-unreleased**: Require [Unreleased] section (optional, default: true)
### CHANGELOG Requirements
**Keep a Changelog Format** (https://keepachangelog.com/):
```markdown
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- New features not yet released
## [1.0.0] - 2025-01-15
### Added
- Initial release feature
### Changed
- Modified behavior
### Fixed
- Bug fixes
```
**Required Elements**:
1. **Title**: "Changelog" or "Change Log"
2. **Version Headers**: `## [X.Y.Z] - YYYY-MM-DD` format
3. **Change Categories**: Added, Changed, Deprecated, Removed, Fixed, Security
4. **Unreleased Section**: `## [Unreleased]` for upcoming changes
5. **Chronological Order**: Newest versions first
**Valid Change Categories**:
- **Added**: New features
- **Changed**: Changes in existing functionality
- **Deprecated**: Soon-to-be removed features
- **Removed**: Removed features
- **Fixed**: Bug fixes
- **Security**: Security vulnerability fixes
### Workflow
1. **Locate CHANGELOG File**
```
Check for CHANGELOG.md in plugin root
Also check: CHANGELOG, CHANGELOG.txt, changelog.md, HISTORY.md
If not found, report as missing (WARNING, not CRITICAL)
```
2. **Execute CHANGELOG Validator**
```bash
Execute .scripts/changelog-validator.sh with parameters:
- File path to CHANGELOG
- Expected format (keepachangelog)
- Strict mode flag
- Require unreleased flag
Script returns:
- has_title: Boolean
- has_unreleased: Boolean
- version_headers: Array of version entries
- categories_used: Array of change categories
- issues: Array of format violations
- compliance_score: 0-100
```
3. **Validate Version Headers**
```
For each version header:
- Check format: ## [X.Y.Z] - YYYY-MM-DD
- Validate semantic version (X.Y.Z)
- Validate date format (YYYY-MM-DD)
- Check chronological order (newest first)
Common violations:
- Missing brackets: ## 1.0.0 - 2025-01-15 (should be [1.0.0])
- Wrong date format: ## [1.0.0] - 01/15/2025
- Invalid semver: ## [1.0] - 2025-01-15
```
4. **Validate Change Categories**
```
For each version section:
- Check for valid category headers (### Added, ### Fixed, etc.)
- Warn if no categories used
- Recommend appropriate categories
Invalid category examples:
- "### New Features" (should be "### Added")
- "### Bugs" (should be "### Fixed")
- "### Updates" (should be "### Changed")
```
5. **Calculate Compliance Score**
```
score = 100
score -= (!has_title) ? 10 : 0
score -= (!has_unreleased) ? 15 : 0
score -= (invalid_version_headers × 10)
score -= (invalid_categories × 5)
score -= (wrong_date_format × 5)
score = max(0, score)
```
6. **Format Output**
```
Display:
- ✅/⚠️/❌ File presence
- ✅/❌ Format compliance
- ✅/⚠️ Version headers
- ✅/⚠️ Change categories
- Compliance score
- Specific violations
- Improvement recommendations
```
### Examples
```bash
# Validate default CHANGELOG.md
/documentation-validation changelog file:CHANGELOG.md
# Validate with custom path
/documentation-validation changelog file:./HISTORY.md
# Strict validation (all elements required)
/documentation-validation changelog file:CHANGELOG.md strict:true
# Don't require Unreleased section
/documentation-validation changelog file:CHANGELOG.md require-unreleased:false
# Part of full documentation check
/documentation-validation full-docs path:.
```
### Error Handling
**Error: CHANGELOG not found**
```
⚠️ WARNING: CHANGELOG.md not found in <path>
Remediation:
1. Create CHANGELOG.md in plugin root directory
2. Use "Keep a Changelog" format (https://keepachangelog.com/)
3. Include [Unreleased] section for upcoming changes
4. Document version history with proper headers
Example:
# Changelog
## [Unreleased]
### Added
- Features in development
## [1.0.0] - 2025-01-15
### Added
- Initial release
Note: CHANGELOG is recommended but not required for initial submission.
It becomes important for version updates.
```
**Error: Invalid version header format**
```
❌ ERROR: Invalid version header format detected
Invalid headers found:
- Line 10: "## 1.0.0 - 2025-01-15" (missing brackets)
- Line 25: "## [1.0] - 01/15/2025" (invalid semver and date format)
Correct format:
## [X.Y.Z] - YYYY-MM-DD
Examples:
- ## [1.0.0] - 2025-01-15
- ## [2.1.3] - 2024-12-20
- ## [0.1.0] - 2024-11-05
Remediation:
1. Add brackets around version numbers: [1.0.0]
2. Use semantic versioning: MAJOR.MINOR.PATCH
3. Use ISO date format: YYYY-MM-DD
```
**Error: Missing Unreleased section**
```
⚠️ WARNING: Missing [Unreleased] section
The Keep a Changelog format recommends an [Unreleased] section for tracking
upcoming changes before they're officially released.
Add to top of CHANGELOG (after title):
## [Unreleased]
### Added
- Features in development
### Changed
- Planned changes
```
**Error: Invalid change categories**
```
⚠️ WARNING: Non-standard change categories detected
Invalid categories found:
- "### New Features" (should be "### Added")
- "### Bug Fixes" (should be "### Fixed")
- "### Updates" (should be "### Changed")
Valid categories:
- Added: New features
- Changed: Changes in existing functionality
- Deprecated: Soon-to-be removed features
- Removed: Removed features
- Fixed: Bug fixes
- Security: Security vulnerability fixes
Remediation:
Replace non-standard categories with Keep a Changelog categories.
```
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CHANGELOG VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
File: ✅ CHANGELOG.md found
Format: Keep a Changelog
Compliance: <0-100>% ✅/⚠️/❌
Structure:
✅ Title present
✅ [Unreleased] section present
✅ Version headers formatted correctly
✅ Change categories valid
Version Entries: <count>
- [1.0.0] - 2025-01-15 ✅
- [0.2.0] - 2024-12-20 ✅
- [0.1.0] - 2024-11-05 ✅
Change Categories Used:
✅ Added (3 versions)
✅ Changed (2 versions)
✅ Fixed (3 versions)
Issues Found: <N>
Violations:
<List specific issues if any>
Recommendations:
1. Add Security category for vulnerability fixes
2. Expand [Unreleased] section with upcoming features
3. Add links to version comparison (optional)
Overall: <PASS|WARNINGS|FAIL>
```
### Integration
This operation is invoked by:
- `/documentation-validation changelog file:CHANGELOG.md` (direct)
- `/documentation-validation full-docs path:.` (as part of complete validation)
- `/validation-orchestrator comprehensive path:.` (via orchestrator)
Results contribute to documentation quality score:
- Present and compliant: +10 points
- Present but non-compliant: +5 points
- Missing: 0 points (warning but not blocking)
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,335 @@
## Operation: Validate Example Quality
Validate example code quality, detecting placeholders and ensuring examples are complete and runnable.
### Parameters from $ARGUMENTS
- **path**: Target plugin/marketplace path (required)
- **no-placeholders**: Strict placeholder enforcement (optional, default: true)
- **recursive**: Check all markdown and code files recursively (optional, default: true)
- **extensions**: File extensions to check (optional, default: "md,txt,json,sh,py,js")
### Example Quality Requirements
**Complete Examples**:
- Concrete, runnable code or commands
- Real values, not placeholder text
- Proper syntax and formatting
- Context and explanations
- Expected output or results
**No Placeholder Patterns**:
- **TODO**: `TODO`, `@TODO`, `// TODO:`
- **FIXME**: `FIXME`, `@FIXME`, `// FIXME:`
- **XXX**: `XXX`, `@XXX`, `// XXX:`
- **Placeholders**: `placeholder`, `PLACEHOLDER`, `your-value-here`, `<your-value>`, `[YOUR-VALUE]`
- **Generic**: `example`, `sample`, `test`, `dummy`, `foo`, `bar`, `baz`
- **User substitution**: `<username>`, `<your-email>`, `your-api-key`, `INSERT-HERE`
**Acceptable Patterns** (not placeholders):
- Template variables: `{{variable}}`, `${variable}`, `$VARIABLE`
- Documentation examples: `<name>`, `[optional]` in usage syntax
- Actual values: Real plugin names, real commands, concrete examples
### Workflow
1. **Identify Files to Validate**
```
Scan plugin directory for documentation files:
- README.md (primary source)
- CONTRIBUTING.md
- docs/**/*.md
- examples/**/*
- *.sh, *.py, *.js (example scripts)
If recursive:false, only check README.md
```
2. **Execute Example Validator**
```bash
Execute .scripts/example-validator.sh with parameters:
- Path to plugin directory
- No-placeholders flag
- Recursive flag
- File extensions to check
Script returns:
- files_checked: Count of files analyzed
- placeholders_found: Array of placeholder instances
- files_with_issues: Array of files containing placeholders
- example_count: Number of code examples found
- quality_score: 0-100
```
3. **Detect Placeholder Patterns**
```bash
Search for patterns (case-insensitive):
# TODO/FIXME/XXX markers
grep -iE '(TODO|FIXME|XXX|HACK)[:)]' <files>
# Placeholder text
grep -iE '(placeholder|your-.*-here|<your-|INSERT.?HERE)' <files>
# Generic dummy values
grep -iE '\b(foo|bar|baz|dummy|sample|test)\b' <files>
# User substitution patterns
grep -iE '(<username>|<email>|<api-key>|YOUR_[A-Z_]+)' <files>
# Exclude:
- Comments explaining placeholders
- Documentation of template syntax
- Proper template variables ({{x}}, ${x})
```
4. **Analyze Code Blocks**
```
For each code block in markdown:
- Extract language and content
- Check for placeholder patterns
- Verify syntax highlighting specified
- Ensure examples are complete
Example extraction:
```bash
/plugin install my-plugin@marketplace ✅ Concrete
/plugin install <plugin-name> ⚠️ Documentation (acceptable)
/plugin install YOUR_PLUGIN ❌ Placeholder
```
```
5. **Count and Categorize Examples**
```
Count examples by type:
- Command examples: /plugin install ...
- Configuration examples: JSON snippets
- Code examples: Script samples
- Usage examples: Real-world scenarios
Quality criteria:
- At least 2-3 concrete examples
- Examples cover primary use cases
- Examples are copy-pasteable
```
6. **Calculate Quality Score**
```
score = 100
score -= (placeholder_instances × 10) # -10 per placeholder
score -= (todo_markers × 5) # -5 per TODO/FIXME
score -= (example_count < 2) ? 20 : 0 # -20 if < 2 examples
score -= (incomplete_examples × 15) # -15 per incomplete example
score = max(0, score)
```
7. **Format Output**
```
Display:
- Files checked count
- Examples found count
- Placeholders detected
- Quality score
- Specific issues with file/line references
- Improvement recommendations
```
### Examples
```bash
# Validate examples with strict placeholder checking (default)
/documentation-validation examples path:.
# Check only README.md (non-recursive)
/documentation-validation examples path:. recursive:false
# Allow placeholders (lenient mode)
/documentation-validation examples path:. no-placeholders:false
# Check specific file extensions
/documentation-validation examples path:. extensions:"md,sh,py"
# Strict validation of examples directory
/documentation-validation examples path:./examples no-placeholders:true recursive:true
```
### Error Handling
**Error: Placeholders detected**
```
⚠️ WARNING: Placeholder patterns detected in examples
Placeholders found: <N> instances across <M> files
README.md:
- Line 45: /plugin install YOUR_PLUGIN_NAME
^ Should be concrete plugin name
- Line 67: API_KEY=your-api-key-here
^ Should be removed or use template syntax
examples/usage.sh:
- Line 12: # TODO: Add authentication example
^ Complete example or remove TODO
Remediation:
1. Replace "YOUR_PLUGIN_NAME" with actual plugin name
2. Use template syntax for user-provided values: ${API_KEY}
3. Remove TODO markers - complete examples or remove them
4. Provide concrete, copy-pasteable examples
Acceptable patterns:
- Template variables: ${VARIABLE}, {{variable}}
- Documentation syntax: <name> in usage descriptions
- Generic placeholders in template explanations
```
**Error: Too few examples**
```
⚠️ WARNING: Insufficient examples in documentation
Examples found: <N> (minimum recommended: 3)
README.md contains <N> code examples:
- Installation example ✅
- Basic usage ❌ Missing
- Advanced usage ❌ Missing
Remediation:
Add at least 2-3 concrete usage examples showing:
1. Basic usage (most common scenario)
2. Common configuration options
3. Advanced or specialized use case
Example structure:
```bash
# Basic usage
/my-plugin action param:value
# With options
/my-plugin action param:value option:true
# Advanced example
/my-plugin complex-action config:custom nested:value
```
Good examples are copy-pasteable and use real values.
```
**Error: Incomplete examples**
```
⚠️ WARNING: Incomplete or broken examples detected
Incomplete examples: <N>
README.md:
- Line 34: Code block with syntax error
- Line 56: Example missing expected output
- Line 78: Example truncated with "..."
Remediation:
1. Ensure all code examples are syntactically valid
2. Show expected output or results after examples
3. Complete truncated examples (no "..." placeholders)
4. Test examples before including in documentation
Example format:
```bash
# Command with description
/plugin install example-plugin@marketplace
# Expected output:
# ✓ Installing example-plugin@marketplace
# ✓ Plugin installed successfully
```
```
**Error: Generic dummy values**
```
⚠️ WARNING: Generic placeholder values detected
Generic values found:
- README.md:45 - "foo", "bar" used as example values
- examples/config.json:12 - "sample" as placeholder
While "foo" and "bar" are common placeholder names in documentation,
concrete examples are more helpful for users.
Remediation:
Replace generic values with realistic examples:
- Instead of "foo", use actual plugin name
- Instead of "bar", use real parameter value
- Instead of "sample", use concrete example
Good: /my-plugin process file:README.md
Bad: /my-plugin process file:foo.txt
```
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EXAMPLE QUALITY VALIDATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Files Checked: <N>
Code Examples Found: <N>
Example Count by Type:
- Command examples: <N> ✅
- Configuration examples: <N> ✅
- Usage examples: <N> ⚠️ (recommend 3+)
Placeholder Detection:
TODO/FIXME markers: <N> ❌
Placeholder patterns: <N> ❌
Generic values (foo/bar): <N> ⚠️
Quality Score: <0-100>/100
Issues by File:
README.md: <N> issues
├─ Line 45: YOUR_PLUGIN_NAME (placeholder)
├─ Line 67: TODO marker
└─ Line 89: Generic "foo" value
examples/usage.sh: <N> issues
└─ Line 12: Incomplete example
Recommendations:
1. Replace <N> placeholder patterns with concrete values [+10 pts]
2. Complete or remove <N> TODO markers [+5 pts]
3. Add <N> more usage examples [+15 pts]
Overall: <PASS|WARNINGS|FAIL>
```
### Integration
This operation is invoked by:
- `/documentation-validation examples path:.` (direct)
- `/documentation-validation full-docs path:.` (as part of complete validation)
- `/validation-orchestrator comprehensive path:.` (via orchestrator)
Results contribute to documentation quality score:
- High-quality examples (90+): +10 points
- Some issues (60-89): +5 points
- Poor quality (<60): 0 points
- Missing examples: -10 points
### Special Cases
**Template Documentation**:
If the plugin provides templates or scaffolding, some placeholders
are acceptable when properly documented as template variables.
Example:
```markdown
The generated code includes template variables:
- {{PROJECT_NAME}} - Will be replaced with actual project name
- {{AUTHOR}} - Will be replaced with author information
```
This is acceptable because the placeholders are documented as
intentional template syntax.
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,305 @@
#!/usr/bin/env bash
# ============================================================================
# Issue Prioritization Script
# ============================================================================
# Purpose: Categorize and prioritize validation issues into P0/P1/P2 tiers
# Version: 1.0.0
# Usage: ./issue-prioritizer.sh <issues-json-file> [criteria]
# Returns: 0=success, 1=error
# Dependencies: jq, bash 4.0+
# ============================================================================
set -euo pipefail
# Configuration
# NOTE(review): SCRIPT_DIR and the ANSI color constants below are not
# referenced anywhere in this script — presumably kept for parity with
# sibling scripts; confirm before removing.
# shellcheck disable=SC2034
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC2034
readonly RED='\033[0;31m'
# shellcheck disable=SC2034
readonly YELLOW='\033[1;33m'
# shellcheck disable=SC2034
readonly BLUE='\033[0;34m'
# shellcheck disable=SC2034
readonly NC='\033[0m' # No Color
# Priority definitions
# Associative arrays require bash 4.0+ (see Dependencies above).
# Human-readable tier names, keyed by priority number (0=highest).
declare -A PRIORITY_NAMES=(
  [0]="Critical - Must Fix"
  [1]="Important - Should Fix"
  [2]="Recommended - Nice to Have"
)
# Display icon per priority tier, used by format_issue().
declare -A PRIORITY_ICONS=(
  [0]="❌"
  [1]="⚠️ "
  [2]="💡"
)
# Effort labels
# Display label per effort bucket returned by get_effort_estimate().
declare -A EFFORT_LABELS=(
  [low]="Low"
  [medium]="Medium"
  [high]="High"
)
# Effort time estimates
# Rough wall-clock estimate per effort bucket, shown next to each issue.
declare -A EFFORT_TIMES=(
  [low]="5-15 minutes"
  [medium]="30-60 minutes"
  [high]="2+ hours"
)
# ============================================================================
# Functions
# ============================================================================
#######################################
# Print CLI usage (including the expected issues-JSON schema) to stdout
# and exit the script with status 1.
# Globals:   none
# Arguments: none (reads $0 for the program name)
# Outputs:   usage text on stdout
#######################################
usage() {
  cat <<EOF
Usage: $0 <issues-json-file> [criteria]
Arguments:
issues-json-file Path to JSON file with validation issues
criteria Prioritization criteria: severity|impact|effort (default: severity)
Examples:
$0 validation-results.json
$0 results.json impact
$0 results.json severity
JSON Structure:
{
"errors": [{"type": "...", "severity": "critical", ...}],
"warnings": [{"type": "...", "severity": "important", ...}],
"recommendations": [{"type": "...", "severity": "recommended", ...}]
}
EOF
  exit 1
}
#######################################
# Verify that the external tools this script needs are installed.
# Globals:   none
# Arguments: none
# Outputs:   error message with install hint on stderr when tools are missing
# Returns:   0 if all dependencies present, 1 otherwise
#######################################
check_dependencies() {
  local -a missing_deps=()
  # jq is the only required external tool (JSON parsing).
  command -v jq &> /dev/null || missing_deps+=("jq")
  if [ ${#missing_deps[@]} -gt 0 ]; then
    echo "Error: Missing dependencies: ${missing_deps[*]}" >&2
    echo "Install with: sudo apt-get install ${missing_deps[*]}" >&2
    return 1
  fi
  return 0
}
#######################################
# Map an issue's severity and type onto a priority tier.
# P0 = blocking, P1 = should fix, P2 = nice to have.
# Arguments: $1 - severity string (critical|important|recommended|...)
#            $2 - issue type string
# Outputs:   "0", "1", or "2" on stdout
#######################################
determine_priority() {
  local severity="$1"
  local type="$2"
  # Type lists that force a tier regardless of the stated severity.
  local p0_types='missing_required|invalid_json|security_vulnerability|format_violation'
  local p1_types='missing_recommended|documentation_gap|convention_violation|performance'
  if [[ "$severity" == "critical" || "$type" =~ ^(${p0_types})$ ]]; then
    echo "0"
  elif [[ "$severity" == "important" || "$type" =~ ^(${p1_types})$ ]]; then
    echo "1"
  else
    # Anything unrecognized is a recommendation.
    echo "2"
  fi
}
#######################################
# Estimate the fix-effort bucket for an issue type.
# Arguments: $1 - issue type string
# Outputs:   "low", "medium", or "high" on stdout
#######################################
get_effort_estimate() {
  case "$1" in
    security_vulnerability|performance|architecture)
      echo "high"
      ;;
    documentation_gap|convention_violation|missing_recommended)
      echo "medium"
      ;;
    *)
      # Everything else is assumed to be a quick fix.
      echo "low"
      ;;
  esac
}
#######################################
# Render a single issue as a human-readable block on stdout.
# Globals:   PRIORITY_ICONS, EFFORT_LABELS, EFFORT_TIMES (read)
# Arguments: $1 - priority tier (0|1|2)
#            $2 - issue message
#            $3 - impact description (optional)
#            $4 - effort bucket low|medium|high (optional, default: low)
#            $5 - fix suggestion (optional)
# Outputs:   formatted issue text on stdout
#######################################
format_issue() {
  local priority="$1"
  local message="$2"
  local impact="${3:-Unknown impact}"
  local effort="${4:-low}"
  local fix="${5:-No fix suggestion available}"
  # Resolve display strings for this priority/effort bucket.
  local icon="${PRIORITY_ICONS[$priority]}"
  local effort_label="${EFFORT_LABELS[$effort]}"
  local effort_time="${EFFORT_TIMES[$effort]}"
  cat <<EOF
${icon} ${message}
Impact: ${impact}
Effort: ${effort_label} (${effort_time})
Fix: ${fix}
EOF
}
#######################################
# Read validation issues from a JSON file, assign each to a P0/P1/P2
# tier, and print a prioritized report.
# Globals:   PRIORITY_NAMES (read); format_issue's globals (indirectly)
# Arguments: $1 - path to issues JSON file
#            $2 - prioritization criteria (reserved; output order is
#                 currently always by severity tier)
# Outputs:   prioritized report on stdout, errors on stderr
# Returns:   0 on success, 1 on error
#######################################
process_issues() {
  local json_file="$1"
  # shellcheck disable=SC2034  # reserved for future sort orders
  local criteria="${2:-severity}"
  # Validate JSON file exists
  if [[ ! -f "$json_file" ]]; then
    echo "Error: File not found: $json_file" >&2
    return 1
  fi
  # Validate JSON syntax
  if ! jq empty "$json_file" 2>/dev/null; then
    echo "Error: Invalid JSON in $json_file" >&2
    return 1
  fi
  # Count total issues. Declarations are separated from assignments so a
  # failing jq call is not masked by 'local' (SC2155).
  local total_errors total_warnings total_recommendations
  total_errors=$(jq '.errors // [] | length' "$json_file")
  total_warnings=$(jq '.warnings // [] | length' "$json_file")
  total_recommendations=$(jq '.recommendations // [] | length' "$json_file")
  local total_issues=$((total_errors + total_warnings + total_recommendations))
  if [[ $total_issues -eq 0 ]]; then
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "ISSUE PRIORITIZATION"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    echo "No issues found! Quality score is perfect."
    return 0
  fi
  # Initialize priority counters
  declare -A priority_counts=([0]=0 [1]=0 [2]=0)
  declare -A priority_issues=([0]="" [1]="" [2]="")
  # Process each issue category with its per-category defaults. This
  # replaces three near-identical loops (errors/warnings/recommendations).
  local spec category default_severity default_message default_impact
  for spec in \
    "errors|critical|Unknown error|Unknown impact" \
    "warnings|important|Unknown warning|Unknown impact" \
    "recommendations|recommended|Recommendation|Minor quality improvement"; do
    IFS='|' read -r category default_severity default_message default_impact <<<"$spec"
    while IFS= read -r issue; do
      local type severity message impact fix priority effort formatted_issue
      type=$(echo "$issue" | jq -r '.type // "unknown"')
      severity=$(echo "$issue" | jq -r --arg d "$default_severity" '.severity // $d')
      message=$(echo "$issue" | jq -r --arg d "$default_message" '.message // $d')
      impact=$(echo "$issue" | jq -r --arg d "$default_impact" '.impact // $d')
      fix=$(echo "$issue" | jq -r '.fix // "No fix available"')
      priority=$(determine_priority "$severity" "$type")
      effort=$(get_effort_estimate "$type")
      priority_counts[$priority]=$((priority_counts[$priority] + 1))
      formatted_issue=$(format_issue "$priority" "$message" "$impact" "$effort" "$fix")
      priority_issues[$priority]+="$formatted_issue"
    done < <(jq -c --arg c "$category" '.[$c] // [] | .[]' "$json_file")
  done
  # Display results
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "ISSUE PRIORITIZATION"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  echo "Total Issues: $total_issues"
  echo ""
  # Display each priority tier (skipping empty tiers)
  local count name
  for priority in 0 1 2; do
    count=${priority_counts[$priority]}
    name="${PRIORITY_NAMES[$priority]}"
    if [[ $count -gt 0 ]]; then
      echo "Priority $priority ($name): $count"
      echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
      echo -e "${priority_issues[$priority]}"
    fi
  done
  # Summary
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "Summary:"
  echo "- Fix P0 issues first (blocking publication)"
  echo "- Address P1 issues for quality improvement"
  echo "- Consider P2 improvements for excellence"
  if [[ ${priority_counts[0]} -gt 0 ]]; then
    echo ""
    echo "⚠️ WARNING: ${priority_counts[0]} blocking issue(s) must be fixed before publication"
  fi
  return 0
}
# ============================================================================
# Main
# ============================================================================
#######################################
# Entry point: validate arguments and dependencies, then prioritize.
# Arguments: $1 - issues JSON file (required)
#            $2 - criteria (optional, default: severity)
# Returns:   0 on success, 1 on error; exits via usage() when no args
#######################################
main() {
  # A missing file argument is a usage error.
  (( $# >= 1 )) || usage
  local json_file="$1"
  local criteria="${2:-severity}"
  # Bail out early if required tools are absent.
  check_dependencies || return 1
  # Only the three documented criteria values are accepted.
  case "$criteria" in
    severity|impact|effort) ;;
    *)
      echo "Error: Invalid criteria '$criteria'. Use: severity|impact|effort" >&2
      return 1
      ;;
  esac
  process_issues "$json_file" "$criteria"
  return 0
}
main "$@"

View File

@@ -0,0 +1,541 @@
#!/usr/bin/env python3
# ============================================================================
# Quality Report Generator
# ============================================================================
# Purpose: Generate comprehensive quality reports in multiple formats
# Version: 1.0.0
# Usage: ./report-generator.py --path <path> --format <format> [options]
# Returns: 0=success, 1=error
# Dependencies: Python 3.6+
# ============================================================================
import sys
import argparse
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
class ReportGenerator:
"""Generate quality reports in multiple formats."""
def __init__(self, path: str, context: Optional[Dict] = None):
"""
Initialize report generator.
Args:
path: Target path being analyzed
context: Validation context with results
"""
self.path = path
self.context = context or {}
self.timestamp = datetime.now().isoformat()
def generate(self, format_type: str = "markdown") -> str:
"""
Generate report in specified format.
Args:
format_type: Report format (markdown, json, html)
Returns:
Formatted report string
"""
if format_type == "json":
return self._generate_json()
elif format_type == "html":
return self._generate_html()
else:
return self._generate_markdown()
    def _generate_markdown(self) -> str:
        """Generate markdown format report.

        Builds the report from self.context: executive summary, per-layer
        validation results, P0/P1/P2 issue breakdown, and an optional
        improvement roadmap. Missing context keys fall back to neutral
        defaults ("Unknown", 0, empty lists).
        """
        # Pull summary fields with safe defaults.
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")
        p0_count = len(self.context.get("issues", {}).get("p0", []))
        p1_count = len(self.context.get("issues", {}).get("p1", []))
        p2_count = len(self.context.get("issues", {}).get("p2", []))
        total_issues = p0_count + p1_count + p2_count
        target_type = self.context.get("target_type", "plugin")
        report = f"""# Quality Assessment Report
**Generated**: {self.timestamp}
**Target**: {self.path}
**Type**: Claude Code {target_type.capitalize()}
## Executive Summary
**Quality Score**: {score}/100 {stars} ({rating})
**Publication Ready**: {readiness}
**Critical Issues**: {p0_count}
**Total Issues**: {total_issues}
"""
        # One-line verdict keyed off the same thresholds as the rating tiers.
        if score >= 90:
            report += "🎉 Excellent! Your plugin is publication-ready.\n\n"
        elif score >= 75:
            report += "👍 Nearly ready! Address a few important issues to reach excellent status.\n\n"
        elif score >= 60:
            report += "⚠️ Needs work. Several issues should be addressed before publication.\n\n"
        else:
            report += "❌ Substantial improvements needed before this is ready for publication.\n\n"
        # Validation layers
        report += "## Validation Results\n\n"
        layers = self.context.get("validation_layers", {})
        for layer_name, layer_data in layers.items():
            status = layer_data.get("status", "unknown")
            issue_count = len(layer_data.get("issues", []))
            # Any status other than "pass"/"warnings" is rendered as FAIL.
            if status == "pass":
                status_icon = "✅ PASS"
            elif status == "warnings":
                status_icon = f"⚠️ WARNINGS ({issue_count} issues)"
            else:
                status_icon = f"❌ FAIL ({issue_count} issues)"
            report += f"### {layer_name.replace('_', ' ').title()} {status_icon}\n"
            if issue_count == 0:
                report += "- No issues found\n\n"
            else:
                for issue in layer_data.get("issues", [])[:3]: # Show top 3
                    report += f"- {issue.get('message', 'Unknown issue')}\n"
                if issue_count > 3:
                    report += f"- ... and {issue_count - 3} more\n"
                report += "\n"
        # Issues breakdown: P0/P1 listed in full, P2 truncated to five.
        report += "## Issues Breakdown\n\n"
        report += f"### Priority 0 (Critical): {p0_count} issues\n\n"
        if p0_count == 0:
            report += "None - excellent!\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p0", []), 1):
                report += self._format_issue_markdown(idx, issue)
        report += f"### Priority 1 (Important): {p1_count} issues\n\n"
        if p1_count == 0:
            report += "None - great!\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p1", []), 1):
                report += self._format_issue_markdown(idx, issue)
        report += f"### Priority 2 (Recommended): {p2_count} issues\n\n"
        if p2_count == 0:
            report += "No recommendations.\n\n"
        else:
            for idx, issue in enumerate(self.context.get("issues", {}).get("p2", [])[:5], 1):
                report += self._format_issue_markdown(idx, issue)
            if p2_count > 5:
                report += f"... and {p2_count - 5} more recommendations\n\n"
        # Improvement roadmap (only rendered when the context provides one).
        roadmap = self.context.get("improvement_roadmap", {})
        if roadmap:
            report += "## Improvement Roadmap\n\n"
            report += f"### Path to Excellent (90+)\n\n"
            report += f"**Current**: {roadmap.get('current_score', score)}/100\n"
            report += f"**Target**: {roadmap.get('target_score', 90)}/100\n"
            report += f"**Gap**: {roadmap.get('gap', 0)} points\n\n"
            recommendations = roadmap.get("recommendations", [])
            if recommendations:
                report += "**Top Recommendations**:\n\n"
                # At most five recommendations, highest-value first (assumes
                # the roadmap list is pre-sorted — TODO confirm with producer).
                for idx, rec in enumerate(recommendations[:5], 1):
                    report += f"{idx}. [{rec.get('score_impact', 0):+d} pts] {rec.get('title', 'Unknown')}\n"
                    report += f" - Priority: {rec.get('priority', 'Medium')}\n"
                    report += f" - Effort: {rec.get('effort', 'Unknown')}\n"
                    report += f" - Impact: {rec.get('impact', 'Unknown')}\n\n"
        # Footer
        report += "\n---\n"
        report += "Report generated by marketplace-validator-plugin v1.0.0\n"
        return report
def _format_issue_markdown(self, idx: int, issue: Dict) -> str:
"""Format a single issue in markdown."""
message = issue.get("message", "Unknown issue")
impact = issue.get("impact", "Unknown impact")
effort = issue.get("effort", "unknown")
fix = issue.get("fix", "No fix available")
score_impact = issue.get("score_impact", 0)
return f"""#### {idx}. {message} [{score_impact:+d} pts]
**Impact**: {impact}
**Effort**: {effort.capitalize()}
**Fix**: {fix}
"""
def _generate_json(self) -> str:
"""Generate JSON format report."""
score = self.context.get("score", 0)
rating = self.context.get("rating", "Unknown")
stars = self.context.get("stars", "")
readiness = self.context.get("publication_ready", "Unknown")
p0_issues = self.context.get("issues", {}).get("p0", [])
p1_issues = self.context.get("issues", {}).get("p1", [])
p2_issues = self.context.get("issues", {}).get("p2", [])
report = {
"metadata": {
"generated": self.timestamp,
"target": self.path,
"type": self.context.get("target_type", "plugin"),
"validator_version": "1.0.0"
},
"executive_summary": {
"score": score,
"rating": rating,
"stars": stars,
"publication_ready": readiness,
"critical_issues": len(p0_issues),
"total_issues": len(p0_issues) + len(p1_issues) + len(p2_issues)
},
"validation_layers": self.context.get("validation_layers", {}),
"issues": {
"p0": p0_issues,
"p1": p1_issues,
"p2": p2_issues
},
"improvement_roadmap": self.context.get("improvement_roadmap", {})
}
return json.dumps(report, indent=2)
    def _generate_html(self) -> str:
        """Generate HTML format report.

        Produces a standalone HTML page (inline CSS, no external assets)
        with a color-coded score card, summary stat cards, validation
        layer badges, and up to five issues per priority tier.
        """
        # Pull summary fields with safe defaults (mirrors _generate_markdown).
        score = self.context.get("score", 0)
        rating = self.context.get("rating", "Unknown")
        stars = self.context.get("stars", "")
        readiness = self.context.get("publication_ready", "Unknown")
        p0_count = len(self.context.get("issues", {}).get("p0", []))
        p1_count = len(self.context.get("issues", {}).get("p1", []))
        p2_count = len(self.context.get("issues", {}).get("p2", []))
        total_issues = p0_count + p1_count + p2_count
        # Determine score color (same thresholds as the rating tiers).
        if score >= 90:
            score_color = "#10b981" # green
        elif score >= 75:
            score_color = "#3b82f6" # blue
        elif score >= 60:
            score_color = "#f59e0b" # orange
        else:
            score_color = "#ef4444" # red
        # Main page template; doubled braces ({{ }}) are literal CSS braces
        # inside this f-string.
        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Quality Assessment Report</title>
<style>
* {{
margin: 0;
padding: 0;
box-sizing: border-box;
}}
body {{
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
line-height: 1.6;
color: #333;
background: #f5f5f5;
padding: 20px;
}}
.container {{
max-width: 1200px;
margin: 0 auto;
background: white;
border-radius: 8px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
padding: 40px;
}}
h1 {{
font-size: 32px;
margin-bottom: 10px;
color: #1f2937;
}}
.meta {{
color: #6b7280;
margin-bottom: 30px;
padding-bottom: 20px;
border-bottom: 2px solid #e5e7eb;
}}
.score-card {{
background: linear-gradient(135deg, {score_color} 0%, {score_color}dd 100%);
color: white;
padding: 30px;
border-radius: 8px;
margin-bottom: 30px;
text-align: center;
}}
.score-number {{
font-size: 72px;
font-weight: bold;
line-height: 1;
}}
.score-label {{
font-size: 18px;
margin-top: 10px;
opacity: 0.9;
}}
.stats {{
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 20px;
margin-bottom: 30px;
}}
.stat-card {{
background: #f9fafb;
padding: 20px;
border-radius: 6px;
border-left: 4px solid #3b82f6;
}}
.stat-label {{
font-size: 14px;
color: #6b7280;
margin-bottom: 5px;
}}
.stat-value {{
font-size: 24px;
font-weight: bold;
color: #1f2937;
}}
.section {{
margin-bottom: 40px;
}}
h2 {{
font-size: 24px;
margin-bottom: 20px;
color: #1f2937;
border-bottom: 2px solid #e5e7eb;
padding-bottom: 10px;
}}
h3 {{
font-size: 18px;
margin-bottom: 15px;
color: #374151;
}}
.issue {{
background: #f9fafb;
padding: 20px;
border-radius: 6px;
margin-bottom: 15px;
border-left: 4px solid #6b7280;
}}
.issue.p0 {{
border-left-color: #ef4444;
background: #fef2f2;
}}
.issue.p1 {{
border-left-color: #f59e0b;
background: #fffbeb;
}}
.issue.p2 {{
border-left-color: #3b82f6;
background: #eff6ff;
}}
.issue-title {{
font-weight: bold;
margin-bottom: 10px;
font-size: 16px;
}}
.issue-detail {{
font-size: 14px;
color: #6b7280;
margin: 5px 0;
}}
.badge {{
display: inline-block;
padding: 4px 12px;
border-radius: 12px;
font-size: 12px;
font-weight: 600;
margin-right: 8px;
}}
.badge.pass {{
background: #d1fae5;
color: #065f46;
}}
.badge.warning {{
background: #fef3c7;
color: #92400e;
}}
.badge.fail {{
background: #fee2e2;
color: #991b1b;
}}
.footer {{
margin-top: 40px;
padding-top: 20px;
border-top: 2px solid #e5e7eb;
color: #6b7280;
font-size: 14px;
text-align: center;
}}
</style>
</head>
<body>
<div class="container">
<h1>Quality Assessment Report</h1>
<div class="meta">
<strong>Generated:</strong> {self.timestamp}<br>
<strong>Target:</strong> {self.path}<br>
<strong>Type:</strong> Claude Code Plugin
</div>
<div class="score-card">
<div class="score-number">{score}</div>
<div class="score-label">{stars} {rating}</div>
<div class="score-label">{readiness}</div>
</div>
<div class="stats">
<div class="stat-card">
<div class="stat-label">Critical Issues</div>
<div class="stat-value">{p0_count}</div>
</div>
<div class="stat-card">
<div class="stat-label">Important Issues</div>
<div class="stat-value">{p1_count}</div>
</div>
<div class="stat-card">
<div class="stat-label">Recommendations</div>
<div class="stat-value">{p2_count}</div>
</div>
<div class="stat-card">
<div class="stat-label">Total Issues</div>
<div class="stat-value">{total_issues}</div>
</div>
</div>
<div class="section">
<h2>Validation Layers</h2>
"""
        # Validation layers: one status badge per layer.
        layers = self.context.get("validation_layers", {})
        for layer_name, layer_data in layers.items():
            status = layer_data.get("status", "unknown")
            badge_class = "pass" if status == "pass" else ("warning" if status == "warnings" else "fail")
            html += f' <span class="badge {badge_class}">{layer_name.replace("_", " ").title()}: {status.upper()}</span>\n'
        html += """ </div>
<div class="section">
<h2>Issues Breakdown</h2>
"""
        # Issues: at most five cards rendered per priority tier.
        for priority, priority_name in [("p0", "Critical"), ("p1", "Important"), ("p2", "Recommended")]:
            issues = self.context.get("issues", {}).get(priority, [])
            html += f' <h3>Priority {priority[1]}: {priority_name} ({len(issues)} issues)</h3>\n'
            for issue in issues[:5]: # Show top 5 per priority
                message = issue.get("message", "Unknown issue")
                impact = issue.get("impact", "Unknown")
                effort = issue.get("effort", "unknown")
                fix = issue.get("fix", "No fix available")
                html += f""" <div class="issue {priority}">
<div class="issue-title">{message}</div>
<div class="issue-detail"><strong>Impact:</strong> {impact}</div>
<div class="issue-detail"><strong>Effort:</strong> {effort.capitalize()}</div>
<div class="issue-detail"><strong>Fix:</strong> {fix}</div>
</div>
"""
        # Close out the page with the footer.
        html += """ </div>
<div class="footer">
Report generated by marketplace-validator-plugin v1.0.0
</div>
</div>
</body>
</html>
"""
        return html
def main():
    """CLI entry point: parse arguments, load context, emit the report.

    Returns:
        0 on success, 1 on a fatal error (bad context JSON or
        unwritable output file).
    """
    parser = argparse.ArgumentParser(
        description="Generate comprehensive quality reports",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--path",
        required=True,
        help="Target path being analyzed",
    )
    parser.add_argument(
        "--format",
        choices=["markdown", "json", "html"],
        default="markdown",
        help="Output format (default: markdown)",
    )
    parser.add_argument(
        "--output",
        help="Output file path (optional, defaults to stdout)",
    )
    parser.add_argument(
        "--context",
        help="Path to JSON file with validation context",
    )
    args = parser.parse_args()

    # Load the validation context when supplied. A missing file only
    # warns (report renders with defaults); malformed JSON is fatal.
    context = {}
    if args.context:
        try:
            with open(args.context, 'r') as f:
                context = json.load(f)
        except FileNotFoundError:
            print(f"Warning: Context file not found: {args.context}", file=sys.stderr)
        except json.JSONDecodeError as e:
            print(f"Error: Invalid JSON in context file: {e}", file=sys.stderr)
            return 1

    report = ReportGenerator(args.path, context).generate(args.format)

    # Write to the requested file, or fall back to stdout.
    if not args.output:
        print(report)
        return 0
    try:
        with open(args.output, 'w') as f:
            f.write(report)
    except IOError as e:
        print(f"Error writing to file: {e}", file=sys.stderr)
        return 1
    print(f"Report generated: {args.output}")
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,239 @@
#!/usr/bin/env python3
# ============================================================================
# Quality Scoring Algorithm
# ============================================================================
# Purpose: Calculate quality score (0-100) based on validation results
# Version: 1.0.0
# Usage: ./scoring-algorithm.py --errors N --warnings N --missing N
# Returns: 0=success, 1=error
# Dependencies: Python 3.6+
# ============================================================================
import sys
import argparse
import json
def calculate_quality_score(errors: int, warnings: int, missing_recommended: int) -> int:
    """
    Calculate quality score based on validation issues.

    Starting from a base of 100, each critical error deducts 20 points,
    each warning 10 points, and each missing recommended field 5 points;
    the result is clamped at 0.

    Args:
        errors: Number of critical errors
        warnings: Number of warnings
        missing_recommended: Number of missing recommended fields

    Returns:
        Quality score (0-100)
    """
    penalty = 20 * errors + 10 * warnings + 5 * missing_recommended
    return max(0, 100 - penalty)
def get_rating(score: int) -> str:
    """
    Get quality rating based on score.

    Tiers: 90+ Excellent, 75+ Good, 60+ Fair, 40+ Needs Improvement,
    otherwise Poor.

    Args:
        score: Quality score (0-100)

    Returns:
        Rating string
    """
    tiers = (
        (90, "Excellent"),
        (75, "Good"),
        (60, "Fair"),
        (40, "Needs Improvement"),
    )
    for minimum, label in tiers:
        if score >= minimum:
            return label
    return "Poor"
def get_stars(score: int) -> str:
    """Map a quality score to its star-rating string.

    Tiers mirror get_rating(): 90+ → 5 stars, 75+ → 4, 60+ → 3,
    40+ → 2, and below 40 → 1 star.

    Args:
        score: Quality score (0-100).

    Returns:
        A string of 1-5 "⭐" characters.
    """
    if score >= 90:
        return "⭐⭐⭐⭐⭐"
    elif score >= 75:
        return "⭐⭐⭐⭐"
    elif score >= 60:
        return "⭐⭐⭐"
    elif score >= 40:
        return "⭐⭐"
    else:
        # Bug fix: this branch previously returned "", so "Poor" scores
        # rendered no stars at all; the documented rating scale shows one
        # star for the 0-39 tier ("Poor ⭐").
        return "⭐"
def get_publication_readiness(score: int) -> str:
    """Translate a quality score into a publication-readiness verdict.

    Args:
        score: Quality score (0-100).

    Returns:
        Human-readable readiness status string.
    """
    # Guard-clause form: rule out the weaker tiers from the bottom up.
    if score < 60:
        return "Not Ready - Major overhaul required"
    if score < 75:
        return "Needs Work - Significant improvements needed"
    if score < 90:
        return "With Minor Changes - Nearly ready"
    return "Yes - Ready to publish"
def format_output(score: int, errors: int, warnings: int, missing: int,
                  output_format: str = "text") -> str:
    """
    Format score output in requested format.

    Args:
        score: Quality score
        errors: Error count
        warnings: Warning count
        missing: Missing field count
        output_format: Output format (text, json, compact)

    Returns:
        Formatted output string
    """
    # Presentation values derived from the score; shared by all formats.
    rating = get_rating(score)
    stars = get_stars(score)
    readiness = get_publication_readiness(score)
    if output_format == "json":
        # Machine-readable output; includes the raw penalty breakdown so
        # consumers can re-derive the score (100 minus penalties, floored
        # at 0 by calculate_quality_score).
        return json.dumps({
            "score": score,
            "rating": rating,
            "stars": stars,
            "publication_ready": readiness,
            "breakdown": {
                "base_score": 100,
                "errors_penalty": errors * 20,
                "warnings_penalty": warnings * 10,
                "missing_penalty": missing * 5
            },
            "counts": {
                "errors": errors,
                "warnings": warnings,
                "missing": missing
            }
        }, indent=2)
    elif output_format == "compact":
        # One-line summary, e.g. "85/100 ⭐⭐⭐⭐ (Good)".
        return f"{score}/100 {stars} ({rating})"
    else:  # text format
        # Human-readable report block (also the fallback for any
        # unrecognized format value).
        error_penalty = errors * 20
        warning_penalty = warnings * 10
        missing_penalty = missing * 5
        return f"""━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE CALCULATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Score: {score}/100
Rating: {rating}
Stars: {stars}
Breakdown:
  Base Score: 100
  Critical Errors: -{error_penalty} ({errors} × 20)
  Warnings: -{warning_penalty} ({warnings} × 10)
  Missing Fields: -{missing_penalty} ({missing} × 5)
  ─────────────────────
  Final Score: {score}/100
Publication Ready: {readiness}
"""
def main():
    """Command-line entry point: parse counts, compute and print the score.

    Returns:
        Process exit code: 0 on success, 1 on invalid (negative) input.
    """
    parser = argparse.ArgumentParser(
        description="Calculate quality score based on validation results",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --errors 2 --warnings 5 --missing 3
  %(prog)s --errors 0 --warnings 0 --missing 0
  %(prog)s --errors 1 --format json
"""
    )
    # The three issue counters share an identical option shape, so they
    # are declared table-driven instead of repeating add_argument() blocks.
    for flag, noun in (
        ("--errors", "critical errors"),
        ("--warnings", "warnings"),
        ("--missing", "missing recommended fields"),
    ):
        parser.add_argument(
            flag,
            type=int,
            default=0,
            help=f"Number of {noun} (default: 0)"
        )
    parser.add_argument(
        "--format",
        choices=["text", "json", "compact"],
        default="text",
        help="Output format (default: text)"
    )
    args = parser.parse_args()
    # Reject negative counts: they would inflate the score.
    if min(args.errors, args.warnings, args.missing) < 0:
        print("Error: Counts cannot be negative", file=sys.stderr)
        return 1
    # Compute the score, then render it in the requested format.
    score = calculate_quality_score(args.errors, args.warnings, args.missing)
    print(format_output(score, args.errors, args.warnings, args.missing,
                        args.format))
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,112 @@
## Operation: Calculate Quality Score
Calculate comprehensive quality score (0-100) based on validation results with star rating.
### Parameters from $ARGUMENTS
Extract these parameters from `$ARGUMENTS`:
- **path**: Target path to analyze (required)
- **errors**: Critical error count (default: 0)
- **warnings**: Warning count (default: 0)
- **missing**: Missing recommended fields count (default: 0)
### Scoring Algorithm
Execute the quality scoring algorithm using `.scripts/scoring-algorithm.py`:
**Algorithm**:
```
score = 100
score -= (errors × 20) # Critical errors: -20 points each
score -= (warnings × 10) # Warnings: -10 points each
score -= (missing × 5) # Missing recommended: -5 points each
score = max(0, score) # Floor at 0
```
**Rating Thresholds**:
- **90-100**: Excellent ⭐⭐⭐⭐⭐ (publication-ready)
- **75-89**: Good ⭐⭐⭐⭐ (ready with minor improvements)
- **60-74**: Fair ⭐⭐⭐ (needs work)
- **40-59**: Needs Improvement ⭐⭐ (substantial work needed)
- **0-39**: Poor ⭐ (major overhaul required)
### Workflow
1. **Parse Arguments**
```
Extract path, errors, warnings, missing from $ARGUMENTS
Validate that path exists
Set defaults for missing parameters
```
2. **Calculate Score**
```bash
Invoke Bash tool to execute:
python3 .claude/commands/quality-analysis/.scripts/scoring-algorithm.py \
--errors $errors \
--warnings $warnings \
--missing $missing
```
3. **Format Output**
```
Display results in user-friendly format with:
- Numeric score (0-100)
- Rating (Excellent/Good/Fair/Needs Improvement/Poor)
- Star rating (⭐⭐⭐⭐⭐)
- Publication readiness status
```
### Examples
```bash
# Calculate score with validation results
/quality-analysis score path:. errors:2 warnings:5 missing:3
# Calculate perfect score
/quality-analysis score path:. errors:0 warnings:0 missing:0
# Calculate score with only errors
/quality-analysis score path:. errors:3
```
### Error Handling
- **Missing path**: Request path parameter
- **Invalid counts**: Negative numbers default to 0
- **Script not found**: Provide clear error message with remediation
- **Python not available**: Fallback to bash calculation
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE CALCULATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: <path>
Score: <0-100>/100
Rating: <Excellent|Good|Fair|Needs Improvement|Poor>
Stars: <⭐⭐⭐⭐⭐>
Breakdown:
Base Score: 100
Critical Errors: -<errors × 20>
Warnings: -<warnings × 10>
Missing Fields: -<missing × 5>
─────────────────────
Final Score: <score>/100
Publication Ready: <Yes|With Minor Changes|Needs Work|Not Ready>
```
### Integration Notes
This operation is typically invoked by:
- `full-analysis.md` as first step
- `validation-orchestrator` after comprehensive validation
- Direct user invocation for score-only calculation
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,330 @@
## Operation: Full Quality Analysis
Execute comprehensive quality analysis orchestrating all sub-operations to generate complete assessment.
### Parameters from $ARGUMENTS
Extract these parameters from `$ARGUMENTS`:
- **path**: Target path to analyze (required)
- **context**: Path to validation context JSON file with prior results (optional)
- **format**: Report output format - markdown|json|html (default: markdown)
- **output**: Output file path for report (optional)
### Full Analysis Workflow
This operation orchestrates all quality-analysis sub-operations to provide a complete quality assessment.
**1. Load Validation Context**
```
IF context parameter provided:
Read validation results from JSON file
Extract:
- Errors count
- Warnings count
- Missing fields count
- Validation layer results
- Detailed issue list
ELSE:
Use default values:
- errors: 0
- warnings: 0
- missing: 0
```
**2. Calculate Base Score**
```
Read calculate-score.md operation instructions
Execute scoring with validation results:
python3 .scripts/scoring-algorithm.py \
--errors $errors \
--warnings $warnings \
--missing $missing \
--format json
Capture:
- Quality score (0-100)
- Rating (Excellent/Good/Fair/Needs Improvement/Poor)
- Star rating (⭐⭐⭐⭐⭐)
- Publication readiness status
```
**3. Prioritize All Issues**
```
Read prioritize-issues.md operation instructions
IF context has issues:
Write issues to temporary JSON file
Execute issue prioritization:
bash .scripts/issue-prioritizer.sh $temp_issues_file
Capture:
- P0 (Critical) issues with details
- P1 (Important) issues with details
- P2 (Recommended) issues with details
ELSE:
Skip (no issues to prioritize)
```
**4. Generate Improvement Suggestions**
```
Read suggest-improvements.md operation instructions
Generate actionable recommendations:
Target score: 90 (publication-ready)
Current score: $calculated_score
Generate suggestions for:
- Quick wins (< 30 min, high impact)
- This week improvements (< 2 hours)
- Long-term enhancements
Include:
- Score impact per suggestion
- Effort estimates
- Priority assignment
- Detailed fix instructions
```
**5. Generate Comprehensive Report**
```
Read generate-report.md operation instructions
Execute report generation:
python3 .scripts/report-generator.py \
--path $path \
--format $format \
--context $aggregated_context \
--output $output
Report includes:
- Executive summary
- Quality score and rating
- Validation layer breakdown
- Prioritized issues (P0/P1/P2)
- Improvement recommendations
- Detailed findings
```
**6. Aggregate and Display Results**
```
Combine all outputs into unified assessment:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPREHENSIVE QUALITY ANALYSIS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: <path>
Type: <marketplace|plugin>
Analyzed: <timestamp>
QUALITY SCORE: <0-100>/100 <⭐⭐⭐⭐⭐>
Rating: <rating>
Publication Ready: <Yes|No|With Changes>
CRITICAL ISSUES: <P0 count>
IMPORTANT ISSUES: <P1 count>
RECOMMENDATIONS: <P2 count>
[Executive Summary - 2-3 sentences on readiness]
[If not publication-ready, show top 3 quick wins]
[Report file location if output specified]
```
### Workflow Steps
1. **Initialize Analysis**
```
Validate path exists
Load validation context if provided
Set up temporary files for intermediate results
```
2. **Execute Operations Sequentially**
```
Step 1: Calculate Score
└─→ Invoke scoring-algorithm.py
└─→ Store result in context
Step 2: Prioritize Issues (if issues exist)
└─→ Invoke issue-prioritizer.sh
└─→ Store categorized issues in context
Step 3: Generate Suggestions
└─→ Analyze score gap
└─→ Create actionable recommendations
└─→ Store in context
Step 4: Generate Report
└─→ Invoke report-generator.py
└─→ Aggregate all context data
└─→ Format in requested format
└─→ Output to file or stdout
```
3. **Present Summary**
```
Display high-level results
Show publication readiness
Highlight critical blockers (if any)
Show top quick wins
Provide next steps
```
### Examples
```bash
# Full analysis with validation context
/quality-analysis full-analysis path:. context:"@validation-results.json"
# Full analysis generating HTML report
/quality-analysis full-analysis path:. format:html output:quality-report.html
# Full analysis with JSON output
/quality-analysis full-analysis path:. context:"@results.json" format:json output:analysis.json
# Basic full analysis (no prior context)
/quality-analysis full-analysis path:.
```
### Error Handling
- **Missing path**: Request target path parameter
- **Invalid context file**: Continue with limited data, show warning
- **Script execution failures**: Show which operation failed, provide fallback
- **Output write errors**: Fall back to stdout with warning
- **No issues found**: Congratulate on perfect quality, skip issue operations
### Output Format
**Terminal Output**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPREHENSIVE QUALITY ANALYSIS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: /path/to/plugin
Type: Claude Code Plugin
Analyzed: 2025-10-13 14:30:00
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUALITY SCORE
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
85/100 ⭐⭐⭐⭐ (Good)
Publication Ready: With Minor Changes
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ISSUES SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Critical (P0): 0 ✅
Important (P1): 3 ⚠️
Recommended (P2): 5 💡
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EXECUTIVE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Your plugin is nearly ready for publication! No critical blockers
found. Address 3 important issues to reach excellent status (90+).
Quality foundation is solid with good documentation and security.
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
TOP QUICK WINS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. [+10 pts] Add CHANGELOG.md (15 minutes)
Impact: Improves version tracking
Fix: Create CHANGELOG.md with version history
2. [+3 pts] Add 2 more keywords (5 minutes)
Impact: Better discoverability
Fix: Add relevant keywords to plugin.json
3. [+2 pts] Add repository URL (2 minutes)
Impact: Professional appearance
Fix: Add repository field to plugin.json
After Quick Wins: 100/100 ⭐⭐⭐⭐⭐ (Excellent)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
DETAILED REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Full report saved to: quality-report.md
Next Steps:
1. Review detailed report for all findings
2. Implement quick wins (22 minutes total)
3. Re-run validation to verify improvements
4. Submit to OpenPlugins marketplace
Questions? Consult: docs.claude.com/plugins
```
### Integration Notes
This operation is the **primary entry point** for complete quality assessment.
**Invoked by**:
- `validation-orchestrator` after comprehensive validation
- `marketplace-validator` agent for submission readiness
- Direct user invocation for full assessment
**Orchestrates**:
- `calculate-score.md` - Quality scoring
- `prioritize-issues.md` - Issue categorization
- `suggest-improvements.md` - Actionable recommendations
- `generate-report.md` - Comprehensive reporting
**Data Flow**:
```
Validation Results
Calculate Score → score, rating, stars
Prioritize Issues → P0/P1/P2 categorization
Suggest Improvements → actionable recommendations
Generate Report → formatted comprehensive report
Display Summary → user-friendly terminal output
```
### Performance
- **Execution Time**: 2-5 seconds (depending on issue count)
- **I/O Operations**: Minimal (uses temporary files for large datasets)
- **Memory Usage**: Low (streaming JSON processing)
- **Parallelization**: Sequential (each step depends on previous)
### Quality Assurance
**Validation Steps**:
1. Verify all scripts are executable
2. Check Python 3.6+ availability
3. Validate JSON context format
4. Verify write permissions for output
5. Ensure scoring algorithm consistency
**Testing**:
```bash
# Test with perfect plugin
/quality-analysis full-analysis path:./test-fixtures/perfect-plugin
# Test with issues
/quality-analysis full-analysis path:./test-fixtures/needs-work
# Test report formats
/quality-analysis full-analysis path:. format:json
/quality-analysis full-analysis path:. format:html
/quality-analysis full-analysis path:. format:markdown
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,293 @@
## Operation: Generate Quality Report
Generate comprehensive quality report in multiple formats (markdown, JSON, HTML) with detailed findings and recommendations.
### Parameters from $ARGUMENTS
Extract these parameters from `$ARGUMENTS`:
- **path**: Target path to analyze (required)
- **format**: Output format - markdown|json|html (default: markdown)
- **output**: Output file path (optional, defaults to stdout)
- **context**: Path to validation context JSON file with prior results (optional)
### Report Structure
**1. Executive Summary**
- Overall quality score and star rating
- Publication readiness determination
- Key findings at-a-glance
- Critical blockers (if any)
**2. Validation Layers**
- Schema validation results (pass/fail with details)
- Security scan results (vulnerabilities found)
- Documentation quality assessment
- Best practices compliance check
**3. Issues Breakdown**
- Priority 0 (Critical): Must fix before publication
- Priority 1 (Important): Should fix for quality
- Priority 2 (Recommended): Nice to have improvements
**4. Improvement Roadmap**
- Prioritized action items with effort estimates
- Expected score improvement per fix
- Timeline to reach publication-ready (90+ score)
**5. Detailed Findings**
- Full validation output from each layer
- Code examples and fix suggestions
- References to best practices documentation
### Workflow
1. **Load Validation Context**
```
IF context parameter provided:
Read validation results from context file
ELSE:
Use current validation state
Extract:
- Quality score
- Validation layer results
- Issue lists
- Target metadata
```
2. **Generate Report Sections**
```python
Execute .scripts/report-generator.py with:
- Path to target
- Format (markdown|json|html)
- Validation context data
- Output destination
Script generates:
- Executive summary
- Validation layer breakdown
- Prioritized issues
- Improvement suggestions
- Detailed findings
```
3. **Format Output**
```
IF output parameter specified:
Write report to file
Display confirmation with file path
ELSE:
Print report to stdout
```
4. **Display Summary**
```
Show brief summary:
- Report generated successfully
- Format used
- Output location (if file)
- Key metrics (score, issues)
```
### Examples
```bash
# Generate markdown report to stdout
/quality-analysis report path:. format:markdown
# Generate JSON report to file
/quality-analysis report path:. format:json output:quality-report.json
# Generate HTML report with context
/quality-analysis report path:. format:html context:"@validation-results.json" output:report.html
# Quick markdown report from validation results
/quality-analysis report path:. context:"@comprehensive-validation.json"
```
### Error Handling
- **Missing path**: Request target path
- **Invalid format**: List supported formats (markdown, json, html)
- **Context file not found**: Continue with limited data, warn user
- **Invalid JSON context**: Show parsing error, suggest validation
- **Write permission denied**: Show error, suggest alternative output location
- **Python not available**: Fallback to basic text report
### Output Format
**Markdown Report**:
```markdown
# Quality Assessment Report
Generated: 2025-10-13 14:30:00
Target: /path/to/plugin
Type: Claude Code Plugin
## Executive Summary
**Quality Score**: 85/100 ⭐⭐⭐⭐ (Good)
**Publication Ready**: With Minor Changes
**Critical Issues**: 0
**Total Issues**: 8
Your plugin is nearly ready for publication! Address 3 important issues to reach excellent status.
## Validation Results
### Schema Validation ✅ PASS
- All required fields present
- Valid JSON syntax
- Correct semver format
### Security Scan ✅ PASS
- No secrets exposed
- All URLs use HTTPS
- File permissions correct
### Documentation ⚠️ WARNINGS (3 issues)
- Missing CHANGELOG.md (-10 pts)
- README could use 2 more examples (-5 pts)
- No architecture documentation
### Best Practices ✅ PASS
- Naming convention correct
- Keywords appropriate (5/7)
- Category properly set
## Issues Breakdown
### Priority 0 (Critical): 0 issues
None - excellent!
### Priority 1 (Important): 3 issues
#### 1. Add CHANGELOG.md [+10 pts]
Missing version history and change documentation.
**Impact**: -10 quality score
**Effort**: Low (15 minutes)
**Fix**: Create CHANGELOG.md following Keep a Changelog format
```bash
# Create changelog
cat > CHANGELOG.md <<EOF
# Changelog
## [1.0.0] - 2025-10-13
### Added
- Initial release
EOF
```
#### 2. Expand README examples [+5 pts]
README has only 1 example, recommend 3-5 examples.
**Impact**: Poor user onboarding, -5 score
**Effort**: Medium (30 minutes)
**Fix**: Add 2-4 more usage examples showing different scenarios
#### 3. Add 2 more keywords [+3 pts]
Current: 5 keywords. Optimal: 7 keywords.
**Impact**: Reduced discoverability
**Effort**: Low (5 minutes)
**Fix**: Add relevant keywords to plugin.json
### Priority 2 (Recommended): 5 issues
[Details of nice-to-have improvements...]
## Improvement Roadmap
### Path to Excellent (90+)
Current: 85/100
Target: 90/100
Gap: 5 points
**Quick Wins** (Total: +13 pts, 20 minutes)
1. Add CHANGELOG.md → +10 pts (15 min)
2. Add 2 keywords → +3 pts (5 min)
**This Week** (Total: +5 pts, 30 minutes)
3. Expand README examples → +5 pts (30 min)
**After completion**: 100/100 ⭐⭐⭐⭐⭐ (Excellent, capped at 100)
## Detailed Findings
[Complete validation output from all layers...]
---
Report generated by marketplace-validator-plugin v1.0.0
```
**JSON Report**:
```json
{
"metadata": {
"generated": "2025-10-13T14:30:00Z",
"target": "/path/to/plugin",
"type": "plugin",
"validator_version": "1.0.0"
},
"executive_summary": {
"score": 85,
"rating": "Good",
"stars": "⭐⭐⭐⭐",
"publication_ready": "With Minor Changes",
"critical_issues": 0,
"total_issues": 8
},
"validation_layers": {
"schema": {"status": "pass", "issues": []},
"security": {"status": "pass", "issues": []},
"documentation": {"status": "warnings", "issues": [...]},
"best_practices": {"status": "pass", "issues": []}
},
"issues": {
"p0": [],
"p1": [...],
"p2": [...]
},
"improvement_roadmap": {
"current_score": 85,
"target_score": 90,
"gap": 5,
"recommendations": [...]
}
}
```
**HTML Report**:
```html
<!DOCTYPE html>
<html>
<head>
<title>Quality Assessment Report</title>
<style>
/* Styled, responsive HTML report */
</style>
</head>
<body>
<!-- Executive summary card -->
<!-- Validation layer status badges -->
<!-- Interactive issue accordion -->
<!-- Improvement roadmap timeline -->
</body>
</html>
```
### Integration Notes
This operation is invoked by:
- `full-analysis.md` as final step to consolidate results
- `validation-orchestrator` for comprehensive reporting
- Direct user invocation for custom reports
The report aggregates data from:
- `calculate-score.md` output
- `prioritize-issues.md` categorization
- `suggest-improvements.md` recommendations
- All validation layer results
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,178 @@
## Operation: Prioritize Issues
Categorize and prioritize validation issues by severity and impact using P0/P1/P2 tier system.
### Parameters from $ARGUMENTS
Extract these parameters from `$ARGUMENTS`:
- **issues**: Path to JSON file with issues or inline JSON string (required)
- **criteria**: Prioritization criteria - severity|impact|effort (default: severity)
### Prioritization Tiers
**Priority 0 (P0) - Critical - Must Fix**
- Invalid JSON syntax (blocks parsing)
- Missing required fields (name, version, description, author, license)
- Security vulnerabilities (exposed secrets, dangerous patterns)
- Format violations (invalid semver, malformed URLs)
- Blocks: Publication and installation
**Priority 1 (P1) - Important - Should Fix**
- Missing recommended fields (repository, homepage, keywords)
- Documentation gaps (incomplete README, missing CHANGELOG)
- Convention violations (naming, structure)
- Performance issues (slow scripts, inefficient patterns)
- Impact: Reduces quality score significantly
**Priority 2 (P2) - Recommended - Nice to Have**
- Additional keywords for discoverability
- Enhanced examples and documentation
- Expanded test coverage
- Quality improvements and polish
- Impact: Minor quality score boost
### Workflow
1. **Parse Issue Data**
```
IF issues parameter starts with "@":
Read JSON from file (remove @ prefix)
ELSE IF issues is valid JSON:
Parse inline JSON
ELSE:
Error: Invalid issues format
```
2. **Categorize Issues**
```bash
Execute .scripts/issue-prioritizer.sh with issues data
Categorize each issue based on:
- Severity (critical, important, recommended)
- Impact on publication readiness
- Blocking status
- Effort to fix
```
3. **Sort and Format**
```
Group issues by priority (P0, P1, P2)
Sort within each priority by impact
Format with appropriate icons:
- P0: ❌ (red X - blocking)
- P1: ⚠️ (warning - should fix)
- P2: 💡 (lightbulb - suggestion)
```
4. **Generate Summary**
```
Count issues per priority
Calculate total fix effort
Estimate score improvement potential
```
### Examples
```bash
# Prioritize from validation results file
/quality-analysis prioritize issues:"@validation-results.json"
# Prioritize inline JSON
/quality-analysis prioritize issues:'{"errors": [{"type": "missing_field", "field": "license"}]}'
# Prioritize with impact criteria
/quality-analysis prioritize issues:"@results.json" criteria:impact
```
### Error Handling
- **Missing issues parameter**: Request issues data
- **Invalid JSON format**: Show JSON parsing error with line number
- **Empty issues array**: Return "No issues found" message
- **File not found**: Show file path and suggest correct path
- **Script execution error**: Fallback to basic categorization
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ISSUE PRIORITIZATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Issues: <count>
Estimated Fix Time: <time>
Priority 0 (Critical - Must Fix): <count>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
❌ Missing required field: license
Impact: Blocks publication
Effort: Low (5 minutes)
Fix: Add "license": "MIT" to plugin.json
❌ Invalid JSON syntax at line 23
Impact: Blocks parsing
Effort: Low (2 minutes)
Fix: Remove trailing comma
Priority 1 (Important - Should Fix): <count>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⚠️ Missing CHANGELOG.md
Impact: -10 quality score
Effort: Low (15 minutes)
Fix: Create CHANGELOG.md following Keep a Changelog format
⚠️ README missing usage examples
Impact: Poor user experience, -5 score
Effort: Medium (30 minutes)
Fix: Add 3-5 usage examples to README
Priority 2 (Recommended - Nice to Have): <count>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
💡 Add 2 more keywords for discoverability
Impact: +3 quality score
Effort: Low (5 minutes)
Fix: Add relevant keywords to plugin.json
💡 Expand documentation with architecture diagram
Impact: Better understanding, +2 score
Effort: Medium (45 minutes)
Fix: Create docs/ARCHITECTURE.md with diagram
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Summary:
- Fix P0 issues first (blocking)
- Address P1 issues for quality (30-60 min)
- Consider P2 improvements for excellence
- Total potential score gain: +20 points
```
### Issue Data Schema
Expected JSON structure:
```json
{
"errors": [
{
"type": "missing_field|invalid_format|security",
"severity": "critical|important|recommended",
"field": "field_name",
"message": "Description",
"location": "file:line",
"fix": "How to fix",
"effort": "low|medium|high",
"score_impact": 20
}
],
"warnings": [...],
"recommendations": [...]
}
```
### Integration Notes
This operation is invoked by:
- `full-analysis.md` after score calculation
- `validation-orchestrator` for issue triage
- Direct user invocation for issue planning
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,73 @@
---
description: Deep quality analysis with scoring, recommendations, and actionable reports
---
You are the Quality Analysis coordinator, responsible for comprehensive quality assessment and scoring.
## Your Mission
Parse `$ARGUMENTS` to determine the requested quality analysis operation and route to the appropriate sub-command.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **score** → Read `.claude/commands/quality-analysis/calculate-score.md`
- **report** → Read `.claude/commands/quality-analysis/generate-report.md`
- **prioritize** → Read `.claude/commands/quality-analysis/prioritize-issues.md`
- **improve** → Read `.claude/commands/quality-analysis/suggest-improvements.md`
- **full-analysis** → Read `.claude/commands/quality-analysis/full-analysis.md`
## Argument Format
```
/quality-analysis <operation> [parameters]
```
### Examples
```bash
# Calculate quality score
/quality-analysis score path:. errors:2 warnings:5 missing:3
# Generate comprehensive report
/quality-analysis report path:. format:markdown
# Prioritize issues by severity
/quality-analysis prioritize issues:"@validation-results.json"
# Get improvement suggestions
/quality-analysis improve path:. score:65
# Run full quality analysis
/quality-analysis full-analysis path:. context:"@validation-context.json"
```
## Quality Scoring System
This skill implements the OpenPlugins quality scoring system:
- **90-100**: Excellent ⭐⭐⭐⭐⭐ (publication-ready)
- **75-89**: Good ⭐⭐⭐⭐ (ready with minor improvements)
- **60-74**: Fair ⭐⭐⭐ (needs work)
- **40-59**: Needs Improvement ⭐⭐ (substantial work needed)
- **0-39**: Poor ⭐ (major overhaul required)
## Error Handling
If the operation is not recognized:
1. List all available operations
2. Show example usage
3. Suggest closest match
## Base Directory
Base directory for this skill: `.claude/commands/quality-analysis/`
## Your Task
1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute quality analysis with provided parameters
4. Return actionable results with clear recommendations
**Current Request**: $ARGUMENTS

View File

@@ -0,0 +1,317 @@
## Operation: Suggest Improvements
Generate actionable improvement suggestions based on current quality score with effort estimates and expected impact.
### Parameters from $ARGUMENTS
Extract these parameters from `$ARGUMENTS`:
- **path**: Target path to analyze (required)
- **score**: Current quality score (required)
- **target**: Target score to achieve (default: 90)
- **context**: Path to validation context JSON file (optional)
### Improvement Suggestion Algorithm
```
gap = target_score - current_score
improvements_needed = ceiling(gap / 5) # Approximate improvements needed
FOR each validation layer:
IF layer has issues:
Generate specific, actionable improvements
Estimate score impact (+points)
Assign priority based on blocking status and impact
Estimate effort (low/medium/high)
SORT by:
1. Priority (P0 first)
2. Score impact (highest first)
3. Effort (lowest first - quick wins)
LIMIT to top 10 most impactful improvements
```
### Workflow
1. **Calculate Score Gap**
```
gap = target - current_score
IF gap <= 0:
Return "Already at or above target!"
IF gap <= 5:
Focus on quick wins (low effort, high impact)
IF gap > 20:
Focus on critical issues first
```
2. **Analyze Validation Context**
```
IF context provided:
Load validation results from JSON file
Extract issues from each layer:
- Schema validation issues
- Security scan findings
- Documentation gaps
- Best practices violations
Categorize by:
- Severity (P0/P1/P2)
- Score impact
- Effort required
```
3. **Generate Improvement Suggestions**
```
For each issue, create suggestion:
- Title (brief, actionable)
- Score impact (+X points)
- Priority (High/Medium/Low)
- Effort estimate with time
- Detailed fix instructions
- Expected outcome
Sort by effectiveness:
effectiveness = score_impact / effort_hours
```
4. **Create Improvement Roadmap**
```
Group suggestions into phases:
- Quick Wins (< 30 min, +5-15 pts)
- This Week (< 2 hours, +10-20 pts)
- This Sprint (< 1 day, +20+ pts)
Calculate cumulative score after each phase
```
### Examples
```bash
# Get improvements for low score
/quality-analysis improve path:. score:65
# Target excellent status
/quality-analysis improve path:. score:78 target:95
# Use validation context for detailed suggestions
/quality-analysis improve path:. score:70 context:"@validation-results.json"
```
### Error Handling
- **Missing score**: Request current score or run calculate-score first
- **Invalid score range**: Score must be 0-100
- **Invalid target**: Target must be higher than current score
- **Context file not found**: Continue with basic suggestions
- **No improvements possible**: Congratulate on perfect score
### Output Format
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
IMPROVEMENT RECOMMENDATIONS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Current Score: 65/100 ⭐⭐⭐ (Fair)
Target Score: 90/100 ⭐⭐⭐⭐⭐ (Excellent)
Gap: 25 points
To reach your target, implement these improvements:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
QUICK WINS (Total: +15 pts, 22 minutes)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. [+10 pts] Add CHANGELOG.md with version history
Priority: High
Effort: Low (15 minutes)
Impact: Improves version tracking and transparency
HOW TO FIX:
```bash
cat > CHANGELOG.md <<'EOF'
# Changelog
All notable changes to this project will be documented in this file.
## [1.0.0] - 2025-10-13
### Added
- Initial release
- Core functionality
EOF
```
WHY IT MATTERS:
Users need to track changes between versions. CHANGELOG.md is a
best practice for professional plugins.
2. [+3 pts] Add 2 more relevant keywords to plugin.json
Priority: Medium
Effort: Low (5 minutes)
Impact: Improved discoverability in marketplace
HOW TO FIX:
```json
{
"keywords": ["existing", "keywords", "automation", "workflow"]
}
```
SUGGESTION: Based on your plugin's functionality, consider:
- "automation" (if you automate tasks)
- "productivity" (if you improve efficiency)
- "validation" (if you validate data)
3. [+2 pts] Add repository URL to plugin.json
Priority: Medium
Effort: Low (2 minutes)
Impact: Users can view source and report issues
HOW TO FIX:
```json
{
"repository": {
"type": "git",
"url": "https://github.com/username/plugin-name"
}
}
```
After Quick Wins: 80/100 ⭐⭐⭐⭐ (Good)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
THIS WEEK (Total: +12 pts, 45 minutes)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
4. [+5 pts] Expand README with 3 more usage examples
Priority: Medium
Effort: Medium (30 minutes)
Impact: Better user onboarding and adoption
HOW TO FIX:
Add examples showing:
- Basic usage (simple case)
- Advanced usage (complex scenario)
- Common workflows (real-world use)
- Error handling (what to do when things fail)
TEMPLATE:
```markdown
## Examples
### Basic Usage
/your-command simple-task
### Advanced Usage
/your-command complex-task param:value
### Common Workflow
1. /your-command init
2. /your-command process
3. /your-command finalize
```
5. [+5 pts] Add homepage URL to plugin.json
Priority: Low
Effort: Low (5 minutes)
Impact: Professional appearance, marketing
HOW TO FIX:
```json
{
"homepage": "https://your-plugin-docs.com"
}
```
6. [+2 pts] Improve description in plugin.json
Priority: Low
Effort: Medium (10 minutes)
Impact: Better first impression in marketplace
HOW TO FIX:
Make description:
- Concise (1-2 sentences)
- Action-oriented (starts with verb)
- Benefit-focused (what user gains)
BEFORE: "A plugin for validation"
AFTER: "Automatically validate your code quality with comprehensive
checks for security, performance, and best practices"
After This Week: 92/100 ⭐⭐⭐⭐⭐ (Excellent)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Path to Excellence:
- Start with Quick Wins (22 min) → 80/100 ⭐⭐⭐⭐
- Complete This Week items (45 min) → 92/100 ⭐⭐⭐⭐⭐
- Total effort: about 1 hour 7 minutes
- Total improvement: +27 points
Priority Order:
1. Fix P0 blockers (none currently)
2. Implement quick wins for fast progress
3. Address documentation improvements
4. Polish with recommended enhancements
Your plugin will be publication-ready after Quick Wins!
Excellence status achievable within one week.
```
### Improvement Categories
**Documentation**
- Add/expand README
- Create CHANGELOG.md
- Add LICENSE file
- Include usage examples
- Add architecture documentation
**Metadata**
- Add repository URL
- Add homepage URL
- Expand keywords (3-7 recommended)
- Improve description
- Add author details
**Code Quality**
- Fix naming conventions
- Improve error handling
- Add input validation
- Optimize performance
- Remove code smells
**Security**
- Remove exposed secrets
- Validate user input
- Use HTTPS for all URLs
- Set correct file permissions
- Add security documentation
**Best Practices**
- Follow semantic versioning
- Use lowercase-hyphen naming
- Select appropriate category
- Include test coverage
- Add CI/CD configuration
### Integration Notes
This operation is invoked by:
- `full-analysis.md` to provide actionable next steps
- `validation-orchestrator` after comprehensive validation
- Direct user invocation for improvement planning
Suggestions are based on:
- Current quality score and target
- Validation layer findings
- Industry best practices
- Effort vs impact analysis
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,331 @@
#!/usr/bin/env bash
# ============================================================================
# Field Checker Script
# ============================================================================
# Purpose: Verify required and recommended fields in plugin/marketplace configs
# Version: 1.0.0
# Usage: ./field-checker.sh <config-file> <type> [strict]
# Returns: 0=all required present, 1=missing required, 2=error
# ============================================================================
set -euo pipefail
# Source shared library
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/../../../scripts/validate-lib.sh"
# ====================
# Configuration
# ====================
# Positional CLI arguments (see usage message in main()).
readonly CONFIG_FILE="${1:-}"   # Path to plugin.json / marketplace.json
readonly TYPE="${2:-}"          # "plugin" or "marketplace"
readonly STRICT="${3:-false}"   # "true" => recommended fields become required
# ====================
# Field Definitions
# ====================
# Plugin required fields (missing any of these fails validation)
PLUGIN_REQUIRED_FIELDS=(
  "name"
  "version"
  "description"
  "author"
  "license"
)
# Plugin recommended fields (warnings only, unless strict mode)
PLUGIN_RECOMMENDED_FIELDS=(
  "repository"
  "homepage"
  "keywords"
  "category"
)
# Marketplace required fields (dotted paths address nested keys)
MARKETPLACE_REQUIRED_FIELDS=(
  "name"
  "owner"
  "owner.name"
  "owner.email"
  "plugins"
)
# Marketplace recommended fields (warnings only, unless strict mode)
MARKETPLACE_RECOMMENDED_FIELDS=(
  "version"
  "metadata.description"
  "metadata.homepage"
  "metadata.repository"
)
# ====================
# Validation Functions
# ====================
check_field_exists() {
  # Succeed (return 0) when the dotted field path exists in the JSON file
  # and its value is not the literal JSON null.
  # $1 = config file path, $2 = dotted field path (e.g. "owner.email")
  local cfg="$1" path="$2"
  local raw
  raw=$(json_get "${cfg}" ".${path}")
  [[ -n "${raw}" && "${raw}" != "null" ]]
}
get_field_value() {
  # Print the raw value at a dotted field path in a JSON file.
  # Thin wrapper over json_get (validate-lib.sh); prints "null"/empty when
  # the field is absent — callers pair this with check_field_empty().
  local file="$1"
  local field="$2"
  json_get "${file}" ".${field}"
}
check_field_empty() {
  # Succeed (return 0) when a JSON value counts as "empty":
  # blank string, literal null, empty quoted string, empty array or object.
  case "$1" in
    "" | "null" | '""' | "[]" | "{}") return 0 ;;
    *) return 1 ;;
  esac
}
# ====================
# Plugin Validation
# ====================
validate_plugin_fields() {
  # Check a plugin.json for required and recommended fields.
  # Arguments: $1 = config file, $2 = "true" => recommended fields required
  # Returns:   0 on pass, 1 on missing required fields (or missing
  #            recommended fields in strict mode)
  local file="$1"
  local strict="$2"
  local missing_required=0
  local missing_recommended=0
  print_section "Required Fields (${#PLUGIN_REQUIRED_FIELDS[@]})"
  for field in "${PLUGIN_REQUIRED_FIELDS[@]}"; do
    if check_field_exists "${file}" "${field}"; then
      local value
      value=$(get_field_value "${file}" "${field}")
      # Present but empty is as bad as missing for a required field.
      if check_field_empty "${value}"; then
        print_error "${field}: Present but empty (REQUIRED)"
        # FIX: use arithmetic assignment, not ((var++)) — the latter returns
        # non-zero when the pre-increment value is 0 and would abort the
        # script under `set -e` if this function runs outside an if-context.
        missing_required=$((missing_required + 1))
      else
        # Truncate long values for display
        if [[ "${#value}" -gt 50 ]]; then
          value="${value:0:47}..."
        fi
        print_success "${field}: \"${value}\""
      fi
    else
      print_error "${field}: Missing (REQUIRED)"
      missing_required=$((missing_required + 1))
    fi
  done
  echo ""
  print_section "Recommended Fields (${#PLUGIN_RECOMMENDED_FIELDS[@]})"
  for field in "${PLUGIN_RECOMMENDED_FIELDS[@]}"; do
    if check_field_exists "${file}" "${field}"; then
      print_success "${field}: Present"
    else
      print_warning "${field}: Missing (improves quality)"
      missing_recommended=$((missing_recommended + 1))
    fi
  done
  echo ""
  # Summary
  if [[ ${missing_required} -eq 0 ]]; then
    print_success "All required fields present"
  else
    print_error "Missing ${missing_required} required field(s)"
  fi
  if [[ ${missing_recommended} -gt 0 ]]; then
    print_info "Missing ${missing_recommended} recommended field(s)"
  fi
  # Exit code: required failures always fail; strict mode also fails on
  # missing recommended fields.
  if [[ ${missing_required} -gt 0 ]]; then
    return 1
  elif [[ "${strict}" == "true" && ${missing_recommended} -gt 0 ]]; then
    print_warning "Strict mode: Recommended fields are required"
    return 1
  else
    return 0
  fi
}
# ====================
# Marketplace Validation
# ====================
validate_marketplace_fields() {
  # Check a marketplace.json for required and recommended fields.
  # Arguments: $1 = config file, $2 = "true" => recommended fields required
  # Returns:   0 on pass, 1 on failures (see strict-mode note at the bottom)
  local file="$1"
  local strict="$2"
  local missing_required=0
  local missing_recommended=0
  print_section "Required Fields (${#MARKETPLACE_REQUIRED_FIELDS[@]})"
  for field in "${MARKETPLACE_REQUIRED_FIELDS[@]}"; do
    if check_field_exists "${file}" "${field}"; then
      local value
      value=$(get_field_value "${file}" "${field}")
      # Special handling for plugins array: "present" means non-empty.
      if [[ "${field}" == "plugins" ]]; then
        local count
        count=$(get_json_array_length "${file}" ".plugins")
        if [[ ${count} -gt 0 ]]; then
          print_success "${field}: Array with ${count} entries"
        else
          print_error "${field}: Present but empty (REQUIRED)"
          # FIX: plain arithmetic assignment — ((var++)) exits non-zero when
          # the old value is 0, which would trip `set -e` outside an
          # if-condition context.
          missing_required=$((missing_required + 1))
        fi
      elif check_field_empty "${value}"; then
        print_error "${field}: Present but empty (REQUIRED)"
        missing_required=$((missing_required + 1))
      else
        # Truncate long values
        if [[ "${#value}" -gt 50 ]]; then
          value="${value:0:47}..."
        fi
        print_success "${field}: \"${value}\""
      fi
    else
      print_error "${field}: Missing (REQUIRED)"
      missing_required=$((missing_required + 1))
    fi
  done
  echo ""
  print_section "Recommended Fields (${#MARKETPLACE_RECOMMENDED_FIELDS[@]})"
  for field in "${MARKETPLACE_RECOMMENDED_FIELDS[@]}"; do
    if check_field_exists "${file}" "${field}"; then
      print_success "${field}: Present"
    else
      print_warning "${field}: Missing (improves quality)"
      missing_recommended=$((missing_recommended + 1))
    fi
  done
  echo ""
  # Summary
  if [[ ${missing_required} -eq 0 ]]; then
    print_success "All required fields present"
  else
    print_error "Missing ${missing_required} required field(s)"
  fi
  if [[ ${missing_recommended} -gt 0 ]]; then
    print_info "Missing ${missing_recommended} recommended field(s)"
  fi
  # Determine exit code
  if [[ ${missing_required} -gt 0 ]]; then
    return 1
  elif [[ "${strict}" == "true" && ${missing_recommended} -gt 0 ]]; then
    print_warning "Strict mode: Recommended fields are required"
    return 1
  else
    return 0
  fi
}
# ====================
# Main Logic
# ====================
main() {
  # Entry point: validate CLI arguments, confirm the file parses as JSON,
  # then dispatch to the plugin or marketplace field validator.
  # Exit codes: 0 = all required fields present, 1 = missing required
  # fields, 2 = usage / file / JSON-syntax error.

  # Validate arguments
  if [[ -z "${CONFIG_FILE}" ]]; then
    print_error "Usage: $0 <config-file> <type> [strict]"
    print_info "Types: plugin, marketplace"
    exit 2
  fi
  if [[ ! -f "${CONFIG_FILE}" ]]; then
    print_error "Configuration file not found: ${CONFIG_FILE}"
    exit 2
  fi
  if [[ -z "${TYPE}" ]]; then
    print_error "Type required: plugin or marketplace"
    exit 2
  fi
  # Validate JSON syntax first — the field checks assume a parseable document
  if ! validate_json_syntax "${CONFIG_FILE}"; then
    print_error "Invalid JSON syntax in ${CONFIG_FILE}"
    print_info "Run JSON validation first to fix syntax errors"
    exit 2
  fi
  # Print header
  print_header "Required Fields Validation"
  echo "Target: ${CONFIG_FILE}"
  echo "Type: ${TYPE}"
  echo "Strict Mode: ${STRICT}"
  echo ""
  # Validate based on type; each branch prints a remediation hint on failure
  case "${TYPE}" in
    plugin)
      if validate_plugin_fields "${CONFIG_FILE}" "${STRICT}"; then
        echo ""
        print_header "✅ PASS: All required fields present"
        exit 0
      else
        echo ""
        print_header "❌ FAIL: Missing required fields"
        # Show remediation
        echo ""
        print_info "Action Required:"
        echo " Add missing required fields to ${CONFIG_FILE}"
        echo " Refer to plugin schema: .claude/docs/plugins/plugins-reference.md"
        exit 1
      fi
      ;;
    marketplace)
      if validate_marketplace_fields "${CONFIG_FILE}" "${STRICT}"; then
        echo ""
        print_header "✅ PASS: All required fields present"
        exit 0
      else
        echo ""
        print_header "❌ FAIL: Missing required fields"
        # Show remediation
        echo ""
        print_info "Action Required:"
        echo " Add missing required fields to ${CONFIG_FILE}"
        echo " Refer to marketplace schema: .claude/docs/plugins/plugin-marketplaces.md"
        exit 1
      fi
      ;;
    *)
      print_error "Unknown type: ${TYPE}"
      print_info "Valid types: plugin, marketplace"
      exit 2
      ;;
  esac
}
main "$@"

View File

@@ -0,0 +1,448 @@
#!/usr/bin/env python3
# ============================================================================
# Format Validator Script
# ============================================================================
# Purpose: Validate format compliance for semver, URLs, emails, naming
# Version: 1.0.0
# Usage: ./format-validator.py --file <path> --type <plugin|marketplace> [--strict]
# Returns: 0=all valid, 1=format violations, 2=error
# ============================================================================
import json
import sys
import argparse
import re
from pathlib import Path
from typing import Dict, List, Tuple, Optional
# ====================
# Color Support
# ====================
class Colors:
    """ANSI escape sequences used to colorize terminal output."""
    RED, GREEN = '\033[0;31m', '\033[0;32m'
    YELLOW, BLUE = '\033[1;33m', '\033[0;34m'
    CYAN, BOLD = '\033[0;36m', '\033[1m'
    NC = '\033[0m'  # "no color" reset

    @classmethod
    def disable(cls):
        """Blank out every escape sequence (for non-TTY output such as pipes)."""
        for attr in ('RED', 'GREEN', 'YELLOW', 'BLUE', 'CYAN', 'BOLD', 'NC'):
            setattr(cls, attr, '')
# Auto-disable colors when stdout is not a terminal (pipes, CI logs).
if not sys.stdout.isatty():
    Colors.disable()
# ====================
# Format Patterns
# ====================
# Semantic versioning: X.Y.Z with optional -prerelease and +build metadata
SEMVER_PATTERN = re.compile(r'^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?(\+[a-zA-Z0-9.]+)?$')
# Lowercase-hyphen naming: plugin-name (digits allowed, no leading/trailing hyphen)
LOWERCASE_HYPHEN_PATTERN = re.compile(r'^[a-z0-9]+(-[a-z0-9]+)*$')
# Email: RFC 5322 simplified (not a full RFC validator)
EMAIL_PATTERN = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$')
# URL: http or https scheme prefix only
URL_PATTERN = re.compile(r'^https?://')
# HTTPS only (used in strict mode)
HTTPS_PATTERN = re.compile(r'^https://')
# SPDX License Identifiers (common ones; not the full SPDX list)
SPDX_LICENSES = [
    'MIT', 'Apache-2.0', 'GPL-3.0', 'GPL-2.0', 'LGPL-3.0', 'LGPL-2.1',
    'BSD-2-Clause', 'BSD-3-Clause', 'ISC', 'MPL-2.0', 'AGPL-3.0',
    'Unlicense', 'CC0-1.0', 'Proprietary'
]
# Approved categories (the 10 standard OpenPlugins categories)
APPROVED_CATEGORIES = [
    'development', 'testing', 'deployment', 'documentation', 'security',
    'database', 'monitoring', 'productivity', 'quality', 'collaboration'
]
# ====================
# Validation Functions
# ====================
class FormatValidator:
    """Format validation logic.

    Each validate_* method checks one field value and records the outcome
    in one of three accumulators:
      - passed:   (field, message) tuples for successful checks
      - warnings: (field, message) tuples; advisory only, never fail a run
      - errors:   (field, value, remediation) tuples; any entry => FAIL

    Every check skips empty values (returns True) on the assumption that
    presence is enforced by a separate required-fields validation.
    """
    def __init__(self, strict_https: bool = False):
        # strict_https=True makes validate_url() reject plain http:// URLs.
        self.strict_https = strict_https
        self.errors: List[Tuple[str, str, str]] = []
        self.warnings: List[Tuple[str, str]] = []
        self.passed: List[Tuple[str, str]] = []
    def validate_semver(self, field: str, value: str) -> bool:
        """Validate semantic versioning (MAJOR.MINOR.PATCH, optional pre-release/build)."""
        if not value:
            return True  # Skip empty (handled by required fields check)
        if SEMVER_PATTERN.match(value):
            self.passed.append((field, f'"{value}" (semver)'))
            return True
        else:
            error = (
                field,
                f'"{value}"',
                'Invalid: Must use semantic versioning (X.Y.Z)\n'
                ' Pattern: MAJOR.MINOR.PATCH\n'
                ' Example: 1.0.0, 2.1.5'
            )
            self.errors.append(error)
            return False
    def validate_lowercase_hyphen(self, field: str, value: str) -> bool:
        """Validate lowercase-hyphen naming (e.g. my-plugin)."""
        if not value:
            return True
        if LOWERCASE_HYPHEN_PATTERN.match(value):
            self.passed.append((field, f'"{value}" (lowercase-hyphen)'))
            return True
        else:
            error = (
                field,
                f'"{value}"',
                'Invalid: Must use lowercase-hyphen format\n'
                ' Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$\n'
                ' Example: my-plugin, test-tool, plugin123'
            )
            self.errors.append(error)
            return False
    def validate_email(self, field: str, value: str) -> bool:
        """Validate email address (simplified RFC 5322 pattern)."""
        if not value:
            return True
        if EMAIL_PATTERN.match(value):
            self.passed.append((field, f'"{value}" (valid email)'))
            return True
        else:
            error = (
                field,
                f'"{value}"',
                'Invalid: Must be valid email address\n'
                ' Pattern: user@domain.tld\n'
                ' Example: developer@example.com'
            )
            self.errors.append(error)
            return False
    def validate_url(self, field: str, value: str) -> bool:
        """Validate URL format.

        In strict mode an http:// URL is an error; otherwise http:// passes
        with a warning and https:// passes cleanly.
        """
        if not value:
            return True
        if self.strict_https and not HTTPS_PATTERN.match(value):
            # NOTE: a value that is not a URL at all also lands here in
            # strict mode and is reported as an HTTPS violation.
            error = (
                field,
                f'"{value}"',
                'Invalid: HTTPS required in strict mode\n'
                f' Current: {value}\n'
                f' Required: {value.replace("http://", "https://", 1)}'
            )
            self.errors.append(error)
            return False
        elif URL_PATTERN.match(value):
            if value.startswith('http://'):
                self.warnings.append((
                    field,
                    f'"{value}" - Consider using HTTPS for security'
                ))
            self.passed.append((field, f'"{value}" (valid URL)'))
            return True
        else:
            error = (
                field,
                f'"{value}"',
                'Invalid: Must be valid URL\n'
                ' Pattern: https://domain.tld/path\n'
                ' Example: https://github.com/user/repo'
            )
            self.errors.append(error)
            return False
    def validate_license(self, field: str, value: str) -> bool:
        """Validate SPDX license identifier against the common-licenses list."""
        if not value:
            return True
        if value in SPDX_LICENSES:
            self.passed.append((field, f'"{value}" (SPDX identifier)'))
            return True
        else:
            error = (
                field,
                f'"{value}"',
                'Invalid: Must be SPDX license identifier\n'
                ' Common: MIT, Apache-2.0, GPL-3.0, BSD-3-Clause, ISC\n'
                ' See: https://spdx.org/licenses/'
            )
            self.errors.append(error)
            return False
    def validate_category(self, field: str, value: str) -> bool:
        """Validate category against the 10 approved categories."""
        if not value:
            return True
        if value in APPROVED_CATEGORIES:
            self.passed.append((field, f'"{value}" (approved category)'))
            return True
        else:
            error = (
                field,
                f'"{value}"',
                'Invalid: Must be one of 10 approved categories\n'
                ' Valid: development, testing, deployment, documentation,\n'
                ' security, database, monitoring, productivity,\n'
                ' quality, collaboration'
            )
            self.errors.append(error)
            return False
    def validate_description_length(self, field: str, value: str) -> bool:
        """Validate description length (50-200 chars recommended).

        Never fails — out-of-range lengths only produce warnings.
        """
        if not value:
            return True
        length = len(value)
        if 50 <= length <= 200:
            self.passed.append((field, f'Valid length ({length} chars)'))
            return True
        elif length < 50:
            self.warnings.append((
                field,
                f'Short description ({length} chars) - consider 50-200 characters for clarity'
            ))
            return True
        else:
            self.warnings.append((
                field,
                f'Long description ({length} chars) - consider keeping under 200 characters'
            ))
            return True
# ====================
# Plugin Validation
# ====================
def validate_plugin_formats(data: Dict, validator: FormatValidator) -> int:
    """Run format checks on a plugin.json payload.

    Returns 0 when no format errors were recorded, 1 otherwise.
    """
    print(f"{Colors.CYAN}Format Checks:{Colors.NC}\n")
    # Simple field -> checker pairs, run in the documented order.
    simple_checks = (
        ('name', validator.validate_lowercase_hyphen),
        ('version', validator.validate_semver),
        ('description', validator.validate_description_length),
        ('license', validator.validate_license),
        ('homepage', validator.validate_url),
    )
    for key, check in simple_checks:
        if key in data:
            check(key, data[key])
    # repository may be a plain URL string or an object with a "url" key
    repo = data.get('repository')
    if isinstance(repo, str):
        validator.validate_url('repository', repo)
    elif isinstance(repo, dict) and 'url' in repo:
        validator.validate_url('repository.url', repo['url'])
    # category: approved list
    if 'category' in data:
        validator.validate_category('category', data['category'])
    # author: only the email sub-field is format-checked, and only
    # when author is an object
    author = data.get('author')
    if isinstance(author, dict) and 'email' in author:
        validator.validate_email('author.email', author['email'])
    return 1 if validator.errors else 0
# ====================
# Marketplace Validation
# ====================
def validate_marketplace_formats(data: Dict, validator: FormatValidator) -> int:
    """Run format checks on a marketplace.json payload.

    Returns 0 when no format errors were recorded, 1 otherwise.
    """
    print(f"{Colors.CYAN}Format Checks:{Colors.NC}\n")
    # name: lowercase-hyphen
    if 'name' in data:
        validator.validate_lowercase_hyphen('name', data['name'])
    # owner.email: only checked when owner is an object
    owner = data.get('owner')
    if isinstance(owner, dict) and 'email' in owner:
        validator.validate_email('owner.email', owner['email'])
    # version: semver (optional field)
    if 'version' in data:
        validator.validate_semver('version', data['version'])
    # metadata.{description,homepage,repository}: only when metadata is an object
    metadata = data.get('metadata')
    if isinstance(metadata, dict):
        if 'description' in metadata:
            validator.validate_description_length('metadata.description', metadata['description'])
        if 'homepage' in metadata:
            validator.validate_url('metadata.homepage', metadata['homepage'])
        if 'repository' in metadata:
            validator.validate_url('metadata.repository', metadata['repository'])
    return 1 if validator.errors else 0
# ====================
# Output Formatting
# ====================
def print_results(validator: FormatValidator):
    """Print validation results.

    Output order: passed checks, errors (with their multi-line remediation
    text), warnings, then a summary block with PASS/FAIL status.
    """
    print()
    # Passed checks
    if validator.passed:
        for field, msg in validator.passed:
            print(f" {Colors.GREEN}✅ {field}: {msg}{Colors.NC}")
    # Errors — each remediation message may span several lines
    if validator.errors:
        print()
        for field, value, msg in validator.errors:
            print(f" {Colors.RED}❌ {field}: {value}{Colors.NC}")
            for line in msg.split('\n'):
                print(f" {line}")
            print()
    # Warnings
    if validator.warnings:
        print()
        for field, msg in validator.warnings:
            print(f" {Colors.YELLOW}⚠️ {field}: {msg}{Colors.NC}")
    # Summary — pass/fail is driven solely by the presence of errors;
    # warnings are reported but never flip the status
    print()
    print(f"{Colors.BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Colors.NC}")
    total = len(validator.passed) + len(validator.errors)
    passed_count = len(validator.passed)
    if validator.errors:
        print(f"{Colors.RED}Failed: {len(validator.errors)}{Colors.NC}")
        if validator.warnings:
            print(f"{Colors.YELLOW}Warnings: {len(validator.warnings)}{Colors.NC}")
        print(f"Status: {Colors.RED}FAIL{Colors.NC}")
    else:
        print(f"Passed: {passed_count}/{total}")
        if validator.warnings:
            print(f"{Colors.YELLOW}Warnings: {len(validator.warnings)}{Colors.NC}")
        print(f"Status: {Colors.GREEN}PASS{Colors.NC}")
# ====================
# Main Logic
# ====================
def main():
    """CLI entry point.

    Parses arguments, loads the JSON config, runs the type-appropriate
    format checks, prints results, and returns the process exit code:
    0 = all formats valid, 1 = format violations, 2 = file/JSON error.
    """
    parser = argparse.ArgumentParser(
        description='Validate format compliance for plugin and marketplace configurations',
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '--file',
        type=str,
        required=True,
        help='Path to configuration file (plugin.json or marketplace.json)'
    )
    parser.add_argument(
        '--type',
        type=str,
        choices=['plugin', 'marketplace'],
        required=True,
        help='Configuration type'
    )
    parser.add_argument(
        '--strict',
        action='store_true',
        help='Enforce HTTPS for all URLs'
    )
    args = parser.parse_args()
    # Load configuration file; syntax problems are reported to stderr
    try:
        with open(args.file, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except FileNotFoundError:
        print(f"{Colors.RED}❌ File not found: {args.file}{Colors.NC}", file=sys.stderr)
        return 2
    except json.JSONDecodeError as e:
        print(f"{Colors.RED}❌ Invalid JSON: {e}{Colors.NC}", file=sys.stderr)
        print(f"{Colors.BLUE} Run JSON validation first{Colors.NC}", file=sys.stderr)
        return 2
    # Print header
    print(f"{Colors.BOLD}{Colors.BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Colors.NC}")
    print(f"{Colors.BOLD}Format Validation{Colors.NC}")
    print(f"{Colors.BOLD}{Colors.BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Colors.NC}")
    print(f"Target: {args.file}")
    print(f"Type: {args.type}")
    if args.strict:
        print(f"Strict HTTPS: {Colors.GREEN}Enforced{Colors.NC}")
    print()
    # Create validator (strict flag only affects URL checks)
    validator = FormatValidator(strict_https=args.strict)
    # Validate based on type
    if args.type == 'plugin':
        result = validate_plugin_formats(data, validator)
    else:
        result = validate_marketplace_formats(data, validator)
    # Print results
    print_results(validator)
    return result
if __name__ == '__main__':
sys.exit(main())

View File

@@ -0,0 +1,263 @@
#!/usr/bin/env python3
# ============================================================================
# JSON Validator Script
# ============================================================================
# Purpose: Multi-backend JSON syntax validation with detailed error reporting
# Version: 1.0.0
# Usage: ./json-validator.py --file <path> [--verbose]
# Returns: 0=valid, 1=invalid, 2=error
# Backends: jq (preferred), python3 json module (fallback)
# ============================================================================
import json
import sys
import argparse
import subprocess
import shutil
from pathlib import Path
# ====================
# Color Support
# ====================
class Colors:
    """ANSI color codes for terminal output."""
    RED, GREEN = '\033[0;31m', '\033[0;32m'
    YELLOW, BLUE = '\033[1;33m', '\033[0;34m'
    CYAN, BOLD = '\033[0;36m', '\033[1m'
    NC = '\033[0m'  # reset / "no color"

    @classmethod
    def disable(cls):
        """Replace every code with the empty string (non-TTY output)."""
        for name in ('RED', 'GREEN', 'YELLOW', 'BLUE', 'CYAN', 'BOLD', 'NC'):
            setattr(cls, name, '')
# Auto-disable colors when stdout is not a terminal (pipes, CI logs).
if not sys.stdout.isatty():
    Colors.disable()
# ====================
# Backend Detection
# ====================
def detect_backend():
    """Pick the best available JSON validation backend.

    Preference order: 'jq' (better diagnostics), then 'python3' (the
    stdlib json module). Returns 'none' only when neither is usable.
    """
    if shutil.which('jq') is not None:
        return 'jq'
    # Running under a Python 3 interpreter means the json fallback works.
    if sys.version_info >= (3, 0):
        return 'python3'
    return 'none'
def print_backend_info():
    """Print which backend was detected and return its name.

    Adds an install hint when no backend is available.
    """
    backend = detect_backend()
    if backend == 'jq':
        print(f"{Colors.GREEN}✅ Backend: jq (preferred){Colors.NC}")
    elif backend == 'python3':
        print(f"{Colors.YELLOW}⚠️ Backend: python3 (fallback){Colors.NC}")
    else:
        print(f"{Colors.RED}❌ No JSON validator available{Colors.NC}")
        print(f"{Colors.BLUE} Install jq for better error messages: apt-get install jq{Colors.NC}")
    return backend
# ====================
# JQ Backend
# ====================
def validate_with_jq(file_path, verbose=False):
    """Validate JSON using jq (provides better error messages).

    Returns 0 = valid, 1 = invalid JSON, 2 = file missing or jq failed
    to run. Diagnostics are printed to stdout; hard errors to stderr.
    """
    try:
        result = subprocess.run(
            ['jq', 'empty', file_path],
            capture_output=True,
            text=True,
            check=False
        )
        if result.returncode == 0:
            print(f"{Colors.GREEN}✅ Valid JSON: {file_path}{Colors.NC}")
            print(f"Backend: jq")
            return 0
        else:
            # Parse jq error message
            error_msg = result.stderr.strip()
            print(f"{Colors.RED}❌ Invalid JSON: {file_path}{Colors.NC}")
            if verbose:
                print(f"{Colors.BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Colors.NC}")
                print(f"{Colors.RED}Error Details:{Colors.NC}")
                print(f" {error_msg}")
                print()
                print(f"{Colors.YELLOW}Remediation:{Colors.NC}")
                print(" - Check for missing commas between object properties")
                print(" - Verify bracket matching: [ ] { }")
                print(" - Ensure proper string quoting")
                print(" - Use a JSON formatter/linter in your editor")
            else:
                # FIX: always surface jq's diagnostic. Previously it was only
                # printed when the text contained "parse error", silently
                # hiding other jq failures in non-verbose mode.
                if error_msg:
                    print(f"Error: {error_msg}")
            return 1
    except FileNotFoundError:
        print(f"{Colors.RED}❌ File not found: {file_path}{Colors.NC}", file=sys.stderr)
        return 2
    except Exception as e:
        print(f"{Colors.RED}❌ Error running jq: {e}{Colors.NC}", file=sys.stderr)
        return 2
# ====================
# Python3 Backend
# ====================
def validate_with_python(file_path, verbose=False):
    """Validate JSON using Python's json module (universal fallback).

    Returns 0 = valid, 1 = parse error, 2 = file/IO error. Verbose mode
    prints the failing line with surrounding context plus remediation hints.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
        # Attempt to parse JSON
        json.loads(content)
        print(f"{Colors.GREEN}✅ Valid JSON: {file_path}{Colors.NC}")
        print(f"Backend: python3")
        return 0
    except FileNotFoundError:
        print(f"{Colors.RED}❌ File not found: {file_path}{Colors.NC}", file=sys.stderr)
        return 2
    except json.JSONDecodeError as e:
        print(f"{Colors.RED}❌ Invalid JSON: {file_path}{Colors.NC}")
        if verbose:
            print(f"{Colors.BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━{Colors.NC}")
            print(f"{Colors.RED}Error Details:{Colors.NC}")
            print(f" Line: {e.lineno}")
            print(f" Column: {e.colno}")
            print(f" Issue: {e.msg}")
            print()
            # Show problematic section (best effort; context display must
            # never mask the real parse error)
            try:
                lines = content.split('\n')
                start = max(0, e.lineno - 3)
                end = min(len(lines), e.lineno + 2)
                print(f"Problematic Section (lines {start+1}-{end}):")
                for i in range(start, end):
                    line_num = i + 1
                    marker = "→" if line_num == e.lineno else " "
                    print(f" {marker} {line_num:3d} | {lines[i]}")
                print()
            except Exception:
                # FIX: was a bare `except:`, which also swallows
                # KeyboardInterrupt/SystemExit; keep the best-effort intent
                # but only for ordinary exceptions.
                pass
            print(f"{Colors.YELLOW}Remediation:{Colors.NC}")
            print(" - Check for missing commas between array elements or object properties")
            print(" - Verify bracket matching: [ ] { }")
            print(" - Ensure all strings are properly quoted")
            print(" - Use a JSON formatter/linter in your editor")
        else:
            print(f"Error: {e.msg} at line {e.lineno}, column {e.colno}")
        return 1
    except Exception as e:
        print(f"{Colors.RED}❌ Error reading file: {e}{Colors.NC}", file=sys.stderr)
        return 2
# ====================
# Main Validation
# ====================
def validate_json(file_path, verbose=False):
    """Validate a JSON file with the best available backend.

    Dispatches to jq when installed, otherwise the python3 json module.
    Returns 0 = valid, 1 = invalid, 2 = no backend available.
    """
    backend = detect_backend()
    if backend == 'none':
        print(f"{Colors.RED}❌ No JSON validation backend available{Colors.NC}", file=sys.stderr)
        print(f"{Colors.BLUE} Install jq or ensure python3 is available{Colors.NC}", file=sys.stderr)
        return 2
    runner = validate_with_jq if backend == 'jq' else validate_with_python
    return runner(file_path, verbose)
# ====================
# CLI Interface
# ====================
def main():
    """CLI entry point: parse args, optionally report the backend, validate.

    Returns the process exit code (0 = valid, 1 = invalid, 2 = usage,
    file, or backend error).
    """
    parser = argparse.ArgumentParser(
        description='Validate JSON file syntax with multi-backend support',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
./json-validator.py --file plugin.json
./json-validator.py --file marketplace.json --verbose
./json-validator.py --detect
Backends:
- jq (preferred): Fast, excellent error messages
- python3 (fallback): Universal availability
Exit codes:
0: Valid JSON
1: Invalid JSON
2: File error or backend unavailable
'''
    )
    parser.add_argument(
        '--file',
        type=str,
        help='Path to JSON file to validate'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='Show detailed error information and remediation'
    )
    parser.add_argument(
        '--detect',
        action='store_true',
        help='Detect and display available backend'
    )
    args = parser.parse_args()
    # Handle backend detection (--detect short-circuits validation)
    if args.detect:
        print_backend_info()
        return 0
    # Validate required arguments (--file is optional in argparse so that
    # --detect can run alone; enforce it here instead)
    if not args.file:
        parser.print_help()
        return 2
    # Perform validation
    return validate_json(args.file, args.verbose)
if __name__ == '__main__':
sys.exit(main())

View File

@@ -0,0 +1,265 @@
#!/usr/bin/env bash
# ============================================================================
# Schema Differ Script
# ============================================================================
# Purpose: Compare configuration against reference schemas and validate plugin entries
# Version: 1.0.0
# Usage: ./schema-differ.sh <marketplace-file> [index]
# Returns: 0=all valid, 1=validation errors, 2=error
# ============================================================================
set -euo pipefail
# Source shared library
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/../../../scripts/validate-lib.sh"
# ====================
# Configuration
# ====================
readonly MARKETPLACE_FILE="${1:-}"  # Path to marketplace.json (required)
readonly INDEX="${2:-all}"          # Single entry index to check, or "all"
# ====================
# Plugin Entry Validation
# ====================
validate_plugin_entry() {
  # Validate a single entry of the marketplace "plugins" array.
  # Arguments: $1 = entry index (display only)
  #            $2 = the entry as a JSON string
  #            $3 = strict flag (optional; currently unused by any check,
  #                 kept for interface compatibility — TODO wire it up)
  # Returns:   0 when all required fields pass, 1 on any critical issue
  local index=$1
  local entry_json=$2
  local strict=${3:-false}
  local has_errors=0
  local has_warnings=0
  # Extract fields using json_get would be complex here, so use jq/python inline
  local name version source description author keywords license
  name=$(echo "${entry_json}" | jq -r '.name // empty' 2>/dev/null || echo "")
  version=$(echo "${entry_json}" | jq -r '.version // empty' 2>/dev/null || echo "")
  source=$(echo "${entry_json}" | jq -r '.source // empty' 2>/dev/null || echo "")
  description=$(echo "${entry_json}" | jq -r '.description // empty' 2>/dev/null || echo "")
  author=$(echo "${entry_json}" | jq -r '.author // empty' 2>/dev/null || echo "")
  keywords=$(echo "${entry_json}" | jq -r '.keywords // empty' 2>/dev/null || echo "")
  license=$(echo "${entry_json}" | jq -r '.license // empty' 2>/dev/null || echo "")
  echo ""
  print_section "Entry ${index}: ${name:-<unnamed>}"
  # Required fields
  # FIX: counters use plain arithmetic assignment throughout — ((var++))
  # returns non-zero when the old value is 0 and would abort the script
  # under `set -e` if this function ever runs outside an if-condition.
  echo " Required (3):"
  # name (required, lowercase-hyphen)
  if [[ -z "${name}" ]]; then
    print_error " name: Missing (REQUIRED)"
    has_errors=$((has_errors + 1))
  elif ! validate_name_format "${name}"; then
    print_error " name: \"${name}\" - Invalid format"
    print_info " Expected: lowercase-hyphen (my-plugin)"
    has_errors=$((has_errors + 1))
  else
    print_success " name: \"${name}\""
  fi
  # source (required, valid format)
  if [[ -z "${source}" ]]; then
    print_error " source: Missing (REQUIRED)"
    has_errors=$((has_errors + 1))
  elif ! validate_source_format "${source}"; then
    print_error " source: \"${source}\" - Invalid format"
    print_info " Valid: ./path, github:user/repo, https://url"
    has_errors=$((has_errors + 1))
  else
    print_success " source: \"${source}\""
  fi
  # description (required, non-empty)
  if [[ -z "${description}" ]]; then
    print_error " description: Missing (REQUIRED)"
    has_errors=$((has_errors + 1))
  else
    # Truncate for display
    local desc_display="${description}"
    if [[ ${#description} -gt 50 ]]; then
      desc_display="${description:0:47}..."
    fi
    print_success " description: \"${desc_display}\""
  fi
  echo ""
  echo " Recommended (4):"
  # version (recommended, semver)
  if [[ -z "${version}" ]]; then
    print_warning " version: Missing"
    has_warnings=$((has_warnings + 1))
  elif ! validate_semver "${version}"; then
    print_warning " version: \"${version}\" - Invalid semver"
    has_warnings=$((has_warnings + 1))
  else
    print_success " version: \"${version}\""
  fi
  # author (recommended)
  if [[ -z "${author}" || "${author}" == "null" ]]; then
    print_warning " author: Missing"
    has_warnings=$((has_warnings + 1))
  else
    print_success " author: Present"
  fi
  # keywords (recommended)
  if [[ -z "${keywords}" || "${keywords}" == "null" || "${keywords}" == "[]" ]]; then
    print_warning " keywords: Missing"
    has_warnings=$((has_warnings + 1))
  else
    local keyword_count
    keyword_count=$(echo "${entry_json}" | jq '.keywords | length' 2>/dev/null || echo "0")
    print_success " keywords: ${keyword_count} items"
  fi
  # license (recommended, SPDX)
  if [[ -z "${license}" ]]; then
    print_warning " license: Missing"
    has_warnings=$((has_warnings + 1))
  elif ! validate_license "${license}"; then
    print_warning " license: \"${license}\" - Unknown SPDX identifier"
    has_warnings=$((has_warnings + 1))
  else
    print_success " license: \"${license}\""
  fi
  # Entry status: only required-field problems are critical
  echo ""
  if [[ ${has_errors} -eq 0 ]]; then
    if [[ ${has_warnings} -eq 0 ]]; then
      print_success "Status: PASS (no issues)"
    else
      print_info "Status: PASS with ${has_warnings} warning(s)"
    fi
    return 0
  else
    print_error "Status: FAIL (${has_errors} critical issues, ${has_warnings} warnings)"
    return 1
  fi
}
# ====================
# Main Logic
# ====================
main() {
  # Entry point: validate args, parse the marketplace file, validate the
  # selected plugin entries, and print an aggregate summary.
  # Exit codes: 0 = all entries valid (or none), 1 = failures, 2 = usage error.

  # Validate arguments
  if [[ -z "${MARKETPLACE_FILE}" ]]; then
    print_error "Usage: $0 <marketplace-file> [index]"
    exit 2
  fi
  if [[ ! -f "${MARKETPLACE_FILE}" ]]; then
    print_error "Marketplace file not found: ${MARKETPLACE_FILE}"
    exit 2
  fi
  # Validate JSON syntax
  if ! validate_json_syntax "${MARKETPLACE_FILE}"; then
    print_error "Invalid JSON in ${MARKETPLACE_FILE}"
    print_info "Run JSON validation first to fix syntax errors"
    exit 2
  fi
  # Print header
  print_header "Plugin Entries Validation"
  echo "Marketplace: ${MARKETPLACE_FILE}"
  echo ""
  # Get plugins array length
  local plugin_count
  plugin_count=$(get_json_array_length "${MARKETPLACE_FILE}" ".plugins")
  if [[ ${plugin_count} -eq 0 ]]; then
    print_warning "No plugin entries found in marketplace"
    print_info "The plugins array is empty"
    exit 0
  fi
  echo "Total Entries: ${plugin_count}"
  # Determine which entries to validate
  local entries_to_check=()
  if [[ "${INDEX}" == "all" ]]; then
    for ((i = 0; i < plugin_count; i++)); do
      entries_to_check+=("$i")
    done
  elif [[ "${INDEX}" =~ ^[0-9]+$ ]]; then
    if [[ ${INDEX} -ge ${plugin_count} ]]; then
      print_error "Invalid index: ${INDEX} (valid range: 0-$((plugin_count - 1)))"
      exit 2
    fi
    entries_to_check=("${INDEX}")
  else
    print_error "Invalid index: ${INDEX} (must be number or 'all')"
    exit 2
  fi
  # Validate each entry
  local failed_count=0
  for idx in "${entries_to_check[@]}"; do
    # Extract plugin entry (jq preferred, python3 fallback)
    local entry_json
    if command -v jq &> /dev/null; then
      entry_json=$(jq ".plugins[${idx}]" "${MARKETPLACE_FILE}" 2>/dev/null)
    else
      entry_json=$(python3 <<EOF 2>/dev/null
import json
with open('${MARKETPLACE_FILE}') as f:
    data = json.load(f)
print(json.dumps(data['plugins'][${idx}]))
EOF
      )
    fi
    # Validate entry.
    # BUG FIX: this was ((failed_count++)), which runs in the then-body —
    # NOT in an if-condition — so errexit applies: the increment returns
    # status 1 when the counter is 0 and the whole script aborted on the
    # first failing entry, before the summary could print.
    if ! validate_plugin_entry "${idx}" "${entry_json}" "false"; then
      failed_count=$((failed_count + 1))
    fi
  done
  # Summary
  echo ""
  print_header "Summary"
  local passed_count=$((${#entries_to_check[@]} - failed_count))
  local pass_percentage=$((passed_count * 100 / ${#entries_to_check[@]}))
  echo "Total Entries: ${#entries_to_check[@]}"
  if [[ ${passed_count} -gt 0 ]]; then
    print_success "Passed: ${passed_count} (${pass_percentage}%)"
  fi
  if [[ ${failed_count} -gt 0 ]]; then
    print_error "Failed: ${failed_count} ($((100 - pass_percentage))%)"
  fi
  echo ""
  if [[ ${failed_count} -eq 0 ]]; then
    print_header "✅ PASS: All plugin entries valid"
    exit 0
  else
    print_header "❌ FAIL: ${failed_count} plugin entries have errors"
    echo ""
    print_info "Action Required:"
    echo " Fix validation errors in plugin entries"
    echo " Ensure all required fields are present"
    echo " Use correct formats for names, versions, and sources"
    exit 1
  fi
}
main "$@"

View File

@@ -0,0 +1,241 @@
## Operation: Check Plugin Entries
Validate plugin entries in marketplace configuration for completeness and format compliance.
### Parameters from $ARGUMENTS
- **marketplace**: Path to marketplace.json file (required)
- **strict**: Require all recommended fields in plugin entries (optional, default: false)
- **index**: Validate specific plugin entry by index (optional, validates all if not specified)
### Workflow
1. **Load Marketplace Configuration**
```
Locate marketplace.json:
- Direct path: <marketplace>
- Relative: <marketplace>/marketplace.json
- Claude plugin: <marketplace>/.claude-plugin/marketplace.json
Validate JSON syntax first
```
2. **Extract Plugin Entries**
```
Parse plugins array from marketplace.json
Count total entries
Determine which entries to validate (all or specific index)
```
3. **Validate Each Plugin Entry**
```
For each plugin entry, execute .scripts/schema-differ.sh
Check required fields:
- name (string, lowercase-hyphen)
- source (string, valid format: ./path, github:, https://)
- description (string, non-empty)
Check recommended fields:
- version (string, semver)
- author (string or object)
- keywords (array, 3-7 items)
- license (string, SPDX identifier)
Validate field formats:
- name: lowercase-hyphen pattern
- version: semantic versioning
- source: valid source format
- license: SPDX identifier
```
4. **Aggregate Results**
```
Per-entry summary:
- Entry index
- Plugin name
- Status: PASS/FAIL
- Missing required fields
- Missing recommended fields
- Format violations
Overall summary:
- Total entries
- Passed count
- Failed count
- Total issues
```
### Plugin Entry Required Fields
- `name`: Unique plugin identifier (lowercase-hyphen)
- `source`: Where to locate plugin (./path, github:user/repo, https://url)
- `description`: Brief plugin description (non-empty)
### Plugin Entry Recommended Fields
- `version`: Plugin version (semver)
- `author`: Plugin author (string or object)
- `keywords`: Search keywords (array of 3-7 strings)
- `license`: License identifier (SPDX)
- `homepage`: Documentation URL
- `repository`: Source code URL
### Source Format Validation
**Relative Path**:
- Pattern: `./` or `../`
- Example: `./plugins/my-plugin`
**GitHub Format**:
- Pattern: `github:owner/repo`
- Example: `github:anthropics/claude-plugin`
**Git URL**:
- Pattern: `https://...git`
- Example: `https://github.com/user/plugin.git`
**Archive URL**:
- Pattern: `https://....(zip|tar.gz|tgz)`
- Example: `https://example.com/plugin.zip`
### Examples
```bash
# Validate all plugin entries in marketplace
/schema-validation entries marketplace:./test-marketplace
# Validate with strict mode (require recommended fields)
/schema-validation entries marketplace:marketplace.json strict:true
# Validate specific plugin entry by index
/schema-validation entries marketplace:marketplace.json index:0
```
### Error Handling
- **Marketplace not found**: Show searched paths
- **Invalid JSON**: Suggest running json validation
- **No plugins array**: Error - required field
- **Empty plugins array**: Warning - marketplace has no plugins
- **Invalid index**: Error with valid range
### Output Format
**Success (all entries valid)**:
```
✅ Plugin Entries Validation: PASS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Marketplace: ./test-marketplace/marketplace.json
Total Entries: 3
Entry 0: code-review ✅
Required (3/3):
✅ name: "code-review"
✅ source: "./plugins/code-review"
✅ description: "Automated code review..."
Recommended (4/4):
✅ version: "2.0.0"
✅ author: Present
✅ keywords: 3 items
✅ license: "MIT"
Entry 1: deploy-tools ✅
Required (3/3):
✅ name: "deploy-tools"
✅ source: "github:company/deploy"
✅ description: "Deployment automation..."
Recommended (3/4):
  ✅ version: "1.5.0"
  ✅ author: Present
  ✅ license: "MIT"
  ⚠️ keywords: Missing
Entry 2: security-scan ✅
Required (3/3):
✅ name: "security-scan"
✅ source: "https://example.com/plugin.zip"
✅ description: "Security vulnerability scanning..."
Summary:
Total: 3 entries
Passed: 3 (100%)
Failed: 0
Warnings: 1 (non-blocking)
Status: PASS
```
**Failure (validation errors)**:
```
❌ Plugin Entries Validation: FAIL
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Marketplace: marketplace.json
Total Entries: 2
Entry 0: my-plugin ❌
Required (2/3):
❌ name: "My-Plugin"
Invalid: Must use lowercase-hyphen format
Expected: my-plugin
❌ source: Missing (REQUIRED)
✅ description: "My awesome plugin"
Recommended (1/4):
✅ version: "1.0.0"
❌ author: Missing
❌ keywords: Missing
❌ license: Missing
Issues: 5 (2 critical, 3 warnings)
Entry 1: test-tool ✅
Required (3/3):
✅ name: "test-tool"
✅ source: "./plugins/test-tool"
✅ description: "Testing utilities"
Recommended (2/4):
⚠️ version: Missing
⚠️ author: Missing
Summary:
Total: 2 entries
Passed: 1 (50%)
Failed: 1 (50%)
Critical Issues: 2
Warnings: 5
Status: FAIL
Action Required:
Fix plugin entry #0 (my-plugin):
- Change name to lowercase-hyphen: "my-plugin"
- Add source field: "./plugins/my-plugin"
- Consider adding: author, keywords, license
```
**Empty Marketplace**:
```
⚠️ Plugin Entries Validation: WARNING
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Marketplace: marketplace.json
⚠️ No plugin entries found
- plugins array is empty
- Add at least one plugin entry to marketplace
Status: WARNING (empty marketplace)
```
### Integration
This operation is called by:
- `full-schema-validation.md` - When validating marketplace type
- `validation-orchestrator` - Marketplace comprehensive validation
- Direct user invocation for plugin entry checking
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,183 @@
## Operation: Check Required Fields
Verify all required fields are present and non-empty in plugin or marketplace configuration.
### Parameters from $ARGUMENTS
- **path**: Path to plugin directory or marketplace file (required)
- **type**: Target type: `plugin` or `marketplace` (required)
- **strict**: Fail on missing recommended fields (optional, default: false)
### Workflow
1. **Detect Target Type**
```
IF type not specified:
Auto-detect based on path structure:
- Has plugin.json → plugin
- Has marketplace.json or .claude-plugin/marketplace.json → marketplace
- Otherwise → error
```
2. **Locate Configuration File**
```
For plugin:
Check: <path>/plugin.json
For marketplace:
Check: <path>/marketplace.json
OR: <path>/.claude-plugin/marketplace.json
IF not found:
Return error with searched paths
```
3. **Execute Field Validation**
```
Execute .scripts/field-checker.sh "$config_file" "$type" "$strict"
Returns:
- List of required fields: present ✅ or missing ❌
- List of recommended fields: present ✅ or missing ⚠️
- Overall status: PASS or FAIL
```
4. **Aggregate Results**
```
Count:
- Required missing: critical errors
- Recommended missing: warnings
IF any required missing:
Exit with status 1
IF strict mode AND any recommended missing:
Exit with status 1
Otherwise:
Exit with status 0
```
### Required Fields by Type
**Plugin** (from plugin.json):
- `name` (string, lowercase-hyphen format)
- `version` (string, semver X.Y.Z)
- `description` (string, 50-200 characters)
- `author` (string or object with name field)
- `license` (string, SPDX identifier)
**Marketplace** (from marketplace.json):
- `name` (string, lowercase-hyphen format)
- `owner` (object with name field)
- `owner.name` (string)
- `owner.email` (string, valid email format)
- `plugins` (array, at least one entry)
### Recommended Fields
**Plugin**:
- `repository` (object or string, source code location)
- `homepage` (string, documentation URL)
- `keywords` (array, 3-7 relevant keywords)
- `category` (string, one of 10 approved categories)
**Marketplace**:
- `version` (string, marketplace version)
- `metadata.description` (string, marketplace purpose)
- `metadata.homepage` (string, marketplace documentation)
- `metadata.repository` (string, marketplace source)
### Examples
```bash
# Check plugin required fields
/schema-validation fields path:. type:plugin
# Check marketplace with strict mode (fail on missing recommended)
/schema-validation fields path:./test-marketplace type:marketplace strict:true
# Auto-detect type
/schema-validation fields path:.
```
### Error Handling
- **File not found**: List all searched paths
- **Invalid JSON**: Suggest running json validation first
- **Unknown type**: Show valid types (plugin, marketplace)
- **Empty field**: Report which field is present but empty
### Output Format
**Success (all required present)**:
```
✅ Required Fields Validation: PASS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: plugin.json
Type: plugin
Required Fields (5/5):
✅ name: "my-plugin"
✅ version: "1.0.0"
✅ description: "My awesome plugin"
✅ author: "Developer Name"
✅ license: "MIT"
Recommended Fields (3/4):
✅ repository: Present
✅ homepage: Present
✅ keywords: Present
⚠️ category: Missing (improves discoverability)
Status: PASS
Warnings: 1 (non-blocking)
```
**Failure (missing required)**:
```
❌ Required Fields Validation: FAIL
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: plugin.json
Type: plugin
Required Fields (3/5):
✅ name: "my-plugin"
❌ version: Missing (REQUIRED - use semver X.Y.Z)
✅ description: "My plugin"
❌ license: Missing (REQUIRED - use MIT, Apache-2.0, etc.)
✅ author: "Developer"
Critical Issues: 2
Status: FAIL
Action Required:
Add missing required fields to plugin.json:
- version: "1.0.0"
- license: "MIT"
```
**Marketplace Example**:
```
✅ Required Fields Validation: PASS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: marketplace.json
Type: marketplace
Required Fields (5/5):
  ✅ name: "my-marketplace"
  ✅ owner: Present
  ✅ owner.name: "DevTools Team"
  ✅ owner.email: "devtools@example.com"
  ✅ plugins: Array with 3 entries
Status: PASS
```
### Integration
This operation is called by:
- `full-schema-validation.md` - Second validation step after JSON syntax
- `validation-orchestrator` - Comprehensive validation checks
- Direct user invocation for field checking
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,313 @@
## Operation: Full Schema Validation
Execute complete schema validation workflow: JSON syntax → Required fields → Format compliance → Plugin entries (if marketplace).
### Parameters from $ARGUMENTS
- **path**: Path to plugin directory or marketplace (required)
- **type**: Target type: `plugin` or `marketplace` (optional, auto-detect)
- **strict**: Fail on warnings and missing recommended fields (optional, default: false)
- **verbose**: Show detailed error information (optional, default: false)
### Workflow
1. **Detect Target Type**
```
IF type not specified:
Auto-detect based on path structure:
- Has plugin.json at root → plugin
- Has marketplace.json → marketplace
- Otherwise → error
Locate configuration file:
Plugin: <path>/plugin.json
Marketplace: <path>/marketplace.json or <path>/.claude-plugin/marketplace.json
```
2. **Phase 1: JSON Syntax Validation**
```
Read validate-json.md instructions
Execute: .scripts/json-validator.py --file "$config" --verbose "$verbose"
On failure:
- Report JSON syntax errors
- Stop validation (cannot proceed with invalid JSON)
- Exit with status 1
On success:
- Continue to Phase 2
```
3. **Phase 2: Required Fields Check**
```
Read check-required-fields.md instructions
Execute: .scripts/field-checker.sh "$config" "$type" "$strict"
Collect results:
- Required fields: present/missing
- Recommended fields: present/missing
- Critical errors count
- Warnings count
On failure:
- Report missing required fields
- Continue to Phase 3 (show all issues)
On success:
- Continue to Phase 3
```
4. **Phase 3: Format Validation**
```
Read validate-formats.md instructions
Execute: .scripts/format-validator.py --file "$config" --type "$type" --strict "$strict"
Validate:
- Semantic versioning
- Lowercase-hyphen naming
- URL formats
- Email addresses
- License identifiers
- Category names (if present)
Collect results:
- Format violations count
- Warnings count
```
5. **Phase 4: Plugin Entries Validation (Marketplace Only)**
```
IF type == "marketplace":
Read check-plugin-entries.md instructions
Execute: .scripts/schema-differ.sh "$config" "all"
Validate each plugin entry:
- Required fields (name, source, description)
- Recommended fields (version, author, license, keywords)
- Format compliance
Collect results:
- Total plugin entries
- Passed entries
- Failed entries
- Total issues per entry
```
6. **Aggregate Results**
```
Compile all validation phases:
Phase 1: JSON Syntax [PASS/FAIL]
Phase 2: Required Fields [PASS/FAIL]
Phase 3: Format Compliance [PASS/FAIL]
Phase 4: Plugin Entries [PASS/FAIL] (marketplace only)
Calculate overall status:
IF any phase FAIL: Overall FAIL
IF strict mode AND any warnings: Overall FAIL
ELSE: Overall PASS
Generate summary report:
- Total checks performed
- Critical errors
- Warnings
- Overall status
- Publication readiness
```
### Exit Codes
- **0**: All validation passed (or warnings only in non-strict mode)
- **1**: Validation failed (critical errors or strict mode with warnings)
- **2**: Error (file not found, invalid arguments, etc.)
### Examples
```bash
# Full validation with auto-detect
/schema-validation full-schema path:.
# Full plugin validation with strict mode
/schema-validation full-schema path:. type:plugin strict:true
# Full marketplace validation with verbose output
/schema-validation full-schema path:./test-marketplace type:marketplace verbose:true
# Validate specific plugin in subdirectory
/schema-validation full-schema path:./plugins/my-plugin type:plugin
```
### Integration
This operation is the primary entry point for complete schema validation and is called by:
- `validation-orchestrator` comprehensive validation
- Marketplace submission workflows
- CI/CD validation pipelines
- Direct user invocation for thorough checking
### Output Format
**Success (all phases pass)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
FULL SCHEMA VALIDATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: plugin.json
Type: plugin
Mode: Standard
Phase 1: JSON Syntax ✅
Status: Valid JSON
Backend: jq
Phase 2: Required Fields ✅
Required: 5/5 present
Recommended: 3/4 present
Missing: category (non-critical)
Phase 3: Format Compliance ✅
Checks: 7/7 passed
Version: 1.0.0 (valid semver)
Name: my-plugin (valid lowercase-hyphen)
License: MIT (valid SPDX)
URLs: All valid HTTPS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Overall Status: ✅ PASS
Checks Performed: 15
Critical Errors: 0
Warnings: 1
Passed: 14
Publication Readiness: READY ✅
Your plugin meets all required standards
Consider adding: category field for better discoverability
Quality Score: 95/100 ⭐⭐⭐⭐⭐
```
**Failure (multiple phases fail)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
FULL SCHEMA VALIDATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: plugin.json
Type: plugin
Mode: Standard
Phase 1: JSON Syntax ✅
Status: Valid JSON
Backend: python3
Phase 2: Required Fields ❌
Required: 3/5 present
Missing:
❌ version (REQUIRED - use semver X.Y.Z)
❌ license (REQUIRED - use MIT, Apache-2.0, etc.)
Phase 3: Format Compliance ❌
Checks: 4/6 passed
Violations:
❌ name: "My-Plugin" - must use lowercase-hyphen
❌ homepage: "example.com" - must be valid URL
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Overall Status: ❌ FAIL
Checks Performed: 11
Critical Errors: 4
Warnings: 0
Passed: 7
Publication Readiness: NOT READY ❌
Fix 4 critical issues before submission
Priority Actions:
1. Add version field: "1.0.0"
2. Add license field: "MIT"
3. Fix name format: "my-plugin"
4. Fix homepage URL: "https://example.com"
Quality Score: 45/100 ⭐⭐
Rating: Needs Improvement
Next Steps:
1. Fix all critical errors above
2. Re-run validation: /schema-validation full-schema path:.
3. Aim for quality score 90+ for publication
```
**Marketplace Example (with plugin entries)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
FULL SCHEMA VALIDATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: marketplace.json
Type: marketplace
Mode: Standard
Phase 1: JSON Syntax ✅
Status: Valid JSON
Phase 2: Required Fields ✅
Required: 5/5 present
Recommended: 4/4 present
Phase 3: Format Compliance ✅
Checks: 4/4 passed
Phase 4: Plugin Entries ✅
Total Entries: 3
Passed: 3 (100%)
Failed: 0
Entry 0: code-review ✅
Entry 1: deploy-tools ✅
Entry 2: security-scan ✅
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Overall Status: ✅ PASS
Checks Performed: 23
Critical Errors: 0
Warnings: 0
Passed: 23
Publication Readiness: READY ✅
Your marketplace meets all standards
All 3 plugin entries are valid
Quality Score: 100/100 ⭐⭐⭐⭐⭐
Rating: Excellent
```
### Error Handling
- **File not found**: List searched paths, suggest creating configuration
- **Invalid JSON**: Stop at Phase 1, show syntax errors
- **Auto-detect failure**: Suggest specifying type explicitly
- **Script execution error**: Show script path and error message
### Performance
- **Plugin**: 1-2 seconds (3 phases)
- **Marketplace**: 2-5 seconds (4 phases, depends on plugin entry count)
- **Large Marketplace**: 5-10 seconds (50+ plugin entries)
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,76 @@
---
description: Validate JSON schemas, required fields, and format compliance for marketplaces and plugins
---
You are the Schema Validation coordinator, ensuring structural integrity and format compliance.
## Your Mission
Parse `$ARGUMENTS` to determine the requested schema validation operation and route to the appropriate sub-command.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **json** → Read `.claude/commands/schema-validation/validate-json.md`
- **fields** → Read `.claude/commands/schema-validation/check-required-fields.md`
- **formats** → Read `.claude/commands/schema-validation/validate-formats.md`
- **entries** → Read `.claude/commands/schema-validation/check-plugin-entries.md`
- **full-schema** → Read `.claude/commands/schema-validation/full-schema-validation.md`
## Argument Format
```
/schema-validation <operation> [parameters]
```
### Examples
```bash
# Validate JSON syntax
/schema-validation json file:plugin.json
# Check required fields
/schema-validation fields path:. type:plugin
# Validate formats (semver, URLs, naming)
/schema-validation formats path:.
# Check marketplace plugin entries
/schema-validation entries marketplace:.claude-plugin/marketplace.json
# Run complete schema validation
/schema-validation full-schema path:. type:plugin
```
## Validation Scope
**For Plugins**:
- Required: name, version, description, author, license
- Formats: semver (version), lowercase-hyphen (name), valid license
- Optional: keywords, category, homepage, repository
**For Marketplaces**:
- Required: name, owner, plugins
- Plugin entries: name, source, description (required); version, author, license (recommended)
- Formats: valid source (github:, URL, path)
## Error Handling
If the operation is not recognized:
1. List all available operations
2. Show validation scope
3. Provide usage examples
## Base Directory
Base directory for this skill: `.claude/commands/schema-validation/`
## Your Task
1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute schema validation with multi-backend support (jq, python3)
4. Return detailed validation results with line numbers for errors
**Current Request**: $ARGUMENTS

View File

@@ -0,0 +1,195 @@
## Operation: Validate Formats
Validate format compliance for semver, URLs, email addresses, and naming conventions.
### Parameters from $ARGUMENTS
- **path**: Path to plugin directory or marketplace file (required)
- **type**: Target type: `plugin` or `marketplace` (optional, auto-detect)
- **strict**: Enforce HTTPS for all URLs (optional, default: false)
### Workflow
1. **Locate Configuration File**
```
Auto-detect or use specified type:
Plugin: plugin.json
Marketplace: marketplace.json or .claude-plugin/marketplace.json
```
2. **Execute Format Validation**
```
Execute .scripts/format-validator.py --file "$config" --type "$type" --strict "$strict"
Validates:
- Semantic versioning (X.Y.Z)
- Lowercase-hyphen naming (^[a-z0-9]+(-[a-z0-9]+)*$)
- URL formats (http/https)
- Email addresses (RFC 5322 compliant)
- License identifiers (SPDX)
- Category names (10 approved categories)
```
3. **Report Results**
```
For each field:
✅ Valid format
❌ Invalid format with specific error and remediation
Summary:
- Total fields checked
- Passed count
- Failed count
- Exit code: 0 (all pass) or 1 (any fail)
```
### Format Validation Rules
**Semantic Versioning (version field)**:
- Pattern: `X.Y.Z` where X, Y, Z are non-negative integers
- Valid: `1.0.0`, `2.5.3`, `10.20.30`
- Invalid: `1.0`, `v1.0.0` (pre-release suffixes such as `1.0.0-beta` are allowed but optional)
**Lowercase-Hyphen Naming (name field)**:
- Pattern: `^[a-z0-9]+(-[a-z0-9]+)*$`
- Valid: `my-plugin`, `test-marketplace`, `plugin123`
- Invalid: `My-Plugin`, `test_plugin`, `plugin.name`, `-plugin`, `plugin-`
**URL Format (homepage, repository fields)**:
- Must start with `http://` or `https://`
- Strict mode: Only `https://` allowed
- Valid: `https://example.com`, `http://localhost:3000`
- Invalid: `example.com`, `www.example.com`, `ftp://example.com`
**Email Format (owner.email, author.email fields)**:
- RFC 5322 compliant pattern
- Valid: `user@example.com`, `name.surname@company.co.uk`
- Invalid: `user@`, `@example.com`, `user example.com`
**License Identifier (license field)**:
- SPDX identifier or "Proprietary"
- Common: MIT, Apache-2.0, GPL-3.0, BSD-3-Clause
- Valid: `MIT`, `Apache-2.0`, `ISC`, `Proprietary`
- Invalid: `mit`, `Apache 2.0`, `BSD`
**Category (category field)**:
- One of 10 approved categories
- Valid: development, testing, deployment, documentation, security, database, monitoring, productivity, quality, collaboration
- Invalid: coding, devops, tools, utilities
### Examples
```bash
# Validate plugin formats
/schema-validation formats path:.
# Validate marketplace with strict HTTPS enforcement
/schema-validation formats path:./test-marketplace type:marketplace strict:true
# Validate specific plugin
/schema-validation formats path:./my-plugin type:plugin
```
### Error Handling
- **File not found**: Show expected locations
- **Invalid JSON**: Suggest running json validation first
- **Format violation**: Specific error with correct pattern
- **Unknown field**: Warn but don't fail
### Output Format
**Success (all formats valid)**:
```
✅ Format Validation: PASS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: plugin.json
Type: plugin
Format Checks (7/7):
✅ name: "my-plugin" (lowercase-hyphen)
✅ version: "1.0.0" (semver)
✅ description: Valid length (73 chars)
✅ license: "MIT" (SPDX identifier)
✅ homepage: "https://example.com" (valid URL)
✅ repository: "https://github.com/user/repo" (valid URL)
✅ category: "development" (approved category)
Status: PASS
```
**Failure (format violations)**:
```
❌ Format Validation: FAIL
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: plugin.json
Type: plugin
Format Checks (4/7):
❌ name: "My-Plugin"
Invalid: Must use lowercase-hyphen format
Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$
Example: my-plugin, test-tool, plugin123
❌ version: "1.0"
Invalid: Must use semantic versioning (X.Y.Z)
Expected: Three version numbers separated by dots
Example: 1.0.0, 2.1.5
✅ description: Valid (80 characters)
❌ license: "Apache 2.0"
Invalid: Must be SPDX identifier
Expected: Apache-2.0
Valid identifiers: MIT, Apache-2.0, GPL-3.0, BSD-3-Clause
⚠️ homepage: "http://example.com"
Warning: Consider using HTTPS for security
Current: http://example.com
Recommended: https://example.com
✅ repository: "https://github.com/user/repo"
❌ category: "coding"
Invalid: Must be one of 10 approved categories
Valid: development, testing, deployment, documentation,
security, database, monitoring, productivity,
quality, collaboration
Failed: 4
Warnings: 1
Status: FAIL
Action Required:
Fix format violations:
- name: Convert to lowercase-hyphen (my-plugin)
- version: Use semver format (1.0.0)
- license: Use SPDX identifier (Apache-2.0)
- category: Choose approved category (development)
```
**Marketplace Example**:
```
✅ Format Validation: PASS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: marketplace.json
Type: marketplace
Format Checks (4/4):
✅ name: "enterprise-marketplace" (lowercase-hyphen)
✅ owner.email: "devtools@company.com" (valid email)
✅ metadata.homepage: "https://company.com/plugins" (valid HTTPS URL)
✅ metadata.repository: "https://github.com/company/plugins" (valid HTTPS URL)
Status: PASS
Strict HTTPS: Enforced ✅
```
### Integration
This operation is called by:
- `full-schema-validation.md` - Third validation step after fields check
- `best-practices` skill - Naming and versioning validation
- Direct user invocation for format checking
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,110 @@
## Operation: Validate JSON Syntax
Validate JSON file syntax with multi-backend support (jq + python3 fallback).
### Parameters from $ARGUMENTS
- **file**: Path to JSON file (required)
- **verbose**: Show detailed error information (optional, default: false)
### Workflow
1. **Parse Arguments**
```
Extract file path from $ARGUMENTS
Check if verbose mode requested
```
2. **Validate File Exists**
```
IF file does not exist:
Return error with file path
Exit with status 1
```
3. **Detect JSON Tool**
```
Execute .scripts/json-validator.py --detect
Primary: jq (faster, better error messages)
Fallback: python3 (universal availability)
```
4. **Validate JSON Syntax**
```
Execute .scripts/json-validator.py --file "$file" --verbose "$verbose"
On success:
- Print success message with file path
- Return 0
On failure:
- Print error message with line number and details
- Show problematic JSON section
- Return 1
```
### Examples
```bash
# Basic JSON validation
/schema-validation json file:plugin.json
# Verbose validation with details
/schema-validation json file:marketplace.json verbose:true
# Validate multiple files (call multiple times)
/schema-validation json file:plugin1.json
/schema-validation json file:plugin2.json
```
### Error Handling
- **File not found**: Clear message with expected path
- **Invalid JSON**: Line number, character position, error description
- **No JSON tool available**: Instruction to install jq or python3
- **Permission denied**: File access error with remediation
### Output Format
**Success**:
```
✅ Valid JSON: plugin.json
Backend: jq
```
**Failure (basic)**:
```
❌ Invalid JSON: marketplace.json
Error: Unexpected token at line 15
```
**Failure (verbose)**:
```
❌ Invalid JSON: marketplace.json
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Error Details:
Line: 15
Position: 8
Issue: Expected ',' or ']' but got '}'
Problematic Section (lines 13-17):
13 | "plugins": [
14 | {
15 | "name": "test"
16 | }
17 | }
Remediation:
- Check for missing commas between array elements
- Verify bracket matching: [ ] { }
- Use a JSON formatter/linter in your editor
```
### Integration
This operation is called by:
- `full-schema-validation.md` - First validation step
- `validation-orchestrator` - Quick validation checks
- Direct user invocation for single file checks
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,533 @@
#!/usr/bin/env bash
# ============================================================================
# File Scanner - Detect dangerous files and sensitive configurations
# ============================================================================
# Purpose: Identify files that should not be committed to version control
# Version: 1.0.0
# Usage: ./file-scanner.sh <path> <patterns> <include_hidden> <check_gitignore>
# Returns: 0=no dangerous files, 1=dangerous files found, 2=error
# ============================================================================
set -euo pipefail
# Source shared validation library (optional; the scanner still works when
# the library is absent)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PLUGIN_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
if [[ -f "${PLUGIN_ROOT}/scripts/validate-lib.sh" ]]; then
    source "${PLUGIN_ROOT}/scripts/validate-lib.sh"
fi
# ============================================================================
# Configuration
# ============================================================================
# Positional CLI arguments, all optional.
PATH_TO_SCAN="${1:-.}"       # root directory to scan
PATTERNS="${2:-all}"         # category filter: all | env | credentials | keys
INCLUDE_HIDDEN="${3:-true}"  # also scan dotfiles
CHECK_GITIGNORE="${4:-true}" # annotate findings with .gitignore coverage
DANGEROUS_FILES_FOUND=0
# Each finding is one pipe-joined record:
#   "severity|path|type|size|in_gitignore|risk|remediation"
declare -a FINDINGS=()
# ============================================================================
# Dangerous File Pattern Definitions
# ============================================================================
# NOTE: entries containing '*' are matched by the classifier functions below
# both as shell globs and (with '*' rewritten to '.*') as regexes.
# Environment Files (CRITICAL)
declare -a ENV_PATTERNS=(
    ".env"
    ".env.local"
    ".env.production"
    ".env.development"
    ".env.staging"
    ".env.test"
    "env.sh"
    "setenv.sh"
    ".envrc"
)
# Credential Files (CRITICAL)
declare -a CREDENTIAL_PATTERNS=(
    "*credentials*"
    "*secrets*"
    "*password*"
    ".aws/credentials"
    ".azure/credentials"
    ".gcp/credentials.json"
    "gcloud/credentials"
    "service-account*.json"
)
# Private Keys (CRITICAL)
declare -a KEY_PATTERNS=(
    "id_rsa"
    "id_dsa"
    "id_ed25519"
    "id_ecdsa"
    "*.pem"
    "*.key"
    "*.p12"
    "*.pfx"
    "*.jks"
    "*.keystore"
    ".gnupg/*"
    ".ssh/id_*"
)
# Database Files (HIGH)
declare -a DATABASE_PATTERNS=(
    "*.db"
    "*.sqlite"
    "*.sqlite3"
    "dump.sql"
    "*backup*.sql"
    "*.mdb"
    "*.accdb"
)
# Configuration Files (MEDIUM)
declare -a CONFIG_PATTERNS=(
    "config/database.yml"
    "appsettings.json"
    "wp-config.php"
    "settings.py"
    ".htpasswd"
)
# Backup Files (MEDIUM)
declare -a BACKUP_PATTERNS=(
    "*.bak"
    "*.backup"
    "*.old"
    "*.orig"
    "*.copy"
    "*~"
    "*.swp"
    "*.swo"
)
# Log Files (LOW)
declare -a LOG_PATTERNS=(
    "*.log"
    "debug.log"
    "error.log"
)
# ============================================================================
# Severity Classification
# ============================================================================
#######################################
# Classify a filename into a severity tier using the dangerous-file pattern
# lists: ENV/CREDENTIAL/KEY -> critical, DATABASE -> high,
# CONFIG/BACKUP -> medium, LOG -> low, anything else -> unknown.
# Each pattern is tried as a shell glob, then (with '*' -> '.*') as a regex
# substring match.
# Arguments: $1 - filename (basename) to classify
# Outputs:   severity string on stdout
#######################################
get_file_severity() {
    # BUG FIX: the pattern tests matched against "$(unknown)" (a garbled
    # command substitution) instead of the "${filename}" argument.
    local filename="$1"
    local pattern

    # CRITICAL: Environment, credentials, keys
    for pattern in "${ENV_PATTERNS[@]}" "${CREDENTIAL_PATTERNS[@]}" "${KEY_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "critical"
            return
        fi
    done

    # HIGH: Databases
    for pattern in "${DATABASE_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "high"
            return
        fi
    done

    # MEDIUM: Config, backups
    for pattern in "${CONFIG_PATTERNS[@]}" "${BACKUP_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "medium"
            return
        fi
    done

    # LOW: Logs
    for pattern in "${LOG_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "low"
            return
        fi
    done

    echo "unknown"
}
#######################################
# Map a filename onto a human-readable file-type label by probing the
# pattern lists in severity order (first match wins).
# Arguments: $1 - filename (basename)
# Outputs:   one of: Environment File, Credential File, Private Key,
#            Database File, Configuration File, Backup File, Log File,
#            Unknown
#######################################
get_file_type() {
    # BUG FIX: every comparison used "$(unknown)" (garbled command
    # substitution) instead of the "${filename}" argument.
    local filename="$1"
    local pattern

    for pattern in "${ENV_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Environment File"
            return
        fi
    done
    for pattern in "${CREDENTIAL_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Credential File"
            return
        fi
    done
    for pattern in "${KEY_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Private Key"
            return
        fi
    done
    for pattern in "${DATABASE_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Database File"
            return
        fi
    done
    for pattern in "${CONFIG_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Configuration File"
            return
        fi
    done
    for pattern in "${BACKUP_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Backup File"
            return
        fi
    done
    for pattern in "${LOG_PATTERNS[@]}"; do
        if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
            echo "Log File"
            return
        fi
    done
    echo "Unknown"
}
#######################################
# Print a one-line risk description for a given file-type label.
# Arguments: $1 - file type as produced by get_file_type
# Outputs:   description on stdout ("Unknown risk" for unrecognized types)
#######################################
get_risk_description() {
    local kind="$1"
    # Table-driven lookup instead of a case chain; unknown types fall
    # through to the default via ':-'.
    local -A risk_map=(
        ["Environment File"]="Contains secrets, API keys, and configuration"
        ["Credential File"]="Direct access credentials"
        ["Private Key"]="Authentication keys"
        ["Database File"]="May contain sensitive user data"
        ["Configuration File"]="May contain hardcoded secrets"
        ["Backup File"]="May contain previous versions with secrets"
        ["Log File"]="May contain leaked sensitive information"
    )
    printf '%s\n' "${risk_map[${kind}]:-Unknown risk}"
}

#######################################
# Suggest a remediation depending on whether the file is gitignored.
# Arguments: $1 - file type (unused; kept for interface stability)
#            $2 - "true"/"false": file covered by .gitignore
# Outputs:   remediation advice on stdout
#######################################
get_remediation() {
    local kind="$1"
    local covered="$2"
    if [[ "${covered}" != "false" ]]; then
        echo "Verify .gitignore is working, review if file should exist"
    else
        echo "Add to .gitignore, remove from git history, rotate credentials"
    fi
}
# ============================================================================
# .gitignore Checking
# ============================================================================
#######################################
# Best-effort check whether a path is covered by the scan root's .gitignore.
# Handles literal entries and simple glob lines only — NOT the full
# gitignore(5) spec (negations, anchored paths, '**' are not supported).
# Globals:   PATH_TO_SCAN (read)
# Arguments: $1 - file path to look up
# Outputs:   "true" or "false" on stdout
#######################################
is_in_gitignore() {
    local target="$1"
    local ignore_file="${PATH_TO_SCAN}/.gitignore"

    # No .gitignore at the scan root -> nothing is ignored.
    [[ -f "${ignore_file}" ]] || { echo "false"; return; }

    local name
    name=$(basename "${target}")

    # Literal hits: the basename or the full path appears verbatim somewhere.
    if grep -qF "${name}" "${ignore_file}" 2>/dev/null \
        || grep -qF "${target}" "${ignore_file}" 2>/dev/null; then
        echo "true"
        return
    fi

    # Glob-style entries: try each non-comment, non-empty line as a shell
    # pattern against the basename.
    local entry
    while IFS= read -r entry; do
        [[ -z "${entry}" || "${entry}" =~ ^#.*$ ]] && continue
        if [[ "${name}" == ${entry} ]]; then
            echo "true"
            return
        fi
    done < "${ignore_file}"

    echo "false"
}
# ============================================================================
# File Scanning
# ============================================================================
#######################################
# Decide whether a filename falls under the user-requested PATTERNS
# category filter ("all" accepts everything; otherwise the name must glob-
# match a pattern of the requested category).
# Globals:   PATTERNS (read); pattern arrays (read)
# Arguments: $1 - filename (basename)
# Returns:   0 = check this file, 1 = skip it
#######################################
should_check_pattern() {
    # BUG FIX: comparisons used "$(unknown)" (garbled command substitution)
    # instead of the "${filename}" argument.
    local filename="$1"
    local pattern

    if [[ "${PATTERNS}" == "all" ]]; then
        return 0
    fi

    case "${PATTERNS}" in
        *env*)
            for pattern in "${ENV_PATTERNS[@]}"; do
                [[ "${filename}" == ${pattern} ]] && return 0
            done
            ;;
        *credentials*)
            for pattern in "${CREDENTIAL_PATTERNS[@]}"; do
                [[ "${filename}" == ${pattern} ]] && return 0
            done
            ;;
        *keys*)
            for pattern in "${KEY_PATTERNS[@]}"; do
                [[ "${filename}" == ${pattern} ]] && return 0
            done
            ;;
    esac
    return 1
}
#######################################
# Inspect one file; if it matches a dangerous pattern, append a record to
# FINDINGS and bump DANGEROUS_FILES_FOUND.
# Globals:   INCLUDE_HIDDEN, CHECK_GITIGNORE (read);
#            FINDINGS, DANGEROUS_FILES_FOUND (written)
# Arguments: $1 - path of the file to inspect
#######################################
scan_file() {
    # BUG FIX: several call sites used "$(unknown)" (garbled command
    # substitution) instead of "${filename}".
    local filepath="$1"
    local filename
    filename=$(basename "${filepath}")

    # Skip hidden files unless explicitly included
    if [[ "${filename}" =~ ^\. && "${INCLUDE_HIDDEN}" != "true" ]]; then
        return
    fi

    # Skip VCS/dependency/build directories
    if [[ "${filepath}" =~ (\.git|node_modules|vendor|dist|build)/ ]]; then
        return
    fi

    # Only files with a known dangerous classification are reported
    local severity
    severity=$(get_file_severity "${filename}")
    if [[ "${severity}" == "unknown" ]]; then
        return
    fi

    if ! should_check_pattern "${filename}"; then
        return
    fi

    # Gather report details (size probe works on both BSD and GNU stat)
    local file_type
    file_type=$(get_file_type "${filename}")
    local size
    size=$(stat -f%z "${filepath}" 2>/dev/null || stat -c%s "${filepath}" 2>/dev/null || echo "0")
    local in_gitignore="false"
    if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
        in_gitignore=$(is_in_gitignore "${filepath}")
    fi
    local risk
    risk=$(get_risk_description "${file_type}")
    local remediation
    remediation=$(get_remediation "${file_type}" "${in_gitignore}")

    FINDINGS+=("${severity}|${filepath}|${file_type}|${size}|${in_gitignore}|${risk}|${remediation}")
    # BUG FIX: ((var++)) returns the pre-increment value, so the first bump
    # from 0 exits status 1 and aborts the script under `set -e`; use an
    # arithmetic assignment instead.
    DANGEROUS_FILES_FOUND=$((DANGEROUS_FILES_FOUND + 1))
}
# ============================================================================
# Main Execution
# ============================================================================
#######################################
# Entry point: validate arguments, walk the tree, classify every file, and
# print a severity-grouped report.
# Exits 0 when nothing dangerous was found, 1 when findings exist,
# 2 on bad arguments.
#######################################
main() {
    # Validate path
    if [[ ! -d "${PATH_TO_SCAN}" ]]; then
        echo "ERROR: Path is not a directory: ${PATH_TO_SCAN}" >&2
        exit 2
    fi

    echo "Dangerous Files Scan Results"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "Path: ${PATH_TO_SCAN}"
    echo "Include Hidden: ${INCLUDE_HIDDEN}"
    echo "Check .gitignore: ${CHECK_GITIGNORE}"
    echo ""

    # Scan files (NUL-delimited find output so odd filenames survive).
    # BUG FIX: ((var++)) yields the pre-increment value; the first increment
    # from 0 returned status 1 and killed the script under `set -e`.
    # All counters below use arithmetic assignment instead.
    local files_scanned=0
    local file
    if [[ "${INCLUDE_HIDDEN}" == "true" ]]; then
        while IFS= read -r -d '' file; do
            scan_file "${file}"
            files_scanned=$((files_scanned + 1))
        done < <(find "${PATH_TO_SCAN}" -type f -print0 2>/dev/null)
    else
        while IFS= read -r -d '' file; do
            scan_file "${file}"
            files_scanned=$((files_scanned + 1))
        done < <(find "${PATH_TO_SCAN}" -type f -not -path '*/.*' -print0 2>/dev/null)
    fi

    echo "Files Scanned: ${files_scanned}"
    echo ""

    # Nothing found: success path
    if [[ ${DANGEROUS_FILES_FOUND} -eq 0 ]]; then
        echo "✅ SUCCESS: No dangerous files detected"
        echo "All files safe"
        exit 0
    fi

    echo "⚠️ DANGEROUS FILES DETECTED: ${DANGEROUS_FILES_FOUND}"
    echo ""

    # Warn when there is no .gitignore at all
    if [[ "${CHECK_GITIGNORE}" == "true" && ! -f "${PATH_TO_SCAN}/.gitignore" ]]; then
        echo "⚠️ WARNING: No .gitignore file found"
        echo " Recommendation: Create .gitignore to prevent committing sensitive files"
        echo ""
    fi

    # Tally findings by severity and gitignore coverage
    local critical_count=0
    local high_count=0
    local medium_count=0
    local low_count=0
    local not_in_gitignore=0
    local finding severity filepath file_type size in_gitignore risk remediation
    for finding in "${FINDINGS[@]}"; do
        IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
        case "${severity}" in
            critical) critical_count=$((critical_count + 1)) ;;
            high) high_count=$((high_count + 1)) ;;
            medium) medium_count=$((medium_count + 1)) ;;
            low) low_count=$((low_count + 1)) ;;
        esac
        if [[ "${in_gitignore}" == "false" ]]; then
            not_in_gitignore=$((not_in_gitignore + 1))
        fi
    done

    # Detailed sections, most severe first
    local size_human
    if [[ ${critical_count} -gt 0 ]]; then
        echo "CRITICAL Files (${critical_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
            if [[ "${severity}" == "critical" ]]; then
                # Convert size to human readable
                if [[ ${size} -ge 1048576 ]]; then
                    size_human="$(( size / 1048576 )) MB"
                elif [[ ${size} -ge 1024 ]]; then
                    size_human="$(( size / 1024 )) KB"
                else
                    size_human="${size} bytes"
                fi
                echo "${filepath} (${size_human})"
                echo " Type: ${file_type}"
                echo " Risk: ${risk}"
                if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
                    if [[ "${in_gitignore}" == "true" ]]; then
                        echo " Status: In .gitignore ✓"
                    else
                        echo " Status: NOT in .gitignore ⚠️"
                    fi
                fi
                echo " Remediation: ${remediation}"
                echo ""
            fi
        done
    fi

    if [[ ${high_count} -gt 0 ]]; then
        echo "HIGH Files (${high_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
            if [[ "${severity}" == "high" ]]; then
                if [[ ${size} -ge 1048576 ]]; then
                    size_human="$(( size / 1048576 )) MB"
                elif [[ ${size} -ge 1024 ]]; then
                    size_human="$(( size / 1024 )) KB"
                else
                    size_human="${size} bytes"
                fi
                echo " ⚠️ ${filepath} (${size_human})"
                echo " Type: ${file_type}"
                if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
                    echo " Status: $([ "${in_gitignore}" == "true" ] && echo "In .gitignore ✓" || echo "NOT in .gitignore ⚠️")"
                fi
                echo ""
            fi
        done
    fi

    if [[ ${medium_count} -gt 0 ]]; then
        echo "MEDIUM Files (${medium_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
            if [[ "${severity}" == "medium" ]]; then
                echo " 💡 ${filepath}"
                echo " Type: ${file_type}"
                echo ""
            fi
        done
    fi

    echo "Summary:"
    echo " Critical: ${critical_count}"
    echo " High: ${high_count}"
    echo " Medium: ${medium_count}"
    echo " Low: ${low_count}"
    if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
        echo " Not in .gitignore: ${not_in_gitignore}"
    fi
    echo ""
    echo "Action Required: $([ ${critical_count} -gt 0 ] || [ ${not_in_gitignore} -gt 0 ] && echo "YES" || echo "REVIEW")"
    exit 1
}
main "$@"

View File

@@ -0,0 +1,407 @@
#!/usr/bin/env bash
# ============================================================================
# Permission Checker - Audit file permissions for security issues
# ============================================================================
# Purpose: Detect world-writable files, overly permissive scripts, and permission issues
# Version: 1.0.0
# Usage: ./permission-checker.sh <path> <strict> <check_executables> <report_all>
# Returns: 0=all permissions correct, 1=issues found, 2=error
# ============================================================================
set -euo pipefail
# Source shared validation library (optional; checker works without it)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PLUGIN_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
if [[ -f "${PLUGIN_ROOT}/scripts/validate-lib.sh" ]]; then
    source "${PLUGIN_ROOT}/scripts/validate-lib.sh"
fi
# ============================================================================
# Configuration
# ============================================================================
# Positional CLI arguments, all optional.
PATH_TO_SCAN="${1:-.}"        # file or directory to audit
STRICT="${2:-false}"          # also flag minor deviations from expected perms
CHECK_EXECUTABLES="${3:-true}" # verify executables carry a shebang
REPORT_ALL="${4:-false}"      # emit "info" records for correct files too
ISSUES_FOUND=0
# Each finding is one pipe-joined record:
#   "severity|file|perms|symbolic|expected|issue|risk|fix"
declare -a FINDINGS=()
# ============================================================================
# Permission Classification
# ============================================================================
#######################################
# Print a file's permission bits in octal.
# Tries BSD stat first (-f "%Op", trimmed to the trailing four octal digits
# via sed), then GNU stat (-c "%a"); falls back to "0644" if both fail.
# NOTE(review): the two backends differ in width (BSD path yields 4 digits,
# GNU yields 3 without a leading 0) — callers compare against both forms.
#######################################
get_permission_octal() {
    local file="$1"
    stat -f "%Op" "${file}" 2>/dev/null | sed 's/.*\([0-7][0-7][0-7][0-7]\)$/\1/' || \
    stat -c "%a" "${file}" 2>/dev/null || echo "0644"
}

#######################################
# Print a file's symbolic permission string (e.g. "rwxr-xr-x").
# Parses `ls -ld` output; `tail -c 10` keeps the trailing 9 permission
# characters plus the newline.
# NOTE(review): on systems where ls appends an ACL/xattr marker ("+"/"@")
# to the mode field this may include that marker — confirm on target OS.
#######################################
get_permission_symbolic() {
    local file="$1"
    ls -ld "${file}" 2>/dev/null | awk '{print $1}' | tail -c 10
}
#######################################
# Permission-bit predicates. Each takes an octal mode string such as
# "0644" or "755" and returns 0 (true) / 1 (false).
#######################################

# True when the "other" write bit is set: last octal digit is 2, 3, 6 or 7.
is_world_writable() {
    local mode="$1"
    case "${mode: -1}" in
        [2367]) return 0 ;;
        *)      return 1 ;;
    esac
}

# True when the "other" read bit is set: last octal digit is 4, 5, 6 or 7.
is_world_readable() {
    local mode="$1"
    case "${mode: -1}" in
        [4567]) return 0 ;;
        *)      return 1 ;;
    esac
}

# True when ANY execute bit (owner, group or other) is set, i.e. any digit
# of the mode is odd.
is_executable() {
    local mode="$1"
    case "${mode}" in
        *[1357]*) return 0 ;;
        *)        return 1 ;;
    esac
}
# ============================================================================
# Severity Classification
# ============================================================================
#######################################
# Map a permission issue type onto a severity tier.
# Arguments: $1 - issue type identifier
#            $2 - octal perms (currently unused; kept for interface stability)
# Outputs:   critical | high | medium | low on stdout
#######################################
get_issue_severity() {
    local issue_kind="$1"
    local mode="$2"

    case "${issue_kind}" in
        world_writable_executable | world_writable)
            echo "critical" ;;
        missing_shebang | overly_permissive_sensitive)
            echo "high" ;;
        wrong_directory_perms | non_executable_script)
            echo "medium" ;;
        *)
            # inconsistent_perms and anything unrecognized
            echo "low" ;;
    esac
}
# ============================================================================
# Shebang Validation
# ============================================================================
#######################################
# Return 0 when the file exists and its first line starts with "#!".
# Arguments: $1 - file path
#######################################
has_shebang() {
    local script="$1"
    [[ -f "${script}" ]] || return 1
    local opening
    opening=$(head -n 1 "${script}" 2>/dev/null || echo "")
    # Glob prefix match instead of a regex anchor — same result.
    [[ "${opening}" == '#!'* ]]
}

#######################################
# Suggest a canonical shebang line based on the file extension.
# Arguments: $1 - file path
# Outputs:   shebang string on stdout; empty for unrecognized extensions
#######################################
get_expected_shebang() {
    local script_name
    script_name=$(basename "$1")
    case "${script_name}" in
        *.sh | *.bash) echo "#!/usr/bin/env bash" ;;
        *.py)          echo "#!/usr/bin/env python3" ;;
        *.js)          echo "#!/usr/bin/env node" ;;
        *.rb)          echo "#!/usr/bin/env ruby" ;;
        *)             echo "" ;;
    esac
}
# ============================================================================
# Expected Permissions
# ============================================================================
#######################################
# Compute the expected octal permissions for a path.
# Check order matters: sensitive files, then SSH/GPG material, then
# scripts, then directories, then the generic 644 default.
# Globals:   none
# Arguments: $1 - file or directory path
# Outputs:   three-digit octal string on stdout (600/644/700/755)
#######################################
get_expected_permissions() {
    local file="$1"
    local basename
    basename=$(basename "${file}")
    local is_exec
    # Check if currently executable (used only for the script branch below)
    if [[ -x "${file}" ]]; then
        is_exec="true"
    else
        is_exec="false"
    fi
    # Sensitive files: dotenv/credential/secret names should be owner-only
    if [[ "${basename}" =~ ^\.env || "${basename}" =~ credentials || "${basename}" =~ secrets ]]; then
        echo "600"
        return
    fi
    # SSH/GPG files: public keys stay world-readable, everything else 600
    if [[ "${file}" =~ \.ssh/id_ || "${file}" =~ \.gnupg/ ]]; then
        if [[ "${basename}" =~ \.pub$ ]]; then
            echo "644"
        else
            echo "600"
        fi
        return
    fi
    # Scripts: executable (or shebang-bearing) scripts get 755, others 644
    if [[ "${basename}" =~ \.(sh|bash|py|js|rb)$ ]]; then
        if [[ "${is_exec}" == "true" ]] || has_shebang "${file}"; then
            echo "755"
        else
            echo "644"
        fi
        return
    fi
    # Directories: key-material dirs are private, others standard
    if [[ -d "${file}" ]]; then
        if [[ "${basename}" =~ ^\.ssh$ || "${basename}" =~ ^\.gnupg$ ]]; then
            echo "700"
        else
            echo "755"
        fi
        return
    fi
    # Default for regular data files
    echo "644"
}
# ============================================================================
# Permission Checking
# ============================================================================
#######################################
# Audit one path's permission bits and record any findings.
# Globals:   STRICT, CHECK_EXECUTABLES, REPORT_ALL (read);
#            FINDINGS, ISSUES_FOUND (written)
# Arguments: $1 - file or directory path
#######################################
check_file_permissions() {
    local file="$1"
    local perms
    perms=$(get_permission_octal "${file}")
    local symbolic
    symbolic=$(get_permission_symbolic "${file}")
    local expected
    expected=$(get_expected_permissions "${file}")
    local basename
    basename=$(basename "${file}")
    local severity

    # Skip VCS/dependency/build directories
    if [[ "${file}" =~ (\.git|node_modules|vendor|dist|build)/ ]]; then
        return
    fi

    # BUG FIX throughout: ((ISSUES_FOUND++)) returns the pre-increment
    # value, so the very first finding (0 -> 1) exited status 1 and aborted
    # the script under `set -e`; arithmetic assignments are used instead.

    # CRITICAL: 777 — world-writable and executable
    if [[ "${perms}" == "0777" || "${perms}" == "777" ]]; then
        severity=$(get_issue_severity "world_writable_executable" "${perms}")
        FINDINGS+=("${severity}|${file}|${perms}|${symbolic}|${expected}|World-writable and executable|Anyone can modify and execute|chmod ${expected} \"${file}\"")
        ISSUES_FOUND=$((ISSUES_FOUND + 1))
        return
    fi

    # CRITICAL: 666 — world-writable
    if [[ "${perms}" == "0666" || "${perms}" == "666" ]]; then
        severity=$(get_issue_severity "world_writable" "${perms}")
        FINDINGS+=("${severity}|${file}|${perms}|${symbolic}|${expected}|World-writable file|Anyone can modify content|chmod ${expected} \"${file}\"")
        ISSUES_FOUND=$((ISSUES_FOUND + 1))
        return
    fi

    # HIGH: executable script with no shebang line
    if [[ -f "${file}" && -x "${file}" && "${CHECK_EXECUTABLES}" == "true" ]]; then
        if [[ "${basename}" =~ \.(sh|bash|py|js|rb)$ ]]; then
            if ! has_shebang "${file}"; then
                local expected_shebang
                expected_shebang=$(get_expected_shebang "${file}")
                FINDINGS+=("high|${file}|${perms}|${symbolic}|${perms}|Executable without shebang|May not execute correctly|Add ${expected_shebang} to first line")
                ISSUES_FOUND=$((ISSUES_FOUND + 1))
            fi
        fi
    fi

    # Sensitive files must not be world-readable (and in strict mode must be 600)
    if [[ "${basename}" =~ ^\.env || "${basename}" =~ credentials || "${basename}" =~ secrets ]]; then
        if is_world_readable "${perms}"; then
            FINDINGS+=("high|${file}|${perms}|${symbolic}|600|Sensitive file world-readable|Secrets visible to all users|chmod 600 \"${file}\"")
            ISSUES_FOUND=$((ISSUES_FOUND + 1))
            return
        fi
        if [[ "${perms}" != "0600" && "${perms}" != "600" && "${STRICT}" == "true" ]]; then
            FINDINGS+=("medium|${file}|${perms}|${symbolic}|600|Sensitive file should be 600|Reduce permissions|chmod 600 \"${file}\"")
            ISSUES_FOUND=$((ISSUES_FOUND + 1))
            return
        fi
    fi

    # Strict mode: flag known minor deviations from the expected mode
    if [[ "${STRICT}" == "true" ]]; then
        if [[ "${perms}" != "0${expected}" && "${perms}" != "${expected}" ]]; then
            if [[ "${perms}" =~ ^0?775$ && "${expected}" == "755" ]]; then
                FINDINGS+=("medium|${file}|${perms}|${symbolic}|${expected}|Group-writable (strict mode)|Remove group write|chmod ${expected} \"${file}\"")
                ISSUES_FOUND=$((ISSUES_FOUND + 1))
            elif [[ "${perms}" =~ ^0?755$ && "${expected}" == "644" ]]; then
                FINDINGS+=("low|${file}|${perms}|${symbolic}|${expected}|Executable but should not be|Remove executable bit|chmod ${expected} \"${file}\"")
                ISSUES_FOUND=$((ISSUES_FOUND + 1))
            fi
        fi
    fi

    # Report-all mode: record correct files as info (not counted as issues)
    if [[ "${REPORT_ALL}" == "true" ]]; then
        if [[ "${perms}" == "0${expected}" || "${perms}" == "${expected}" ]]; then
            FINDINGS+=("info|${file}|${perms}|${symbolic}|${expected}|Permissions correct|N/A|N/A")
        fi
    fi
}
# ============================================================================
# Main Execution
# ============================================================================
#######################################
# Entry point: audit every file under PATH_TO_SCAN and print a
# severity-grouped permission report.
# Exits 0 when no issues were found, 1 when issues exist, 2 on bad path.
#######################################
main() {
    # Validate path
    if [[ ! -e "${PATH_TO_SCAN}" ]]; then
        echo "ERROR: Path does not exist: ${PATH_TO_SCAN}" >&2
        exit 2
    fi

    echo "File Permission Audit Results"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "Path: ${PATH_TO_SCAN}"
    echo "Strict Mode: ${STRICT}"
    echo "Check Executables: ${CHECK_EXECUTABLES}"
    echo ""

    # Scan files.
    # BUG FIX: ((var++)) yields the pre-increment value; the first increment
    # from 0 returned status 1 and killed the script under `set -e`.
    # Counters below use arithmetic assignment instead.
    local files_checked=0
    local file
    if [[ -f "${PATH_TO_SCAN}" ]]; then
        check_file_permissions "${PATH_TO_SCAN}"
        files_checked=$((files_checked + 1))
    elif [[ -d "${PATH_TO_SCAN}" ]]; then
        while IFS= read -r -d '' file; do
            check_file_permissions "${file}"
            files_checked=$((files_checked + 1))
        done < <(find "${PATH_TO_SCAN}" -print0 2>/dev/null)
    fi

    echo "Files Checked: ${files_checked}"
    echo ""

    # Success path
    if [[ ${ISSUES_FOUND} -eq 0 ]]; then
        echo "✅ SUCCESS: All file permissions correct"
        echo "No permission issues detected"
        exit 0
    fi

    echo "⚠️ PERMISSION ISSUES DETECTED: ${ISSUES_FOUND}"
    echo ""

    # Tally by severity ("info" records are not counted)
    local critical_count=0
    local high_count=0
    local medium_count=0
    local low_count=0
    local finding severity perms symbolic expected issue risk fix
    for finding in "${FINDINGS[@]}"; do
        IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
        case "${severity}" in
            critical) critical_count=$((critical_count + 1)) ;;
            high) high_count=$((high_count + 1)) ;;
            medium) medium_count=$((medium_count + 1)) ;;
            low) low_count=$((low_count + 1)) ;;
            info) ;; # Don't count info
        esac
    done

    # Detailed sections, most severe first
    if [[ ${critical_count} -gt 0 ]]; then
        echo "CRITICAL Issues (${critical_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
            if [[ "${severity}" == "critical" ]]; then
                echo "${file} (${perms})"
                echo " Current: ${symbolic} (${perms})"
                echo " Issue: ${issue}"
                echo " Risk: ${risk}"
                echo " Fix: ${fix}"
                echo ""
            fi
        done
    fi

    if [[ ${high_count} -gt 0 ]]; then
        echo "HIGH Issues (${high_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
            if [[ "${severity}" == "high" ]]; then
                echo " ⚠️ ${file} (${perms})"
                echo " Issue: ${issue}"
                echo " Fix: ${fix}"
                echo ""
            fi
        done
    fi

    if [[ ${medium_count} -gt 0 ]]; then
        echo "MEDIUM Issues (${medium_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
            if [[ "${severity}" == "medium" ]]; then
                echo " 💡 ${file} (${perms})"
                echo " Recommendation: ${issue}"
                echo " Fix: ${fix}"
                echo ""
            fi
        done
    fi

    echo "Summary:"
    echo " Critical: ${critical_count}"
    echo " High: ${high_count}"
    echo " Medium: ${medium_count}"
    echo " Low: ${low_count}"
    echo ""
    if [[ ${critical_count} -gt 0 ]]; then
        echo "Action Required: FIX IMMEDIATELY"
    elif [[ ${high_count} -gt 0 ]]; then
        echo "Action Required: YES"
    else
        echo "Action Required: REVIEW"
    fi
    exit 1
}
main "$@"

View File

@@ -0,0 +1,416 @@
#!/usr/bin/env bash
# ============================================================================
# Secret Scanner - Detect exposed secrets with 50+ patterns
# ============================================================================
# Purpose: Comprehensive secret detection for API keys, tokens, credentials
# Version: 1.0.0
# Usage: ./secret-scanner.sh <path> <recursive> <patterns> <exclude> <severity>
# Returns: 0=no secrets, 1=secrets found, 2=error
# ============================================================================
set -euo pipefail
# Source shared validation library (optional; scanner works without it)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PLUGIN_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
if [[ -f "${PLUGIN_ROOT}/scripts/validate-lib.sh" ]]; then
    source "${PLUGIN_ROOT}/scripts/validate-lib.sh"
fi
# ============================================================================
# Configuration
# ============================================================================
# Positional CLI arguments, all optional.
PATH_TO_SCAN="${1:-.}"     # file or directory to scan
RECURSIVE="${2:-true}"     # descend into subdirectories
PATTERNS="${3:-all}"       # all | api-keys | private-keys | passwords | cloud
EXCLUDE="${4:-}"           # comma-separated regexes of paths to skip
MIN_SEVERITY="${5:-medium}" # report threshold: critical|high|medium|low
SECRETS_FOUND=0
# Each finding is one pipe-joined record:
#   "severity|file|line_numbers|pattern_name|type"
declare -a FINDINGS=()
# ============================================================================
# Secret Pattern Definitions (50+ patterns)
# ============================================================================
# API Keys & Service Tokens — values are POSIX extended regexes for grep -E.
declare -A API_KEY_PATTERNS=(
    # Stripe
    ["stripe_live_key"]='sk_live_[a-zA-Z0-9]{24,}'
    ["stripe_test_key"]='sk_test_[a-zA-Z0-9]{24,}'
    ["stripe_publishable_live"]='pk_live_[a-zA-Z0-9]{24,}'
    ["stripe_publishable_test"]='pk_test_[a-zA-Z0-9]{24,}'
    # OpenAI
    # NOTE(review): 'sk-…' also prefixes Stripe keys; a Stripe secret key can
    # match this pattern as well — findings may overlap.
    ["openai_api_key"]='sk-[a-zA-Z0-9]{32,}'
    # AWS
    ["aws_access_key_id"]='AKIA[0-9A-Z]{16}'
    ["aws_secret_access_key"]='aws_secret_access_key.*[=:].*[A-Za-z0-9/+=]{40}'
    # Google
    ["google_api_key"]='AIza[0-9A-Za-z_-]{35}'
    ["google_oauth_id"]='[0-9]+-[0-9A-Za-z_-]{32}\.apps\.googleusercontent\.com'
    # GitHub
    ["github_personal_token"]='ghp_[a-zA-Z0-9]{36}'
    ["github_oauth_token"]='gho_[a-zA-Z0-9]{36}'
    ["github_app_token"]='ghs_[a-zA-Z0-9]{36}'
    ["github_user_token"]='ghu_[a-zA-Z0-9]{36}'
    ["github_refresh_token"]='ghr_[a-zA-Z0-9]{36}'
    # Slack
    ["slack_token"]='xox[baprs]-[0-9a-zA-Z]{10,}'
    ["slack_webhook"]='https://hooks\.slack\.com/services/T[0-9A-Z]{8}/B[0-9A-Z]{8}/[0-9A-Za-z]{24}'
    # Twitter
    ["twitter_access_token"]='[0-9]{15,}-[0-9a-zA-Z]{35,44}'
    ["twitter_api_key"]='[A-Za-z0-9]{25}'
    ["twitter_api_secret"]='[A-Za-z0-9]{50}'
    # Facebook
    ["facebook_access_token"]='EAA[0-9A-Za-z]{90,}'
    # SendGrid
    ["sendgrid_api_key"]='SG\.[a-zA-Z0-9_-]{22}\.[a-zA-Z0-9_-]{43}'
    # Mailgun
    ["mailgun_api_key"]='key-[0-9a-zA-Z]{32}'
    # Twilio
    ["twilio_account_sid"]='AC[a-f0-9]{32}'
    ["twilio_api_key"]='SK[a-f0-9]{32}'
    # Azure
    ["azure_storage_key"]='[a-zA-Z0-9/+=]{88}'
    ["azure_connection_string"]='AccountKey=[a-zA-Z0-9/+=]{88}'
    # Generic patterns
    ["generic_api_key"]='api[_-]?key.*[=:].*['\''"][a-zA-Z0-9]{20,}['\''"]'
    ["generic_secret"]='secret.*[=:].*['\''"][a-zA-Z0-9]{20,}['\''"]'
    ["generic_token"]='token.*[=:].*['\''"][a-zA-Z0-9]{20,}['\''"]'
    ["generic_password"]='password.*[=:].*['\''"][^'\''\"]{8,}['\''"]'
    ["bearer_token"]='Bearer [a-zA-Z0-9_-]{20,}'
    ["authorization_header"]='Authorization.*Basic [a-zA-Z0-9+/=]{20,}'
)
# Private Keys — PEM armor headers, matched as fixed strings (grep -F).
declare -A PRIVATE_KEY_PATTERNS=(
    ["rsa_private_key"]='-----BEGIN RSA PRIVATE KEY-----'
    ["openssh_private_key"]='-----BEGIN OPENSSH PRIVATE KEY-----'
    ["private_key_generic"]='-----BEGIN PRIVATE KEY-----'
    ["pgp_private_key"]='-----BEGIN PGP PRIVATE KEY BLOCK-----'
    ["dsa_private_key"]='-----BEGIN DSA PRIVATE KEY-----'
    ["ec_private_key"]='-----BEGIN EC PRIVATE KEY-----'
    ["encrypted_private_key"]='-----BEGIN ENCRYPTED PRIVATE KEY-----'
)
# Cloud Provider Credentials (extended regexes)
declare -A CLOUD_PATTERNS=(
    ["aws_credentials_block"]='aws_access_key_id|aws_secret_access_key'
    ["gcp_service_account"]='type.*service_account'
    ["azure_client_secret"]='client_secret.*[=:].*[a-zA-Z0-9~._-]{34,}'
    ["heroku_api_key"]='[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
)
# Database Connection Strings with inline credentials (always critical)
declare -A DATABASE_PATTERNS=(
    ["mongodb_connection"]='mongodb(\+srv)?://[^:]+:[^@]+@'
    ["postgres_connection"]='postgres(ql)?://[^:]+:[^@]+@'
    ["mysql_connection"]='mysql://[^:]+:[^@]+@'
    ["redis_connection"]='redis://[^:]+:[^@]+@'
)
# ============================================================================
# Severity Classification
# ============================================================================
#######################################
# Map a secret pattern name onto a severity tier (first matching arm wins,
# so e.g. "stripe_test_key" is medium even though other stripe keys are
# high).
# Arguments: $1 - pattern name (key of a *_PATTERNS table)
# Outputs:   critical | high | medium | low on stdout
#######################################
get_pattern_severity() {
    local key="$1"
    case "${key}" in
        # CRITICAL: Private keys, production credentials
        *_private_key* | aws_access_key_id | aws_secret_access_key | *_connection)
            echo "critical" ;;
        # HIGH: Service API keys, OAuth tokens
        stripe_live_key | openai_api_key | github_*_token | slack_token | *_access_token)
            echo "high" ;;
        # MEDIUM: Passwords, secrets, test keys
        *_password | *_secret | stripe_test_key | generic_*)
            echo "medium" ;;
        # LOW: Everything else
        *)
            echo "low" ;;
    esac
}
# ============================================================================
# Pattern Filtering
# ============================================================================
#######################################
# Decide whether a named secret pattern should be applied, given the
# requested category filter (PATTERNS) and severity floor (MIN_SEVERITY).
# Globals:   PATTERNS, MIN_SEVERITY (read)
# Arguments: $1 - pattern name
# Returns:   0 = apply this pattern, 1 = skip it
#######################################
should_check_pattern() {
    local name="$1"
    local sev
    sev=$(get_pattern_severity "${name}")

    # Category filter — only applied when a specific set was requested.
    if [[ "${PATTERNS}" != "all" ]]; then
        case "${PATTERNS}" in
            *api-keys*)     [[ "${name}" =~ _api_key|_token ]] || return 1 ;;
            *private-keys*) [[ "${name}" =~ private_key ]] || return 1 ;;
            *passwords*)    [[ "${name}" =~ password ]] || return 1 ;;
            *cloud*)        [[ "${name}" =~ aws_|gcp_|azure_ ]] || return 1 ;;
        esac
    fi

    # Severity floor.
    case "${MIN_SEVERITY}" in
        critical) [[ "${sev}" == "critical" ]] || return 1 ;;
        high)     [[ "${sev}" == "critical" || "${sev}" == "high" ]] || return 1 ;;
        medium)   [[ "${sev}" != "low" ]] || return 1 ;;
        low)      ;; # report everything
    esac
    return 0
}
# ============================================================================
# File Exclusion
# ============================================================================
#######################################
# Decide whether a path should be skipped entirely.
# Globals:   EXCLUDE (read) - comma-separated regexes
# Arguments: $1 - file path
# Returns:   0 = exclude, 1 = scan it
#######################################
should_exclude_file() {
    local file="$1"

    # Default exclusions: VCS metadata and dependency/build output.
    # BUG FIX: the previous pattern `\.(git|node_modules|vendor|dist|build)/`
    # required a leading dot on EVERY name (".node_modules", ".vendor", ...),
    # so node_modules/, vendor/, dist/ and build/ were never excluded.
    # Only .git needs the dot.
    if [[ "${file}" =~ (\.git|node_modules|vendor|dist|build)/ ]]; then
        return 0
    fi

    # User-specified exclusions (each entry is treated as a regex)
    if [[ -n "${EXCLUDE}" ]]; then
        local -a exclude_patterns
        IFS=',' read -ra exclude_patterns <<< "${EXCLUDE}"
        local pattern
        for pattern in "${exclude_patterns[@]}"; do
            if [[ "${file}" =~ ${pattern} ]]; then
                return 0
            fi
        done
    fi
    return 1
}
# ============================================================================
# Secret Scanning
# ============================================================================
#######################################
# Scan one text file against every pattern table and append findings.
# Globals:   FINDINGS, SECRETS_FOUND (written); pattern tables (read)
# Arguments: $1 - file path
# Returns:   0 always (results are reported via globals)
#######################################
scan_file() {
    local file="$1"
    local file_findings=0

    # Skip excluded files
    if should_exclude_file "${file}"; then
        return 0
    fi

    # Skip binary files — only scan what file(1) reports as text
    if ! file "${file}" 2>/dev/null | grep -q "text"; then
        return 0
    fi

    local pattern_name pattern severity line_numbers hits

    # API keys & service tokens (extended regexes).
    # Each grep is run once and its output reused (the original ran every
    # grep twice: once to test, once for line numbers).
    for pattern_name in "${!API_KEY_PATTERNS[@]}"; do
        if should_check_pattern "${pattern_name}"; then
            pattern="${API_KEY_PATTERNS[${pattern_name}]}"
            hits=$(grep -nE "${pattern}" "${file}" 2>/dev/null || true)
            if [[ -n "${hits}" ]]; then
                severity=$(get_pattern_severity "${pattern_name}")
                line_numbers=$(printf '%s\n' "${hits}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
                FINDINGS+=("${severity}|${file}|${line_numbers}|${pattern_name}|API Key")
                # BUG FIX: ((file_findings++)) returns the pre-increment
                # value; the first bump from 0 aborted under `set -e`.
                file_findings=$((file_findings + 1))
            fi
        fi
    done

    # Private keys (PEM armor headers, fixed-string match)
    for pattern_name in "${!PRIVATE_KEY_PATTERNS[@]}"; do
        if should_check_pattern "${pattern_name}"; then
            pattern="${PRIVATE_KEY_PATTERNS[${pattern_name}]}"
            hits=$(grep -nF "${pattern}" "${file}" 2>/dev/null || true)
            if [[ -n "${hits}" ]]; then
                line_numbers=$(printf '%s\n' "${hits}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
                FINDINGS+=("critical|${file}|${line_numbers}|${pattern_name}|Private Key")
                file_findings=$((file_findings + 1))
            fi
        fi
    done

    # Cloud provider credentials
    for pattern_name in "${!CLOUD_PATTERNS[@]}"; do
        if should_check_pattern "${pattern_name}"; then
            pattern="${CLOUD_PATTERNS[${pattern_name}]}"
            hits=$(grep -nE "${pattern}" "${file}" 2>/dev/null || true)
            if [[ -n "${hits}" ]]; then
                severity=$(get_pattern_severity "${pattern_name}")
                line_numbers=$(printf '%s\n' "${hits}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
                FINDINGS+=("${severity}|${file}|${line_numbers}|${pattern_name}|Cloud Credential")
                file_findings=$((file_findings + 1))
            fi
        fi
    done

    # Database connection strings (always critical)
    for pattern_name in "${!DATABASE_PATTERNS[@]}"; do
        if should_check_pattern "${pattern_name}"; then
            pattern="${DATABASE_PATTERNS[${pattern_name}]}"
            hits=$(grep -nE "${pattern}" "${file}" 2>/dev/null || true)
            if [[ -n "${hits}" ]]; then
                line_numbers=$(printf '%s\n' "${hits}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
                FINDINGS+=("critical|${file}|${line_numbers}|${pattern_name}|Database Connection")
                file_findings=$((file_findings + 1))
            fi
        fi
    done

    # BUG FIX: ((SECRETS_FOUND += 0)) evaluates to 0 -> status 1 under
    # `set -e` when a file had no findings; use assignment instead.
    SECRETS_FOUND=$((SECRETS_FOUND + file_findings))
    return 0
}
# ============================================================================
# Main Execution
# ============================================================================
#######################################
# Entry point: scan PATH_TO_SCAN for secrets and print a severity-grouped
# report.
# Exits 0 when nothing was found, 1 when secrets exist, 2 on bad path.
#######################################
main() {
    # Validate path
    if [[ ! -e "${PATH_TO_SCAN}" ]]; then
        echo "ERROR: Path does not exist: ${PATH_TO_SCAN}" >&2
        exit 2
    fi

    echo "Secret Scanner"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "Path: ${PATH_TO_SCAN}"
    echo "Recursive: ${RECURSIVE}"
    echo "Min Severity: ${MIN_SEVERITY}"
    echo "Patterns: 50+"
    echo ""

    # Scan files.
    # BUG FIX: ((var++)) yields the pre-increment value; the first increment
    # from 0 returned status 1 and killed the script under `set -e`.
    # Counters below use arithmetic assignment instead.
    local files_scanned=0
    local file
    if [[ -f "${PATH_TO_SCAN}" ]]; then
        # Single file
        scan_file "${PATH_TO_SCAN}"
        files_scanned=$((files_scanned + 1))
    elif [[ -d "${PATH_TO_SCAN}" ]]; then
        # Directory walk, NUL-delimited so odd filenames survive.
        # BUG FIX: the non-recursive branch previously read newline-delimited
        # find output, breaking on paths with spaces; it now uses -print0 too.
        if [[ "${RECURSIVE}" == "true" ]]; then
            while IFS= read -r -d '' file; do
                scan_file "${file}"
                files_scanned=$((files_scanned + 1))
            done < <(find "${PATH_TO_SCAN}" -type f -print0)
        else
            while IFS= read -r -d '' file; do
                scan_file "${file}"
                files_scanned=$((files_scanned + 1))
            done < <(find "${PATH_TO_SCAN}" -maxdepth 1 -type f -print0)
        fi
    fi

    echo "Files Scanned: ${files_scanned}"
    echo ""

    # Success path
    if [[ ${SECRETS_FOUND} -eq 0 ]]; then
        echo "✅ SUCCESS: No secrets detected"
        echo "All files clean"
        exit 0
    fi

    echo "⚠️ SECRETS DETECTED: ${SECRETS_FOUND}"
    echo ""

    # Tally by severity
    local critical_count=0
    local high_count=0
    local medium_count=0
    local low_count=0
    local finding severity lines pattern type
    for finding in "${FINDINGS[@]}"; do
        IFS='|' read -r severity file lines pattern type <<< "${finding}"
        case "${severity}" in
            critical) critical_count=$((critical_count + 1)) ;;
            high) high_count=$((high_count + 1)) ;;
            medium) medium_count=$((medium_count + 1)) ;;
            low) low_count=$((low_count + 1)) ;;
        esac
    done

    # Detailed sections, most severe first
    if [[ ${critical_count} -gt 0 ]]; then
        echo "CRITICAL Issues (${critical_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity file lines pattern type <<< "${finding}"
            if [[ "${severity}" == "critical" ]]; then
                echo "${file}:${lines}"
                echo " Type: ${type}"
                echo " Pattern: ${pattern}"
                echo " Remediation: Remove and rotate immediately"
                echo ""
            fi
        done
    fi

    if [[ ${high_count} -gt 0 ]]; then
        echo "HIGH Issues (${high_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity file lines pattern type <<< "${finding}"
            if [[ "${severity}" == "high" ]]; then
                echo " ⚠️ ${file}:${lines}"
                echo " Type: ${type}"
                echo " Pattern: ${pattern}"
                echo ""
            fi
        done
    fi

    if [[ ${medium_count} -gt 0 ]]; then
        echo "MEDIUM Issues (${medium_count}):"
        for finding in "${FINDINGS[@]}"; do
            IFS='|' read -r severity file lines pattern type <<< "${finding}"
            if [[ "${severity}" == "medium" ]]; then
                echo " 💡 ${file}:${lines}"
                echo " Type: ${type}"
                echo ""
            fi
        done
    fi

    echo "Summary:"
    echo " Critical: ${critical_count}"
    echo " High: ${high_count}"
    echo " Medium: ${medium_count}"
    echo " Low: ${low_count}"
    echo ""
    echo "Action Required: YES"
    exit 1
}
main "$@"

View File

@@ -0,0 +1,386 @@
#!/usr/bin/env python3
"""
URL Validator - Check URL safety and detect malicious patterns
"""
import sys
import os
import re
import json
from pathlib import Path
from urllib.parse import urlparse
from typing import List, Dict, Tuple, Set
# ============================================================================
# Configuration
# ============================================================================
class Config:
    """Static lookup tables consulted during URL validation."""

    # TLDs frequently offered free of charge and often abused for malware/phishing.
    SUSPICIOUS_TLDS = {'.tk', '.ml', '.ga', '.cf', '.gq'}

    # Link-shortening hosts whose final destination cannot be verified statically.
    URL_SHORTENERS = {'bit.ly', 'tinyurl.com', 'goo.gl', 't.co', 'ow.ly'}

    # Well-known package/source registries treated as trustworthy endpoints.
    TRUSTED_REGISTRIES = {
        'github.com',
        'gitlab.com',
        'pypi.org',
        'registry.hub.docker.com',
        'registry.npmjs.org',
    }
# ============================================================================
# URL Pattern Definitions
# ============================================================================
# Comprehensive URL pattern: matches http(s)/ftp/file URLs as well as bare
# "www."/"ftp." hosts, with optional userinfo, an IPv4 or (possibly non-ASCII)
# domain host, an optional port, and an optional path/query/fragment.
URL_PATTERN = re.compile(
    # scheme ("http://", "https://", "ftp://", "file://") or bare www./ftp. prefix
    r'(?:(?:https?|ftp|file)://|www\.|ftp\.)'
    # optional "user:pass@" userinfo
    r'(?:\S+(?::\S*)?@)?'
    r'(?:'
    # IPv4 literal (first octet 1-223, last octet 1-254)
    r'(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])'
    r'(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}'
    r'(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))'
    r'|'
    # domain name: dash-separated labels of letters/digits (incl. \u00a1-\uffff)
    r'(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)'
    r'(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*'
    # TLD: at least two letters
    r'(?:\.(?:[a-z\u00a1-\uffff]{2,}))'
    r')'
    # optional port number
    r'(?::\d{2,5})?'
    # optional path, query string, or fragment
    r'(?:[/?#]\S*)?',
    re.IGNORECASE
)
# Dangerous code execution patterns: maps a short identifier to a regex that
# detects fetching remote content and executing it without review.
DANGEROUS_PATTERNS = {
    # curl/wget output piped straight into a shell
    'curl_pipe_sh': re.compile(r'curl\s+[^|]+\|\s*(sh|bash)', re.IGNORECASE),
    'wget_pipe_sh': re.compile(r'wget\s+[^|]+\|\s*(sh|bash)', re.IGNORECASE),
    # curl invoked with a silent flag (-s among other short flags) piped to a shell
    'curl_silent_pipe': re.compile(r'curl\s+-[a-zA-Z]*s[a-zA-Z]*\s+[^|]+\|\s*(sh|bash)', re.IGNORECASE),
    # bash fed by process substitution of curl output: bash <(curl ...)
    'bash_redirect': re.compile(r'bash\s+<\s*\(\s*curl', re.IGNORECASE),
    # eval/exec applied to fetched/downloaded content
    'eval_fetch': re.compile(r'eval.*fetch\s*\(', re.IGNORECASE),
    'eval_curl': re.compile(r'eval.*curl', re.IGNORECASE),
    'exec_wget': re.compile(r'exec\s*\(.*wget', re.IGNORECASE),
    # destructive rm -rf combined with a URL on the same line
    'rm_rf_url': re.compile(r'rm\s+-rf.*https?://', re.IGNORECASE),
}
# Obfuscation patterns: URL schemes hidden behind base64/hex/unicode encodings.
OBFUSCATION_PATTERNS = {
    # base64 decoding (atob/base64/Buffer.from) applied near an http(s) scheme
    'base64_url': re.compile(r'(?:atob|base64|Buffer\.from)\s*\([^)]*https?:', re.IGNORECASE),
    # hex (\xNN) escape sequences followed by an http(s) scheme
    'hex_encoded': re.compile(r'\\x[0-9a-f]{2}.*https?:', re.IGNORECASE),
    # unicode (\uNNNN) escape sequences followed by an http(s) scheme
    'unicode_escape': re.compile(r'\\u[0-9a-f]{4}.*https?:', re.IGNORECASE),
}
# ============================================================================
# Severity Classification
# ============================================================================
class Severity:
    """Severity level constants for findings, from most to least severe."""
    CRITICAL = 'critical'
    HIGH = 'high'
    MEDIUM = 'medium'
    LOW = 'low'
# ============================================================================
# Finding Class
# ============================================================================
class Finding:
    """One URL-safety issue located at a specific file and line.

    Attribute names mirror the keys produced by :meth:`to_dict`.
    """

    def __init__(self, file_path: str, line_num: int, url: str, issue: str,
                 severity: str, risk: str, remediation: str):
        # Where the issue was found.
        self.file = file_path
        self.line = line_num
        # What was found and how serious it is.
        self.url = url
        self.issue = issue
        self.severity = severity
        # Impact description and suggested fix, both human-readable.
        self.risk = risk
        self.remediation = remediation

    def to_dict(self) -> Dict:
        """Serialize the finding as a plain dict (e.g. for JSON output)."""
        fields = ('file', 'line', 'url', 'issue', 'severity', 'risk', 'remediation')
        return {name: getattr(self, name) for name in fields}
# ============================================================================
# URL Validator
# ============================================================================
class URLValidator:
    """Scan a file or directory tree for URLs and flag unsafe ones.

    Findings accumulate in ``self.findings``. Typical usage: construct,
    call ``scan()``, then ``report()``, which prints a severity-grouped
    summary and returns a process exit code (0 = clean, 1 = issues found).
    """

    def __init__(self, path: str, https_only: bool = False,
                 allow_localhost: bool = True, check_code_patterns: bool = True):
        # Root file or directory to scan.
        self.path = Path(path)
        # When True, flag every plain-http URL regardless of context.
        self.https_only = https_only
        # When True, http:// to localhost/127.0.0.1/0.0.0.0 is tolerated.
        self.allow_localhost = allow_localhost
        # When True, also scan whole-file content for exec/obfuscation patterns.
        self.check_code_patterns = check_code_patterns
        self.findings: List[Finding] = []
        self.urls_checked = 0
        self.files_scanned = 0

    def is_text_file(self, file_path: Path) -> bool:
        """Heuristic text check: no NUL byte within the first 512 bytes."""
        try:
            with open(file_path, 'rb') as f:
                chunk = f.read(512)
                if b'\0' in chunk:
                    return False
                return True
        except Exception:
            # Unreadable files are treated as non-text and skipped.
            return False

    def should_exclude(self, file_path: Path) -> bool:
        """Return True when the path lies inside a VCS/vendored/build directory."""
        exclude_patterns = {'.git', 'node_modules', 'vendor', 'dist', 'build', '__pycache__'}
        return any(part in exclude_patterns for part in file_path.parts)

    def get_context(self, file_path: Path, line_num: int) -> str:
        """Classify where a URL appears.

        Returns 'documentation' (comment-like line), 'test', 'example',
        'production', or 'unknown' (read failure / out-of-range line).

        NOTE(review): re-reads the whole file on every call, so overall
        cost is O(files x urls); acceptable for small trees.
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                lines = f.readlines()
            if 0 <= line_num - 1 < len(lines):
                # Comment-like lines (#, //, *) are treated as documentation.
                line = lines[line_num - 1].strip()
                if line.startswith('#') or line.startswith('//') or line.startswith('*'):
                    return 'documentation'
                if 'test' in str(file_path).lower() or 'spec' in str(file_path).lower():
                    return 'test'
                if 'example' in str(file_path).lower() or 'mock' in str(file_path).lower():
                    return 'example'
                return 'production'
        except Exception:
            pass
        return 'unknown'

    def check_url_safety(self, url: str, file_path: Path, line_num: int) -> None:
        """Validate one URL and append Finding entries for unsafe traits.

        http and ftp/telnet URLs short-circuit after their protocol check;
        other schemes fall through to host-based checks (IP literal,
        suspicious TLD, URL shortener).
        """
        try:
            parsed = urlparse(url)
        except Exception:
            # Unparseable strings are ignored rather than reported.
            return
        context = self.get_context(file_path, line_num)
        # --- Protocol checks ---
        if parsed.scheme == 'http':
            # Allow plain HTTP to loopback hosts during development.
            if self.allow_localhost and parsed.hostname in ('localhost', '127.0.0.1', '0.0.0.0'):
                return
            # Enforce HTTPS when configured, or when the URL is in production code.
            if self.https_only or context == 'production':
                severity = Severity.HIGH if context == 'production' else Severity.MEDIUM
                self.findings.append(Finding(
                    str(file_path), line_num, url,
                    'Non-HTTPS URL',
                    severity,
                    'Man-in-the-middle attacks, data interception',
                    'Change to HTTPS: ' + url.replace('http://', 'https://')
                ))
            return
        if parsed.scheme in ('ftp', 'telnet'):
            self.findings.append(Finding(
                str(file_path), line_num, url,
                'Insecure protocol',
                Severity.HIGH,
                'Unencrypted data transmission',
                'Use secure alternatives (HTTPS, SFTP, SSH)'
            ))
            return
        # file:// is flagged but still falls through to host-based checks.
        if parsed.scheme == 'file':
            self.findings.append(Finding(
                str(file_path), line_num, url,
                'File protocol detected',
                Severity.MEDIUM,
                'Potential security risk, path disclosure',
                'Review necessity of file:// protocol'
            ))
        # --- Host checks ---
        # Raw IPv4 literal instead of a domain name.
        if parsed.hostname and re.match(r'^\d+\.\d+\.\d+\.\d+$', parsed.hostname):
            self.findings.append(Finding(
                str(file_path), line_num, url,
                'IP address instead of domain',
                Severity.LOW,
                'Harder to verify legitimacy, no certificate validation',
                'Use domain name instead of IP address'
            ))
        # Suspicious TLDs (at most one finding per URL).
        if parsed.hostname:
            for tld in Config.SUSPICIOUS_TLDS:
                if parsed.hostname.endswith(tld):
                    self.findings.append(Finding(
                        str(file_path), line_num, url,
                        'Suspicious TLD',
                        Severity.MEDIUM,
                        'Often used for malicious purposes',
                        'Verify domain legitimacy before use'
                    ))
                    break
        # Known URL-shortener hosts.
        if parsed.hostname in Config.URL_SHORTENERS:
            self.findings.append(Finding(
                str(file_path), line_num, url,
                'Shortened URL',
                Severity.LOW,
                'Cannot verify destination',
                'Expand URL and use full destination'
            ))

    def check_dangerous_patterns(self, content: str, file_path: Path) -> None:
        """Scan whole-file content for remote-execution and obfuscation patterns."""
        if not self.check_code_patterns:
            return
        for pattern_name, pattern in DANGEROUS_PATTERNS.items():
            for match in pattern.finditer(content):
                # Derive the 1-based line number from the match offset.
                line_num = content[:match.start()].count('\n') + 1
                self.findings.append(Finding(
                    str(file_path), line_num, match.group(0),
                    'Remote code execution pattern',
                    Severity.CRITICAL,
                    f'Executes arbitrary code from remote source ({pattern_name})',
                    'Download, verify checksum, review code, then execute'
                ))
        for pattern_name, pattern in OBFUSCATION_PATTERNS.items():
            for match in pattern.finditer(content):
                line_num = content[:match.start()].count('\n') + 1
                # Truncate the matched text so reports stay readable.
                self.findings.append(Finding(
                    str(file_path), line_num, match.group(0)[:50] + '...',
                    'Obfuscated URL',
                    Severity.HIGH,
                    f'URL obfuscation detected ({pattern_name})',
                    'Review obfuscated content for malicious intent'
                ))

    def scan_file(self, file_path: Path) -> None:
        """Scan one file: dangerous patterns first, then per-line URL checks."""
        if self.should_exclude(file_path) or not self.is_text_file(file_path):
            return
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
            self.files_scanned += 1
            # Whole-file pattern checks run before individual URL checks.
            self.check_dangerous_patterns(content, file_path)
            # Then examine every URL, line by line.
            lines = content.split('\n')
            for line_num, line in enumerate(lines, 1):
                for match in URL_PATTERN.finditer(line):
                    url = match.group(0)
                    self.urls_checked += 1
                    self.check_url_safety(url, file_path, line_num)
        except Exception as e:
            print(f"Warning: Could not scan {file_path}: {e}", file=sys.stderr)

    def scan(self) -> None:
        """Scan the configured path: a single file, or a directory recursively."""
        if self.path.is_file():
            self.scan_file(self.path)
        elif self.path.is_dir():
            for file_path in self.path.rglob('*'):
                if file_path.is_file():
                    self.scan_file(file_path)

    def report(self) -> int:
        """Print a severity-grouped report; return 0 if clean, 1 otherwise.

        Low-severity findings are counted in the summary but not itemized.
        """
        print("URL Safety Scan Results")
        print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
        print(f"Path: {self.path}")
        print(f"Files Scanned: {self.files_scanned}")
        print(f"URLs Checked: {self.urls_checked}")
        print()
        if not self.findings:
            print("✅ SUCCESS: All URLs safe")
            print("No unsafe URLs or malicious patterns detected")
            return 0
        # Bucket findings by severity for the grouped sections below.
        critical = [f for f in self.findings if f.severity == Severity.CRITICAL]
        high = [f for f in self.findings if f.severity == Severity.HIGH]
        medium = [f for f in self.findings if f.severity == Severity.MEDIUM]
        low = [f for f in self.findings if f.severity == Severity.LOW]
        print(f"⚠️ UNSAFE URLS DETECTED: {len(self.findings)}")
        print()
        if critical:
            print(f"CRITICAL Issues ({len(critical)}):")
            for finding in critical:
                print(f"❌ {finding.file}:{finding.line}")
                print(f" Pattern: {finding.url}")
                print(f" Risk: {finding.risk}")
                print(f" Remediation: {finding.remediation}")
                print()
        if high:
            print(f"HIGH Issues ({len(high)}):")
            for finding in high:
                print(f" ⚠️ {finding.file}:{finding.line}")
                print(f" URL: {finding.url}")
                print(f" Issue: {finding.issue}")
                print(f" Remediation: {finding.remediation}")
                print()
        if medium:
            print(f"MEDIUM Issues ({len(medium)}):")
            for finding in medium:
                print(f" 💡 {finding.file}:{finding.line}")
                print(f" Issue: {finding.issue}")
                print()
        print("Summary:")
        print(f" Critical: {len(critical)}")
        print(f" High: {len(high)}")
        print(f" Medium: {len(medium)}")
        print(f" Low: {len(low)}")
        print()
        print("Action Required: YES" if (critical or high) else "Review Recommended")
        return 1
# ============================================================================
# Main
# ============================================================================
def main():
    """CLI entry point: parse argv, run the scan, and exit with the report code.

    Exit codes: 0 = clean scan, 1 = unsafe URLs found, 2 = usage/path error.
    """
    argv = sys.argv
    if len(argv) < 2:
        print("Usage: url-validator.py <path> [https_only] [allow_localhost] [check_code_patterns]")
        sys.exit(2)

    def flag(index: int, default: bool) -> bool:
        # Optional booleans arrive as the strings 'true'/'false'; anything
        # other than 'true' (case-insensitive) is treated as False.
        if len(argv) > index:
            return argv[index].lower() == 'true'
        return default

    target = argv[1]
    if not os.path.exists(target):
        print(f"ERROR: Path does not exist: {target}", file=sys.stderr)
        sys.exit(2)
    validator = URLValidator(target, flag(2, False), flag(3, True), flag(4, True))
    validator.scan()
    sys.exit(validator.report())


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,353 @@
## Operation: Check File Permissions
Audit file permissions to detect world-writable files, overly permissive scripts, and inappropriate executability.
### Parameters from $ARGUMENTS
- **path**: Target directory to scan (required)
- **strict**: Enforce strict permission rules (true|false, default: false)
- **check-executables**: Verify executable files have shebangs (true|false, default: true)
- **report-all**: Report all permissions, not just issues (true|false, default: false)
### Permission Rules
**Forbidden Permissions** (CRITICAL):
- **777** (rwxrwxrwx) - World-writable and executable
- Risk: Anyone can modify and execute
- Remediation: chmod 755 (directories) or 644 (files)
- **666** (rw-rw-rw-) - World-writable files
- Risk: Anyone can modify content
- Remediation: chmod 644 (owner write, others read)
- **000** (---------) - Inaccessible files
- Risk: Unusable file, potential error
- Remediation: chmod 644 or remove
**Scripts & Executables** (HIGH priority):
- Shell scripts (*.sh, *.bash) SHOULD be:
- 755 (rwxr-xr-x) or 750 (rwxr-x---)
- Have shebang (#!/bin/bash, #!/usr/bin/env bash)
- Not world-writable
- Python scripts (*.py) SHOULD be:
- 755 if executable, 644 if library
- Have shebang if executable (#!/usr/bin/env python3)
- Node.js scripts (*.js, *.ts) SHOULD be:
- 644 (not executable, run via node)
- Exception: CLI tools can be 755 with shebang
**Configuration Files** (MEDIUM priority):
- Config files (.env, *.json, *.yaml, *.conf) SHOULD be:
- 600 (rw-------) for sensitive configs
- 644 (rw-r--r--) for non-sensitive
- Never 666 or 777
- SSH/GPG files MUST be:
- Private keys: 600 (rw-------)
- Public keys: 644 (rw-r--r--)
- ~/.ssh directory: 700 (rwx------)
**Directories** (MEDIUM priority):
- Standard directories: 755 (rwxr-xr-x)
- Private directories: 750 or 700
- Never 777 (world-writable)
### Workflow
1. **Parse arguments**
```
Extract path, strict, check-executables, report-all
Validate path exists
Determine scan scope
```
2. **Execute permission checker**
```bash
Execute .scripts/permission-checker.sh "$path" "$strict" "$check_executables" "$report_all"
Returns:
- 0: All permissions correct
- 1: Permission issues found
- 2: Scan error
```
3. **Analyze results**
```
Categorize findings:
- CRITICAL: 777, 666, world-writable
- HIGH: Executables without shebangs, 775 on sensitive files
- MEDIUM: Overly permissive configs, wrong directory perms
- LOW: Inconsistent permissions, non-executable scripts
Generate fix commands
```
4. **Format output**
```
File Permission Audit Results
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Path: <path>
Files Checked: <count>
CRITICAL Issues (<count>):
❌ scripts/deploy.sh (777)
Current: rwxrwxrwx (777)
Issue: World-writable and executable
Risk: Anyone can modify and execute script
Fix: chmod 755 scripts/deploy.sh
❌ config/secrets.json (666)
Current: rw-rw-rw- (666)
Issue: World-writable configuration
Risk: Secrets can be modified by anyone
Fix: chmod 600 config/secrets.json
HIGH Issues (<count>):
⚠️ bin/cli.sh (755) - Missing shebang
Issue: Executable without shebang
Fix: Add #!/usr/bin/env bash to first line
MEDIUM Issues (<count>):
💡 .env (644)
Current: rw-r--r-- (644)
Recommendation: Restrict to owner only
Fix: chmod 600 .env
Summary:
- Total issues: <count>
- Critical: <count> (fix immediately)
- Fixes available: Yes
```
### Permission Patterns
**Standard File Permissions**:
```
644 (rw-r--r--) - Regular files, documentation
755 (rwxr-xr-x) - Executable scripts, directories
600 (rw-------) - Sensitive configs, private keys
700 (rwx------) - Private directories (.ssh, .gnupg)
```
**Forbidden Permissions**:
```
777 (rwxrwxrwx) - Never use (world-writable + executable)
666 (rw-rw-rw-) - Never use (world-writable)
000 (---------) - Inaccessible (likely error)
```
**Context-Specific**:
```
Shell scripts: 755 with #!/bin/bash
Python scripts: 755 with #!/usr/bin/env python3 (if CLI)
644 without shebang (if library)
Config files: 600 (sensitive) or 644 (public)
SSH keys: 600 (private), 644 (public)
Directories: 755 (public), 700 (private)
```
### Shebang Validation
**Valid shebangs**:
```bash
#!/bin/bash
#!/usr/bin/env bash
#!/usr/bin/env python3
#!/usr/bin/env node
#!/usr/bin/env ruby
```
**Invalid patterns**:
```bash
#!/bin/sh # Too generic, prefer bash
#! /bin/bash # Space after #!
# /usr/bin/env bash # Missing !
```
### Examples
```bash
# Check all permissions in current directory
/security-scan permissions path:.
# Strict mode - flag all non-standard permissions
/security-scan permissions path:. strict:true
# Check executables for shebangs
/security-scan permissions path:./scripts/ check-executables:true
# Report all files, not just issues
/security-scan permissions path:. report-all:true
```
### Error Handling
**Path not found**:
```
ERROR: Path does not exist: <path>
Remediation: Verify path and try again
```
**Permission denied**:
```
ERROR: Cannot read permissions for: <path>
Remediation: Run with sufficient privileges or check ownership
```
**No issues found**:
```
SUCCESS: All file permissions correct
No action required
```
### Automated Fixes
**Critical Issues**:
```bash
# Fix world-writable files
find . -type f -perm 0666 -exec chmod 644 {} \;
find . -type f -perm 0777 -exec chmod 755 {} \;
# Fix world-writable directories
find . -type d -perm 0777 -exec chmod 755 {} \;
```
**Sensitive Files**:
```bash
# Restrict sensitive configs
chmod 600 .env
chmod 600 config/credentials.json
chmod 600 ~/.ssh/id_rsa
# Secure directories
chmod 700 ~/.ssh
chmod 700 ~/.gnupg
```
**Executables**:
```bash
# Make scripts executable
chmod +x scripts/*.sh
# Remove execute from libraries
chmod 644 src/**/*.py
```
### Platform-Specific Notes
**Unix/Linux**:
- Full permission support (owner/group/other)
- Numeric (755) or symbolic (rwxr-xr-x) modes
- Respect umask settings
**macOS**:
- Same as Unix/Linux
- Additional extended attributes (xattr)
- May have quarantine attributes on downloaded files
**Windows (WSL/Git Bash)**:
- Limited permission support
- Executable bit preserved in git
- May show 755 for all files by default
### Strict Mode Rules
When `strict:true`:
**Additional checks**:
- Flag 775 on any file (group-writable)
- Flag 755 on non-executable files
- Require 600 for all .env files
- Require 700 for all .ssh, .gnupg directories
- Flag inconsistent permissions in same directory
**Stricter recommendations**:
- Config files: Must be 600
- Scripts: Must have correct shebang
- No group-writable files
- Directories: 750 instead of 755
### Remediation Guidance
**For world-writable files (777, 666)**:
1. Determine correct permission level
2. Apply fix immediately: `chmod 644 <file>` or `chmod 755 <executable>`
3. Verify no unauthorized modifications
4. Check git history for permission changes
5. Document required permissions in README
**For executables without shebangs**:
1. Add appropriate shebang:
```bash
#!/usr/bin/env bash
```
2. Verify script runs correctly
3. Consider using absolute path if specific version needed
**For overly permissive configs**:
1. Restrict to owner: `chmod 600 <config>`
2. Verify application can still read
3. Update deployment documentation
4. Use principle of least privilege
**For inconsistent permissions**:
1. Establish permission standards
2. Document in CONTRIBUTING.md
3. Add pre-commit hook to enforce
4. Use tools like .editorconfig
### Security Best Practices
**General**:
- Use most restrictive permissions possible
- Never use 777 or 666
- Sensitive files: 600 (owner read/write only)
- Executables: 755 (everyone execute, owner write)
- Configs: 644 (everyone read, owner write) or 600 (owner only)
**For Scripts**:
- Always include shebang
- Make executable only if meant to be run directly
- Libraries should be 644, not 755
- Verify no secrets in scripts
**For Keys**:
- Private keys: 600 or SSH refuses to use them
- Public keys: 644
- Key directories: 700
- Never group or world readable
### Output Format
```json
{
"scan_type": "permissions",
"path": "<path>",
"files_checked": <count>,
"issues_found": <count>,
"severity_breakdown": {
"critical": <count>,
"high": <count>,
"medium": <count>,
"low": <count>
},
"findings": [
{
"file": "<file_path>",
"current_permissions": "<octal>",
"current_symbolic": "<symbolic>",
"issue": "<issue_description>",
"severity": "<severity>",
"risk": "<risk_description>",
"recommended_permissions": "<octal>",
"fix_command": "chmod <perms> <file>"
}
],
"fixes_available": <boolean>,
"action_required": <boolean>
}
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,262 @@
## Operation: Check URL Safety
Validate URL safety, enforce HTTPS, and detect malicious patterns in URLs and code.
### Parameters from $ARGUMENTS
- **path**: Target directory or file to scan (required)
- **https-only**: Enforce HTTPS for all URLs (true|false, default: false)
- **allow-localhost**: Allow http://localhost URLs (true|false, default: true)
- **check-code-patterns**: Check for dangerous code execution patterns (true|false, default: true)
### URL Safety Checks
**Protocol Validation**:
- HTTPS enforcement (production contexts)
- HTTP allowed only for localhost/127.0.0.1
- FTP/telnet flagged as insecure
- file:// protocol flagged (potential security risk)
**Malicious Patterns**:
- `curl ... | sh` - Remote code execution
- `wget ... | bash` - Remote script execution
- `eval(fetch(...))` - Dynamic code execution
- `exec(...)` with URLs - Command injection risk
- `rm -rf` in scripts downloaded from URLs
- Obfuscated URLs (base64, hex encoded)
**Domain Validation**:
- Check for typosquatting (common package registries)
- Suspicious TLDs (.tk, .ml, .ga, .cf)
- IP addresses instead of domains
- Shortened URLs (bit.ly, tinyurl) - potential phishing
### Workflow
1. **Parse arguments**
```
Extract path, https-only, allow-localhost, check-code-patterns
Validate path exists
Determine scan scope
```
2. **Execute URL validator**
```bash
Execute .scripts/url-validator.py "$path" "$https_only" "$allow_localhost" "$check_code_patterns"
Returns:
- 0: All URLs safe
- 1: Unsafe URLs detected
- 2: Validation error
```
3. **Analyze results**
```
Categorize findings:
- CRITICAL: Remote code execution patterns
- HIGH: Non-HTTPS in production, obfuscated URLs
- MEDIUM: HTTP in non-localhost, suspicious TLDs
- LOW: Shortened URLs, IP addresses
Generate context-aware remediation
```
4. **Format output**
```
URL Safety Scan Results
━━━━━━━━━━━━━━━━━━━━━━
Path: <path>
URLs Scanned: <count>
CRITICAL Issues (<count>):
❌ <file>:<line>: Remote code execution pattern
Pattern: curl https://example.com/script.sh | bash
Risk: Executes arbitrary code without verification
Remediation: Download, verify, then execute
HIGH Issues (<count>):
⚠️ <file>:<line>: Non-HTTPS URL in production context
URL: http://api.example.com
Risk: Man-in-the-middle attacks
Remediation: Use HTTPS
Summary:
- Total URLs: <count>
- Safe: <count>
- Unsafe: <count>
- Action required: <yes|no>
```
### Dangerous Code Patterns
**Remote Execution** (CRITICAL):
```bash
# Dangerous patterns
curl https://example.com/install.sh | bash
wget -qO- https://get.example.com | sh
eval "$(curl -fsSL https://example.com/script)"
bash <(curl -s https://example.com/setup.sh)
```
**Dynamic Code Execution** (HIGH):
```javascript
// Dangerous patterns
eval(fetch(url).then(r => r.text()))
new Function(await fetch(url).text())()
exec(`curl ${url}`)
```
**Command Injection** (HIGH):
```bash
# Vulnerable patterns
wget $USER_INPUT
curl "$UNTRUSTED_URL"
git clone $URL # without validation
```
### Safe Alternatives
**Instead of curl | sh**:
```bash
# Safe: Download, verify, then execute
wget https://example.com/install.sh
sha256sum -c install.sh.sha256
chmod +x install.sh
./install.sh
```
**Instead of eval(fetch())**:
```javascript
// Safe: Fetch as data, validate, then use
const response = await fetch(url);
const data = await response.json();
// Process data, not as code
```
### Examples
```bash
# Check all URLs, enforce HTTPS
/security-scan urls path:. https-only:true
# Allow localhost HTTP during development
/security-scan urls path:. https-only:true allow-localhost:true
# Check for code execution patterns
/security-scan urls path:./scripts/ check-code-patterns:true
# Scan specific file
/security-scan urls path:./install.sh
```
### Error Handling
**Path not found**:
```
ERROR: Path does not exist: <path>
Remediation: Verify path and try again
```
**No URLs found**:
```
INFO: No URLs detected
No action required
```
**Python unavailable**:
```
ERROR: Python3 not available
Remediation: Install Python 3.x or skip URL validation
```
### Context-Aware Rules
**Production contexts** (strict):
- package.json scripts
- Dockerfiles
- CI/CD configs (.github/, .gitlab-ci.yml)
- Installation scripts (install.sh, setup.sh)
→ Enforce HTTPS, no remote execution
**Development contexts** (relaxed):
- Test files (*test*, *spec*)
- Mock data
- Local development configs
→ Allow HTTP for localhost
**Documentation contexts** (informational):
- README.md
- *.md files
- Comments
→ Flag but don't fail
### URL Categories
**Registry URLs** (validate carefully):
- npm: https://registry.npmjs.org
- PyPI: https://pypi.org
- Docker: https://registry.hub.docker.com
- GitHub: https://github.com
→ Verify exact domain, check for typosquatting
**CDN URLs** (HTTPS required):
- https://cdn.jsdelivr.net
- https://unpkg.com
- https://cdnjs.cloudflare.com
→ Must use HTTPS, verify integrity hashes
**Shortened URLs** (flag for review):
- bit.ly, tinyurl.com, goo.gl
→ Cannot verify destination, recommend expanding
### Remediation Guidance
**For remote code execution**:
1. Remove pipe-to-shell patterns
2. Download scripts explicitly
3. Verify checksums/signatures
4. Review code before execution
5. Use official package managers when possible
**For non-HTTPS URLs**:
1. Update to HTTPS version
2. Verify certificate validity
3. Pin certificate if highly sensitive
4. Consider using subresource integrity (SRI) for CDNs
**For suspicious URLs**:
1. Verify domain legitimacy
2. Check for typosquatting
3. Expand shortened URLs
4. Review destination manually
### Output Format
```json
{
"scan_type": "urls",
"path": "<path>",
"urls_scanned": <count>,
"unsafe_urls": <count>,
"severity_breakdown": {
"critical": <count>,
"high": <count>,
"medium": <count>,
"low": <count>
},
"findings": [
{
"file": "<file_path>",
"line": <line_number>,
"url": "<url>",
"issue": "<issue_type>",
"severity": "<severity>",
"risk": "<risk_description>",
"remediation": "<action>"
}
],
"action_required": <boolean>
}
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,469 @@
## Operation: Full Security Audit
Execute comprehensive security audit combining all security scans: secrets, URLs, files, and permissions.
### Parameters from $ARGUMENTS
- **path**: Target directory to audit (required)
- **severity**: Minimum severity to report (critical|high|medium|low, default: medium)
- **strict**: Enable strict mode for all checks (true|false, default: false)
- **format**: Output format (text|json|markdown, default: text)
### Full Audit Workflow
1. **Initialize audit**
```
Validate path exists
Parse severity threshold
Set strict mode for all sub-scans
Initialize results aggregator
```
2. **Execute all security scans**
```
PARALLEL EXECUTION (where possible):
┌─ Scan 1: Secret Detection
│ Read scan-secrets.md
│ Execute with path, recursive:true, severity
│ Capture results
├─ Scan 2: URL Safety Check
│ Read check-urls.md
│ Execute with path, https-only, check-code-patterns
│ Capture results
├─ Scan 3: Dangerous Files
│ Read scan-files.md
│ Execute with path, include-hidden, check-gitignore
│ Capture results
└─ Scan 4: Permission Audit
Read check-permissions.md
Execute with path, strict, check-executables
Capture results
```
3. **Aggregate results**
```
Combine all findings
Deduplicate issues
Sort by severity:
1. CRITICAL issues (block publication)
2. HIGH issues (fix before publication)
3. MEDIUM issues (recommended fixes)
4. LOW issues (nice to have)
Calculate overall security score:
Base score: 100
- CRITICAL: -25 points each
- HIGH: -10 points each
- MEDIUM: -5 points each
- LOW: -2 points each
Score = max(0, base - deductions)
```
4. **Generate comprehensive report**
```
FULL SECURITY AUDIT REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: <path>
Scan Date: <timestamp>
Severity Threshold: <severity>
OVERALL SECURITY SCORE: <0-100>/100
Rating: <Excellent|Good|Fair|Poor|Critical>
Publication Ready: <Yes|No|With Fixes>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EXECUTIVE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Security Posture: <assessment>
Critical Issues: <count> (IMMEDIATE ACTION REQUIRED)
High Priority: <count> (FIX BEFORE PUBLICATION)
Medium Priority: <count> (RECOMMENDED)
Low Priority: <count> (OPTIONAL)
Action Required: <Yes|No>
Estimated Fix Time: <time_estimate>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
SCAN RESULTS BY LAYER
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
[1] SECRET DETECTION
Status: <PASS|FAIL>
Secrets Found: <count>
Files Scanned: <count>
<Details...>
[2] URL SAFETY
Status: <PASS|FAIL>
Unsafe URLs: <count>
URLs Checked: <count>
<Details...>
[3] DANGEROUS FILES
Status: <PASS|FAIL>
Dangerous Files: <count>
Files Scanned: <count>
<Details...>
[4] FILE PERMISSIONS
Status: <PASS|FAIL>
Permission Issues: <count>
Files Checked: <count>
<Details...>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CRITICAL ISSUES (IMMEDIATE ACTION)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
❌ Issue 1: <description>
File: <path>:<line>
Severity: CRITICAL
Risk: <risk_assessment>
Remediation: <specific_steps>
❌ Issue 2: ...
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
HIGH PRIORITY ISSUES
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⚠️ Issue 1: ...
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
REMEDIATION PLAN
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Phase 1: Critical Issues (Immediate)
□ Remove exposed secrets from .env
□ Rotate compromised API keys
□ Fix world-writable permissions (777)
□ Remove dangerous files from repository
Phase 2: High Priority (Before Publication)
□ Update all HTTP URLs to HTTPS
□ Add dangerous files to .gitignore
□ Fix executables without shebangs
□ Remove remote code execution patterns
Phase 3: Recommended Improvements
□ Restrict config file permissions to 600
□ Review and expand shortened URLs
□ Add security documentation
Phase 4: Optional Enhancements
□ Implement pre-commit hooks
□ Add automated security scanning to CI/CD
□ Document security best practices
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
SECURITY RECOMMENDATIONS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🔒 Secrets Management
- Use environment variables for all secrets
- Implement secret rotation policy
- Consider using secret management tools
(AWS Secrets Manager, HashiCorp Vault)
🌐 URL Security
- Enforce HTTPS for all external URLs
- Verify checksums for downloaded scripts
- Never pipe remote content to shell
📁 File Security
- Review .gitignore completeness
- Remove sensitive files from git history
- Implement file scanning in CI/CD
🔐 Permission Security
- Use least privilege principle
- Document required permissions
- Regular permission audits
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
PUBLICATION READINESS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
<if score >= 90>
✅ READY FOR PUBLICATION
Security score is excellent. No critical issues found.
All security checks passed. Safe to publish.
<if 70 <= score < 90>
⚠️ READY WITH MINOR FIXES
Security score is good but has some issues.
Fix high priority issues before publication.
Estimated fix time: <time>
<if score < 70>
❌ NOT READY FOR PUBLICATION
Critical security issues must be resolved.
Publication blocked until critical issues fixed.
Do not publish in current state.
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
NEXT STEPS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. Address all CRITICAL issues immediately
2. Fix HIGH priority issues before publication
3. Review and implement recommended improvements
4. Re-run full security audit to verify fixes
5. Document security practices for maintainers
```
### Security Score Calculation
```
Base Score: 100 points
Deductions:
- CRITICAL issues: -25 points each
- HIGH issues: -10 points each
- MEDIUM issues: -5 points each
- LOW issues: -2 points each
Final Score: max(0, Base - Deductions)
Rating Scale:
- 90-100: Excellent ⭐⭐⭐⭐⭐ (Publication ready)
- 70-89: Good ⭐⭐⭐⭐ (Ready with minor fixes)
- 50-69: Fair ⭐⭐⭐ (Needs work)
- 30-49: Poor ⭐⭐ (Not ready)
- 0-29: Critical ⭐ (Major security issues)
```
### Examples
```bash
# Full security audit with default settings
/security-scan full-security-audit path:.
# Strict mode - enforce all strict rules
/security-scan full-security-audit path:. strict:true
# Only report critical and high issues
/security-scan full-security-audit path:. severity:high
# JSON output for CI/CD integration
/security-scan full-security-audit path:. format:json
# Markdown report for documentation
/security-scan full-security-audit path:. format:markdown
```
### Error Handling
**Path not found**:
```
ERROR: Path does not exist: <path>
Remediation: Verify path and try again
```
**Scan failures**:
```
WARNING: One or more security scans failed
Partial results available:
- Secrets: ✓ Completed
- URLs: ✓ Completed
- Files: ✗ Failed
- Permissions: ✓ Completed
Recommendation: Review failures and re-run
```
**All scans passed**:
```
SUCCESS: Full Security Audit Passed
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Security Score: 100/100 ⭐⭐⭐⭐⭐
Rating: Excellent
All security checks passed with no issues.
Your plugin/marketplace is secure and ready for publication.
Summary:
✓ No secrets detected
✓ All URLs safe
✓ No dangerous files
✓ All permissions correct
Excellent security posture! 🎉
```
### Integration with CI/CD
**GitHub Actions Example**:
```yaml
name: Security Audit
on: [push, pull_request]
jobs:
security:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Run Security Audit
run: |
/security-scan full-security-audit path:. format:json > security-report.json
- name: Check Security Score
run: |
score=$(jq '.security_score' security-report.json)
if [ $score -lt 70 ]; then
echo "Security score too low: $score"
exit 1
fi
- name: Upload Report
uses: actions/upload-artifact@v3
with:
name: security-report
path: security-report.json
```
**GitLab CI Example**:
```yaml
security_audit:
stage: test
script:
- /security-scan full-security-audit path:. format:json
only:
- main
- merge_requests
artifacts:
reports:
security: security-report.json
```
### Report Formats
**Text Format** (default):
- Human-readable console output
- Color-coded severity levels
- Section dividers for clarity
- Suitable for terminal viewing
**JSON Format**:
```json
{
"scan_type": "full-audit",
"timestamp": "<ISO8601>",
"path": "<path>",
"security_score": <0-100>,
"rating": "<rating>",
"publication_ready": <boolean>,
"scans": {
"secrets": { "status": "pass", "issues": [] },
"urls": { "status": "fail", "issues": [...] },
"files": { "status": "pass", "issues": [] },
"permissions": { "status": "pass", "issues": [] }
},
"severity_breakdown": {
"critical": <count>,
"high": <count>,
"medium": <count>,
"low": <count>
},
"all_findings": [...],
"remediation_plan": [...],
"recommendations": [...]
}
```
**Markdown Format**:
- GitHub/GitLab compatible
- Can be added to PR comments
- Suitable for documentation
- Includes tables and checkboxes
### Time Estimates
**By Issue Count**:
- 0 issues: No time needed ✅
- 1-3 CRITICAL: 2-4 hours
- 4-10 HIGH: 1-2 hours
- 11-20 MEDIUM: 30-60 minutes
- 20+ LOW: 15-30 minutes
**By Issue Type**:
- Secret rotation: 30-60 minutes each
- URL updates: 5-10 minutes each
- File removal: 15-30 minutes (including .gitignore)
- Permission fixes: 5 minutes total (batch operation)
### Remediation Verification
After fixing issues, re-run audit:
```bash
# Fix issues
chmod 755 scripts/*.sh
git rm .env
echo ".env" >> .gitignore
# Verify fixes
/security-scan full-security-audit path:.
# Should see improved score
```
### Best Practices
**Regular Audits**:
- Run before each release
- Include in CI/CD pipeline
- Weekly scans for active development
- After adding dependencies
**Fix Priority**:
1. CRITICAL: Drop everything and fix
2. HIGH: Fix within 24 hours
3. MEDIUM: Fix within 1 week
4. LOW: Address when convenient
**Team Communication**:
- Share audit results with team
- Document security requirements
- Train on secure development
- Review security in code reviews
### Output Format
```json
{
"scan_type": "full-audit",
"timestamp": "<ISO8601>",
"path": "<path>",
"security_score": <0-100>,
"rating": "<Excellent|Good|Fair|Poor|Critical>",
"publication_ready": <boolean>,
"estimated_fix_time": "<time_string>",
"severity_breakdown": {
"critical": <count>,
"high": <count>,
"medium": <count>,
"low": <count>
},
"scan_results": {
"secrets": { "status": "pass|fail", "findings": [...] },
"urls": { "status": "pass|fail", "findings": [...] },
"files": { "status": "pass|fail", "findings": [...] },
"permissions": { "status": "pass|fail", "findings": [...] }
},
"all_findings": [...],
"remediation_plan": [...],
"recommendations": [...],
"action_required": <boolean>
}
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,364 @@
## Operation: Scan for Dangerous Files
Detect dangerous files, sensitive configurations, and files that should not be committed to version control.
### Parameters from $ARGUMENTS
- **path**: Target directory to scan (required)
- **patterns**: Specific file patterns to check (optional, default: all)
- **include-hidden**: Scan hidden files and directories (true|false, default: true)
- **check-gitignore**: Verify .gitignore coverage (true|false, default: true)
### Dangerous File Categories
**Environment Files** (CRITICAL):
- .env, .env.local, .env.production, .env.development
- .env.*.local (any environment-specific)
- env.sh, setenv.sh
→ Often contain secrets, should never be committed
**Credential Files** (CRITICAL):
- credentials.json, credentials.yaml, credentials.yml
- secrets.json, secrets.yaml, config/secrets/*
- .aws/credentials, .azure/credentials
- .gcp/credentials.json, gcloud/credentials
→ Direct access credentials, rotate if exposed
**Private Keys** (CRITICAL):
- id_rsa, id_dsa, id_ed25519 (SSH keys)
- *.pem, *.key, *.p12, *.pfx (SSL/TLS certificates)
- *.jks, *.keystore (Java keystores)
- .gnupg/*, .ssh/id_* (GPG and SSH directories)
→ Authentication keys, regenerate if exposed
**Database Files** (HIGH):
- *.db, *.sqlite, *.sqlite3
- *.sql with INSERT statements (data dumps)
- dump.sql, backup.sql
- *.mdb, *.accdb (Access databases)
→ May contain sensitive data
**Configuration Files** (MEDIUM):
- config/database.yml with passwords
- appsettings.json with connection strings
- wp-config.php with DB credentials
- settings.py with SECRET_KEY
→ Review for hardcoded secrets
**Backup Files** (MEDIUM):
- *.bak, *.backup, *.old
- *~, *.swp, *.swo (editor backups)
- *.orig, *.copy
→ May contain previous versions with secrets
**Log Files** (LOW):
- *.log with potential sensitive data
- debug.log, error.log
- Combined log files (>10MB)
→ Review for leaked information
### Workflow
1. **Parse arguments**
```
Extract path, patterns, include-hidden, check-gitignore
Validate path exists and is directory
Load dangerous file patterns
```
2. **Execute file scanner**
```bash
Execute .scripts/file-scanner.sh "$path" "$patterns" "$include_hidden" "$check_gitignore"
Returns:
- 0: No dangerous files found
- 1: Dangerous files detected
- 2: Scan error
```
3. **Process results**
```
Categorize by risk:
- CRITICAL: Private keys, credentials, production env files
- HIGH: Database files, config with secrets
- MEDIUM: Backup files, test credentials
- LOW: Log files, temporary files
Cross-reference with .gitignore:
- Files that SHOULD be in .gitignore but aren't
- Already ignored files (informational)
```
4. **Format output**
```
Dangerous Files Scan Results
━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Path: <path>
Files Scanned: <count>
CRITICAL Files (<count>):
❌ .env (157 bytes)
Type: Environment file
Risk: Contains API keys and secrets
Status: NOT in .gitignore ⚠️
Remediation: Add to .gitignore, remove from git history
❌ config/credentials.json (2.3 KB)
Type: Credential file
Risk: Contains authentication credentials
Status: NOT in .gitignore ⚠️
Remediation: Remove, rotate credentials, use env vars
HIGH Files (<count>):
⚠️ database/dev.db (45 MB)
Type: SQLite database
Risk: May contain user data
Status: In .gitignore ✓
Remediation: Verify .gitignore working
Summary:
- Total dangerous files: <count>
- Not in .gitignore: <count>
- Action required: <yes|no>
```
### File Pattern Signatures
**Environment files**:
```
.env
.env.*
env.sh
setenv.sh
.envrc
```
**Credential files**:
```
*credentials*
*secrets*
*password*
.aws/credentials
.azure/credentials
.gcp/*credentials*
```
**Private keys**:
```
id_rsa
id_dsa
id_ed25519
*.pem
*.key
*.p12
*.pfx
*.jks
*.keystore
.gnupg/*
```
**Database files**:
```
*.db
*.sqlite
*.sqlite3
*.sql (with INSERT/UPDATE)
dump.sql
*backup*.sql
```
**Backup patterns**:
```
*.bak
*.backup
*.old
*.orig
*.copy
*~
*.swp
*.swo
```
### .gitignore Validation
**Should be ignored**:
```gitignore
# Environment
.env*
!.env.example
# Credentials
credentials.*
secrets.*
*.pem
*.key
id_rsa*
# Databases
*.db
*.sqlite*
dump.sql
# Backups
*.bak
*.backup
*~
```
**Safe to commit** (examples):
```
.env.example
.env.template
credentials.example.json
README.md
package.json
```
### Examples
```bash
# Scan current directory
/security-scan files path:.
# Check specific patterns only
/security-scan files path:. patterns:".env,credentials,*.pem"
# Include hidden files explicitly
/security-scan files path:. include-hidden:true
# Scan and verify .gitignore coverage
/security-scan files path:. check-gitignore:true
```
### Error Handling
**Path not found**:
```
ERROR: Path does not exist: <path>
Remediation: Verify path is correct
```
**Path is not directory**:
```
ERROR: Path is not a directory: <path>
Remediation: Provide directory path for file scanning
```
**No .gitignore found**:
```
WARNING: No .gitignore file found
Recommendation: Create .gitignore to prevent committing sensitive files
```
### Remediation Guidance
**For environment files (.env)**:
1. Add to .gitignore immediately
2. Remove from git history if committed:
```bash
git filter-branch --force --index-filter \
"git rm --cached --ignore-unmatch .env" \
--prune-empty --tag-name-filter cat -- --all
```
3. Create .env.example with dummy values
4. Document environment variables in README
**For credential files**:
1. Remove from repository
2. Rotate all exposed credentials
3. Use environment variables or secret managers
4. Add to .gitignore
5. Consider using git-secrets or similar tools
**For private keys**:
1. Regenerate keys immediately
2. Remove from repository
3. Update deployed systems with new keys
4. Add *.pem, *.key, id_rsa to .gitignore
5. Audit access logs for unauthorized use
**For database files**:
1. Remove from repository if contains real data
2. For test data, ensure no real emails/names
3. Add *.db, *.sqlite to .gitignore
4. Use schema-only dumps in version control
**For backup files**:
1. Clean up backup files before commit
2. Add backup patterns to .gitignore
3. Use .gitignore_global for editor backups
4. Configure editors to save backups elsewhere
### Git History Cleanup
If sensitive files were already committed:
```bash
# Using git filter-repo (recommended)
git filter-repo --path .env --invert-paths
# Using BFG Repo-Cleaner (fast for large repos)
bfg --delete-files .env
git reflog expire --expire=now --all
git gc --prune=now --aggressive
# Force push (WARNING: destructive)
git push origin --force --all
```
### Prevention Strategies
**Pre-commit hooks**:
```bash
# .git/hooks/pre-commit
#!/bin/bash
# Check for dangerous files
if git diff --cached --name-only | grep -E '\.env$|credentials|id_rsa'; then
echo "ERROR: Attempting to commit sensitive file"
exit 1
fi
```
**Use git-secrets**:
```bash
git secrets --install
git secrets --register-aws
git secrets --add 'credentials\.json'
```
**IDE Configuration**:
- Configure .gitignore templates
- Use .editorconfig
- Set up file watchers for dangerous patterns
### Output Format
```json
{
"scan_type": "files",
"path": "<path>",
"files_scanned": <count>,
"dangerous_files": <count>,
"not_in_gitignore": <count>,
"severity_breakdown": {
"critical": <count>,
"high": <count>,
"medium": <count>,
"low": <count>
},
"findings": [
{
"file": "<file_path>",
"type": "<file_type>",
"size": <size_bytes>,
"severity": "<severity>",
"risk": "<risk_description>",
"in_gitignore": <boolean>,
"remediation": "<action>"
}
],
"action_required": <boolean>
}
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,207 @@
## Operation: Scan for Exposed Secrets
Detect exposed secrets, API keys, tokens, passwords, and private keys using 50+ pattern signatures.
### Parameters from $ARGUMENTS
- **path**: Target directory or file to scan (required)
- **recursive**: Scan subdirectories (true|false, default: true)
- **patterns**: Specific pattern categories to check (optional, default: all)
- **exclude**: Patterns to exclude from scan (optional)
- **severity**: Minimum severity to report (critical|high|medium|low, default: medium)
### Secret Detection Patterns (50+)
**API Keys & Tokens**:
- Stripe: sk_live_, sk_test_, pk_live_, pk_test_
- OpenAI: sk-[a-zA-Z0-9]{32,}
- AWS: AKIA[0-9A-Z]{16}
- Google: AIza[0-9A-Za-z_-]{35}
- GitHub: ghp_, gho_, ghs_, ghu_
- Slack: xox[baprs]-[0-9a-zA-Z]{10,}
- Twitter: [0-9a-zA-Z]{35,44}
- Facebook: EAA[0-9A-Za-z]{90,}
**Private Keys**:
- RSA: BEGIN RSA PRIVATE KEY
- Generic: BEGIN PRIVATE KEY
- SSH: BEGIN OPENSSH PRIVATE KEY
- PGP: BEGIN PGP PRIVATE KEY
- DSA: BEGIN DSA PRIVATE KEY
- EC: BEGIN EC PRIVATE KEY
**Credentials**:
- Passwords: password\s*[=:]\s*['\"][^'\"]+['\"]
- API keys: api[_-]?key\s*[=:]\s*['\"][^'\"]+['\"]
- Secrets: secret\s*[=:]\s*['\"][^'\"]+['\"]
- Tokens: token\s*[=:]\s*['\"][^'\"]+['\"]
- Auth: authorization\s*[=:]\s*['\"]Bearer [^'\"]+['\"]
**Cloud Provider Credentials**:
- AWS Access Key: aws_access_key_id
- AWS Secret: aws_secret_access_key
- Azure: [0-9a-zA-Z/+]{88}==
- GCP Service Account: type.*service_account
### Workflow
1. **Parse arguments**
```
Extract path, recursive, patterns, exclude, severity
Validate path exists
Determine scan scope (file vs directory)
```
2. **Execute secret scanner**
```bash
Execute .scripts/secret-scanner.sh "$path" "$recursive" "$patterns" "$exclude" "$severity"
Returns:
- 0: No secrets found
- 1: Secrets detected
- 2: Scan error
```
3. **Process results**
```
Parse scanner output
Categorize by severity:
- CRITICAL: Private keys, production API keys
- HIGH: API keys, tokens with broad scope
- MEDIUM: Passwords, secrets in config
- LOW: Test keys, development credentials
Generate remediation guidance per finding
```
4. **Format output**
```
Secrets Scan Results
━━━━━━━━━━━━━━━━━━━━
Path: <path>
Files Scanned: <count>
CRITICAL Issues (<count>):
❌ <file>:<line>: <type> detected
Pattern: <pattern_name>
Remediation: Remove and rotate immediately
HIGH Issues (<count>):
⚠️ <file>:<line>: <type> detected
Summary:
- Total secrets: <count>
- Unique patterns: <count>
- Action required: <yes|no>
```
### Examples
```bash
# Scan current directory recursively
/security-scan secrets path:.
# Scan specific file only
/security-scan secrets path:./config/settings.json recursive:false
# Check only API key patterns
/security-scan secrets path:. patterns:"api-keys,tokens"
# Exclude test directories
/security-scan secrets path:. exclude:"test,mock,fixtures"
# Only critical severity
/security-scan secrets path:. severity:critical
```
### Error Handling
**Path not found**:
```
ERROR: Path does not exist: <path>
Remediation: Verify path and try again
```
**No patterns matched**:
```
INFO: No secrets detected
All files clean
```
**Scanner unavailable**:
```
ERROR: Secret scanner script not found
Remediation: Verify plugin installation
```
### Severity Levels
**CRITICAL** (Immediate action required):
- Private keys (RSA, SSH, PGP)
- Production API keys (live_, prod_)
- AWS credentials
- Database connection strings with passwords
**HIGH** (Action required):
- API keys (generic)
- OAuth tokens
- Bearer tokens
- Authentication credentials
**MEDIUM** (Should address):
- Passwords in config files
- Secret variables
- Session tokens
- Development credentials in non-test contexts
**LOW** (Review recommended):
- Test API keys
- Mock credentials
- Example configurations
### Remediation Guidance
**For exposed secrets**:
1. Remove from code immediately
2. Rotate/regenerate the credential
3. Use environment variables instead
4. Add to .gitignore if file-based
5. Review git history for exposure
6. Consider using secret management (AWS Secrets Manager, HashiCorp Vault)
**Prevention**:
- Use .env files (never commit)
- Use environment variables
- Implement pre-commit hooks
- Use secret scanning in CI/CD
- Educate team on security practices
### Output Format
```json
{
"scan_type": "secrets",
"path": "<path>",
"files_scanned": <count>,
"secrets_found": <count>,
"severity_breakdown": {
"critical": <count>,
"high": <count>,
"medium": <count>,
"low": <count>
},
"findings": [
{
"file": "<file_path>",
"line": <line_number>,
"type": "<secret_type>",
"severity": "<severity>",
"pattern": "<pattern_name>",
"remediation": "<action>"
}
],
"action_required": <boolean>
}
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,89 @@
---
description: Comprehensive security scanning for secrets, vulnerabilities, and unsafe practices
---
You are the Security Scan coordinator, protecting against security vulnerabilities and exposed secrets.
## Your Mission
Parse `$ARGUMENTS` to determine the requested security scan operation and route to the appropriate sub-command.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **secrets** → Read `.claude/commands/security-scan/scan-secrets.md`
- **urls** → Read `.claude/commands/security-scan/check-urls.md`
- **files** → Read `.claude/commands/security-scan/scan-files.md`
- **permissions** → Read `.claude/commands/security-scan/check-permissions.md`
- **full-security-audit** → Read `.claude/commands/security-scan/full-audit.md`
## Argument Format
```
/security-scan <operation> [parameters]
```
### Examples
```bash
# Scan for exposed secrets
/security-scan secrets path:. recursive:true
# Validate URL safety
/security-scan urls path:. https-only:true
# Detect dangerous files
/security-scan files path:. patterns:".env,credentials.json,id_rsa"
# Check file permissions
/security-scan permissions path:. strict:true
# Run complete security audit
/security-scan full-security-audit path:.
```
## Security Checks
**Secret Detection** (50+ patterns):
- API keys: sk-, pk-, token-
- AWS credentials: AKIA, aws_access_key_id
- Private keys: BEGIN PRIVATE KEY, BEGIN RSA PRIVATE KEY
- Passwords: password=, pwd=
- Tokens: Bearer, Authorization
**URL Safety**:
- HTTPS enforcement
- Malicious pattern detection: eval(), exec(), rm -rf
- Curl/wget piping: curl | sh, wget | bash
**Dangerous Files**:
- .env files with secrets
- credentials.json, config.json with keys
- Private keys: id_rsa, *.pem, *.key
- Database dumps with data
**File Permissions**:
- No world-writable files (777)
- Scripts executable only when needed
- Config files read-only (644)
## Error Handling
If the operation is not recognized:
1. List all available security operations
2. Show security best practices
3. Provide remediation guidance
## Base Directory
Base directory for this skill: `.claude/commands/security-scan/`
## Your Task
1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute security scans with pattern matching
4. Return prioritized security findings with remediation steps
**Current Request**: $ARGUMENTS

View File

@@ -0,0 +1,106 @@
---
description: Comprehensive validation of Claude Code marketplace.json with quality scoring
argument-hint: [path-to-marketplace.json]
allowed-tools: Bash(${CLAUDE_PLUGIN_ROOT}/scripts/*), Read
---
# Validate Marketplace
You are a marketplace validation specialist. Your task is to comprehensively validate a Claude Code marketplace.json file.
## Process
### 1. Locate Marketplace File
Determine the marketplace.json path:
- If user provided an argument ($1): Use that path
- If no argument: Look for `.claude-plugin/marketplace.json` in current directory
- Auto-detect: Search up directory tree for `.claude-plugin/marketplace.json`
### 2. Run Validation Script
Execute the validation script located at:
```
${CLAUDE_PLUGIN_ROOT}/scripts/validate-marketplace-full.sh [path]
```
The script will validate:
- JSON syntax
- Required fields (name, owner, plugins)
- Optional recommended fields
- Plugin entries structure
- Quality scoring
- Security checks
### 3. Interpret Results
The script outputs:
- ✅ Success messages for valid items
- ❌ Errors for critical issues
- ⚠️ Warnings for recommended improvements
- Info for suggestions
Quality Score:
- 90-100: Excellent ⭐⭐⭐⭐⭐
- 75-89: Good ⭐⭐⭐⭐
- 60-74: Fair ⭐⭐⭐
- 40-59: Needs Improvement ⭐⭐
- 0-39: Poor ⭐
### 4. Provide Recommendations
Based on validation results, provide:
1. **Critical Issues**: Must be fixed before publication
2. **Warnings**: Should be addressed for quality
3. **Suggestions**: Would improve discoverability and usability
### 5. Summary
Conclude with:
- Overall quality assessment
- Priority action items
- Publication readiness status
## Example Output Structure
```
🔍 Validating Marketplace: open-plugins
✅ JSON syntax valid
✅ Required fields present
✅ 3 plugins found
Plugin Validation:
✅ plugin-name (v1.0.0) - Excellent ⭐⭐⭐⭐⭐
⚠️ another-plugin (v0.1.0) - Fair ⭐⭐⭐
- Missing recommended field: keywords
- Description too short (< 50 chars)
Overall Quality Score: 85/100 - Good ⭐⭐⭐⭐
Recommendations:
1. Add keywords to another-plugin
2. Expand description in another-plugin
Summary: 1 excellent, 1 fair - Address warnings before publication
```
## Error Handling
If validation fails:
1. Show clear error messages
2. Explain what's wrong
3. Provide remediation steps
4. Reference documentation if needed
Common issues:
- **Invalid JSON**: Show syntax error location
- **Missing required fields**: List which fields are missing
- **Invalid formats**: Show expected format
- **Security issues**: Explain the risk and how to fix
## References
- Marketplace schema: `.claude/docs/plugins/plugin-marketplaces.md`
- Plugin standards: `.claude/docs/plugins/plugins-reference.md`
- OpenPlugins contributing: `open-plugins/CONTRIBUTING.md`

198
commands/validate-plugin.md Normal file
View File

@@ -0,0 +1,198 @@
---
description: Comprehensive validation of Claude Code plugin.json with structure checks
argument-hint: [path-to-plugin-directory]
allowed-tools: Bash(${CLAUDE_PLUGIN_ROOT}/scripts/*), Read, Glob
---
# Validate Plugin
You are a plugin validation specialist. Your task is to comprehensively validate a Claude Code plugin for quality, structure, and standards compliance.
## Process
### 1. Locate Plugin Directory
Determine the plugin path:
- If user provided an argument ($1): Use that path
- If no argument: Use current directory
- Look for `plugin.json` at plugin root to confirm it's a plugin
### 2. Run Validation Script
Execute the validation script located at:
```
${CLAUDE_PLUGIN_ROOT}/scripts/validate-plugin-full.sh [path]
```
The script will validate:
- **Structure**: Directory layout, required files
- **Metadata**: plugin.json schema compliance
- **Components**: Commands, agents, hooks validation
- **Quality**: Documentation, completeness
- **Security**: No secrets, safe permissions
### 3. Interpret Results
The script outputs:
- ✅ Success messages for valid components
- ❌ Errors for critical issues (must fix)
- ⚠️ Warnings for recommended improvements
- Info for optional suggestions
Quality Score Categories:
- 90-100: Excellent ⭐⭐⭐⭐⭐ (publication-ready)
- 75-89: Good ⭐⭐⭐⭐ (minor improvements)
- 60-74: Fair ⭐⭐⭐ (needs work)
- 40-59: Needs Improvement ⭐⭐ (significant issues)
- 0-39: Poor ⭐ (not ready)
### 4. Provide Detailed Feedback
For each issue found, provide:
1. **What's wrong**: Clear explanation
2. **Why it matters**: Impact on functionality/quality
3. **How to fix**: Specific remediation steps
4. **Examples**: Show correct format
### 5. Prioritize Issues
Categorize findings:
- **Critical**: Prevents installation or functionality
- **Important**: Affects quality or user experience
- **Recommended**: Improves discoverability or maintenance
- **Optional**: Nice-to-have enhancements
### 6. Generate Action Plan
Create a numbered list of fixes in priority order:
1. Fix critical errors first
2. Address important warnings
3. Implement recommended improvements
4. Consider optional enhancements
## Validation Checklist
### Structure Validation
- [ ] `plugin.json` exists at plugin root
- [ ] `plugin.json` in correct location
- [ ] Component directories present (if used)
- [ ] README.md exists and complete
- [ ] LICENSE file present
### Metadata Validation
- [ ] Required fields: name, version, description, author, license
- [ ] Name: lowercase-hyphen format
- [ ] Version: semantic versioning (X.Y.Z)
- [ ] Description: 50-200 characters
- [ ] Author: valid format (string or object)
- [ ] License: valid identifier
### Component Validation
- [ ] Command files (*.md) have valid frontmatter
- [ ] Agent files have required fields (name, description)
- [ ] Hooks JSON is valid (if present)
- [ ] MCP configuration valid (if present)
- [ ] All referenced files exist
### Quality Validation
- [ ] README has minimum sections
- [ ] No TODO or placeholder content
- [ ] Keywords present (3-7 recommended)
- [ ] CHANGELOG.md exists (recommended)
### Security Validation
- [ ] No .env files with real credentials
- [ ] No API keys or tokens in code
- [ ] No suspicious file permissions
- [ ] HTTPS for external URLs
## Example Output Structure
```
🔍 Validating Plugin: my-awesome-plugin
Structure:
✅ plugin.json exists at plugin root
✅ plugin.json schema valid
✅ Commands directory present (2 commands found)
⚠️ Agents directory missing (optional)
✅ README.md present and complete
❌ LICENSE file missing
Metadata:
✅ Name: my-awesome-plugin (valid format)
✅ Version: 1.0.0 (valid semver)
✅ Description: "A comprehensive plugin..." (132 chars)
❌ Author field missing
Components:
✅ commands/action.md - valid frontmatter
❌ commands/broken.md - missing description
Security:
✅ No exposed secrets
⚠️ File .env.example found (verify no real values)
Quality Score: 65/100 - Fair ⭐⭐⭐
Critical Issues (must fix):
1. Add LICENSE file (MIT recommended)
2. Add author field to plugin.json
3. Fix commands/broken.md frontmatter
Recommendations:
1. Add CHANGELOG.md for version tracking
2. Consider adding agents directory
3. Review .env.example for sensitive data
Status: NEEDS FIXES before publication
```
## Error Handling
Provide helpful guidance for common issues:
**Missing plugin.json**:
```
Error: No plugin.json found at plugin root
This is required for all Claude Code plugins.
To fix:
1. Create plugin.json at plugin root directory
2. Add plugin.json with required fields
3. See: .claude/docs/plugins/plugins-reference.md
```
**Invalid JSON**:
```
Error: Invalid JSON syntax in plugin.json
Line 5: Expected comma or closing brace
To fix:
1. Validate JSON: cat plugin.json | python3 -m json.tool
2. Fix syntax errors
3. Ensure proper formatting
```
**Missing Required Field**:
```
Error: Missing required field: 'author'
The author field identifies the plugin creator.
To fix - Add to plugin.json:
"author": {
"name": "Your Name",
"email": "you@example.com"
}
Or use string format:
"author": "Your Name"
```
## References
- Plugin schema: `.claude/docs/plugins/plugins-reference.md`
- OpenPlugins standards: `open-plugins/CONTRIBUTING.md`
- Best practices: `CLAUDE.md` in project

146
commands/validate-quick.md Normal file
View File

@@ -0,0 +1,146 @@
---
description: Quick validation mode for marketplaces and plugins (essential checks only)
argument-hint: [target-path]
allowed-tools: Bash(${CLAUDE_PLUGIN_ROOT}/scripts/*), Read
---
# Quick Validate
You are a quick validation specialist. Your task is to rapidly validate a marketplace or plugin with essential checks only.
## Process
### 1. Auto-Detect Target Type
Determine if the target is a marketplace or plugin:
- If `target/.claude-plugin/marketplace.json` exists: It's a marketplace
- If `target/plugin.json` exists: It's a plugin
- If neither: Error - not a valid target
### 2. Run Quick Validation
Execute the appropriate quick validation script:
**For Marketplace**:
```bash
${CLAUDE_PLUGIN_ROOT}/scripts/validate-marketplace-quick.sh [path]
```
**For Plugin**:
```bash
${CLAUDE_PLUGIN_ROOT}/scripts/validate-plugin-quick.sh [path]
```
### 3. Essential Checks Only
Quick mode validates:
- ✅ JSON syntax valid
- ✅ Required fields present
- ✅ Basic format compliance
- ❌ Critical security issues
Quick mode skips:
- Detailed quality scoring
- Optional field checks
- Comprehensive recommendations
- URL accessibility tests
### 4. Return Pass/Fail
Output format:
```
🔍 Quick Validation: [target-name]
✅ JSON syntax: PASS
✅ Required fields: PASS
✅ Format compliance: PASS
✅ Security check: PASS
Status: PASS ✅
All essential checks passed. Run full validation for detailed quality assessment.
```
Or on failure:
```
🔍 Quick Validation: [target-name]
✅ JSON syntax: PASS
❌ Required fields: FAIL
- Missing: 'author'
✅ Format compliance: PASS
✅ Security check: PASS
Status: FAIL ❌
Fix critical issues above, then run full validation.
```
### 5. Exit Codes
Return appropriate exit code:
- **0**: All essential checks passed
- **1**: Critical issues found
- **2**: Invalid JSON syntax
- **3**: Missing required fields
## Use Cases
Quick validation is ideal for:
- **CI/CD pipelines**: Fast pre-merge checks
- **Pre-commit hooks**: Immediate feedback
- **Rapid iteration**: Quick verification during development
- **Gate checks**: Binary pass/fail before full validation
## When to Use Full Validation
Recommend full validation when:
- Preparing for publication
- After all quick checks pass
- Need quality scoring
- Want detailed recommendations
Guide the user:
```
Quick validation passed! ✅
For publication readiness, run:
/validate-marketplace [path] # For detailed analysis
/validate-plugin [path] # For comprehensive review
```
## Error Handling
Provide concise, actionable errors:
**Invalid JSON**:
```
❌ JSON syntax: FAIL
- Invalid JSON in line 5
Fix: Validate with: python3 -m json.tool file.json
```
**Missing Field**:
```
❌ Required fields: FAIL
- Missing: name, version
Fix: Add required fields to JSON file
```
**Security Issue**:
```
❌ Security check: FAIL
- Possible exposed secret detected
Fix: Remove sensitive data from files
```
## Performance
Quick validation should complete in:
- Marketplace: < 2 seconds
- Plugin: < 3 seconds
This makes it suitable for automated workflows and immediate feedback.

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# ============================================================================
# Target Detector Script
# ============================================================================
# Purpose: Auto-detect if target is a marketplace, plugin, or both
# Version: 1.0.0
# Usage: ./target-detector.sh <path>
# Returns: 0=success, 1=error, 2=unknown_target
# ============================================================================
set -euo pipefail
# ====================
# Configuration
# ====================
# Absolute directory containing this script.
# NOTE(review): SCRIPT_DIR is not referenced anywhere below in this script —
# possibly kept for future use or sourcing; confirm before removing.
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Target path to inspect; defaults to the current directory when no argument given.
readonly TARGET_PATH="${1:-.}"
# ====================
# Output Functions
# ====================
#######################################
# Emit a JSON detection report on stdout.
# Globals:   TARGET_PATH (read; resolved to an absolute path)
# Arguments: $1 - target type string, $2 - confidence string,
#            $3.. - zero or more manifest file names found
# Outputs:   JSON object to stdout
#######################################
print_json() {
  local target_type="$1"
  local confidence="$2"
  shift 2
  local files=("$@")
  # Build the JSON array body. Guard the empty case explicitly:
  # - printf with zero arguments still prints the format once, which would
  #   produce [""] instead of [] for an empty list;
  # - expanding an empty array with "${files[@]}" trips `set -u` on bash < 4.4.
  # NOTE(review): file names are not JSON-escaped; fine for the fixed names
  # passed by detect_target, but confirm if arbitrary names are ever passed.
  local files_json=""
  if (( ${#files[@]} > 0 )); then
    files_json=$(printf '"%s",' "${files[@]}")
    files_json="${files_json%,}"
  fi
  cat <<EOF
{
  "target_type": "${target_type}",
  "path": "$(cd "${TARGET_PATH}" && pwd)",
  "files_found": [${files_json}],
  "confidence": "${confidence}"
}
EOF
}
# ====================
# Detection Logic
# ====================
#######################################
# Detect whether a path holds a marketplace, a plugin, both, or neither.
# Arguments: $1 - directory to inspect
# Outputs:   JSON report via print_json on stdout; errors on stderr
# Returns:   0 on successful detection, 1 bad path, 2 unknown target
#######################################
detect_target() {
  local path="$1"

  # Fail fast when the target directory is missing.
  if [[ ! -d "${path}" ]]; then
    echo "Error: Path does not exist: ${path}" >&2
    return 1
  fi

  # A .claude-plugin directory is the minimum marker for either target type.
  if [[ ! -d "${path}/.claude-plugin" ]]; then
    echo "Error: No .claude-plugin directory found at ${path}" >&2
    echo "This does not appear to be a plugin or marketplace." >&2
    return 2
  fi

  # Look for the two manifest files that distinguish the target types.
  local marketplace_json="${path}/.claude-plugin/marketplace.json"
  local plugin_json="${path}/plugin.json"
  local has_marketplace=false
  local has_plugin=false
  local files_found=()

  if [[ -f "${marketplace_json}" ]]; then
    has_marketplace=true
    files_found+=("marketplace.json")
  fi
  if [[ -f "${plugin_json}" ]]; then
    has_plugin=true
    files_found+=("plugin.json")
  fi

  # Classify: both manifests -> multi-target; one -> that type; none -> unknown.
  if [[ "${has_marketplace}" == "true" && "${has_plugin}" == "true" ]]; then
    print_json "multi-target" "high" "${files_found[@]}"
  elif [[ "${has_marketplace}" == "true" ]]; then
    print_json "marketplace" "high" "${files_found[@]}"
  elif [[ "${has_plugin}" == "true" ]]; then
    print_json "plugin" "high" "${files_found[@]}"
  else
    # files_found is empty here. The ${arr[@]+...} guard avoids the
    # "unbound variable" error that bash < 4.4 raises when expanding an
    # empty array under `set -u`.
    print_json "unknown" "low" ${files_found[@]+"${files_found[@]}"}
    echo "Error: .claude-plugin directory exists but no manifest files found" >&2
    return 2
  fi
  return 0
}
# ====================
# Main Execution
# ====================
#######################################
# Entry point: run detection against the configured target path.
# Globals: TARGET_PATH (read)
#######################################
main() {
  local -r target="${TARGET_PATH}"
  detect_target "${target}"
}
# Invoke the entry point; arguments are forwarded but main itself only uses
# the already-parsed TARGET_PATH ($1 was consumed at the top of the script).
main "$@"

View File

@@ -0,0 +1,245 @@
#!/usr/bin/env python3
"""
============================================================================
Validation Dispatcher Script
============================================================================
Purpose: Route validation requests and aggregate results from multiple layers
Version: 1.0.0
Usage: ./validation-dispatcher.py --mode=<mode> [options]
Returns: 0=success, 1=error
============================================================================
"""
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Any
from enum import Enum
class ValidationMode(Enum):
    """Dispatch modes accepted by the --mode CLI flag.

    ROUTE maps a target type/level to the validator command to invoke;
    AGGREGATE merges per-layer result dicts into one summary report.
    """
    ROUTE = "route"  # Route to appropriate validator
    AGGREGATE = "aggregate"  # Aggregate results from multiple validators
class ValidationResult:
"""Validation result structure"""
def __init__(self):
self.layers: Dict[str, Dict[str, Any]] = {}
self.overall_score = 0
self.critical_issues = 0
self.warnings = 0
self.recommendations = 0
def add_layer(self, name: str, result: Dict[str, Any]):
"""Add a validation layer result"""
self.layers[name] = result
# Aggregate counts
if 'critical_issues' in result:
self.critical_issues += result['critical_issues']
if 'warnings' in result:
self.warnings += result['warnings']
if 'recommendations' in result:
self.recommendations += result['recommendations']
def calculate_overall_score(self):
"""Calculate overall quality score from all layers"""
if not self.layers:
return 0
total_score = 0
layer_count = 0
for layer, result in self.layers.items():
if 'score' in result:
total_score += result['score']
layer_count += 1
if layer_count > 0:
self.overall_score = total_score // layer_count
else:
# Fallback calculation based on issues
self.overall_score = max(0, 100 - (self.critical_issues * 20) -
(self.warnings * 10) - (self.recommendations * 5))
return self.overall_score
def get_rating(self) -> str:
"""Get quality rating based on score"""
score = self.overall_score
if score >= 90:
return "Excellent"
elif score >= 75:
return "Good"
elif score >= 60:
return "Fair"
elif score >= 40:
return "Needs Improvement"
else:
return "Poor"
def get_stars(self) -> str:
"""Get star rating"""
score = self.overall_score
if score >= 90:
return "⭐⭐⭐⭐⭐"
elif score >= 75:
return "⭐⭐⭐⭐"
elif score >= 60:
return "⭐⭐⭐"
elif score >= 40:
return "⭐⭐"
else:
return ""
def is_publication_ready(self) -> str:
"""Determine publication readiness"""
if self.critical_issues > 0:
return "Not Ready"
elif self.overall_score >= 75:
return "Ready"
else:
return "Needs Work"
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary"""
return {
"overall_score": self.overall_score,
"rating": self.get_rating(),
"stars": self.get_stars(),
"publication_ready": self.is_publication_ready(),
"critical_issues": self.critical_issues,
"warnings": self.warnings,
"recommendations": self.recommendations,
"layers": self.layers
}
class ValidationDispatcher:
    """Coordinator for validation routing and aggregation.

    In ROUTE mode, maps a target type and validation level to the slash
    command that should be invoked. In AGGREGATE mode, merges per-layer
    result dicts into a single ValidationResult summary.
    """

    def __init__(self, mode: ValidationMode):
        self.mode = mode                  # active dispatch mode
        self.result = ValidationResult()  # accumulator for aggregate mode

    def route(self, target_type: str, target_path: str, level: str) -> Dict[str, Any]:
        """Resolve the validator command for a target, or return an error dict."""
        # Command lookup per (target type, level).
        routing = {
            "marketplace": {
                "quick": "/validate-quick",
                "comprehensive": "/validate-marketplace"
            },
            "plugin": {
                "quick": "/validate-quick",
                "comprehensive": "/validate-plugin"
            }
        }
        commands = routing.get(target_type)
        if commands is None:
            return {
                "error": f"Unknown target type: {target_type}",
                "supported": list(routing.keys())
            }
        # Unrecognized levels fall back to the comprehensive validator.
        command = commands.get(level, commands["comprehensive"])
        return {
            "target_type": target_type,
            "target_path": target_path,
            "validation_level": level,
            "command": command,
            "invocation": f"{command} {target_path}"
        }

    def aggregate(self, layer_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Fold a list of layer result dicts into one summary dict."""
        for entry in layer_results:
            self.result.add_layer(entry.get('layer', 'unknown'), entry)
        self.result.calculate_overall_score()
        return self.result.to_dict()

    def format_output(self, data: Dict[str, Any]) -> str:
        """Render routing output as JSON, or aggregate output as a text report."""
        if self.mode == ValidationMode.ROUTE:
            return json.dumps(data, indent=2)
        elif self.mode == ValidationMode.AGGREGATE:
            bar = "=" * 60
            lines = [
                bar,
                "AGGREGATED VALIDATION RESULTS",
                bar,
                f"Overall Score: {data['overall_score']}/100 {data['stars']}",
                f"Rating: {data['rating']}",
                f"Publication Ready: {data['publication_ready']}",
                "",
                f"Critical Issues: {data['critical_issues']}",
                f"Warnings: {data['warnings']}",
                f"Recommendations: {data['recommendations']}",
                "",
                "Layer Results:",
            ]
            for name, layer in data['layers'].items():
                lines.append(f"  - {name}: {layer.get('status', 'unknown')}")
            lines.append(bar)
            return "\n".join(lines)
def main():
    """CLI entry point: parse arguments, dispatch, print, return exit status.

    Returns 0 on success, 1 on missing mode-specific arguments or any
    runtime failure (errors are written to stderr).
    """
    parser = argparse.ArgumentParser(description="Validation dispatcher")
    parser.add_argument("--mode", required=True, choices=["route", "aggregate"],
                        help="Dispatch mode")
    parser.add_argument("--target-type", help="Target type for routing")
    parser.add_argument("--target-path", default=".", help="Target path")
    parser.add_argument("--level", default="comprehensive",
                        choices=["quick", "comprehensive"],
                        help="Validation level")
    parser.add_argument("--results", help="JSON file with layer results for aggregation")
    parser.add_argument("--json", action="store_true", help="Output JSON format")
    args = parser.parse_args()

    mode = ValidationMode(args.mode)
    dispatcher = ValidationDispatcher(mode)
    try:
        if mode == ValidationMode.ROUTE:
            # Routing requires an explicit target type.
            if not args.target_type:
                print("Error: --target-type required for route mode", file=sys.stderr)
                return 1
            outcome = dispatcher.route(args.target_type, args.target_path, args.level)
        elif mode == ValidationMode.AGGREGATE:
            # Aggregation reads layer results from a JSON file.
            if not args.results:
                print("Error: --results required for aggregate mode", file=sys.stderr)
                return 1
            with open(args.results, 'r') as handle:
                outcome = dispatcher.aggregate(json.load(handle))
        # Emit either raw JSON or the mode-specific formatted view.
        if args.json:
            print(json.dumps(outcome, indent=2))
        else:
            print(dispatcher.format_output(outcome))
        return 0
    except Exception as exc:
        print(f"Error: {exc}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,249 @@
## Operation: Auto-Validate (Intelligent Detection + Validation)
Automatically detect target type and execute the most appropriate validation workflow.
### Parameters from $ARGUMENTS
- **path**: Path to validation target (required)
- Format: `path:/path/to/target` or `path:.`
- Default: `.` (current directory)
- **level**: Validation depth (optional)
- Format: `level:quick|comprehensive`
- Default: `comprehensive`
- Options:
- `quick`: Fast critical checks only
- `comprehensive`: Full quality audit
### Auto-Validation Workflow
This operation provides the most intelligent, hands-off validation experience by:
1. Automatically detecting what needs to be validated
2. Choosing the appropriate validation commands
3. Executing the optimal validation workflow
4. Providing actionable results
### Detailed Workflow
1. **Target Detection Phase**
```
Execute .scripts/target-detector.sh "$path"
   IF both marketplace.json AND plugin.json found:
     target_type = "multi-target"
     recommended_command = "validate both separately"
   ELSE IF marketplace.json found:
     target_type = "marketplace"
     recommended_command = "/validate-marketplace"
   ELSE IF plugin.json found:
     target_type = "plugin"
     recommended_command = "/validate-plugin"
   ELSE:
     target_type = "unknown"
     REPORT error and exit
```
2. **Validation Level Selection**
```
IF level == "quick" OR user requested quick:
validation_mode = "quick"
Execute fast critical checks
ELSE IF level == "comprehensive" OR default:
validation_mode = "comprehensive"
Execute full validation suite
```
3. **Execute Appropriate Validation**
```
CASE target_type:
"marketplace":
IF validation_mode == "quick":
Invoke /validate-quick (marketplace mode)
ELSE:
Invoke /validate-marketplace full-analysis
"plugin":
IF validation_mode == "quick":
Invoke /validate-quick (plugin mode)
ELSE:
Invoke /validate-plugin full-analysis
"multi-target":
Validate marketplace first
Then validate plugin
Aggregate results
"unknown":
Report detection failure
Provide troubleshooting guidance
```
4. **Post-Validation Actions**
```
Aggregate all validation results
Calculate overall quality assessment
Provide publication readiness determination
Offer next steps and guidance
```
### Intelligence Features
**Smart Defaults**:
- Defaults to comprehensive validation (thoroughness over speed)
- Automatically selects correct validation command
- Handles edge cases gracefully
**Context Awareness**:
- Recognizes marketplace vs plugin automatically
- Adjusts validation criteria accordingly
- Provides context-specific recommendations
**User Guidance**:
- Explains what was detected
- Shows which validation ran
- Provides clear next steps
### Examples
**Auto-validate current directory (comprehensive):**
```bash
/validation-orchestrator auto path:.
```
**Auto-validate with quick mode:**
```bash
/validation-orchestrator auto path:. level:quick
```
**Auto-validate specific plugin:**
```bash
/validation-orchestrator auto path:/path/to/my-plugin
```
**Auto-validate marketplace:**
```bash
/validation-orchestrator auto path:/path/to/marketplace
```
### Typical User Journey
```
User: "Is my plugin ready to submit?"
Agent detects this as validation request
→ Invokes /validation-orchestrator auto path:.
Orchestrator:
1. Detects plugin.json in current directory
2. Determines target is a plugin
3. Executes comprehensive plugin validation
4. Returns quality score and readiness assessment
Agent interprets results and guides user
```
### Error Handling
**Detection Failures**:
```
❌ Unable to detect target type at path: <path>
Troubleshooting:
- Ensure path contains .claude-plugin directory
- Verify plugin.json or marketplace.json exists
- Check file permissions
- Try specifying the path explicitly
Example:
/validation-orchestrator auto path:/correct/path
```
**Validation Failures**:
```
⚠️ Validation completed with errors
Target: <path>
Type: <detected-type>
Status: FAIL
See detailed output above for specific issues.
Next steps:
1. Fix critical errors (❌)
2. Address important warnings (⚠️)
3. Re-run validation: /validation-orchestrator auto path:.
```
**Ambiguous Structure**:
```
⚠️ Multiple targets detected
Found:
- marketplace.json at <path>
- plugin.json at <path>
Validating both...
Marketplace Results:
<marketplace validation output>
Plugin Results:
<plugin validation output>
```
### Output Format
```
Auto-Validation Report
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Detection Phase
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✅ Target detected: <marketplace|plugin>
📁 Path: <absolute-path>
📄 Manifest: <file-found>
🎯 Validation mode: <quick|comprehensive>
Validation Phase
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
<Full validation output from appropriate command>
Summary
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Quality Score: <0-100>/100 <⭐⭐⭐⭐⭐>
Rating: <Excellent|Good|Fair|Needs Improvement|Poor>
Publication Ready: <Yes|No|With Changes>
Critical Issues: <count>
Warnings: <count>
Recommendations: <count>
Next Steps:
<prioritized action items>
```
### Performance
- **Quick mode**: < 2 seconds (detection + quick validation)
- **Comprehensive mode**: 5-10 seconds (detection + full validation)
### Integration with Agent
This operation is ideal for agent invocation because:
- Single command, automatic behavior
- No user decision required (smart defaults)
- Comprehensive results
- Clear publication readiness assessment
The marketplace-validator agent can simply invoke:
```
/validation-orchestrator auto path:.
```
And get complete validation with no additional parameters needed.
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,182 @@
## Operation: Compare Quality Across Multiple Targets
Compare validation quality metrics across multiple plugins or marketplaces for relative analysis.
### Parameters from $ARGUMENTS
- **paths**: Comma-separated list of target paths (required)
- Format: `paths:"./plugin1,./plugin2,./plugin3"`
- Minimum: 2 targets
- Maximum: 10 targets (performance consideration)
- **metrics**: Specific metrics to compare (optional)
- Format: `metrics:"score,security,documentation"`
- Default: All metrics
- Available: `score`, `security`, `documentation`, `schema`, `best-practices`
### Comparison Workflow
1. **Parse Target Paths**
```
Split paths parameter by comma
Validate each path exists
Detect type for each target (marketplace or plugin)
Filter invalid paths with warning
```
2. **Execute Validation for Each Target**
```
FOR each target IN paths:
Run comprehensive validation
Capture quality score
Capture issue counts (critical, warnings, recommendations)
Capture layer results
Store in comparison matrix
```
3. **Calculate Comparative Metrics**
```
FOR each metric:
Rank targets (best to worst)
Calculate average score
Identify outliers
Note significant differences
```
4. **Generate Comparison Report**
```
Create side-by-side comparison table
Highlight best performers (green)
Highlight needs improvement (red)
Show relative rankings
Provide improvement suggestions
```
### Comparison Dimensions
**Overall Quality Score**
- Numeric score (0-100)
- Star rating
- Ranking position
**Security Posture**
- Critical security issues count
- Security warnings count
- Security score
**Documentation Quality**
- README completeness
- CHANGELOG presence
- Documentation score
**Schema Compliance**
- Required fields status
- Format compliance
- Schema score
**Best Practices**
- Standards compliance
- Convention adherence
- Best practices score
### Examples
**Compare two plugins:**
```bash
/validation-orchestrator compare paths:"./plugin1,./plugin2"
```
**Compare with specific metrics:**
```bash
/validation-orchestrator compare paths:"./p1,./p2,./p3" metrics:"score,security"
```
**Compare marketplaces:**
```bash
/validation-orchestrator compare paths:"./marketplace-a,./marketplace-b"
```
### Performance Considerations
- Each target requires full validation (5-10 seconds each)
- Total time = (number of targets) × (validation time)
- Validations can run in parallel for performance
- Limit to 10 targets to prevent excessive runtime
### Error Handling
- **Invalid path**: Skip and warn, continue with valid paths
- **Minimum targets not met**: Require at least 2 valid targets
- **Validation failure**: Include in report with status "Failed to validate"
- **Timeout on target**: Mark as "Validation timeout" and continue
### Output Format
```
Quality Comparison Report
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Comparing <N> targets
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Overall Rankings
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🥇 1st: <target-name> - <score>/100 ⭐⭐⭐⭐⭐
🥈 2nd: <target-name> - <score>/100 ⭐⭐⭐⭐
🥉 3rd: <target-name> - <score>/100 ⭐⭐⭐
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Detailed Comparison
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
| Metric | Target 1 | Target 2 | Target 3 | Best |
|-----------------|----------|----------|----------|--------|
| Quality Score | 92/100 | 78/100 | 65/100 | Target 1 |
| Security | ✅ Pass | ⚠️ Warnings | ❌ Fail | Target 1 |
| Documentation | ✅ Complete | ⚠️ Partial | ⚠️ Partial | Target 1 |
| Schema | ✅ Valid | ✅ Valid | ❌ Invalid | Target 1 |
| Best Practices | ✅ Compliant | ⚠️ Minor | ⚠️ Multiple | Target 1 |
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Key Insights
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Top Performer: Target 1 (92/100)
- Strengths: Excellent security, complete docs
- Areas to maintain: All aspects well-executed
Needs Most Improvement: Target 3 (65/100)
- Critical Issues: Schema validation, security
- Priority Actions:
1. Fix schema validation errors
2. Address security vulnerabilities
3. Complete documentation
Average Score: <calculated>/100
Score Range: <min> - <max>
Standard Deviation: <calculated>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Recommendations
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
For Target 2:
- Add CHANGELOG.md for better version tracking
- Expand README with more examples
- Review security warnings
For Target 3:
- Fix critical schema validation errors (blocking)
- Address all security issues (blocking)
- Complete missing documentation sections
```
### Use Cases
1. **Pre-submission Review**: Compare your plugin against reference plugins
2. **Quality Benchmarking**: Understand where you stand relative to others
3. **Marketplace Curation**: Compare plugins for marketplace inclusion
4. **Team Standards**: Ensure all team plugins meet minimum bar
5. **Continuous Improvement**: Track quality improvements over time
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,80 @@
## Operation: Detect Target Type
Automatically detect whether the validation target is a marketplace or plugin based on file structure.
### Parameters from $ARGUMENTS
- **path**: Path to the target directory (required)
- Format: `path:/path/to/target` or `path:.` for current directory
- Default: `.` (current directory)
### Detection Logic
Execute the target detection algorithm:
```bash
# Run the target detector script
bash .scripts/target-detector.sh "$TARGET_PATH"
```
The detection script will:
1. Check for `.claude-plugin/marketplace.json` → **Marketplace**
2. Check for `plugin.json` → **Plugin**
3. Check for both → **Multi-target**
4. Check for neither → **Unknown**
### Workflow
1. **Extract Path Parameter**
```
Parse $ARGUMENTS for path parameter
IF path not provided:
SET path="."
```
2. **Execute Detection**
```
RUN .scripts/target-detector.sh "$path"
CAPTURE output and exit code
```
3. **Report Results**
```
Output format:
{
"target_type": "marketplace|plugin|multi-target|unknown",
"path": "/absolute/path/to/target",
"files_found": ["marketplace.json", "plugin.json"],
"confidence": "high|medium|low"
}
```
### Examples
**Detect current directory:**
```bash
/validation-orchestrator detect path:.
```
**Detect specific path:**
```bash
/validation-orchestrator detect path:/path/to/plugin
```
### Error Handling
- **Path does not exist**: Report error with clear message
- **No .claude-plugin directory**: Suggest target may not be a plugin/marketplace
- **Ambiguous structure**: List all potential targets found
- **Permission denied**: Report access issue with remediation steps
### Output Format
Return a structured detection report with:
- Target type identified
- Confidence level
- Files found
- Recommended validation command
- Next steps
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,191 @@
## Operation: Run Comprehensive Validation
Execute complete quality audit with detailed analysis, scoring, and recommendations.
### Parameters from $ARGUMENTS
- **path**: Path to validation target (required)
- Format: `path:/path/to/target`
- Default: `.` (current directory)
- **report**: Generate detailed report (optional)
- Format: `report:true|false`
- Default: `true`
### Comprehensive Validation Scope
Execute **all validation layers**:
1. **Schema Validation** (via `/schema-validation full-schema`)
- JSON syntax and structure
- Required and recommended fields
- Format compliance
- Type validation
2. **Security Scanning** (via `/security-scan full-security-audit`)
- Secret detection
- URL safety checks
- File permission validation
- Vulnerability scanning
3. **Quality Analysis** (via `/quality-analysis full-analysis`)
- Quality score calculation (0-100)
- Star rating generation
- Issue prioritization
- Improvement recommendations
4. **Documentation Validation** (via `/documentation-validation full-docs`)
- README completeness
- CHANGELOG format
- LICENSE presence
- Example quality
5. **Best Practices Enforcement** (via `/best-practices full-standards`)
- Naming conventions
- Versioning compliance
- Category validation
- Keyword quality
### Workflow
1. **Initialize Validation**
```
Detect target type using .scripts/target-detector.sh
Create validation context
Set up result aggregation
```
2. **Execute Validation Layers** (parallel where possible)
```
PARALLEL:
Layer 1: /schema-validation full-schema path:"$path"
Layer 2: /security-scan full-security-audit path:"$path"
Layer 3: /documentation-validation full-docs path:"$path"
Layer 4: /best-practices full-standards path:"$path"
AFTER all complete:
Layer 5: /quality-analysis full-analysis path:"$path" context:"$all_results"
```
3. **Aggregate Results**
```
Execute .scripts/validation-dispatcher.py --mode=aggregate
Compile all layer results
Calculate overall quality score
Prioritize issues (Critical → Important → Recommended)
Generate actionable recommendations
```
4. **Generate Report** (if requested)
```
IF report parameter is true:
Generate comprehensive markdown report
Include all findings with details
Add remediation guidance
Provide next steps
```
### Integration with Other Skills
This operation orchestrates multiple skills:
- **schema-validation**: Structure and format checks
- **security-scan**: Security vulnerability detection
- **documentation-validation**: Documentation quality
- **best-practices**: Standards compliance
- **quality-analysis**: Scoring and recommendations
### Examples
**Comprehensive validation with report:**
```bash
/validation-orchestrator comprehensive path:. report:true
```
**Comprehensive validation, results only:**
```bash
/validation-orchestrator comprehensive path:/my-plugin report:false
```
### Performance Expectations
Comprehensive validation typically takes **5-10 seconds** depending on:
- Target size and complexity
- Number of files to scan
- Documentation completeness
- Script execution time
### Error Handling
- **Validation layer failure**: Continue with other layers, report partial results
- **Aggregation failure**: Return individual layer results
- **Report generation failure**: Return console output
- **Timeout**: Cancel and report completed layers only
### Output Format
```
Comprehensive Validation Results
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: <path>
Type: <marketplace|plugin>
Quality Score: <0-100>/100 <⭐⭐⭐⭐⭐>
Rating: <Excellent|Good|Fair|Needs Improvement|Poor>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Validation Layers
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Schema Validation: <✅ PASS | ❌ FAIL>
- Required fields: <status>
- Format compliance: <status>
- <additional details>
Security Scan: <✅ PASS | ❌ FAIL>
- Secret detection: <status>
- URL safety: <status>
- <additional details>
Documentation: <✅ PASS | ⚠️ WARNINGS>
- README: <status>
- CHANGELOG: <status>
- <additional details>
Best Practices: <✅ PASS | ⚠️ WARNINGS>
- Naming: <status>
- Versioning: <status>
- <additional details>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Issues Summary
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Critical Issues (must fix): <count>
❌ <issue 1>
❌ <issue 2>
Important Warnings (should fix): <count>
⚠️ <warning 1>
⚠️ <warning 2>
Recommendations (improve quality): <count>
💡 <recommendation 1>
💡 <recommendation 2>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Next Steps
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. <prioritized action 1>
2. <prioritized action 2>
3. <prioritized action 3>
Publication Readiness: <Ready | Needs Work | Not Ready>
```
### Report File Location
If report generation is enabled, save to:
```
<target-path>/validation-report-<timestamp>.md
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,112 @@
## Operation: Run Quick Validation
Execute fast essential validation checks for rapid feedback on critical issues only.
### Parameters from $ARGUMENTS
- **path**: Path to validation target (required)
- Format: `path:/path/to/target` or `path:.`
- Default: `.` (current directory)
### Quick Validation Scope
Focus on **critical issues only**:
- JSON syntax validation
- Required fields presence
- Basic format compliance
- Security red flags
**NOT included** (saves time):
- Comprehensive quality scoring
- Detailed documentation analysis
- Best practices enforcement
- Optional field checks
### Workflow
1. **Detect Target Type**
```
Execute .scripts/target-detector.sh "$path"
Determine if marketplace or plugin
```
2. **Route to Appropriate Quick Validator**
```
IF target_type == "marketplace":
Invoke existing /validate-quick for marketplace
ELSE IF target_type == "plugin":
Invoke existing /validate-quick for plugin
ELSE:
Report unable to determine target type
```
3. **Execute Quick Checks**
```
Run only critical validations:
- JSON syntax (MUST pass)
- Required fields (MUST be present)
- Format violations (MUST be valid)
- Security issues (MUST be clean)
```
4. **Aggregate Results**
```
Collect validation output
Count critical errors
Determine pass/fail status
```
### Integration with Existing Commands
This operation leverages the existing quick validation commands:
- `/validate-quick` (marketplace mode)
- `/validate-quick` (plugin mode)
The orchestrator adds:
- Automatic target detection
- Unified interface
- Consistent output format
- Progressive validation routing
### Examples
**Quick check current directory:**
```bash
/validation-orchestrator quick path:.
```
**Quick check specific plugin:**
```bash
/validation-orchestrator quick path:/path/to/my-plugin
```
### Performance Target
Quick validation should complete in **< 2 seconds** for typical targets.
### Error Handling
- **Target not found**: Clear error with path verification
- **Ambiguous target**: Ask user to specify marketplace or plugin
- **Invalid structure**: Report structural issues found
- **Validation script failure**: Fallback to manual checks
### Output Format
```
Quick Validation Results
━━━━━━━━━━━━━━━━━━━━━━
Target: <path>
Type: <marketplace|plugin>
Status: <PASS|FAIL>
Critical Issues: <count>
❌ <issue 1 if any>
❌ <issue 2 if any>
Result: <Ready for comprehensive validation | Fix critical issues first>
```
Return concise, actionable results focusing on blocking issues only.
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,64 @@
---
description: Intelligent validation orchestrator with auto-detection and progressive validation workflows
---
You are the Validation Orchestrator, the central coordinator for all marketplace and plugin validation operations.
## Your Mission
Parse `$ARGUMENTS` to determine the requested validation operation and intelligently route to the appropriate sub-command for execution.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **detect** → Read `.claude/commands/validation-orchestrator/detect-target.md`
- **quick** → Read `.claude/commands/validation-orchestrator/run-quick.md`
- **comprehensive** → Read `.claude/commands/validation-orchestrator/run-comprehensive.md`
- **compare** → Read `.claude/commands/validation-orchestrator/compare-quality.md`
- **auto** → Read `.claude/commands/validation-orchestrator/auto-validate.md`
## Argument Format
```
/validation-orchestrator <operation> [parameters]
```
### Examples
```bash
# Auto-detect target type and validate
/validation-orchestrator auto path:.
# Run quick validation checks
/validation-orchestrator quick path:/path/to/target
# Run comprehensive quality audit
/validation-orchestrator comprehensive path:/path/to/plugin
# Compare quality across multiple targets
/validation-orchestrator compare paths:"./plugin1,./plugin2"
# Detect target type only
/validation-orchestrator detect path:.
```
## Error Handling
If the operation is not recognized:
1. List all available operations with descriptions
2. Show example usage for each operation
3. Suggest the most likely intended operation based on context
## Base Directory
Base directory for this skill: `.claude/commands/validation-orchestrator/`
## Your Task
1. Parse `$ARGUMENTS` to extract the operation and parameters
2. Read the corresponding operation file from the base directory
3. Execute the instructions with the provided parameters
4. Return structured validation results
**Current Request**: $ARGUMENTS