Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:20:28 +08:00
commit b727790a9e
65 changed files with 16412 additions and 0 deletions

View File

@@ -0,0 +1,254 @@
#!/usr/bin/env bash
# ============================================================================
# Category Validator
# ============================================================================
# Purpose: Validate category against OpenPlugins approved category list
# Version: 1.0.0
# Usage: ./category-validator.sh <category> [--suggest]
# Returns: 0=valid, 1=invalid, 2=missing params
# ============================================================================
set -euo pipefail

# OpenPlugins approved categories (exactly 10).
# readonly prevents accidental mutation later in the script.
readonly APPROVED_CATEGORIES=(
  "development"
  "testing"
  "deployment"
  "documentation"
  "security"
  "database"
  "monitoring"
  "productivity"
  "quality"
  "collaboration"
)

# One-line description per approved category (keys must match the list above).
declare -rA CATEGORY_DESCRIPTIONS=(
  ["development"]="Code generation, scaffolding, refactoring"
  ["testing"]="Test generation, coverage, quality assurance"
  ["deployment"]="CI/CD, infrastructure, release automation"
  ["documentation"]="Docs generation, API documentation"
  ["security"]="Vulnerability scanning, secret detection"
  ["database"]="Schema design, migrations, queries"
  ["monitoring"]="Performance analysis, logging"
  ["productivity"]="Workflow automation, task management"
  ["quality"]="Linting, formatting, code review"
  ["collaboration"]="Team tools, communication"
)
# ============================================================================
# Functions
# ============================================================================
# Print help text to stdout and exit with the "missing params" status (2).
usage() {
  local script="$0"
  cat <<USAGE
Usage: ${script} <category> [--suggest]
Validate category against OpenPlugins approved category list.
Arguments:
category Category name to validate (required)
--suggest Show similar categories if invalid
Approved Categories (exactly 10):
1. development - Code generation, scaffolding
2. testing - Test generation, coverage
3. deployment - CI/CD, infrastructure
4. documentation - Docs generation, API docs
5. security - Vulnerability scanning
6. database - Schema design, migrations
7. monitoring - Performance analysis
8. productivity - Workflow automation
9. quality - Linting, formatting
10. collaboration - Team tools, communication
Exit codes:
0 - Valid category
1 - Invalid category
2 - Missing required parameters
USAGE
  exit 2
}
# Calculate Levenshtein distance for similarity
# Approximate edit distance between two strings.
# NOTE: despite the name this is NOT true Levenshtein distance; it counts
# positions whose characters differ (padded to the longer length), which is
# good enough for ranking the closest category names.
# Arguments: $1 - first string, $2 - second string
# Outputs:   distance (integer) on stdout
levenshtein_distance() {
  local s1="$1"
  local s2="$2"
  local len1=${#s1}
  local len2=${#s2}
  if [ "$s1" = "$s2" ]; then
    echo 0
    return
  fi
  local diff=0
  local max_len=$((len1 > len2 ? len1 : len2))
  local i
  for ((i = 0; i < max_len; i++)); do
    if [ "${s1:i:1}" != "${s2:i:1}" ]; then
      # BUGFIX: ((diff++)) evaluates to 0 on the first mismatch and returns
      # status 1, which aborts the whole script under `set -e`. A plain
      # arithmetic assignment always succeeds.
      diff=$((diff + 1))
    fi
  done
  echo "$diff"
}
# Find similar categories
# Suggest approved categories that resemble an unrecognized input.
# Globals:   APPROVED_CATEGORIES (read), CATEGORY_DESCRIPTIONS (read)
# Arguments: $1 - category name that failed validation
# Outputs:   numbered "Did you mean?" list on stdout
find_similar() {
  local category="$1"
  local suggestions=()
  # Keyword heuristics first: common misspellings and variations.
  case "${category,,}" in
    *develop*|*dev*)
      suggestions+=("development")
      ;;
    *test*)
      suggestions+=("testing")
      ;;
    *deploy*|*devops*|*ci*|*cd*)
      suggestions+=("deployment")
      ;;
    *doc*|*docs*)
      suggestions+=("documentation")
      ;;
    *secur*|*safe*)
      suggestions+=("security")
      ;;
    *data*|*db*|*sql*)
      suggestions+=("database")
      ;;
    *monitor*|*observ*|*log*)
      suggestions+=("monitoring")
      ;;
    *product*|*work*|*auto*)
      suggestions+=("productivity")
      ;;
    *qual*|*lint*|*format*)
      suggestions+=("quality")
      ;;
    *collab*|*team*|*comm*)
      suggestions+=("collaboration")
      ;;
  esac
  # No keyword hit: rank every approved category by distance, keeping ties.
  if [ ${#suggestions[@]} -eq 0 ]; then
    local best_dist=999
    local cat dist
    for cat in "${APPROVED_CATEGORIES[@]}"; do
      # Assign separately from the declaration so a failure inside the
      # command substitution is not masked by local's own exit status.
      dist=$(levenshtein_distance "${category,,}" "$cat")
      if [ "$dist" -lt "$best_dist" ]; then
        best_dist=$dist
        suggestions=("$cat")
      elif [ "$dist" -eq "$best_dist" ]; then
        suggestions+=("$cat")
      fi
    done
  fi
  # De-duplicate without unquoted word splitting (mapfile keeps entries intact).
  local unique_suggestions=()
  mapfile -t unique_suggestions < <(printf "%s\n" "${suggestions[@]}" | sort -u)
  if [ ${#unique_suggestions[@]} -gt 0 ]; then
    echo "Did you mean?"
    local count=1
    local suggestion
    for suggestion in "${unique_suggestions[@]}"; do
      echo " $count. $suggestion - ${CATEGORY_DESCRIPTIONS[$suggestion]}"
      count=$((count + 1))
    done
  fi
}
# List all approved categories
# Print the full numbered list of approved categories with descriptions.
# Globals: APPROVED_CATEGORIES (read), CATEGORY_DESCRIPTIONS (read)
list_all_categories() {
  cat <<'HEADER'
All Approved Categories:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
HEADER
  local idx name
  for idx in "${!APPROVED_CATEGORIES[@]}"; do
    name="${APPROVED_CATEGORIES[idx]}"
    printf "%-2d. %-15s - %s\n" "$((idx + 1))" "$name" "${CATEGORY_DESCRIPTIONS[$name]}"
  done
}
# ============================================================================
# Main
# ============================================================================
# Entry point: validate the category argument and emit a full report.
# Exit codes: 0 valid, 1 invalid, 2 missing/empty category.
main() {
  # Help / missing-argument handling.
  if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    usage
  fi
  local category="$1"
  local suggest=false
  if [ $# -gt 1 ] && [ "$2" = "--suggest" ]; then
    suggest=true
  fi
  if [ -z "$category" ]; then
    echo "ERROR: Category cannot be empty"
    echo ""
    list_all_categories
    exit 2
  fi
  # Matching is case-insensitive.
  local category_lower="${category,,}"
  local approved
  for approved in "${APPROVED_CATEGORIES[@]}"; do
    if [ "$category_lower" = "$approved" ]; then
      cat <<VALID
✅ PASS: Valid OpenPlugins category

Category: $approved
Valid: Yes

Description: ${CATEGORY_DESCRIPTIONS[$approved]}

Quality Score Impact: +5 points

The category is approved for OpenPlugins marketplace.
VALID
      exit 0
    fi
  done
  # Fell through the loop: not an approved category.
  cat <<INVALID
❌ FAIL: Invalid category

Category: $category
Valid: No

This category is not in the OpenPlugins approved list.

INVALID
  if [ "$suggest" = true ]; then
    find_similar "$category"
    echo ""
  fi
  list_all_categories
  cat <<FOOTER

Quality Score Impact: 0 points (fix to gain +5)

Choose the most appropriate category from the approved list.
FOOTER
  exit 1
}
# Dispatch to main with all command-line arguments.
main "$@"

View File

@@ -0,0 +1,392 @@
#!/usr/bin/env python3
"""
============================================================================
Keyword Quality Analyzer
============================================================================
Purpose: Analyze keyword quality, count, and relevance for OpenPlugins
Version: 1.0.0
Usage: ./keyword-analyzer.py <keywords> [--min N] [--max N]
Returns: 0=valid, 1=count violation, 2=quality issues, 3=missing params
============================================================================
"""
import sys
import re  # NOTE(review): `re` is not referenced anywhere in this script — candidate for removal
from typing import List, Tuple, Dict

# Default bounds on how many keywords a plugin should declare.
DEFAULT_MIN_KEYWORDS = 3
DEFAULT_MAX_KEYWORDS = 7

# Generic terms to avoid. The first two rows are vague nouns, the last two
# rows are marketing fluff; check_generic_terms() reports the two groups
# separately.
GENERIC_BLOCKLIST = [
    'plugin', 'tool', 'utility', 'helper', 'app',
    'code', 'software', 'program', 'system',
    'awesome', 'best', 'perfect', 'great', 'super',
    'amazing', 'cool', 'nice', 'good', 'excellent'
]

# OpenPlugins categories (should not be duplicated as keywords;
# enforced by check_category_duplication()).
CATEGORIES = [
    'development', 'testing', 'deployment', 'documentation',
    'security', 'database', 'monitoring', 'productivity',
    'quality', 'collaboration'
]

# Keyword buckets used by analyze_balance() to check the
# functionality/technology mix of a keyword set.
FUNCTIONALITY_KEYWORDS = [
    'testing', 'deployment', 'formatting', 'linting', 'migration',
    'generation', 'automation', 'analysis', 'monitoring', 'scanning',
    'refactoring', 'debugging', 'profiling', 'optimization'
]
TECHNOLOGY_KEYWORDS = [
    'python', 'javascript', 'typescript', 'docker', 'kubernetes',
    'react', 'vue', 'angular', 'node', 'bash', 'terraform',
    'postgresql', 'mysql', 'redis', 'aws', 'azure', 'gcp'
]
def usage():
    """Print usage/help text to stdout and exit with status 3.

    Note: status 3 is the "missing parameters" code; it is also used for
    -h/--help invocations.
    """
    print("""Usage: keyword-analyzer.py <keywords> [--min N] [--max N]
Analyze keyword quality and relevance for OpenPlugins standards.
Arguments:
keywords Comma-separated list of keywords (required)
--min N Minimum keyword count (default: 3)
--max N Maximum keyword count (default: 7)
Requirements:
- Count: 3-7 keywords (optimal: 5-6)
- No generic terms (plugin, tool, awesome)
- No marketing fluff (best, perfect, amazing)
- Mix of functionality and technology
- No redundant variations
Good examples:
"testing,pytest,automation,tdd,python"
"deployment,kubernetes,ci-cd,docker"
"linting,javascript,code-quality"
Bad examples:
"plugin,tool,awesome" (generic)
"test,testing,tests" (redundant)
"development" (only one, too generic)
Exit codes:
0 - Valid keyword set
1 - Count violation (too few or too many)
2 - Quality issues (generic terms, duplicates)
3 - Missing required parameters
""")
    sys.exit(3)
def parse_keywords(keyword_string: str) -> List[str]:
    """Normalize a comma-separated keyword string.

    Lowercases each entry, strips surrounding whitespace, drops empty
    entries, and removes duplicates while keeping first-seen order.
    """
    if not keyword_string:
        return []
    cleaned = (chunk.strip().lower() for chunk in keyword_string.split(','))
    non_empty = [kw for kw in cleaned if kw]
    # dict.fromkeys preserves insertion order, giving order-stable dedupe.
    return list(dict.fromkeys(non_empty))
def check_generic_terms(keywords: List[str]) -> Tuple[List[str], List[str]]:
    """Split blocklisted keywords into generic nouns and marketing fluff.

    Returns:
        (generic_terms, marketing_terms), each preserving input order.
    """
    marketing_fluff = frozenset([
        'awesome', 'best', 'perfect', 'great', 'super',
        'amazing', 'cool', 'nice', 'good', 'excellent',
    ])
    generic_terms: List[str] = []
    marketing_terms: List[str] = []
    for kw in keywords:
        if kw not in GENERIC_BLOCKLIST:
            continue
        bucket = marketing_terms if kw in marketing_fluff else generic_terms
        bucket.append(kw)
    return generic_terms, marketing_terms
def check_redundant_variations(keywords: List[str]) -> List[Tuple[str, str]]:
    """Find pairs of keywords that look like variations of each other.

    Returns:
        List of (keyword1, keyword2) pairs, in original pair order.
    """
    redundant: List[Tuple[str, str]] = []
    total = len(keywords)
    for a in range(total):
        first = keywords[a]
        for b in range(a + 1, total):
            second = keywords[b]
            if first in second or second in first:
                redundant.append((first, second))
            # NOTE(review): stripping trailing 's' makes one word a prefix of
            # the other, so the substring branch above always fires first and
            # this branch appears unreachable; kept for behavior parity.
            elif first.rstrip('s') == second or second.rstrip('s') == first:
                redundant.append((first, second))
    return redundant
def check_category_duplication(keywords: List[str]) -> List[str]:
    """Return keywords that exactly duplicate an approved category name."""
    return [kw for kw in keywords if kw in CATEGORIES]
def analyze_balance(keywords: List[str]) -> Dict[str, int]:
    """Count keywords per type bucket.

    Returns:
        Dict with 'functionality', 'technology', and 'other' counts.
    """
    counts = {'functionality': 0, 'technology': 0, 'other': 0}
    for kw in keywords:
        if kw in FUNCTIONALITY_KEYWORDS:
            bucket = 'functionality'
        elif kw in TECHNOLOGY_KEYWORDS:
            bucket = 'technology'
        else:
            bucket = 'other'
        counts[bucket] += 1
    return counts
def calculate_quality_score(
    keywords: List[str],
    generic_terms: List[str],
    marketing_terms: List[str],
    redundant: List[Tuple[str, str]],
    category_dups: List[str],
    min_count: int,
    max_count: int
) -> Tuple[int, List[str]]:
    """
    Calculate a keyword-quality score and collect human-readable issues.

    Starts from a perfect 10 and subtracts a fixed penalty per rule; the
    final score is clamped at 0. Issue strings are appended in the order
    the rules run, so callers can rely on that ordering.

    Returns:
        (score out of 10, list of issues)
    """
    score = 10
    issues = []
    # Count violations: too few (-5) is penalized harder than too many (-3).
    count = len(keywords)
    if count < min_count:
        score -= 5
        issues.append(f"Too few keywords ({count} < {min_count} minimum)")
    elif count > max_count:
        score -= 3
        issues.append(f"Too many keywords ({count} > {max_count} maximum)")
    # Generic terms: -2 each
    if generic_terms:
        score -= len(generic_terms) * 2
        issues.append(f"Generic terms detected: {', '.join(generic_terms)}")
    # Marketing terms: -2 each
    if marketing_terms:
        score -= len(marketing_terms) * 2
        issues.append(f"Marketing terms detected: {', '.join(marketing_terms)}")
    # Redundant variations: -2 per pair
    if redundant:
        score -= len(redundant) * 2
        redundant_str = ', '.join([f"{a}/{b}" for a, b in redundant])
        issues.append(f"Redundant variations: {redundant_str}")
    # Category name duplication: -1 each
    if category_dups:
        score -= len(category_dups) * 1
        issues.append(f"Category name duplication: {', '.join(category_dups)}")
    # Single-character keywords: -2 each
    single_char = [k for k in keywords if len(k) == 1]
    if single_char:
        score -= len(single_char) * 2
        issues.append(f"Single-character keywords: {', '.join(single_char)}")
    # Balance check: -2 when neither functional nor technical keywords appear
    balance = analyze_balance(keywords)
    if balance['functionality'] == 0 and balance['technology'] == 0:
        score -= 2
        issues.append("No functional or technical keywords")
    return max(0, score), issues
def suggest_improvements(
    keywords: List[str],
    generic_terms: List[str],
    marketing_terms: List[str],
    redundant: List[Tuple[str, str]],
    min_count: int,
    max_count: int
) -> List[str]:
    """Build an ordered list of actionable improvement suggestions."""
    tips: List[str] = []
    # Drop generic/marketing terms first.
    if generic_terms or marketing_terms:
        tips.extend([
            "Remove generic/marketing terms",
            " Replace with specific functionality (e.g., testing, deployment, formatting)",
        ])
    # Collapse redundant pairs.
    if redundant:
        tips.append("Consolidate redundant variations")
        tips.extend(f" Keep one of: {first}, {second}" for first, second in redundant)
    # Count adjustments.
    total = len(keywords)
    if total < min_count:
        missing = min_count - total
        tips.append(f"Add {missing} more relevant keyword(s)")
        tips.append(" Consider: specific technologies, use-cases, or functionalities")
    elif total > max_count:
        surplus = total - max_count
        tips.append(f"Remove {surplus} least relevant keyword(s)")
    # Balance suggestions.
    mix = analyze_balance(keywords)
    if mix['functionality'] == 0:
        tips.append("Add functionality keywords (e.g., testing, automation, deployment)")
    if mix['technology'] == 0:
        tips.append("Add technology keywords (e.g., python, docker, kubernetes)")
    return tips
def main():
    """CLI entry point: parse argv, analyze the keyword list, print a report.

    Exit codes: 0 pass/warning, 1 count violation, 2 quality failure,
    3 missing parameters (also used for -h/--help).
    """
    if len(sys.argv) < 2 or sys.argv[1] in ['-h', '--help']:
        usage()
    keyword_string = sys.argv[1]
    # Parse optional --min/--max arguments.
    min_count = DEFAULT_MIN_KEYWORDS
    max_count = DEFAULT_MAX_KEYWORDS
    # NOTE(review): int() on a malformed flag value raises an uncaught
    # ValueError; the flag's value is also revisited as `arg` on the next
    # iteration (harmless, since a bare number matches neither flag).
    for i, arg in enumerate(sys.argv[2:], start=2):
        if arg == '--min' and i + 1 < len(sys.argv):
            min_count = int(sys.argv[i + 1])
        elif arg == '--max' and i + 1 < len(sys.argv):
            max_count = int(sys.argv[i + 1])
    # Parse keywords
    keywords = parse_keywords(keyword_string)
    if not keywords:
        print("ERROR: Keywords cannot be empty\n")
        print("Provide 3-7 relevant keywords describing your plugin.\n")
        print("Examples:")
        print(' "testing,pytest,automation"')
        print(' "deployment,kubernetes,ci-cd"')
        sys.exit(3)
    # Run all analyses up front; scoring consumes their results.
    count = len(keywords)
    generic_terms, marketing_terms = check_generic_terms(keywords)
    redundant = check_redundant_variations(keywords)
    category_dups = check_category_duplication(keywords)
    balance = analyze_balance(keywords)
    # Calculate quality score
    score, issues = calculate_quality_score(
        keywords, generic_terms, marketing_terms,
        redundant, category_dups, min_count, max_count
    )
    # Map score/count onto status and exit code. Count violations dominate
    # quality issues; a mid-range score (7-8) is a warning but still exits 0.
    if score >= 9 and min_count <= count <= max_count:
        status = "✅ PASS"
        exit_code = 0
    elif count < min_count or count > max_count:
        status = "❌ FAIL"
        exit_code = 1
    elif score < 7:
        status = "❌ FAIL"
        exit_code = 2
    else:
        status = "⚠️ WARNING"
        exit_code = 0
    # Print results
    print(f"{status}: Keyword validation\n")
    print(f"Keywords: {', '.join(keywords)}")
    print(f"Count: {count} (valid range: {min_count}-{max_count})")
    print(f"Quality Score: {score}/10\n")
    if issues:
        print("Issues Found:")
        for issue in issues:
            print(f" - {issue}")
        print()
    # Balance breakdown
    print("Breakdown:")
    print(f" - Functionality: {balance['functionality']} keywords")
    print(f" - Technology: {balance['technology']} keywords")
    print(f" - Other: {balance['other']} keywords")
    print()
    # Score impact summary
    if score >= 9:
        print("Quality Score Impact: +10 points (excellent)\n")
        if exit_code == 0:
            print("Excellent keyword selection for discoverability!")
    elif score >= 7:
        print("Quality Score Impact: +7 points (good)\n")
        print("Good keywords, but could be improved.")
    else:
        print("Quality Score Impact: 0 points (fix to gain +10)\n")
        print("Keywords need significant improvement.")
    # Actionable suggestions, only when something was flagged.
    if issues:
        suggestions = suggest_improvements(
            keywords, generic_terms, marketing_terms,
            redundant, min_count, max_count
        )
        if suggestions:
            print("\nSuggestions:")
            for suggestion in suggestions:
                print(f" {suggestion}")
    sys.exit(exit_code)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,224 @@
#!/usr/bin/env bash
# ============================================================================
# Naming Convention Validator
# ============================================================================
# Purpose: Validate plugin names against OpenPlugins lowercase-hyphen convention
# Version: 1.0.0
# Usage: ./naming-validator.sh <name> [--suggest]
# Returns: 0=valid, 1=invalid, 2=missing params
# ============================================================================
set -euo pipefail

# OpenPlugins naming pattern: lowercase alphanumeric words joined by single
# hyphens (no leading/trailing/consecutive hyphens). readonly prevents
# accidental mutation later in the script.
readonly NAMING_PATTERN='^[a-z0-9]+(-[a-z0-9]+)*$'

# Generic terms that make a plugin name uninformative (warning only).
readonly GENERIC_TERMS=("plugin" "tool" "utility" "helper" "app" "code" "software")
# ============================================================================
# Functions
# ============================================================================
# Print help text to stdout and exit with the "missing params" status (2).
usage() {
  local script="$0"
  cat <<USAGE
Usage: ${script} <name> [--suggest]
Validate plugin name against OpenPlugins naming convention.
Arguments:
name Plugin name to validate (required)
--suggest Auto-suggest corrected name if invalid
Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$
Valid examples:
- code-formatter
- test-runner
- api-client
Invalid examples:
- Code-Formatter (uppercase)
- test_runner (underscore)
- -helper (leading hyphen)
Exit codes:
0 - Valid naming convention
1 - Invalid naming convention
2 - Missing required parameters
USAGE
  exit 2
}
# Convert to lowercase-hyphen format
# Normalize an arbitrary name into lowercase-hyphen form.
# Arguments: $1 - raw plugin name
# Outputs:   corrected candidate name on stdout (may be empty if nothing
#            survives the cleanup)
suggest_correction() {
  local name="$1"
  local corrected="$name"
  # Lowercase, then map underscores and spaces to hyphens.
  corrected="${corrected,,}"
  corrected="${corrected//_/-}"
  corrected="${corrected// /-}"
  # BUGFIX: use printf, not echo — intermediate values such as "-n" were
  # swallowed as echo options, producing an empty suggestion. A single sed
  # pass applies the remaining cleanups in order: strip illegal characters,
  # trim edge hyphens, collapse hyphen runs.
  corrected="$(printf '%s\n' "$corrected" | sed 's/[^a-z0-9-]//g; s/^-*//; s/-*$//; s/--*/-/g')"
  printf '%s\n' "$corrected"
}
# Check for generic terms
# Warn when the name is, or contains, a hyphen-delimited generic term.
# Globals:   GENERIC_TERMS (read)
# Arguments: $1 - plugin name
# Outputs:   warning line on stdout when generic terms are found
# Returns:   0 when clean, 1 when generic term(s) present
check_generic_terms() {
  local name="$1"
  local term
  local hits=()
  for term in "${GENERIC_TERMS[@]}"; do
    case "$name" in
      "$term"|*"-$term"|"$term-"*|*"-$term-"*)
        hits+=("$term")
        ;;
    esac
  done
  if [ ${#hits[@]} -gt 0 ]; then
    echo "Warning: Contains generic term(s): ${hits[*]}"
    return 1
  fi
  return 0
}
# Find specific issues in the name
# Diagnose naming-convention violations in a plugin name.
# Arguments: $1 - name to inspect
# Outputs:   one " - <description>" line per issue found
# Returns:   0 when no issues, 1 when at least one issue was printed
find_issues() {
  local name="$1"
  local issues=()
  # Uppercase letters
  if [[ "$name" =~ [A-Z] ]]; then
    local uppercase
    # BUGFIX: use printf, not echo — a name like "-E" would be consumed as
    # an echo option. Assign separately from `local` so the pipeline's
    # exit status is not masked.
    uppercase=$(printf '%s\n' "$name" | grep -o '[A-Z]' | tr '\n' ',' | sed 's/,$//')
    issues+=("Contains uppercase characters: $uppercase")
  fi
  # Underscores
  if [[ "$name" =~ _ ]]; then
    issues+=("Contains underscores instead of hyphens")
  fi
  # Spaces
  if [[ "$name" =~ \ ]]; then
    issues+=("Contains spaces")
  fi
  # Leading hyphen
  if [[ "$name" =~ ^- ]]; then
    issues+=("Starts with hyphen")
  fi
  # Trailing hyphen
  if [[ "$name" =~ -$ ]]; then
    issues+=("Ends with hyphen")
  fi
  # Consecutive hyphens
  if [[ "$name" =~ -- ]]; then
    issues+=("Contains consecutive hyphens")
  fi
  # Anything outside letters, digits, underscore, space, hyphen
  if [[ "$name" =~ [^a-zA-Z0-9_\ -] ]]; then
    issues+=("Contains special characters")
  fi
  # Length checks
  if [ ${#name} -eq 0 ]; then
    issues+=("Name is empty")
  elif [ ${#name} -eq 1 ]; then
    issues+=("Name is too short (single character)")
  fi
  # Print findings, one per line.
  if [ ${#issues[@]} -gt 0 ]; then
    local issue
    for issue in "${issues[@]}"; do
      echo " - $issue"
    done
    return 1
  fi
  return 0
}
# ============================================================================
# Main
# ============================================================================
# Entry point: validate one name argument, optionally suggesting a fix.
# Exit codes: 0 valid, 1 invalid, 2 missing/empty name.
main() {
  # Help / missing-argument handling.
  if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    usage
  fi
  local name="$1"
  local suggest=false
  if [ $# -gt 1 ] && [ "$2" = "--suggest" ]; then
    suggest=true
  fi
  if [ -z "$name" ]; then
    echo "ERROR: Name cannot be empty"
    exit 2
  fi
  # Validate against pattern
  if [[ "$name" =~ $NAMING_PATTERN ]]; then
    echo "✅ PASS: Valid naming convention"
    echo "Name: $name"
    echo "Format: lowercase-hyphen"
    # Generic-term check is advisory only; it never changes the exit code.
    if ! check_generic_terms "$name"; then
      echo ""
      echo "Recommendation: Use more descriptive, functionality-specific names"
    fi
    exit 0
  else
    echo "❌ FAIL: Invalid naming convention"
    echo "Name: $name"
    echo ""
    echo "Issues Found:"
    # BUGFIX: find_issues returns 1 after reporting problems; without
    # `|| true` that status aborts the script here under `set -e`, so the
    # suggestion, required pattern, and examples below were never printed.
    find_issues "$name" || true
    if [ "$suggest" = true ]; then
      local correction
      # Assign separately from `local` so a failure inside the command
      # substitution is not masked by local's own exit status.
      correction=$(suggest_correction "$name")
      echo ""
      echo "Suggested Correction: $correction"
      # Validate the suggestion
      if [[ "$correction" =~ $NAMING_PATTERN ]]; then
        echo "✓ Suggestion is valid"
      else
        echo "⚠ Manual correction may be needed"
      fi
    fi
    echo ""
    echo "Required Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$"
    echo ""
    echo "Valid Examples:"
    echo " - code-formatter"
    echo " - test-runner"
    echo " - api-client"
    exit 1
  fi
}
# Dispatch to main with all command-line arguments.
main "$@"

View File

@@ -0,0 +1,234 @@
#!/usr/bin/env python3
"""
============================================================================
Semantic Version Validator
============================================================================
Purpose: Validate version strings against Semantic Versioning 2.0.0
Version: 1.0.0
Usage: ./semver-checker.py <version> [--strict]
Returns: 0=valid, 1=invalid, 2=missing params, 3=strict mode violation
============================================================================
"""
import re
import sys
from typing import Any, Dict, List, Optional, Tuple
# Semantic versioning patterns
# Strict core form: MAJOR.MINOR.PATCH with no leading zeros.
# NOTE(review): STRICT_SEMVER_PATTERN is not referenced elsewhere in this
# script — strictness is derived from parse_semver()'s components instead.
STRICT_SEMVER_PATTERN = r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)$'
# Full SemVer 2.0.0 grammar: core version, optional pre-release (group 4),
# optional build metadata (group 5).
FULL_SEMVER_PATTERN = r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
def usage():
    """Print usage/help text to stdout and exit with status 2.

    Note: status 2 is the "missing parameters" code; it is also used for
    -h/--help invocations.
    """
    print("""Usage: semver-checker.py <version> [--strict]
Validate version string against Semantic Versioning 2.0.0 specification.
Arguments:
version Version string to validate (required)
--strict Enforce strict MAJOR.MINOR.PATCH format (no pre-release/build)
Pattern (strict): MAJOR.MINOR.PATCH (e.g., 1.2.3)
Pattern (full): MAJOR.MINOR.PATCH[-PRERELEASE][+BUILD]
Valid examples:
- 1.0.0 (strict)
- 1.2.3 (strict)
- 1.0.0-alpha.1 (full)
- 1.2.3+build.20241013 (full)
Invalid examples:
- 1.0 (missing PATCH)
- v1.0.0 (has prefix)
- 1.2.x (placeholder)
Exit codes:
0 - Valid semantic version
1 - Invalid format
2 - Missing required parameters
3 - Strict mode violation (valid semver, but has pre-release/build)
Reference: https://semver.org/
""")
    sys.exit(2)
def parse_semver(version: str) -> Optional[Dict[str, Any]]:
    """Parse a semantic version string into its components.

    Args:
        version: Candidate version string (no 'v' prefix allowed).

    Returns:
        Dict with keys 'major'/'minor'/'patch' (ints), 'prerelease'/'build'
        (str or None), and 'is_strict' (True when neither pre-release nor
        build metadata is present); None if the string is not valid
        SemVer 2.0.0.
    """
    # FIX: annotation previously used the builtin `any` (a function) as a
    # type parameter; `typing.Any` is the correct spelling.
    match = re.match(FULL_SEMVER_PATTERN, version)
    if not match:
        return None
    major, minor, patch, prerelease, build = match.groups()
    return {
        'major': int(major),
        'minor': int(minor),
        'patch': int(patch),
        # Unmatched optional groups are already None; `or None` additionally
        # maps a (theoretically) empty match to None.
        'prerelease': prerelease or None,
        'build': build or None,
        'is_strict': prerelease is None and build is None
    }
def find_issues(version: str) -> List[str]:
    """List human-readable reasons why a version string is not valid semver."""
    problems: List[str] = []
    # Common 'v' prefix mistake.
    if version[:1] in ('v', 'V'):
        problems.append("Starts with 'v' prefix (remove it)")
    # Component count on the raw string.
    dotted = version.split('.')
    if len(dotted) < 3:
        problems.append(f"Missing components (has {len(dotted)}, needs 3: MAJOR.MINOR.PATCH)")
    elif len(dotted) > 3 and '-' not in version and '+' not in version:
        problems.append(f"Too many components (has {len(dotted)}, expected 3)")
    # Placeholder characters.
    if 'x' in version.lower() or '*' in version:
        problems.append("Contains placeholder values (x or *)")
    # Inspect the core (pre-release/build stripped) version numbers.
    core = version.split('-')[0].split('+')[0]
    core_parts = core.split('.')
    labels = ['MAJOR', 'MINOR', 'PATCH']
    for idx, piece in enumerate(core_parts):
        if not piece.isdigit():
            label = labels[idx] if idx < 3 else 'component'
            problems.append(f"{label} is not numeric: '{piece}'")
    for idx, piece in enumerate(core_parts[:3]):
        if piece.startswith('0') and len(piece) > 1:
            problems.append(f"{labels[idx]} has leading zero: '{piece}'")
    # Symbolic tags that are not versions at all.
    if version in ('latest', 'stable', 'dev', 'master', 'main'):
        problems.append("Using non-numeric identifier (not a version)")
    return problems
def validate_version(version: str, strict: bool = False) -> Tuple[bool, int, str]:
    """
    Validate a semantic version string and build a full report message.

    Args:
        version: Version string to check.
        strict: When True, a valid version carrying pre-release or build
            metadata is reported as a strict-mode violation (exit code 3).

    Returns:
        (is_valid, exit_code, message) where exit_code is
        0=valid, 1=invalid format, 2=empty input, 3=strict-mode violation.
    """
    if not version or version.strip() == '':
        return False, 2, "ERROR: Version cannot be empty"
    # Parse the version
    parsed = parse_semver(version)
    if parsed is None:
        # Invalid format: explain why via heuristics and show valid examples.
        issues = find_issues(version)
        message = "❌ FAIL: Invalid semantic version format\n\n"
        message += f"Version: {version}\n"
        message += "Valid: No\n\n"
        message += "Issues Found:\n"
        if issues:
            for issue in issues:
                message += f" - {issue}\n"
        else:
            # Pattern rejected it but no specific heuristic matched.
            message += " - Does not match semantic versioning pattern\n"
        message += "\nRequired Format: MAJOR.MINOR.PATCH\n"
        message += "\nExamples:\n"
        message += " - 1.0.0 (initial release)\n"
        message += " - 1.2.3 (standard version)\n"
        message += " - 2.0.0-beta.1 (pre-release)\n"
        message += "\nReference: https://semver.org/"
        return False, 1, message
    # Check strict mode: valid semver that carries pre-release/build metadata.
    if strict and not parsed['is_strict']:
        message = "⚠️ WARNING: Valid semver, but not strict format\n\n"
        message += f"Version: {version}\n"
        message += "Format: Valid semver with "
        if parsed['prerelease']:
            message += "pre-release"
        if parsed['build']:
            message += " and " if parsed['prerelease'] else ""
            message += "build metadata"
        message += "\n\n"
        message += "Note: OpenPlugins recommends strict MAJOR.MINOR.PATCH format\n"
        message += "without pre-release or build metadata for marketplace submissions.\n\n"
        message += f"Recommended: {parsed['major']}.{parsed['minor']}.{parsed['patch']} (for stable release)\n\n"
        message += "Quality Score Impact: +5 points (valid, but consider strict format)"
        return True, 3, message
    # Valid version: assemble the success report component by component.
    message = "✅ PASS: Valid semantic version\n\n"
    message += f"Version: {version}\n"
    message += "Format: "
    if parsed['is_strict']:
        message += "MAJOR.MINOR.PATCH (strict)\n"
    else:
        message += "MAJOR.MINOR.PATCH"
        if parsed['prerelease']:
            message += "-PRERELEASE"
        if parsed['build']:
            message += "+BUILD"
        message += "\n"
    message += "Valid: Yes\n\n"
    message += "Components:\n"
    message += f" - MAJOR: {parsed['major']}"
    if parsed['major'] > 0:
        message += " (breaking changes)"
    message += "\n"
    message += f" - MINOR: {parsed['minor']}"
    if parsed['minor'] > 0:
        message += " (new features)"
    message += "\n"
    message += f" - PATCH: {parsed['patch']}"
    if parsed['patch'] > 0:
        message += " (bug fixes)"
    message += "\n"
    if parsed['prerelease']:
        message += f" - Pre-release: {parsed['prerelease']}\n"
    if parsed['build']:
        message += f" - Build: {parsed['build']}\n"
    message += "\n"
    if parsed['prerelease']:
        message += "Note: Pre-release versions indicate unstable releases.\n"
        message += "Remove pre-release identifier for stable marketplace submission.\n\n"
    message += "Quality Score Impact: +5 points\n\n"
    message += "The version follows Semantic Versioning 2.0.0 specification."
    return True, 0, message
def main():
    """CLI entry point: validate argv[1] and exit with the validator's code."""
    cli_args = sys.argv[1:]
    if not cli_args or cli_args[0] in ('-h', '--help'):
        usage()
    strict_mode = '--strict' in cli_args
    _, exit_code, report = validate_version(cli_args[0], strict_mode)
    print(report)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,325 @@
## Operation: Check Categories
Validate category assignment against OpenPlugins standard category list.
### Parameters from $ARGUMENTS
- **category**: Category name to validate (required)
- **suggest**: Show similar categories if invalid (optional, default: true)
### OpenPlugins Standard Categories
OpenPlugins defines **exactly 10 approved categories**:
1. **development** - Code generation, scaffolding, refactoring
2. **testing** - Test generation, coverage, quality assurance
3. **deployment** - CI/CD, infrastructure, release automation
4. **documentation** - Docs generation, API documentation
5. **security** - Vulnerability scanning, secret detection
6. **database** - Schema design, migrations, queries
7. **monitoring** - Performance analysis, logging
8. **productivity** - Workflow automation, task management
9. **quality** - Linting, formatting, code review
10. **collaboration** - Team tools, communication
### Category Selection Guidance
**development**:
- Code generators
- Project scaffolding
- Refactoring tools
- Boilerplate generation
**testing**:
- Test generators
- Test runners
- Coverage tools
- QA automation
**deployment**:
- CI/CD pipelines
- Infrastructure as code
- Release automation
- Environment management
**documentation**:
- README generators
- API doc generation
- Changelog automation
- Architecture diagrams
**security**:
- Secret scanning
- Vulnerability detection
- Security audits
- Compliance checking
**database**:
- Schema design
- Migration tools
- Query builders
- Database testing
**monitoring**:
- Performance profiling
- Log analysis
- Metrics collection
- Alert systems
**productivity**:
- Task automation
- Workflow orchestration
- Time management
- Note-taking
**quality**:
- Linters
- Code formatters
- Code review tools
- Complexity analysis
**collaboration**:
- Team communication
- Code review
- Knowledge sharing
- Project management
### Workflow
1. **Extract Category from Arguments**
```
Parse $ARGUMENTS to extract category parameter
If category not provided, return error
Normalize to lowercase
```
2. **Execute Category Validator**
```bash
Execute .scripts/category-validator.sh "$category"
Exit codes:
- 0: Valid category
- 1: Invalid category
- 2: Missing required parameters
```
3. **Check Against Approved List**
```
Compare category against 10 approved categories
Use exact string matching (case-insensitive)
```
4. **Suggest Alternatives (if invalid)**
```
IF category invalid AND suggest:true:
Calculate similarity scores
Suggest closest matching categories
Show category descriptions
```
5. **Return Validation Report**
```
Format results:
- Status: PASS/FAIL
- Category: <provided-category>
- Valid: yes/no
- Description: <category-description> (if valid)
- Suggestions: <list> (if invalid)
- Score impact: +5 points (if valid)
```
### Examples
```bash
# Valid category
/best-practices categories category:development
# Result: PASS - Valid OpenPlugins category
# Invalid category (typo)
/best-practices categories category:developement
# Result: FAIL - Did you mean: development?
# Invalid category (plural)
/best-practices categories category:tests
# Result: FAIL - Did you mean: testing?
# Invalid category (custom)
/best-practices categories category:utilities
# Result: FAIL - Not in approved list
# Suggestions: productivity, quality, development
# Case insensitive
/best-practices categories category:TESTING
# Result: PASS - Valid (normalized to: testing)
```
### Error Handling
**Missing category parameter**:
```
ERROR: Missing required parameter 'category'
Usage: /best-practices categories category:<category-name>
Example: /best-practices categories category:development
```
**Empty category**:
```
ERROR: Category cannot be empty
Choose from 10 approved OpenPlugins categories:
development, testing, deployment, documentation, security,
database, monitoring, productivity, quality, collaboration
```
### Output Format
**Success (Valid Category)**:
```
✅ Category Validation: PASS
Category: development
Valid: Yes
Description: Code generation, scaffolding, refactoring
Use Cases:
- Code generators
- Project scaffolding tools
- Refactoring utilities
- Boilerplate generation
Quality Score Impact: +5 points
The category is approved for OpenPlugins marketplace.
```
**Failure (Invalid Category)**:
```
❌ Category Validation: FAIL
Category: developement
Valid: No
This category is not in the OpenPlugins approved list.
Did you mean?
1. development - Code generation, scaffolding, refactoring
2. deployment - CI/CD, infrastructure, release automation
All Approved Categories:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. development - Code generation, scaffolding
2. testing - Test generation, coverage
3. deployment - CI/CD, infrastructure
4. documentation - Docs generation, API docs
5. security - Vulnerability scanning
6. database - Schema design, migrations
7. monitoring - Performance analysis
8. productivity - Workflow automation
9. quality - Linting, formatting
10. collaboration - Team tools, communication
Quality Score Impact: 0 points (fix to gain +5)
Choose the most appropriate category from the approved list.
```
**Failure (Multiple Matches)**:
```
❌ Category Validation: FAIL
Category: code-tools
Valid: No
This category is not approved. Consider these alternatives:
Best Matches:
1. development - Code generation, scaffolding, refactoring
2. quality - Linting, formatting, code review
3. productivity - Workflow automation, task management
Which fits your plugin best?
- If generating/scaffolding code → development
- If analyzing/formatting code → quality
- If automating workflows → productivity
Quality Score Impact: 0 points (fix to gain +5)
```
### Category Decision Tree
Use this to select the right category:
```
Does your plugin...
Generate or scaffold code?
→ development
Run tests or check quality?
→ testing (if running tests)
→ quality (if analyzing/formatting code)
Deploy or manage infrastructure?
→ deployment
Generate documentation?
→ documentation
Scan for security issues?
→ security
Work with databases?
→ database
Monitor performance or logs?
→ monitoring
Automate workflows or tasks?
→ productivity
Improve code quality?
→ quality
Facilitate team collaboration?
→ collaboration
```
### Common Mistakes
**Using plural forms**:
- ❌ `tests` → ✅ `testing`
- ❌ `deployments` → ✅ `deployment`
- ❌ `databases` → ✅ `database`
**Using generic terms**:
- ❌ `tools` → Choose specific category
- ❌ `utilities` → Choose specific category
- ❌ `helpers` → Choose specific category
**Using multiple categories**:
- ❌ `development,testing` → Choose ONE primary category
- Use keywords for additional topics
**Using custom categories**:
- ❌ `api-tools` → ✅ `development` or `productivity`
- ❌ `devops` → ✅ `deployment`
- ❌ `ci-cd` → ✅ `deployment`
### Compliance Criteria
**PASS Requirements**:
- Exact match with one of 10 approved categories
- Case-insensitive matching accepted
- Single category only (not multiple)
**FAIL Indicators**:
- Not in approved list
- Plural forms
- Custom categories
- Multiple categories
- Empty or missing
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,174 @@
## Operation: Check Naming Conventions
Validate plugin names against OpenPlugins lowercase-hyphen naming convention.
### Parameters from $ARGUMENTS
- **name**: Plugin name to validate (required)
- **fix**: Auto-suggest corrected name (optional, default: true)
### OpenPlugins Naming Convention
**Pattern**: `^[a-z0-9]+(-[a-z0-9]+)*$`
**Valid Examples**:
- `code-formatter`
- `test-runner`
- `deploy-automation`
- `api-client`
- `database-migration`
**Invalid Examples**:
- `Code-Formatter` (uppercase)
- `test_runner` (underscore)
- `Deploy Automation` (space)
- `APIClient` (camelCase)
- `-helper` (leading hyphen)
- `tool-` (trailing hyphen)
### Workflow
1. **Extract Name from Arguments**
```
Parse $ARGUMENTS to extract name parameter
If name not provided, return error
```
2. **Execute Naming Validator**
```bash
Execute .scripts/naming-validator.sh "$name"
Exit codes:
- 0: Valid naming convention
- 1: Invalid naming convention
- 2: Missing required parameters
```
3. **Process Results**
```
IF valid:
Return success with confirmation
ELSE:
Return failure with specific violations
Suggest corrected name if fix:true
Provide examples
```
4. **Return Compliance Report**
```
Format results:
- Status: PASS/FAIL
- Name: <provided-name>
- Valid: yes/no
- Issues: <list of violations>
- Suggestion: <corrected-name>
- Score impact: +5 points (if valid)
```
### Examples
```bash
# Valid name
/best-practices naming name:my-awesome-plugin
# Result: PASS - Valid lowercase-hyphen format
# Invalid name with uppercase
/best-practices naming name:MyPlugin
# Result: FAIL - Contains uppercase (M, P)
# Suggestion: my-plugin
# Invalid name with underscore
/best-practices naming name:test_runner
# Result: FAIL - Contains underscore (_)
# Suggestion: test-runner
# Invalid name with space
/best-practices naming name:"Test Runner"
# Result: FAIL - Contains space
# Suggestion: test-runner
```
### Error Handling
**Missing name parameter**:
```
ERROR: Missing required parameter 'name'
Usage: /best-practices naming name:<plugin-name>
Example: /best-practices naming name:my-plugin
```
**Empty name**:
```
ERROR: Name cannot be empty
Provide a valid plugin name following lowercase-hyphen convention.
```
### Output Format
**Success (Valid Name)**:
```
✅ Naming Convention: PASS
Name: code-formatter
Format: lowercase-hyphen
Pattern: ^[a-z0-9]+(-[a-z0-9]+)*$
Valid: Yes
Quality Score Impact: +5 points
The name follows OpenPlugins naming conventions perfectly.
```
**Failure (Invalid Name)**:
```
❌ Naming Convention: FAIL
Name: Code_Formatter
Format: Invalid
Valid: No
Issues Found:
1. Contains uppercase characters: C, F
2. Contains underscores instead of hyphens
Suggested Correction: code-formatter
Quality Score Impact: 0 points (fix to gain +5)
Fix these issues to comply with OpenPlugins standards.
```
### Compliance Criteria
**PASS Requirements**:
- All lowercase letters (a-z)
- Numbers allowed (0-9)
- Hyphens for word separation
- No leading or trailing hyphens
- No consecutive hyphens
- No other special characters
- Descriptive (not generic like "plugin" or "tool")
**FAIL Indicators**:
- Uppercase letters
- Underscores, spaces, or special characters
- Leading/trailing hyphens
- Empty or single character names
- Generic non-descriptive names
### Best Practices Guidance
**Good Names**:
- Describe functionality: `code-formatter`, `test-runner`
- Include technology: `python-linter`, `docker-manager`
- Indicate purpose: `api-client`, `database-migrator`
**Avoid**:
- Generic: `plugin`, `tool`, `helper`, `utility`
- Abbreviations only: `fmt`, `tst`, `db`
- Version numbers: `plugin-v2`, `tool-2024`
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,514 @@
## Operation: Full Standards Compliance
Execute comprehensive OpenPlugins and Claude Code best practices validation with complete compliance reporting.
### Parameters from $ARGUMENTS
- **path**: Path to plugin or marketplace directory (required)
- **fix**: Auto-suggest corrections for all issues (optional, default: true)
- **format**: Output format (text|json|markdown) (optional, default: text)
### Complete Standards Check
This operation validates all four best practice categories:
1. **Naming Convention** - Lowercase-hyphen format
2. **Semantic Versioning** - MAJOR.MINOR.PATCH format
3. **Category Assignment** - One of 10 approved categories
4. **Keyword Quality** - 3-7 relevant, non-generic keywords
### Workflow
1. **Detect Target Type**
```
Parse $ARGUMENTS to extract path parameter
Detect if path is plugin or marketplace:
- Plugin: Has plugin.json
- Marketplace: Has .claude-plugin/marketplace.json
```
2. **Load Metadata**
```
IF plugin:
Read plugin.json
Extract: name, version, keywords, category
ELSE IF marketplace:
Read .claude-plugin/marketplace.json
Extract marketplace metadata
Validate each plugin entry
ELSE:
Return error: Invalid target
```
3. **Execute All Validations**
```
Run in parallel or sequence:
A. Naming Validation
Execute check-naming.md with name parameter
Store result
B. Version Validation
Execute validate-versioning.md with version parameter
Store result
C. Category Validation
Execute check-categories.md with category parameter
Store result
D. Keyword Validation
Execute validate-keywords.md with keywords parameter
Store result
```
4. **Aggregate Results**
```
Collect all validation results:
- Individual pass/fail status
- Specific issues found
- Suggested corrections
- Score impact for each
Calculate overall compliance:
- Total score: Sum of individual scores
- Pass count: Number of passing validations
- Fail count: Number of failing validations
- Compliance percentage: (pass / total) × 100
```
5. **Generate Compliance Report**
```
Create comprehensive report:
- Executive summary
- Individual validation details
- Issue prioritization
- Suggested fixes
- Compliance score
- Publication readiness
```
6. **Return Results**
```
Format according to output format:
- text: Human-readable console output
- json: Machine-parseable JSON
- markdown: Documentation-ready markdown
```
### Examples
```bash
# Full compliance check on current directory
/best-practices full-standards path:.
# Check specific plugin with JSON output
/best-practices full-standards path:./my-plugin format:json
# Check with auto-fix suggestions
/best-practices full-standards path:. fix:true
# Marketplace validation
/best-practices full-standards path:./marketplace
```
### Error Handling
**Missing path parameter**:
```
ERROR: Missing required parameter 'path'
Usage: /best-practices full-standards path:<directory>
Examples:
/best-practices full-standards path:.
/best-practices full-standards path:./my-plugin
```
**Invalid path**:
```
ERROR: Invalid path or not a plugin/marketplace
Path: <provided-path>
The path must contain either:
- plugin.json (for plugins)
- .claude-plugin/marketplace.json (for marketplaces)
Check the path and try again.
```
**Missing metadata file**:
```
ERROR: Metadata file not found
Expected one of:
- plugin.json
- .claude-plugin/marketplace.json
This does not appear to be a valid Claude Code plugin or marketplace.
```
### Output Format
**Text Format (Complete Compliance)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
OPENPLUGINS BEST PRACTICES COMPLIANCE REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: code-formatter-plugin
Type: Plugin
Date: 2024-10-13
Overall Compliance: 100% ✅
Status: PUBLICATION READY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. Naming Convention: ✅ PASS
Name: code-formatter
Format: lowercase-hyphen
Score: +5 points
The name follows OpenPlugins naming conventions perfectly.
2. Semantic Versioning: ✅ PASS
Version: 1.2.3
Format: MAJOR.MINOR.PATCH
Score: +5 points
Valid semantic version compliant with semver 2.0.0.
3. Category Assignment: ✅ PASS
Category: quality
Description: Linting, formatting, code review
Score: +5 points
Category is approved and appropriate for this plugin.
4. Keyword Quality: ✅ PASS
Keywords: formatting, javascript, eslint, code-quality, automation
Count: 5 (optimal)
Quality: 10/10
Score: +10 points
Excellent keyword selection with balanced mix.
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPLIANCE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Validations Passed: 4/4 (100%)
Quality Score: 25/25 points
Scoring Breakdown:
✅ Naming Convention: +5 points
✅ Semantic Versioning: +5 points
✅ Category Assignment: +5 points
✅ Keyword Quality: +10 points
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Score: 25/25 points
Publication Status: ✅ READY FOR SUBMISSION
This plugin meets all OpenPlugins best practice standards
and is ready for marketplace submission!
Next Steps:
1. Submit to OpenPlugins marketplace
2. Follow contribution guidelines in CONTRIBUTING.md
3. Open pull request with plugin entry
```
**Text Format (Partial Compliance)**:
```
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
OPENPLUGINS BEST PRACTICES COMPLIANCE REPORT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Target: Test_Runner
Type: Plugin
Date: 2024-10-13
Overall Compliance: 25% ⚠️
Status: NEEDS IMPROVEMENT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
VALIDATION RESULTS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
1. Naming Convention: ❌ FAIL
Name: Test_Runner
Format: Invalid
Score: 0 points
Issues Found:
- Contains uppercase characters: T, R
- Contains underscore instead of hyphen
✏️ Suggested Fix: test-runner
Impact: +5 points (if fixed)
2. Semantic Versioning: ✅ PASS
Version: 1.0.0
Format: MAJOR.MINOR.PATCH
Score: +5 points
Valid semantic version compliant with semver 2.0.0.
3. Category Assignment: ❌ FAIL
Category: test-tools
Valid: No
Score: 0 points
This category is not in the approved list.
✏️ Suggested Fix: testing
Description: Test generation, coverage, quality assurance
Impact: +5 points (if fixed)
4. Keyword Quality: ⚠️ WARNING
Keywords: plugin, tool, awesome
Count: 3 (minimum met)
Quality: 2/10
Score: 2 points
Issues Found:
- Generic terms: plugin, tool
- Marketing terms: awesome
- No functional keywords
✏️ Suggested Fix: testing, automation, pytest, unit-testing, tdd
Impact: +8 points (if improved to excellent)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPLIANCE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Validations Passed: 1/4 (25%)
Quality Score: 7/25 points
Scoring Breakdown:
❌ Naming Convention: 0/5 points
✅ Semantic Versioning: 5/5 points
❌ Category Assignment: 0/5 points
⚠️ Keyword Quality: 2/10 points
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Score: 7/25 points
Publication Status: ⚠️ NOT READY - NEEDS FIXES
Priority Fixes Required:
1. [P0] Fix naming convention: Test_Runner → test-runner
2. [P0] Fix category: test-tools → testing
3. [P1] Improve keywords: Remove generic terms, add functional keywords
After Fixes (Estimated Score):
✅ Naming Convention: +5 points
✅ Semantic Versioning: +5 points
✅ Category Assignment: +5 points
✅ Keyword Quality: +10 points
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Potential Score: 25/25 points
Next Steps:
1. Apply suggested fixes above
2. Re-run validation: /best-practices full-standards path:.
3. Ensure score reaches 25/25 before submission
```
**JSON Format**:
```json
{
"target": "code-formatter",
"type": "plugin",
"timestamp": "2024-10-13T10:00:00Z",
"compliance": {
"overall": 100,
"status": "READY",
"passed": 4,
"failed": 0,
"warnings": 0
},
"validations": {
"naming": {
"status": "pass",
"name": "code-formatter",
"format": "lowercase-hyphen",
"score": 5,
"issues": []
},
"versioning": {
"status": "pass",
"version": "1.2.3",
"format": "MAJOR.MINOR.PATCH",
"score": 5,
"issues": []
},
"category": {
"status": "pass",
"category": "quality",
"valid": true,
"score": 5,
"issues": []
},
"keywords": {
"status": "pass",
"keywords": ["formatting", "javascript", "eslint", "code-quality", "automation"],
"count": 5,
"quality": 10,
"score": 10,
"issues": []
}
},
"score": {
"total": 25,
"maximum": 25,
"percentage": 100,
"breakdown": {
"naming": 5,
"versioning": 5,
"category": 5,
"keywords": 10
}
},
"publication_ready": true,
"next_steps": [
"Submit to OpenPlugins marketplace",
"Follow contribution guidelines",
"Open pull request"
]
}
```
**Markdown Format** (for documentation):
```markdown
# OpenPlugins Best Practices Compliance Report
**Target**: code-formatter
**Type**: Plugin
**Date**: 2024-10-13
**Status**: ✅ PUBLICATION READY
## Overall Compliance
- **Score**: 25/25 points (100%)
- **Validations Passed**: 4/4
- **Publication Ready**: Yes
## Validation Results
### 1. Naming Convention ✅
- **Status**: PASS
- **Name**: code-formatter
- **Format**: lowercase-hyphen
- **Score**: +5 points
The name follows OpenPlugins naming conventions perfectly.
### 2. Semantic Versioning ✅
- **Status**: PASS
- **Version**: 1.2.3
- **Format**: MAJOR.MINOR.PATCH
- **Score**: +5 points
Valid semantic version compliant with semver 2.0.0.
### 3. Category Assignment ✅
- **Status**: PASS
- **Category**: quality
- **Description**: Linting, formatting, code review
- **Score**: +5 points
Category is approved and appropriate for this plugin.
### 4. Keyword Quality ✅
- **Status**: PASS
- **Keywords**: formatting, javascript, eslint, code-quality, automation
- **Count**: 5 (optimal)
- **Quality**: 10/10
- **Score**: +10 points
Excellent keyword selection with balanced mix.
## Score Breakdown
| Validation | Score | Status |
|------------|-------|--------|
| Naming Convention | 5/5 | ✅ Pass |
| Semantic Versioning | 5/5 | ✅ Pass |
| Category Assignment | 5/5 | ✅ Pass |
| Keyword Quality | 10/10 | ✅ Pass |
| **Total** | **25/25** | **✅ Ready** |
## Next Steps
1. Submit to OpenPlugins marketplace
2. Follow contribution guidelines in CONTRIBUTING.md
3. Open pull request with plugin entry
---
*Report generated by marketplace-validator-plugin v1.0.0*
```
### Compliance Scoring
**Total Score Breakdown**:
- Naming Convention: 5 points
- Semantic Versioning: 5 points
- Category Assignment: 5 points
- Keyword Quality: 10 points
- **Maximum Total**: 25 points
**Publication Readiness**:
- **25/25 points (100%)**: ✅ READY - Perfect compliance
- **20-24 points (80-96%)**: ✅ READY - Minor improvements optional
- **15-19 points (60-76%)**: ⚠️ NEEDS WORK - Address issues before submission
- **10-14 points (40-56%)**: ❌ NOT READY - Significant fixes required
- **0-9 points (0-36%)**: ❌ NOT READY - Major compliance issues
### Integration with Quality Analysis
This operation feeds into the overall quality scoring system:
```
Best Practices Score (25 points max)
Quality Analysis (calculate-score)
Overall Quality Score (100 points total)
Publication Readiness Determination
```
### Best Practices Workflow
For complete plugin validation:
```bash
# 1. Run full standards compliance
/best-practices full-standards path:.
# 2. If issues found, fix them, then re-run
# ... apply fixes ...
/best-practices full-standards path:.
# 3. Once compliant, run comprehensive validation
/validation-orchestrator comprehensive path:.
# 4. Review quality report
# Quality score includes best practices (25 points)
```
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,105 @@
---
description: Enforce OpenPlugins and Claude Code best practices for naming, versioning, and standards compliance
---
You are the Best Practices coordinator, ensuring adherence to OpenPlugins and Claude Code standards.
## Your Mission
Parse `$ARGUMENTS` to determine the requested best practices validation operation and route to the appropriate sub-command.
## Available Operations
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
- **naming** → Read `.claude/commands/best-practices/check-naming.md`
- **versioning** → Read `.claude/commands/best-practices/validate-versioning.md`
- **categories** → Read `.claude/commands/best-practices/check-categories.md`
- **keywords** → Read `.claude/commands/best-practices/validate-keywords.md`
- **full-standards** → Read `.claude/commands/best-practices/full-compliance.md`
## Argument Format
```
/best-practices <operation> [parameters]
```
### Examples
```bash
# Check naming conventions
/best-practices naming name:my-plugin-name
# Validate semantic versioning
/best-practices versioning version:1.2.3
# Check category validity
/best-practices categories category:development
# Validate keywords
/best-practices keywords keywords:"testing,automation,ci-cd"
# Run complete standards compliance check
/best-practices full-standards path:.
```
## OpenPlugins Standards
**Naming Convention**:
- Format: lowercase-hyphen (e.g., `code-formatter`, `test-runner`)
- Pattern: `^[a-z0-9]+(-[a-z0-9]+)*$`
- No underscores, spaces, or uppercase
- Descriptive, not generic (avoid: "plugin", "tool", "helper")
**Semantic Versioning**:
- Format: MAJOR.MINOR.PATCH (e.g., 1.2.3)
- Pattern: `^[0-9]+\.[0-9]+\.[0-9]+$`
- Optional pre-release: `-alpha.1`, `-beta.2`
- Optional build metadata: `+20241013`
**Categories** (choose ONE):
1. **development** - Code generation, scaffolding, refactoring
2. **testing** - Test generation, coverage, quality assurance
3. **deployment** - CI/CD, infrastructure, release automation
4. **documentation** - Docs generation, API documentation
5. **security** - Vulnerability scanning, secret detection
6. **database** - Schema design, migrations, queries
7. **monitoring** - Performance analysis, logging
8. **productivity** - Workflow automation, task management
9. **quality** - Linting, formatting, code review
10. **collaboration** - Team tools, communication
**Keywords**:
- Count: 3-7 keywords
- Relevance: Functionality, technology, or use-case based
- Avoid: Generic terms (plugin, tool, utility), category duplication
- Good: `testing`, `automation`, `python`, `ci-cd`, `docker`
- Bad: `best`, `awesome`, `perfect`, `plugin`
## Compliance Scoring
Best practices contribute to quality score:
- Valid naming: +5 points
- Semantic versioning: +5 points
- Valid category: +5 points
- Quality keywords (3-7): +10 points
## Error Handling
If the operation is not recognized:
1. List all available operations
2. Show OpenPlugins standards
3. Provide compliance guidance
## Base Directory
Base directory for this skill: `.claude/commands/best-practices/`
## Your Task
1. Parse `$ARGUMENTS` to extract operation and parameters
2. Read the corresponding operation file
3. Execute best practices validation
4. Return compliance results with specific corrections
**Current Request**: $ARGUMENTS

View File

@@ -0,0 +1,337 @@
## Operation: Validate Keywords
Validate keyword selection for relevance, count, and quality against OpenPlugins standards.
### Parameters from $ARGUMENTS
- **keywords**: Comma-separated keyword list (required)
- **min**: Minimum keyword count (optional, default: 3)
- **max**: Maximum keyword count (optional, default: 7)
- **context**: Plugin context for relevance checking (optional, JSON or description)
### OpenPlugins Keyword Standards
**Count Requirements**:
- Minimum: 3 keywords
- Maximum: 7 keywords
- Optimal: 5-6 keywords
**Quality Requirements**:
- Relevant to plugin functionality
- Searchable terms users would use
- Mix of functionality, technology, and use-case
- No generic marketing terms
- No duplicate category names
### Keyword Categories
**Functionality Keywords** (what it does):
- `testing`, `deployment`, `formatting`, `linting`, `migration`
- `generation`, `automation`, `analysis`, `monitoring`, `scanning`
**Technology Keywords** (what it works with):
- `python`, `javascript`, `docker`, `kubernetes`, `postgresql`
- `react`, `vue`, `typescript`, `bash`, `terraform`
**Use-Case Keywords** (how it's used):
- `ci-cd`, `code-review`, `api-testing`, `performance`
- `tdd`, `bdd`, `refactoring`, `debugging`, `profiling`
### Good Keywords Examples
**Well-balanced sets**:
- `["testing", "pytest", "automation", "tdd", "python"]`
- `["deployment", "kubernetes", "ci-cd", "docker", "helm"]`
- `["linting", "javascript", "eslint", "code-quality", "automation"]`
- `["database", "postgresql", "migration", "schema", "sql"]`
**Poor keyword sets**:
- `["plugin", "tool", "awesome"]` - Generic/marketing terms
- `["test", "testing", "tester", "tests"]` - Redundant variations
- `["development"]` - Only category name, too few
- `["a", "b", "c", "d", "e", "f", "g", "h"]` - Too many, non-descriptive
### Workflow
1. **Extract Keywords from Arguments**
```
Parse $ARGUMENTS to extract keywords parameter
Split by comma, trim whitespace
Normalize to lowercase
Remove duplicates
```
2. **Execute Keyword Analyzer**
```bash
Execute .scripts/keyword-analyzer.py "$keywords" "$min" "$max" "$context"
Exit codes:
- 0: Valid keyword set
- 1: Count violation (too few or too many)
- 2: Quality issues (generic terms, duplicates)
- 3: Missing required parameters
```
3. **Validate Count**
```
count = number of keywords
IF count < min: FAIL (too few)
IF count > max: FAIL (too many)
```
4. **Check for Generic Terms**
```
Generic blocklist:
- plugin, tool, utility, helper, awesome
- best, perfect, great, super, amazing
- code, software, app, program
Flag any generic terms found
```
5. **Analyze Quality**
```
Check for:
- Duplicate category names
- Redundant variations (test, testing, tests)
- Single-character keywords
- Non-descriptive terms
```
6. **Calculate Relevance Score**
```
Base score: 10 points
Deductions:
- Generic term: -2 per term
- Too few keywords: -5
- Too many keywords: -3
- Redundant variations: -2 per redundancy
- Non-descriptive: -1 per term
Final score: max(0, base - deductions)
```
7. **Return Analysis Report**
```
Format results:
- Status: PASS/FAIL/WARNING
- Count: <number> (valid range: min-max)
- Quality: <score>/10
- Issues: <list of problems>
- Suggestions: <improved keyword set>
- Score impact: +10 points (if excellent), +5 (if good)
```
### Examples
```bash
# Valid keyword set
/best-practices keywords keywords:"testing,pytest,automation,tdd,python"
# Result: PASS - 5 keywords, well-balanced, relevant
# Too few keywords
/best-practices keywords keywords:"testing,python"
# Result: FAIL - Only 2 keywords (minimum: 3)
# Too many keywords
/best-practices keywords keywords:"a,b,c,d,e,f,g,h,i,j"
# Result: FAIL - 10 keywords (maximum: 7)
# Generic terms
/best-practices keywords keywords:"plugin,tool,awesome,best"
# Result: FAIL - Contains generic/marketing terms
# With custom range
/best-practices keywords keywords:"ci,cd,docker" min:2 max:5
# Result: PASS - 3 keywords within custom range
```
### Error Handling
**Missing keywords parameter**:
```
ERROR: Missing required parameter 'keywords'
Usage: /best-practices keywords keywords:"keyword1,keyword2,keyword3"
Example: /best-practices keywords keywords:"testing,automation,python"
```
**Empty keywords**:
```
ERROR: Keywords cannot be empty
Provide 3-7 relevant keywords describing your plugin.
Good examples:
- "testing,pytest,automation"
- "deployment,kubernetes,ci-cd"
- "linting,javascript,code-quality"
```
### Output Format
**Success (Excellent Keywords)**:
```
✅ Keyword Validation: PASS
Keywords: testing, pytest, automation, tdd, python
Count: 5 (optimal range: 3-7)
Quality Score: 10/10
Analysis:
✅ Balanced mix of functionality, technology, and use-case
✅ All keywords relevant and searchable
✅ No generic or marketing terms
✅ Good variety without redundancy
Breakdown:
- Functionality: testing, automation
- Technology: pytest, python
- Use-case: tdd
Quality Score Impact: +10 points
Excellent keyword selection for discoverability!
```
**Failure (Count Violation)**:
```
❌ Keyword Validation: FAIL
Keywords: testing, python
Count: 2 (required: 3-7)
Quality Score: 5/10
Issues Found:
1. Too few keywords (2 < 3 minimum)
2. Missing technology or use-case keywords
Suggestions to improve:
Add 1-3 more relevant keywords such as:
- Functionality: automation, unit-testing
- Use-case: tdd, ci-cd
- Specific tools: pytest, unittest
Recommended: testing, python, pytest, automation, tdd
Quality Score Impact: 0 points (fix to gain +10)
```
**Failure (Generic Terms)**:
```
❌ Keyword Validation: FAIL
Keywords: plugin, tool, awesome, best, helper
Count: 5 (valid range)
Quality Score: 2/10
Issues Found:
1. Generic terms detected: plugin, tool, helper
2. Marketing terms detected: awesome, best
3. No functional or technical keywords
These keywords don't help users find your plugin.
Better alternatives:
Instead of generic terms, describe WHAT it does:
- Replace "plugin" → testing, deployment, formatting
- Replace "tool" → specific functionality
- Replace "awesome/best" → actual features
Suggested keywords based on common patterns:
- testing, automation, ci-cd, docker, python
- deployment, kubernetes, infrastructure, terraform
- linting, formatting, code-quality, javascript
Quality Score Impact: 0 points (fix to gain +10)
```
**Warning (Minor Issues)**:
```
⚠️ Keyword Validation: WARNING
Keywords: testing, tests, test, automation, ci-cd
Count: 5 (valid range)
Quality Score: 7/10
Issues Found:
1. Redundant variations: testing, tests, test
2. Consider consolidating to single term
Suggestions:
- Keep: testing, automation, ci-cd
- Remove: tests, test (redundant)
- Add: 2 more specific keywords (e.g., pytest, junit)
Recommended: testing, automation, ci-cd, pytest, unit-testing
Quality Score Impact: +7 points (good, but could be better)
Your keywords are functional but could be more diverse.
```
### Keyword Quality Checklist
**PASS Requirements**:
- 3-7 keywords total
- No generic terms (plugin, tool, utility, helper)
- No marketing terms (awesome, best, perfect)
- No redundant variations
- Mix of functionality and technology
- Relevant to plugin purpose
- Searchable by target users
**FAIL Indicators**:
- < 3 or > 7 keywords
- Contains generic terms
- Contains marketing fluff
- All keywords same type (only technologies, only functionality)
- Single-character keywords
- Category name duplication
### Best Practices
**Do**:
- Use specific functionality terms
- Include primary technologies
- Add relevant use-cases
- Think about user search intent
- Balance breadth and specificity
**Don't**:
- Use generic words (plugin, tool, utility)
- Add marketing terms (best, awesome, perfect)
- Duplicate category names exactly
- Use redundant variations
- Add irrelevant technologies
- Use abbreviations without context
### Quality Scoring Matrix
**10/10 - Excellent**:
- 5-6 keywords
- Perfect mix of functionality/technology/use-case
- All highly relevant
- Great search discoverability
**7-9/10 - Good**:
- 3-7 keywords
- Good mix with minor issues
- Mostly relevant
- Decent discoverability
**4-6/10 - Fair**:
- Count issues OR some generic terms
- Imbalanced mix
- Partial relevance
- Limited discoverability
**0-3/10 - Poor**:
- Severe count violations OR mostly generic
- No functional keywords
- Poor relevance
- Very poor discoverability
**Request**: $ARGUMENTS

View File

@@ -0,0 +1,254 @@
## Operation: Validate Versioning
Validate version strings against Semantic Versioning 2.0.0 specification.
### Parameters from $ARGUMENTS
- **version**: Version string to validate (required)
- **strict**: Enforce strict semver (no pre-release/build metadata) (optional, default: false)
### Semantic Versioning Standard
**Base Pattern**: `MAJOR.MINOR.PATCH` (e.g., `1.2.3`)
**Strict Format**: `^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)$` (rejects leading zeros such as `01.02.03`, per SemVer 2.0.0)
**Extended Format** (with pre-release and build metadata):
- Pre-release: `1.2.3-alpha.1`, `2.0.0-beta.2`, `1.0.0-rc.1`
- Build metadata: `1.2.3+20241013`, `1.0.0+build.1`
- Combined: `1.2.3-alpha.1+build.20241013`
### Valid Examples
**Strict Semver** (OpenPlugins recommended):
- `1.0.0` - Initial release
- `1.2.3` - Standard version
- `2.5.13` - Double-digit components
- `0.1.0` - Pre-1.0 development
**Extended Semver** (allowed):
- `1.0.0-alpha` - Alpha release
- `1.0.0-beta.2` - Beta release
- `1.0.0-rc.1` - Release candidate
- `1.2.3+20241013` - With build metadata
### Invalid Examples
- `1.0` - Missing PATCH
- `v1.0.0` - Leading 'v' prefix
- `1.0.0.0` - Too many components
- `1.2.x` - Placeholder values
- `latest` - Non-numeric
- `1.0.0-SNAPSHOT` - Maven-style identifier (syntactically valid SemVer, but rejected by this validator as non-standard)
### Workflow
1. **Extract Version from Arguments**
```
Parse $ARGUMENTS to extract version parameter
If version not provided, return error
```
2. **Execute Semantic Version Checker**
```bash
Execute .scripts/semver-checker.py "$version" "$strict"
Exit codes:
- 0: Valid semantic version
- 1: Invalid format
- 2: Missing required parameters
- 3: Strict mode violation (valid semver, but has pre-release/build)
```
3. **Parse Version Components**
```
Extract components:
- MAJOR: Breaking changes
- MINOR: Backward-compatible features
- PATCH: Backward-compatible fixes
- Pre-release: Optional identifier (alpha, beta, rc)
- Build metadata: Optional metadata
```
4. **Return Validation Report**
```
Format results:
- Status: PASS/FAIL/WARNING
- Version: <provided-version>
- Valid: yes/no
- Components: MAJOR.MINOR.PATCH breakdown
- Pre-release: <identifier> (if present)
- Build: <metadata> (if present)
- Score impact: +5 points (if valid)
```
### Examples
```bash
# Valid strict semver
/best-practices versioning version:1.2.3
# Result: PASS - Valid semantic version (1.2.3)
# Valid with pre-release
/best-practices versioning version:1.0.0-alpha.1
# Result: PASS - Valid semantic version with pre-release
# Invalid format
/best-practices versioning version:1.0
# Result: FAIL - Missing PATCH component
# Strict mode with pre-release
/best-practices versioning version:1.0.0-beta strict:true
# Result: WARNING - Valid semver but not strict format
# Invalid prefix
/best-practices versioning version:v1.2.3
# Result: FAIL - Contains 'v' prefix (use 1.2.3)
```
### Error Handling
**Missing version parameter**:
```
ERROR: Missing required parameter 'version'
Usage: /best-practices versioning version:<semver>
Example: /best-practices versioning version:1.2.3
```
**Invalid format**:
```
ERROR: Invalid semantic version format
The version must follow MAJOR.MINOR.PATCH format.
Examples:
- 1.0.0 (initial release)
- 1.2.3 (standard version)
- 2.0.0-beta.1 (pre-release)
```
### Output Format
**Success (Valid Semver)**:
```
✅ Semantic Versioning: PASS
Version: 1.2.3
Format: MAJOR.MINOR.PATCH
Valid: Yes
Components:
- MAJOR: 1 (breaking changes)
- MINOR: 2 (new features)
- PATCH: 3 (bug fixes)
Quality Score Impact: +5 points
The version follows Semantic Versioning 2.0.0 specification.
```
**Success with Pre-release**:
```
✅ Semantic Versioning: PASS
Version: 1.0.0-beta.2
Format: MAJOR.MINOR.PATCH-PRERELEASE
Valid: Yes
Components:
- MAJOR: 1
- MINOR: 0
- PATCH: 0
- Pre-release: beta.2
Quality Score Impact: +5 points
Note: Pre-release versions indicate unstable releases.
```
**Failure (Invalid Format)**:
```
❌ Semantic Versioning: FAIL
Version: 1.0
Format: Invalid
Valid: No
Issues Found:
1. Missing PATCH component
2. Expected format: MAJOR.MINOR.PATCH
Suggested Correction: 1.0.0
Quality Score Impact: 0 points (fix to gain +5)
Fix to comply with Semantic Versioning 2.0.0 specification.
Reference: https://semver.org/
```
**Warning (Strict Mode)**:
```
⚠️ Semantic Versioning: WARNING
Version: 1.0.0-alpha.1
Format: Valid semver, but not strict
Valid: Yes (with pre-release)
Note: OpenPlugins recommends strict MAJOR.MINOR.PATCH format
without pre-release or build metadata for marketplace submissions.
Recommended: 1.0.0 (for stable release)
Quality Score Impact: +5 points (valid, but consider strict format)
```
### Versioning Guidelines
**When to increment**:
**MAJOR** (X.0.0):
- Breaking API changes
- Incompatible changes
- Major rewrites
**MINOR** (x.Y.0):
- New features (backward-compatible)
- Deprecations
- Significant improvements
**PATCH** (x.y.Z):
- Bug fixes
- Security patches
- Minor improvements
**Initial Development**:
- Start with `0.1.0`
- Increment MINOR for features
- First stable release: `1.0.0`
**Pre-release Identifiers**:
- `alpha` - Early testing
- `beta` - Feature complete, testing
- `rc` - Release candidate
### Compliance Criteria
**PASS Requirements**:
- Three numeric components (MAJOR.MINOR.PATCH)
- Each component is non-negative integer
- Components separated by dots
- Optional pre-release identifier (hyphen-separated)
- Optional build metadata (plus-separated)
- No leading zeros (except single 0)
**FAIL Indicators**:
- Missing components (1.0)
- Too many components (1.0.0.0)
- Non-numeric components (1.x.0)
- Leading 'v' prefix
- Invalid separators
- Leading zeros (01.02.03)
**Request**: $ARGUMENTS