Initial commit

Zhongwei Li
2025-11-29 18:20:21 +08:00
commit bbbaf7acad
63 changed files with 38552 additions and 0 deletions


@@ -0,0 +1,387 @@
#!/bin/bash
# Purpose: Analyze project dependencies for security, versioning, and usage
# Version: 1.0.0
# Usage: ./analyze-dependencies.sh [path] [json|text]
# Returns: Dependency analysis report (JSON or text)
# Exit codes: 0=success, 1=error, 2=invalid input
set -euo pipefail
# Configuration
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly PROJECT_DIR="${1:-.}"
readonly OUTPUT_FORMAT="${2:-json}"
# Color codes for output
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly GREEN='\033[0;32m'
readonly NC='\033[0m' # No Color
# Logging functions
log_info() {
echo -e "${GREEN}[INFO]${NC} $*" >&2
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $*" >&2
}
log_error() {
echo -e "${RED}[ERROR]${NC} $*" >&2
}
# Validate input
validate_input() {
if [[ ! -d "$PROJECT_DIR" ]]; then
log_error "Directory not found: $PROJECT_DIR"
exit 2
fi
}
# Detect package manager and dependency files
detect_package_manager() {
local pkg_manager=""
local dep_file=""
if [[ -f "$PROJECT_DIR/package.json" ]]; then
pkg_manager="npm"
dep_file="package.json"
elif [[ -f "$PROJECT_DIR/requirements.txt" ]]; then
pkg_manager="pip"
dep_file="requirements.txt"
elif [[ -f "$PROJECT_DIR/Pipfile" ]]; then
pkg_manager="pipenv"
dep_file="Pipfile"
elif [[ -f "$PROJECT_DIR/pyproject.toml" ]]; then
pkg_manager="poetry"
dep_file="pyproject.toml"
elif [[ -f "$PROJECT_DIR/Gemfile" ]]; then
pkg_manager="bundler"
dep_file="Gemfile"
elif [[ -f "$PROJECT_DIR/go.mod" ]]; then
pkg_manager="go"
dep_file="go.mod"
elif [[ -f "$PROJECT_DIR/Cargo.toml" ]]; then
pkg_manager="cargo"
dep_file="Cargo.toml"
elif [[ -f "$PROJECT_DIR/composer.json" ]]; then
pkg_manager="composer"
dep_file="composer.json"
else
log_warn "No recognized dependency file found"
pkg_manager="unknown"
dep_file="none"
fi
echo "$pkg_manager|$dep_file"
}
# Count dependencies
count_dependencies() {
local pkg_manager="$1"
local dep_file="$2"
local direct_count=0
local dev_count=0
case "$pkg_manager" in
npm)
if command -v jq &> /dev/null; then
direct_count=$(jq -r '.dependencies // {} | length' "$PROJECT_DIR/$dep_file" 2>/dev/null || echo 0)
dev_count=$(jq -r '.devDependencies // {} | length' "$PROJECT_DIR/$dep_file" 2>/dev/null || echo 0)
else
direct_count=$(grep -c '"' "$PROJECT_DIR/$dep_file" 2>/dev/null || echo 0)
fi
;;
pip)
direct_count=$(grep -v '^#' "$PROJECT_DIR/$dep_file" 2>/dev/null | grep -c . || true)
direct_count=${direct_count:-0}
;;
go)
direct_count=$(grep -c 'require' "$PROJECT_DIR/$dep_file" 2>/dev/null || true)
direct_count=${direct_count:-0}
;;
*)
direct_count=0
;;
esac
echo "$direct_count|$dev_count"
}
# Check for outdated dependencies (simplified - would need package manager specific commands)
check_outdated() {
local pkg_manager="$1"
local outdated_count=0
# This is a simplified check - in practice would run actual package manager commands
case "$pkg_manager" in
npm)
if command -v npm &> /dev/null && [[ -f "$PROJECT_DIR/package-lock.json" ]]; then
log_info "Checking for outdated npm packages..."
# Would run: npm outdated --json in production
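# e.g. (sketch): npm outdated --json | jq 'length'
# Note: npm outdated exits non-zero when anything is outdated, so guard with "|| true" under set -e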
outdated_count=0 # Placeholder
fi
;;
pip)
if command -v pip &> /dev/null; then
log_info "Checking for outdated pip packages..."
# Would run: pip list --outdated in production
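# e.g. (sketch): pip list --outdated --format=json | jq 'length'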
outdated_count=0 # Placeholder
fi
;;
esac
echo "$outdated_count"
}
# Check for security vulnerabilities (simplified)
check_vulnerabilities() {
local pkg_manager="$1"
local vuln_count=0
local critical=0
local high=0
local medium=0
local low=0
# This would integrate with actual security scanners
case "$pkg_manager" in
npm)
if command -v npm &> /dev/null && [[ -f "$PROJECT_DIR/package-lock.json" ]]; then
log_info "Checking for npm security vulnerabilities..."
# Would run: npm audit --json in production
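# e.g. (sketch): npm audit --json | jq '.metadata.vulnerabilities'
# which reports counts per severity (npm calls the "medium" bucket "moderate")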
vuln_count=0 # Placeholder
fi
;;
pip)
if command -v safety &> /dev/null; then
log_info "Checking for Python security vulnerabilities..."
# Would run: safety check in production
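# e.g. (sketch): safety check --json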
vuln_count=0 # Placeholder
fi
;;
esac
echo "$critical|$high|$medium|$low"
}
# Analyze dependency tree depth (simplified)
analyze_tree_depth() {
local pkg_manager="$1"
local max_depth=0
case "$pkg_manager" in
npm)
if [[ -f "$PROJECT_DIR/package-lock.json" ]]; then
# Simplified depth calculation
max_depth=3 # Placeholder - would calculate from lockfile
fi
;;
*)
max_depth=0
;;
esac
echo "$max_depth"
}
# Find unused dependencies (simplified)
find_unused() {
local pkg_manager="$1"
local unused_count=0
# This would require code analysis to see what's actually imported/required
case "$pkg_manager" in
npm)
log_info "Analyzing for unused npm packages..."
# Would use tools like depcheck in production
unused_count=0 # Placeholder
;;
esac
echo "$unused_count"
}
# Check for duplicate dependencies
check_duplicates() {
local pkg_manager="$1"
local duplicate_count=0
case "$pkg_manager" in
npm)
if [[ -f "$PROJECT_DIR/package-lock.json" ]]; then
log_info "Checking for duplicate packages..."
# Would analyze lockfile for version conflicts
duplicate_count=0 # Placeholder
fi
;;
esac
echo "$duplicate_count"
}
# Generate dependency analysis report
generate_report() {
local pkg_manager="$1"
local dep_file="$2"
local dep_counts="$3"
local outdated="$4"
local vulnerabilities="$5"
local tree_depth="$6"
local unused="$7"
local duplicates="$8"
IFS='|' read -r direct_deps dev_deps <<< "$dep_counts"
IFS='|' read -r crit_vulns high_vulns med_vulns low_vulns <<< "$vulnerabilities"
local total_deps=$((direct_deps + dev_deps))
local total_vulns=$((crit_vulns + high_vulns + med_vulns + low_vulns))
if [[ "$OUTPUT_FORMAT" == "json" ]]; then
cat <<EOF
{
"package_manager": "$pkg_manager",
"dependency_file": "$dep_file",
"analysis_date": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"dependencies": {
"total": $total_deps,
"direct": $direct_deps,
"development": $dev_deps,
"outdated": $outdated,
"unused": $unused,
"duplicates": $duplicates
},
"vulnerabilities": {
"total": $total_vulns,
"critical": $crit_vulns,
"high": $high_vulns,
"medium": $med_vulns,
"low": $low_vulns
},
"tree_depth": $tree_depth,
"health_score": $(calculate_health_score "$total_vulns" "$outdated" "$unused" "$duplicates"),
"recommendations": $(generate_recommendations "$total_vulns" "$outdated" "$unused" "$duplicates")
}
EOF
else
cat <<EOF
==============================================
Dependency Analysis Report
==============================================
Package Manager: $pkg_manager
Dependency File: $dep_file
Analysis Date: $(date)
Dependencies:
Total Dependencies: $total_deps
Direct Dependencies: $direct_deps
Development Dependencies: $dev_deps
Outdated: $outdated
Unused: $unused
Duplicates: $duplicates
Security Vulnerabilities:
Total: $total_vulns
Critical: $crit_vulns
High: $high_vulns
Medium: $med_vulns
Low: $low_vulns
Dependency Tree:
Maximum Depth: $tree_depth
Health Score: $(calculate_health_score "$total_vulns" "$outdated" "$unused" "$duplicates")/10
==============================================
EOF
fi
}
# Calculate health score (0-10)
calculate_health_score() {
local vulns="$1"
local outdated="$2"
local unused="$3"
local duplicates="$4"
local score=10
# Deduct points for issues
score=$((score - vulns)) # -1 per vulnerability
score=$((score - outdated / 5)) # -1 per 5 outdated packages
score=$((score - unused / 3)) # -1 per 3 unused packages
score=$((score - duplicates / 2)) # -1 per 2 duplicates
# Ensure score is between 0 and 10
if (( score < 0 )); then
score=0
fi
echo "$score"
}
# Generate recommendations
generate_recommendations() {
local vulns="$1"
local outdated="$2"
local unused="$3"
local duplicates="$4"
local recommendations="["
if (( vulns > 0 )); then
recommendations+='{"priority":"critical","action":"Update packages with security vulnerabilities immediately"},'
fi
if (( outdated > 10 )); then
recommendations+='{"priority":"high","action":"Review and update outdated dependencies"},'
fi
if (( unused > 5 )); then
recommendations+='{"priority":"medium","action":"Remove unused dependencies to reduce bundle size"},'
fi
if (( duplicates > 0 )); then
recommendations+='{"priority":"medium","action":"Resolve duplicate dependencies with version conflicts"},'
fi
# Remove trailing comma if exists
recommendations="${recommendations%,}"
recommendations+="]"
echo "$recommendations"
}
# Main execution
main() {
log_info "Starting dependency analysis..."
validate_input
# Detect package manager
IFS='|' read -r pkg_manager dep_file <<< "$(detect_package_manager)"
if [[ "$pkg_manager" == "unknown" ]]; then
log_error "Could not detect package manager"
exit 1
fi
log_info "Detected package manager: $pkg_manager"
# Gather metrics
dep_counts=$(count_dependencies "$pkg_manager" "$dep_file")
outdated=$(check_outdated "$pkg_manager")
vulnerabilities=$(check_vulnerabilities "$pkg_manager")
tree_depth=$(analyze_tree_depth "$pkg_manager")
unused=$(find_unused "$pkg_manager")
duplicates=$(check_duplicates "$pkg_manager")
# Generate report
generate_report "$pkg_manager" "$dep_file" "$dep_counts" "$outdated" "$vulnerabilities" "$tree_depth" "$unused" "$duplicates"
log_info "Analysis complete"
exit 0
}
# Run main function
main "$@"


@@ -0,0 +1,367 @@
#!/usr/bin/env python3
"""
Purpose: Calculate code complexity metrics for architecture assessment
Version: 1.0.0
Usage: python3 complexity-metrics.py [path] [--format json|text]
Returns: Complexity metrics including cyclomatic complexity, maintainability index
Exit codes: 0=success, 1=error, 2=invalid input
Dependencies: radon (install with: pip install radon)
If radon is not available, provides simplified metrics
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Tuple, Any
from datetime import datetime
class ComplexityAnalyzer:
"""Analyzes code complexity across a codebase."""
def __init__(self, root_path: str):
self.root_path = Path(root_path)
self.results = {
"analysis_date": datetime.utcnow().isoformat() + "Z",
"root_path": str(self.root_path),
"files_analyzed": 0,
"total_lines": 0,
"total_functions": 0,
"complexity": {
"average": 0.0,
"max": 0,
"distribution": {"simple": 0, "moderate": 0, "complex": 0, "very_complex": 0}
},
"maintainability": {
"average": 0.0,
"distribution": {"high": 0, "medium": 0, "low": 0}
},
"files": []
}
self.has_radon = self._check_radon()
def _check_radon(self) -> bool:
"""Check if radon is available."""
try:
import radon
return True
except ImportError:
print("Warning: radon not installed. Using simplified metrics.", file=sys.stderr)
print("Install with: pip install radon", file=sys.stderr)
return False
def analyze(self) -> Dict[str, Any]:
"""Perform complexity analysis on the codebase."""
if not self.root_path.exists():
raise FileNotFoundError(f"Path not found: {self.root_path}")
# Find all source files
source_files = self._find_source_files()
for file_path in source_files:
self._analyze_file(file_path)
# Calculate summary statistics
self._calculate_summary()
return self.results
def _find_source_files(self) -> List[Path]:
"""Find all source code files in the directory."""
extensions = {'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go', '.rb', '.php', '.c', '.cpp', '.cs'}
source_files = []
for ext in extensions:
source_files.extend(self.root_path.rglob(f"*{ext}"))
# Exclude common non-source directories
excluded_dirs = {'node_modules', 'venv', 'env', '.venv', 'dist', 'build', '.git', 'vendor', '__pycache__'}
source_files = [f for f in source_files if not any(excluded in f.parts for excluded in excluded_dirs)]
return source_files
def _analyze_file(self, file_path: Path):
"""Analyze a single file."""
try:
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
lines = content.split('\n')
self.results["total_lines"] += len(lines)
self.results["files_analyzed"] += 1
if self.has_radon:
self._analyze_with_radon(file_path, content)
else:
self._analyze_simplified(file_path, content, lines)
except Exception as e:
print(f"Warning: Could not analyze {file_path}: {e}", file=sys.stderr)
def _analyze_with_radon(self, file_path: Path, content: str):
"""Analyze file using radon library."""
from radon.complexity import cc_visit
from radon.metrics import mi_visit
try:
# Cyclomatic complexity
complexity_results = cc_visit(content, no_assert=True)
for result in complexity_results:
self.results["total_functions"] += 1
complexity = result.complexity
# Classify complexity
if complexity <= 5:
self.results["complexity"]["distribution"]["simple"] += 1
elif complexity <= 10:
self.results["complexity"]["distribution"]["moderate"] += 1
elif complexity <= 20:
self.results["complexity"]["distribution"]["complex"] += 1
else:
self.results["complexity"]["distribution"]["very_complex"] += 1
# Track maximum complexity
if complexity > self.results["complexity"]["max"]:
self.results["complexity"]["max"] = complexity
# Maintainability index
mi_score = mi_visit(content, multi=True)
if mi_score:
avg_mi = sum(mi_score) / len(mi_score)
if avg_mi >= 70:
self.results["maintainability"]["distribution"]["high"] += 1
elif avg_mi >= 50:
self.results["maintainability"]["distribution"]["medium"] += 1
else:
self.results["maintainability"]["distribution"]["low"] += 1
except Exception as e:
print(f"Warning: Radon analysis failed for {file_path}: {e}", file=sys.stderr)
def _analyze_simplified(self, file_path: Path, content: str, lines: List[str]):
"""Simplified analysis without radon."""
# Count functions (simplified heuristic)
function_keywords = ['def ', 'function ', 'func ', 'fn ', 'sub ', 'public ', 'private ', 'protected ']
function_count = sum(1 for line in lines if any(keyword in line.lower() for keyword in function_keywords))
self.results["total_functions"] += function_count
# Estimate complexity based on control flow keywords
complexity_keywords = ['if ', 'else', 'elif', 'for ', 'while ', 'switch', 'case ', 'catch', '?', '&&', '||']
total_complexity = sum(1 for line in lines if any(keyword in line for keyword in complexity_keywords))
if function_count > 0:
avg_complexity = total_complexity / function_count
# Classify based on average
if avg_complexity <= 5:
self.results["complexity"]["distribution"]["simple"] += function_count
elif avg_complexity <= 10:
self.results["complexity"]["distribution"]["moderate"] += function_count
elif avg_complexity <= 20:
self.results["complexity"]["distribution"]["complex"] += function_count
else:
self.results["complexity"]["distribution"]["very_complex"] += function_count
# Estimate maintainability based on line count and function size
avg_lines_per_func = len(lines) / max(function_count, 1)
if avg_lines_per_func <= 20:
self.results["maintainability"]["distribution"]["high"] += 1
elif avg_lines_per_func <= 50:
self.results["maintainability"]["distribution"]["medium"] += 1
else:
self.results["maintainability"]["distribution"]["low"] += 1
def _calculate_summary(self):
"""Calculate summary statistics."""
total_funcs = self.results["total_functions"]
if total_funcs > 0:
# Average complexity
dist = self.results["complexity"]["distribution"]
weighted_sum = (dist["simple"] * 3 + dist["moderate"] * 7.5 +
dist["complex"] * 15 + dist["very_complex"] * 25)
self.results["complexity"]["average"] = round(weighted_sum / total_funcs, 2)
# Average maintainability
mi_dist = self.results["maintainability"]["distribution"]
total_mi = sum(mi_dist.values())
if total_mi > 0:
weighted_mi = (mi_dist["high"] * 85 + mi_dist["medium"] * 60 + mi_dist["low"] * 30)
self.results["maintainability"]["average"] = round(weighted_mi / total_mi, 2)
# Add health score (0-10 scale)
self.results["health_score"] = self._calculate_health_score()
# Add recommendations
self.results["recommendations"] = self._generate_recommendations()
def _calculate_health_score(self) -> float:
"""Calculate overall code health score (0-10)."""
score = 10.0
# Deduct for high average complexity
avg_complexity = self.results["complexity"]["average"]
if avg_complexity > 20:
score -= 4
elif avg_complexity > 10:
score -= 2
elif avg_complexity > 5:
score -= 1
# Deduct for very complex functions
very_complex = self.results["complexity"]["distribution"]["very_complex"]
total_funcs = self.results["total_functions"]
if total_funcs > 0:
very_complex_ratio = very_complex / total_funcs
if very_complex_ratio > 0.2:
score -= 3
elif very_complex_ratio > 0.1:
score -= 2
elif very_complex_ratio > 0.05:
score -= 1
# Deduct for low maintainability
low_mi = self.results["maintainability"]["distribution"]["low"]
total_files = self.results["files_analyzed"]
if total_files > 0:
low_mi_ratio = low_mi / total_files
if low_mi_ratio > 0.3:
score -= 2
elif low_mi_ratio > 0.2:
score -= 1
return max(0.0, min(10.0, round(score, 1)))
def _generate_recommendations(self) -> List[Dict[str, str]]:
"""Generate recommendations based on analysis."""
recommendations = []
avg_complexity = self.results["complexity"]["average"]
if avg_complexity > 10:
recommendations.append({
"priority": "high",
"action": f"Reduce average cyclomatic complexity from {avg_complexity} to below 10",
"impact": "Improves code readability and testability"
})
very_complex = self.results["complexity"]["distribution"]["very_complex"]
if very_complex > 0:
recommendations.append({
"priority": "high",
"action": f"Refactor {very_complex} very complex functions (complexity > 20)",
"impact": "Reduces bug risk and maintenance burden"
})
low_mi = self.results["maintainability"]["distribution"]["low"]
if low_mi > 0:
recommendations.append({
"priority": "medium",
"action": f"Improve maintainability of {low_mi} low-scored files",
"impact": "Easier code changes and onboarding"
})
total_funcs = self.results["total_functions"]
total_lines = self.results["total_lines"]
if total_funcs > 0:
avg_lines_per_func = total_lines / total_funcs
if avg_lines_per_func > 50:
recommendations.append({
"priority": "medium",
"action": f"Break down large functions (avg {avg_lines_per_func:.0f} lines/function)",
"impact": "Improves code organization and reusability"
})
return recommendations
def format_output(results: Dict[str, Any], output_format: str) -> str:
"""Format analysis results."""
if output_format == "json":
return json.dumps(results, indent=2)
# Text format
output = []
output.append("\n" + "=" * 60)
output.append("Code Complexity Metrics Report")
output.append("=" * 60)
output.append(f"\nAnalysis Date: {results['analysis_date']}")
output.append(f"Root Path: {results['root_path']}")
output.append(f"Files Analyzed: {results['files_analyzed']}")
output.append(f"Total Lines: {results['total_lines']:,}")
output.append(f"Total Functions: {results['total_functions']:,}")
output.append("\n--- Cyclomatic Complexity ---")
output.append(f"Average Complexity: {results['complexity']['average']}")
output.append(f"Maximum Complexity: {results['complexity']['max']}")
output.append("\nDistribution:")
dist = results['complexity']['distribution']
total = sum(dist.values())
if total > 0:
output.append(f" Simple (1-5): {dist['simple']:4d} ({dist['simple']/total*100:5.1f}%)")
output.append(f" Moderate (6-10): {dist['moderate']:4d} ({dist['moderate']/total*100:5.1f}%)")
output.append(f" Complex (11-20): {dist['complex']:4d} ({dist['complex']/total*100:5.1f}%)")
output.append(f" Very Complex (>20): {dist['very_complex']:4d} ({dist['very_complex']/total*100:5.1f}%)")
output.append("\n--- Maintainability Index ---")
output.append(f"Average Score: {results['maintainability']['average']}")
output.append("\nDistribution:")
mi_dist = results['maintainability']['distribution']
total_mi = sum(mi_dist.values())
if total_mi > 0:
output.append(f" High (70-100): {mi_dist['high']:4d} ({mi_dist['high']/total_mi*100:5.1f}%)")
output.append(f" Medium (50-69): {mi_dist['medium']:4d} ({mi_dist['medium']/total_mi*100:5.1f}%)")
output.append(f" Low (0-49): {mi_dist['low']:4d} ({mi_dist['low']/total_mi*100:5.1f}%)")
output.append(f"\n--- Health Score: {results['health_score']}/10 ---")
if results['recommendations']:
output.append("\n--- Recommendations ---")
for i, rec in enumerate(results['recommendations'], 1):
output.append(f"\n{i}. [{rec['priority'].upper()}] {rec['action']}")
output.append(f" Impact: {rec['impact']}")
output.append("\n" + "=" * 60 + "\n")
return "\n".join(output)
def main():
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Analyze code complexity metrics for architecture assessment"
)
parser.add_argument(
"path",
nargs="?",
default=".",
help="Path to analyze (default: current directory)"
)
parser.add_argument(
"--format",
choices=["json", "text"],
default="json",
help="Output format (default: json)"
)
args = parser.parse_args()
try:
analyzer = ComplexityAnalyzer(args.path)
results = analyzer.analyze()
output = format_output(results, args.format)
print(output)
sys.exit(0)
except FileNotFoundError as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(2)
except Exception as e:
print(f"Error during analysis: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()


@@ -0,0 +1,449 @@
#!/bin/bash
# Purpose: Generate ASCII architecture diagrams from system descriptions
# Version: 1.0.0
# Usage: ./diagram-generator.sh <type> [options]
# Types: layered, microservices, database, network, component, dataflow
# Returns: ASCII diagram
# Exit codes: 0=success, 1=error, 2=invalid input
set -euo pipefail
# Configuration
readonly SCRIPT_NAME="$(basename "$0")"
readonly DIAGRAM_TYPE="${1:-}"
# Box drawing characters
readonly TL="┌" # Top-left
readonly TR="┐" # Top-right
readonly BL="└" # Bottom-left
readonly BR="┘" # Bottom-right
readonly H="─" # Horizontal
readonly V="│" # Vertical
readonly VR="├" # Vertical-right
readonly VL="┤" # Vertical-left
readonly HU="┴" # Horizontal-up
readonly HD="┬" # Horizontal-down
readonly X="┼" # Cross
# Arrow characters
readonly ARROW_DOWN="▼"
readonly ARROW_UP="▲"
readonly ARROW_LEFT="◄"
readonly ARROW_RIGHT="►"
readonly ARROW_BIDIRECT="◄►"
# Color codes
readonly BLUE='\033[0;34m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'
# Usage information
usage() {
cat <<EOF
Usage: $SCRIPT_NAME <type> [options]
Diagram Types:
layered Generate layered architecture diagram
microservices Generate microservices architecture diagram
database Generate database architecture diagram
network Generate network topology diagram
component Generate component interaction diagram
dataflow Generate data flow diagram
Options:
--title TEXT Set diagram title (default: architecture type)
--color Enable colored output
--help Show this help message
Examples:
$SCRIPT_NAME layered --title "Web Application Architecture"
$SCRIPT_NAME microservices --color
$SCRIPT_NAME database --title "E-commerce Database"
Exit Codes:
0 - Success
1 - Error during execution
2 - Invalid input
EOF
}
# Parse options
parse_options() {
DIAGRAM_TITLE=""
USE_COLOR=false
while [[ $# -gt 0 ]]; do
case "$1" in
--title)
DIAGRAM_TITLE="$2"
shift 2
;;
--color)
USE_COLOR=true
shift
;;
--help)
usage
exit 0
;;
*)
shift
;;
esac
done
}
# Draw a box
draw_box() {
local width="$1"
local height="$2"
local text="$3"
local color="${4:-$NC}"
# Top border
echo -n "$color$TL"
printf '%*s' "$((width-2))" '' | tr ' ' "$H"
echo "$TR$NC"
# Calculate padding for centered text
local text_len=${#text}
local padding=$(( (width - text_len - 2) / 2 ))
local padding_right=$(( width - text_len - padding - 2 ))
# Middle rows with text
for ((i=1; i<height-1; i++)); do
if [[ $i -eq $((height/2)) ]] && [[ -n "$text" ]]; then
echo -n "$color$V$NC"
printf '%*s' "$padding" ''
echo -n "$text"
printf '%*s' "$padding_right" ''
echo "$color$V$NC"
else
echo -n "$color$V$NC"
printf '%*s' "$((width-2))" ''
echo "$color$V$NC"
fi
done
# Bottom border
echo -n "$color$BL"
printf '%*s' "$((width-2))" '' | tr ' ' "$H"
echo "$BR$NC"
}
# Generate layered architecture diagram
generate_layered() {
local title="${DIAGRAM_TITLE:-Layered Architecture}"
local width=60
cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌────────────────────────────────────────────────────────────┐
│ Presentation Layer │
│ (UI, Controllers, Views) │
└─────────────────────────┬──────────────────────────────────┘
┌────────────────────────────────────────────────────────────┐
│ Business Layer │
│ (Business Logic, Services, DTOs) │
└─────────────────────────┬──────────────────────────────────┘
┌────────────────────────────────────────────────────────────┐
│ Persistence Layer │
│ (Data Access, Repositories, ORMs) │
└─────────────────────────┬──────────────────────────────────┘
┌────────────────────────────────────────────────────────────┐
│ Database Layer │
│ (PostgreSQL, MongoDB, etc.) │
└────────────────────────────────────────────────────────────┘
Data Flow: Top → Down (Request) | Bottom → Top (Response)
EOF
}
# Generate microservices architecture diagram
generate_microservices() {
local title="${DIAGRAM_TITLE:-Microservices Architecture}"
cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌─────────────┐
│ API │
│ Gateway │
└──────┬──────┘
┌──────────────────┼──────────────────┐
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│ User │ │ Product │ │ Order │
│ Service │ │ Service │ │ Service │
└────┬─────┘ └────┬─────┘ └────┬─────┘
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│ Users │ │ Products │ │ Orders │
│ DB │ │ DB │ │ DB │
└──────────┘ └──────────┘ └──────────┘
┌────────────────────────────────────┐
│ Message Queue (RabbitMQ) │
│ Event Distribution │
└────────────────────────────────────┘
┌────────────────────────────────────┐
│ Service Discovery │
│ (Consul/Eureka) │
└────────────────────────────────────┘
EOF
}
# Generate database architecture diagram
generate_database() {
local title="${DIAGRAM_TITLE:-Database Architecture}"
cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌──────────────────┐
│ Application │
│ Tier │
└────────┬─────────┘
┌─────────────┼─────────────┐
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│ Read │ │ Write │ │ Cache │
│ Pool │ │ Pool │ │ Layer │
└────┬─────┘ └────┬─────┘ └──────────┘
│ │ (Redis)
│ │
▼ ▼
┌──────────────────────────────┐
│ Load Balancer │
│ (Connection Pool) │
└──────────┬───────────────────┘
┌──────────┼──────────┐
│ │ │
▼ ▼ ▼
┌────────┐ ┌────────┐ ┌────────┐
│ Read │ │Primary │ │ Read │
│Replica │ │Database│ │Replica │
│ 1 │ │ Master │ │ 2 │
└────────┘ └───┬────┘ └────────┘
│ Replication
┌──────────┐
│ Backup │
│ Storage │
└──────────┘
EOF
}
# Generate network topology diagram
generate_network() {
local title="${DIAGRAM_TITLE:-Network Topology}"
cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
Internet
┌──────────┴──────────┐
│ │
▼ ▼
┌──────────┐ ┌──────────┐
│ CDN │ │ WAF │
│ (Static) │ │(Security)│
└──────────┘ └────┬─────┘
┌──────┴──────┐
│ │
▼ ▼
┌──────────┐ ┌──────────┐
│ Load │ │ Load │
│ Balancer │ │ Balancer │
│ (AZ1) │ │ (AZ2) │
└────┬─────┘ └────┬─────┘
│ │
┌────────────────────┼─────────────┼────────────────────┐
│ │ │ │
▼ ▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐
│ App │ │ App │ │ App │ │ App │
│ Server 1 │ │ Server 2 │ │ Server 3 │ │ Server 4 │
│ (AZ1) │ │ (AZ1) │ │ (AZ2) │ │ (AZ2) │
└────┬─────┘ └────┬─────┘ └────┬─────┘ └────┬─────┘
│ │ │ │
└───────────────────┼─────────────┼────────────────────┘
│ │
▼ ▼
┌──────────┐ ┌──────────┐
│ Database │ │ Database │
│ Primary │ │ Standby │
│ (AZ1) │ │ (AZ2) │
└──────────┘ └──────────┘
Availability Zones: AZ1 (Primary), AZ2 (Secondary)
EOF
}
# Generate component interaction diagram
generate_component() {
local title="${DIAGRAM_TITLE:-Component Interaction}"
cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌──────────────┐
│ Client │
│ (Browser) │
└───────┬──────┘
│ HTTP/HTTPS
┌──────────────────────────────────────────┐
│ Frontend (React/Vue) │
│ ┌────────────┐ ┌────────────┐ │
│ │ Components │◄────►│ State │ │
│ └────────────┘ │ Management │ │
│ └────────────┘ │
└───────────┬──────────────────────────────┘
│ REST/GraphQL
┌──────────────────────────────────────────┐
│ Backend API (Node.js/Python) │
│ ┌─────────┐ ┌──────────┐ ┌────────┐ │
│ │ Auth │ │ Business │ │ Data │ │
│ │ Service │──│ Logic │──│ Access │ │
│ └─────────┘ └──────────┘ └────┬───┘ │
└────────────────────────────────────┬─────┘
┌────────────────┼────────────────┐
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│PostgreSQL│ │ Redis │ │ S3 │
│ Database │ │ Cache │ │ Storage │
└──────────┘ └──────────┘ └──────────┘
Data Flow:
→ Request (Client to Server)
← Response (Server to Client)
◄► Bidirectional Communication
EOF
}
# Generate data flow diagram
generate_dataflow() {
local title="${DIAGRAM_TITLE:-Data Flow Diagram}"
cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
External Systems Application Data Storage
───────────────── ───────────── ──────────────
┌────────┐ ┌────────┐
│ User │───────(1)────►┌──────────┐ │Primary │
│ Input │ User Request │ API │────(2)───────────►│Database│
└────────┘ │ Gateway │ Query/Write └────┬───┘
└────┬─────┘ │
│ │
│ (3) Process │
▼ │
┌────────┐ ┌──────────┐ │
│External│◄───(4)─────│ Business │◄────(5)─────────────────┘
│ APIs │ Fetch Data│ Logic │ Read Data
└────────┘ └────┬─────┘
│ (6) Cache
┌──────────┐
│ Cache │
│ Layer │
└────┬─────┘
│ (7) Response
┌────────┐ ┌──────────┐
│ User │◄───(8)─────│ Response │
│ Output │ JSON/HTML │Formatter │
└────────┘ └──────────┘
Flow Steps:
(1) User sends request
(2) Gateway queries database
(3) Business logic processes
(4) External API calls
(5) Database read operations
(6) Cache result
(7) Format response
(8) Return to user
EOF
}
# Main execution
main() {
if [[ $# -eq 0 ]] || [[ "$1" == "--help" ]]; then
usage
exit 0
fi
parse_options "$@"
case "$DIAGRAM_TYPE" in
layered)
generate_layered
;;
microservices)
generate_microservices
;;
database)
generate_database
;;
network)
generate_network
;;
component)
generate_component
;;
dataflow)
generate_dataflow
;;
*)
echo "Error: Unknown diagram type: $DIAGRAM_TYPE" >&2
echo "Run '$SCRIPT_NAME --help' for usage information" >&2
exit 2
;;
esac
exit 0
}
# Run main function
main "$@"


@@ -0,0 +1,692 @@
# Architecture Skill
**Comprehensive system architecture design, review, and documentation with ADR creation**
The Architecture skill provides expert-level architectural guidance through four specialized operations: design new architectures, review existing systems, document architectural decisions, and assess architecture health. All operations leverage the **10x-fullstack-engineer** agent, which brings 15+ years of architectural expertise.
---
## Table of Contents
- [Overview](#overview)
- [Operations](#operations)
- [Design](#design---design-new-architecture)
- [Review](#review---review-existing-architecture)
- [ADR](#adr---create-architectural-decision-records)
- [Assess](#assess---architecture-health-assessment)
- [Utility Scripts](#utility-scripts)
- [Usage Examples](#usage-examples)
- [Integration](#integration)
- [Best Practices](#best-practices)
---
## Overview
The Architecture skill operates through a router pattern where the main skill file (`skill.md`) parses arguments and routes to specialized operation files. This modular approach enables:
- **Focused Operations**: Each architectural task has dedicated logic and workflows
- **Agent Integration**: All operations invoke the 10x-fullstack-engineer agent for expert guidance
- **Utility Scripts**: Automated analysis tools for dependencies, complexity, and diagrams
- **Structured Output**: Consistent, comprehensive documentation for all architectural artifacts
**Base Directory**: `.claude/commands/architect/` (or plugin equivalent)
**Agent**: All operations require and invoke the **10x-fullstack-engineer** agent
---
## Operations
### Design - Design New Architecture
Create comprehensive system architecture for new features, projects, or major changes.
**Use When**:
- Starting new projects or features
- Major architectural refactoring
- Greenfield development
- Architecture modernization
**Parameters**:
```
requirements:"description" (required) Feature or system description
scope:"area" (optional) Specific focus area (backend, frontend, full-stack)
constraints:"limitations" (optional) Technical constraints, existing systems, team expertise
scale:"expected-load" (optional) Expected load, user count, data volume, growth
```
**What It Does**:
1. **Requirements Analysis** - Parses requirements, identifies stakeholders, extracts non-functional requirements
2. **Context Gathering** - Examines existing codebase, technology stack, infrastructure, documentation
3. **Architecture Design** - Creates comprehensive design across all layers:
- **Database Layer**: Schema design, query optimization, migration strategy, data consistency
- **Backend Layer**: API design, service architecture, business logic, auth/authz, caching, message queuing
- **Frontend Layer**: Component architecture, state management, routing, data fetching, performance
- **Infrastructure Layer**: Deployment architecture, scaling strategy, CI/CD, monitoring, security, disaster recovery
4. **Trade-off Analysis** - Documents decisions with pros/cons/alternatives for major choices
5. **Deliverables** - Produces architecture diagrams, component breakdown, data flow, technology stack, implementation phases, risk assessment, success metrics
6. **ADR Creation** - Documents significant decisions as ADRs
**Output**: Comprehensive architecture design document with executive summary, detailed layer designs, technology justifications, implementation roadmap, and risk mitigation strategies.
**Example**:
```bash
/10x-fullstack-engineer:architect design requirements:"real-time notification system with WebSockets, push notifications, and email delivery" scale:"10,000 concurrent users" constraints:"must integrate with existing REST API, AWS infrastructure"
```
---
### Review - Review Existing Architecture
Analyze existing architecture for quality, security, performance, scalability, and maintainability issues.
**Use When**:
- Architecture health checks
- Pre-production reviews
- Security audits
- Refactoring planning
- Technical debt assessment
**Parameters**:
```
path:"directory" (optional) Specific directory or component to review (default: entire codebase)
focus:"dimension" (optional) Primary concern area - security, performance, scalability, maintainability, or "all"
depth:"shallow|deep" (optional) Review depth - "shallow" for quick assessment, "deep" for comprehensive analysis (default: "deep")
```
**What It Does**:
1. **Context Discovery** - Analyzes directory structure, technology stack, configuration, documentation, testing infrastructure
2. **Layer-by-Layer Analysis**:
- **Database Layer**: Schema quality, performance, scalability, security
- **Backend Layer**: API design, code organization, business logic, auth/authz, performance, security, maintainability
- **Frontend Layer**: Component architecture, state management, performance, UX, security, build/deployment
- **Infrastructure Layer**: Deployment architecture, scalability, monitoring, CI/CD, security, disaster recovery
3. **Cross-Cutting Concerns**: Security audit (OWASP Top 10), performance analysis, scalability assessment, maintainability review
4. **Issue Identification** - Categorizes issues by severity (Critical/High/Medium/Low) with location, impact, recommendation, effort
5. **Scoring** - Provides 0-10 scores for each dimension with status and trend indicators
6. **Recommendations** - Prioritized roadmap of quick wins, important improvements, strategic initiatives, technical debt
**Output**: Comprehensive architecture review report with health scores, detailed findings by dimension, comparison to industry standards, dependency analysis, and prioritized recommendations roadmap.
**Example**:
```bash
/10x-fullstack-engineer:architect review focus:"security" depth:"deep"
```
---
### ADR - Create Architectural Decision Records
Document significant architectural decisions with context, alternatives, and rationale in standard ADR format.
**Use When**:
- After major design decisions
- Technology selections
- Pattern adoptions
- Architectural pivots
- Documenting trade-offs
**Parameters**:
```
decision:"what-was-decided" (required) Brief summary of the architectural decision
context:"background" (optional) Background, problem being solved, forces at play
alternatives:"other-options" (optional) Other options that were considered
status:"status" (optional) Decision status - "proposed", "accepted", "deprecated", "superseded" (default: "proposed")
```
**What It Does**:
1. **Context Gathering** - Understands decision scope, problem context, decision drivers, researches current state
2. **Alternative Analysis** - Identifies 3-5 alternatives, analyzes pros/cons/trade-offs for each, creates comparison matrix
3. **Decision Rationale** - Documents primary justification, supporting reasons, risk acceptance, decision criteria
4. **Consequences Analysis** - Identifies positive, negative, and neutral consequences, impact assessment (immediate/short-term/long-term), dependencies
5. **ADR Creation** - Generates standard ADR format with proper numbering, saves to `docs/adr/`, updates ADR index
6. **Documentation** - Links related ADRs, provides implementation guidance
**Output**: Complete ADR document saved to `docs/adr/ADR-NNNN-slug.md` with status, date, deciders, context, considered options, decision outcome, consequences, pros/cons analysis, and references.
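A minimal ADR in the Nygard style looks roughly like the sketch below, reusing the PostgreSQL example from this document; the numbering and consequence details are illustrative, and the generated template may add sections such as deciders and an options comparison:
```markdown
# ADR-0007: Use PostgreSQL with JSONB for flexible schema

- Status: accepted
- Date: 2025-10-14

## Context
We need relational integrity plus document-style flexibility for tenant-specific data.

## Decision
Use PostgreSQL with JSONB columns rather than adopting a separate document store (MongoDB and DynamoDB were considered).

## Consequences
- One database to operate, back up, and secure.
- JSONB queries need appropriate (e.g. GIN) indexes to stay fast.
- Part of the schema validation moves into application code.
```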
**ADR Templates Available**:
- Technology Selection
- Architecture Pattern
- Migration Decision
**Example**:
```bash
/10x-fullstack-engineer:architect adr decision:"use PostgreSQL with JSONB for flexible schema" context:"need relational integrity plus document flexibility" alternatives:"MongoDB, DynamoDB, MySQL" status:"accepted"
```
---
### Assess - Architecture Health Assessment
Comprehensive assessment across technical debt, security, performance, scalability, maintainability, and cost dimensions with scoring and trend analysis.
**Use When**:
- Quarterly reviews
- Baseline establishment
- Improvement tracking
- Executive reporting
- Planning refactoring initiatives
**Parameters**:
```
scope:"level" (optional) Assessment scope - "system" (entire architecture), "service", "component" (default: "system")
focus:"dimension" (optional) Specific dimension - "tech-debt", "security", "performance", "scalability", "maintainability", "cost", or "all" (default: "all")
baseline:"reference" (optional) Baseline for comparison - ADR number, date (YYYY-MM-DD), or "previous" for last assessment
```
**What It Does**:
1. **Baseline Discovery** - Finds previous assessments, extracts baseline metrics, tracks issue resolution
2. **Dimensional Assessment** - Scores 0-10 across six dimensions:
- **Technical Debt**: Code quality, outdated dependencies, deprecated patterns, documentation
- **Security**: Authentication, data protection, vulnerability scanning, OWASP Top 10 compliance
- **Performance**: API response times, database queries, frontend load times, resource utilization
- **Scalability**: Horizontal scaling capability, database scaling, auto-scaling, capacity limits
- **Maintainability**: Code organization, test coverage, documentation, deployment frequency
- **Cost Efficiency**: Infrastructure costs, resource utilization, optimization opportunities
3. **Comparative Analysis** - Compares to baseline, tracks resolved/new/persistent issues, analyzes trends, projects future state
4. **Recommendations** - Prioritized roadmap:
- **Immediate Actions** (This Sprint): Critical fixes
- **Quick Wins** (2-4 weeks): High impact, low effort
- **Important Improvements** (1-3 months): Significant value, moderate effort
- **Strategic Initiatives** (3-6 months): Long-term value, high effort
5. **Implementation Roadmap** - Sprint planning, milestone timeline, success metrics, risk assessment
**Output**: Architecture health assessment report with overall health score, dimension-specific scores with trends, detailed findings by category, comparison to baseline, trend analysis, issue tracking, and prioritized recommendations with implementation roadmap.
**Scoring Guide**:
- **9-10 (Excellent)**: Best practices, minimal improvements needed
- **7-8 (Good)**: Solid foundation, minor enhancements possible
- **5-6 (Fair)**: Acceptable but improvements needed
- **3-4 (Poor)**: Significant issues, action required
- **0-2 (Critical)**: Severe problems, urgent action needed
**Example**:
```bash
/10x-fullstack-engineer:architect assess baseline:"previous"
```
---
## Utility Scripts
The Architecture skill includes three utility scripts in the `.scripts/` directory:
### 1. analyze-dependencies.sh
**Purpose**: Analyze project dependencies for security, versioning, and usage
**Usage**:
```bash
./.scripts/analyze-dependencies.sh [path] [json|text]
```
**Features**:
- Detects package manager (npm, pip, pipenv, poetry, bundler, go, cargo, composer)
- Counts direct and development dependencies
- Checks for outdated packages
- Scans for security vulnerabilities (critical/high/medium/low)
- Analyzes dependency tree depth
- Finds unused dependencies
- Detects duplicate dependencies
- Calculates health score (0-10)
- Generates prioritized recommendations
**Output**: JSON or text report with dependency analysis, vulnerability summary, health score, and recommendations
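For reference, the JSON report follows the structure produced by `generate_report` in the script (the numbers below are illustrative):
```json
{
  "package_manager": "npm",
  "dependency_file": "package.json",
  "analysis_date": "2025-10-14T09:30:00Z",
  "dependencies": { "total": 42, "direct": 30, "development": 12, "outdated": 0, "unused": 0, "duplicates": 0 },
  "vulnerabilities": { "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0 },
  "tree_depth": 3,
  "health_score": 10,
  "recommendations": []
}
```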
**Exit Codes**:
- 0: Success
- 1: Error during analysis
- 2: Invalid input
---
### 2. complexity-metrics.py
**Purpose**: Calculate code complexity metrics for architecture assessment
**Usage**:
```bash
python3 ./.scripts/complexity-metrics.py [path] [--format json|text]
```
**Features**:
- Analyzes cyclomatic complexity (uses `radon` library if available, falls back to simplified metrics)
- Calculates maintainability index
- Classifies functions: simple (1-5), moderate (6-10), complex (11-20), very complex (>20)
- Tracks average and maximum complexity
- Analyzes maintainability distribution (high/medium/low)
- Calculates overall health score (0-10)
- Generates recommendations for refactoring
**Supported Languages**: Python, JavaScript, TypeScript, Java, Go, Ruby, PHP, C, C++, C#
**Output**: JSON or text report with complexity metrics, maintainability scores, health score, and refactoring recommendations
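The JSON output mirrors the analyzer's internal results structure; an abridged, illustrative example:
```json
{
  "analysis_date": "2025-10-14T09:30:00Z",
  "root_path": ".",
  "files_analyzed": 120,
  "total_lines": 18500,
  "total_functions": 940,
  "complexity": {
    "average": 6.4,
    "max": 27,
    "distribution": { "simple": 610, "moderate": 240, "complex": 80, "very_complex": 10 }
  },
  "maintainability": {
    "average": 72.5,
    "distribution": { "high": 80, "medium": 30, "low": 10 }
  },
  "health_score": 9.0,
  "recommendations": [
    {
      "priority": "high",
      "action": "Refactor 10 very complex functions (complexity > 20)",
      "impact": "Reduces bug risk and maintenance burden"
    }
  ]
}
```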
**Dependencies**: Optional `radon` library (install with `pip install radon`) for enhanced metrics
**Exit Codes**:
- 0: Success
- 1: Error during analysis
- 2: Invalid input
---
### 3. diagram-generator.sh
**Purpose**: Generate ASCII architecture diagrams from system descriptions
**Usage**:
```bash
./.scripts/diagram-generator.sh <type> [--title "Title"] [--color]
```
**Diagram Types**:
- `layered`: Layered architecture diagram (Presentation → Business → Persistence → Database)
- `microservices`: Microservices architecture with API gateway, services, databases, message queue
- `database`: Database architecture with read/write pools, replicas, caching
- `network`: Network topology with CDN, WAF, load balancers, availability zones
- `component`: Component interaction diagram showing client → frontend → backend → data storage
- `dataflow`: Data flow diagram showing step-by-step data movement
**Features**:
- Unicode box drawing characters for clean diagrams
- Optional colored output
- Customizable titles
- Pre-built templates for common architecture patterns
**Output**: ASCII diagram suitable for markdown documentation or terminal display
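A common pattern is to append a generated diagram directly to a documentation file (the target path here is just an example):
```bash
./.scripts/diagram-generator.sh layered --title "Checkout Service" >> docs/architecture.md
```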
**Exit Codes**:
- 0: Success
- 1: Error during execution
- 2: Invalid input
---
## Usage Examples
### Complete Architecture Design Workflow
```bash
# 1. Design architecture for new feature
/10x-fullstack-engineer:architect design requirements:"multi-tenant SaaS platform with real-time collaboration, file storage, and analytics" scale:"enterprise-level, 100k+ users" constraints:"TypeScript, Node.js, PostgreSQL, horizontal scaling"
# 2. Document key architectural decisions
/10x-fullstack-engineer:architect adr decision:"use PostgreSQL with row-level security for multi-tenancy" alternatives:"separate databases per tenant, schema-based isolation" status:"accepted"
/10x-fullstack-engineer:architect adr decision:"implement CQRS pattern for read-heavy analytics" alternatives:"standard CRUD, event sourcing, materialized views" status:"accepted"
# 3. Assess baseline architecture health
/10x-fullstack-engineer:architect assess
# 4. Review specific component security
/10x-fullstack-engineer:architect review path:"src/services/auth" focus:"security" depth:"deep"
```
### Quarterly Architecture Review
```bash
# Run comprehensive assessment against last quarter
/10x-fullstack-engineer:architect assess baseline:"2024-01-15"
# Focus on areas that degraded
/10x-fullstack-engineer:architect review focus:"performance" depth:"deep"
# Document improvement initiatives
/10x-fullstack-engineer:architect adr decision:"implement Redis caching layer to improve performance" context:"assessment showed performance degradation, response times increased 40%" status:"accepted"
```
### Pre-Production Architecture Validation
```bash
# Comprehensive review before launch
/10x-fullstack-engineer:architect review focus:"all" depth:"deep"
# Security audit
/10x-fullstack-engineer:architect review focus:"security" depth:"deep"
# Performance validation
/10x-fullstack-engineer:architect review focus:"performance" depth:"deep"
# Document production readiness decisions
/10x-fullstack-engineer:architect adr decision:"deploy with blue-green strategy for zero-downtime releases" alternatives:"rolling deployment, canary releases" status:"accepted"
```
### Technical Debt Assessment
```bash
# Assess technical debt
/10x-fullstack-engineer:architect assess focus:"tech-debt"
# Review code quality
/10x-fullstack-engineer:architect review focus:"maintainability" depth:"deep"
# Run complexity analysis
python3 .scripts/complexity-metrics.py . --format json
# Analyze dependencies
./.scripts/analyze-dependencies.sh . json
```
### Architecture Documentation Sprint
```bash
# Document existing system design
/10x-fullstack-engineer:architect design requirements:"document existing order processing system" scope:"backend" constraints:"Node.js, PostgreSQL, AWS, existing production system"
# Create ADRs for historical decisions
/10x-fullstack-engineer:architect adr decision:"chose microservices architecture for order processing" context:"monolith scalability limitations" alternatives:"modular monolith, serverless" status:"accepted"
# Generate architecture diagrams
./.scripts/diagram-generator.sh microservices --title "Order Processing Architecture"
./.scripts/diagram-generator.sh database --title "Order Database Architecture"
# Baseline current health
/10x-fullstack-engineer:architect assess
```
---
## Integration
### With Other Skills/Commands
The Architecture skill integrates seamlessly with other development workflows:
**Design Phase**:
- `/10x-fullstack-engineer:architect design` → Design system architecture
- Document decisions with `/architect adr`
- Generate diagrams with `diagram-generator.sh`
**Development Phase**:
- Run `/architect review` on new components
- Check complexity with `complexity-metrics.py`
- Validate dependencies with `analyze-dependencies.sh`
**Testing Phase**:
- `/10x-fullstack-engineer:architect review focus:"performance"` for performance validation
- `/10x-fullstack-engineer:architect assess` for quality gates
**Deployment Phase**:
- `/10x-fullstack-engineer:architect review focus:"security"` before production
- Document deployment decisions with `/architect adr`
**Maintenance Phase**:
- Quarterly `/architect assess` against baseline
- `/10x-fullstack-engineer:architect review focus:"maintainability"` for refactoring planning
- Update ADRs when superseding decisions
### With Agent System
All operations invoke the **10x-fullstack-engineer** agent, which provides:
- 15+ years of architectural expertise
- Pattern recognition and best practices
- Trade-off analysis and decision guidance
- Production system experience
- Technology stack recommendations
- Scalability and performance insights
- Security and reliability patterns
The agent receives comprehensive context including operation parameters, codebase information, existing architecture, constraints, and scale requirements.
### Continuous Architecture Governance
Integrate architecture operations into your development lifecycle:
**Sprint Planning**:
```bash
# Review technical debt before planning
/10x-fullstack-engineer:architect assess focus:"tech-debt"
# Design new features architecturally
/10x-fullstack-engineer:architect design requirements:"sprint feature description"
```
**Code Review**:
```bash
# Review new components
/10x-fullstack-engineer:architect review path:"src/new-component" depth:"shallow"
# Check complexity
python3 .scripts/complexity-metrics.py src/new-component
```
**Release Process**:
```bash
# Pre-release validation
/10x-fullstack-engineer:architect review focus:"security" depth:"deep"
/10x-fullstack-engineer:architect review focus:"performance" depth:"shallow"
# Document release decisions
/10x-fullstack-engineer:architect adr decision:"release decision"
```
**Quarterly Reviews**:
```bash
# Comprehensive health assessment
/10x-fullstack-engineer:architect assess baseline:"previous"
# Trend analysis and planning
/10x-fullstack-engineer:architect review focus:"all" depth:"deep"
```
---
## Best Practices
### When to Use Each Operation
**Use Design When**:
- Starting new projects or major features
- Need comprehensive architecture documentation
- Evaluating technology stack options
- Planning multi-phase implementation
- Establishing architectural patterns
**Use Review When**:
- Conducting architecture health checks
- Pre-production validation
- Security audits
- Identifying refactoring opportunities
- Onboarding new team members to architecture
**Use ADR When**:
- Making significant architectural decisions
- Choosing technologies or patterns
- Resolving architectural trade-offs
- Documenting rationale for future reference
- Creating decision audit trail
**Use Assess When**:
- Quarterly architecture reviews
- Establishing baseline metrics
- Tracking improvement progress
- Executive reporting on tech health
- Planning major refactoring initiatives
### Architecture Documentation Workflow
1. **Design First**: Start with `/architect design` for new systems
2. **Document Decisions**: Create ADRs for significant choices
3. **Establish Baseline**: Run initial `/architect assess`
4. **Regular Reviews**: Schedule quarterly `/architect assess baseline:"previous"`
5. **Component Reviews**: Review new components with `/architect review`
6. **Update ADRs**: Supersede decisions when architecture evolves
7. **Track Trends**: Monitor health scores over time
### Optimization Tips
**For Design Operations**:
- Provide detailed requirements and constraints upfront
- Specify scale expectations explicitly
- Leverage existing ADRs for consistency
- Use utility scripts for current state analysis
- Review generated architecture with team before implementation
**For Review Operations**:
- Start with shallow reviews for quick feedback
- Use focused reviews (security, performance) for specific concerns
- Run deep reviews before major releases
- Combine with utility scripts for comprehensive analysis
- Address critical issues before continuing to lower priority
**For ADR Creation**:
- Create ADRs immediately after decisions, not retrospectively
- Include alternatives considered, not just chosen option
- Document trade-offs explicitly
- Link related ADRs for context
- Update status as decisions evolve
**For Assessment Operations**:
- Establish baseline early in project lifecycle
- Run assessments consistently (e.g., quarterly)
- Compare to baselines to track trends
- Focus on dimensions with declining scores
- Use assessment output for sprint planning
### Common Workflows
**New Project Setup**:
```bash
/10x-fullstack-engineer:architect design requirements:"project description" constraints:"tech stack"
/10x-fullstack-engineer:architect adr decision:"technology choices"
./.scripts/diagram-generator.sh layered --title "Project Architecture"
/10x-fullstack-engineer:architect assess # Establish baseline
```
**Pre-Production Checklist**:
```bash
/10x-fullstack-engineer:architect review focus:"security" depth:"deep"
/10x-fullstack-engineer:architect review focus:"performance" depth:"deep"
/10x-fullstack-engineer:architect assess
./.scripts/analyze-dependencies.sh . json
```
**Technical Debt Paydown**:
```bash
/10x-fullstack-engineer:architect assess focus:"tech-debt"
python3 .scripts/complexity-metrics.py . --format json
/10x-fullstack-engineer:architect review focus:"maintainability" depth:"deep"
# Address top recommendations
/10x-fullstack-engineer:architect assess baseline:"previous" # Verify improvement
```
**Architecture Modernization**:
```bash
/10x-fullstack-engineer:architect review focus:"all" depth:"deep" # Understand current state
/10x-fullstack-engineer:architect design requirements:"modernization goals" constraints:"existing system"
/10x-fullstack-engineer:architect adr decision:"modernization approach"
# Implement incrementally
/10x-fullstack-engineer:architect assess baseline:"pre-modernization" # Track progress
```
---
## File Structure
```
architect/
├── skill.md # Router (invokable via /architect)
├── design.md # Design operation (not directly invokable)
├── review.md # Review operation (not directly invokable)
├── adr.md # ADR operation (not directly invokable)
├── assess.md # Assess operation (not directly invokable)
├── .scripts/
│ ├── analyze-dependencies.sh # Dependency analysis utility
│ ├── complexity-metrics.py # Code complexity analysis utility
│ └── diagram-generator.sh # ASCII diagram generation utility
└── README.md # This file
```
**Note**: Only `skill.md` is directly invokable via `/10x-fullstack-engineer:architect`. Sub-operations are instruction modules read and executed by the router.
---
## Error Handling
All operations include comprehensive error handling:
- **Unknown Operation**: Lists available operations with examples
- **Missing Required Parameters**: Provides parameter format guidance
- **Invalid Parameters**: Suggests correct parameter values
- **File/Directory Not Found**: Lists available paths or creates directories as needed
- **Insufficient Context**: Documents assumptions and requests clarification
Operations gracefully handle missing metrics, incomplete information, and edge cases by providing clear guidance to the user.
---
## Output Locations
**Architecture Designs**: Generated as markdown in operation response, can be saved manually or integrated with documentation system
**ADRs**: Automatically saved to `docs/adr/ADR-NNNN-slug.md` with index updates
**Assessments**: Generated as markdown in operation response, recommended to save to `docs/assessments/architecture-assessment-YYYY-MM-DD.md`
**Reviews**: Generated as markdown in operation response, can be saved for historical reference
**Utility Script Outputs**: JSON or text format, typically piped or redirected as needed
---
## Getting Started
1. **Initial Architecture Design**:
```bash
/architect design requirements:"your project description" scale:"expected scale" constraints:"technical constraints"
```
2. **Document Key Decisions**:
```bash
/architect adr decision:"decision summary" alternatives:"other options" status:"accepted"
```
3. **Establish Baseline**:
```bash
/architect assess
```
4. **Regular Health Checks**:
```bash
/architect assess baseline:"previous"
/architect review focus:"security"
```
5. **Use Utility Scripts**:
```bash
./.scripts/analyze-dependencies.sh . json
python3 .scripts/complexity-metrics.py . --format json
./.scripts/diagram-generator.sh microservices --title "System Architecture"
```
---
## Additional Resources
- **ADR Format**: Based on [Michael Nygard's ADR template](https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions)
- **OWASP Top 10**: [https://owasp.org/www-project-top-ten/](https://owasp.org/www-project-top-ten/)
- **Cyclomatic Complexity**: [https://en.wikipedia.org/wiki/Cyclomatic_complexity](https://en.wikipedia.org/wiki/Cyclomatic_complexity)
- **Architecture Assessment**: Based on industry best practices for architecture health metrics
---
## Support and Contribution
This skill is part of the **10x-fullstack-engineer** plugin. For issues, improvements, or questions:
1. Review the operation documentation in individual `.md` files
2. Examine utility script comments for detailed usage
3. Refer to the 10x-fullstack-engineer agent capabilities
4. Check ADR templates in `adr.md` for decision documentation patterns
---
**Version**: 1.0.0
**Last Updated**: 2025-10-14
**Agent Integration**: 10x-fullstack-engineer (required)

commands/architect/adr.md (new file, 701 lines)

@@ -0,0 +1,701 @@
# Architectural Decision Record (ADR) Operation
You are executing the **adr** operation using the 10x-fullstack-engineer agent to document significant architectural decisions in standard ADR format.
## Parameters
**Received**: `$ARGUMENTS` (after removing 'adr' operation name)
Expected format: `decision:"what-was-decided" [context:"background"] [alternatives:"other-options"] [status:"proposed|accepted|deprecated|superseded"]`
Parse the arguments to extract:
- **decision** (required): Brief summary of the architectural decision made
- **context** (optional): Background, problem being solved, forces at play
- **alternatives** (optional): Other options that were considered
- **status** (optional): Decision status - "proposed", "accepted", "deprecated", "superseded" (default: "proposed")
## Workflow
### Phase 1: Context Gathering
Collect comprehensive context about the decision:
1. **Understand the Decision**:
- What is being decided?
- What components or systems are affected?
- What is the scope of this decision?
- Who are the stakeholders?
2. **Gather Problem Context**:
- What problem are we trying to solve?
- What are the pain points with current approach?
- What requirements drive this decision?
- What constraints exist (technical, organizational, budget, timeline)?
3. **Identify Decision Drivers**:
- **Technical Drivers**: Performance, scalability, maintainability, security
- **Business Drivers**: Time-to-market, cost, competitive advantage
- **Organizational Drivers**: Team skills, support, operational capability
- **Regulatory Drivers**: Compliance requirements, industry standards
4. **Research Current State**:
- Examine existing architecture
- Review related ADRs in `docs/adr/`
- Check current technology stack
- Identify dependencies and integrations
Use available tools:
- `Glob` to find existing ADRs and related documentation
- `Read` to examine existing ADRs and documentation
- `Grep` to search for relevant code patterns and usage
- `Bash` to check directory structure and file counts
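A minimal shell sketch of the checks this phase typically performs (paths follow this plugin's conventions; the same can be done with the Glob/Grep/Read tools):
```bash
# Count existing ADRs and locate documentation related to the decision topic
ls docs/adr/ADR-*.md 2>/dev/null | wc -l
grep -ril "postgresql" docs/adr/ 2>/dev/null   # ADRs mentioning an example keyword
find . -maxdepth 3 -name "*.md" -path "*docs*" 2>/dev/null | head -20
```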
### Phase 2: Alternative Analysis
Document all alternatives considered:
1. **Identify Alternatives**:
- List all viable options (aim for 3-5 alternatives)
- Include status quo as an alternative
- Research industry standard approaches
- Consider hybrid approaches
2. **Analyze Each Alternative**:
For each alternative, document:
**Description**: What is this approach?
**Pros** (benefits):
- Performance characteristics
- Scalability implications
- Security benefits
- Developer experience improvements
- Cost advantages
- Time-to-implementation benefits
**Cons** (drawbacks):
- Performance concerns
- Scalability limitations
- Security risks
- Complexity additions
- Cost implications
- Learning curve
- Operational overhead
**Trade-offs**:
- What do we gain vs what do we lose?
- Short-term vs long-term implications
- Technical debt considerations
**Examples** (if applicable):
- Companies/projects using this approach
- Success stories and failure stories
- Lessons learned from others
3. **Compare Alternatives**:
Create comparison matrix:
| Criteria | Alternative 1 | Alternative 2 | Alternative 3 |
|----------|---------------|---------------|---------------|
| Performance | High | Medium | Low |
| Complexity | Low | Medium | High |
| Cost | $$ | $$$ | $ |
| Time to implement | 2 weeks | 4 weeks | 1 week |
| Scalability | Excellent | Good | Limited |
| Team familiarity | High | Medium | Low |
| Maintenance | Easy | Moderate | Difficult |
### Phase 3: Decision Rationale
Document why this decision was made:
1. **Primary Justification**:
- Main reason for choosing this approach
- How it solves the problem
- Why it's better than alternatives
2. **Supporting Reasons**:
- Secondary benefits
- Alignment with architectural principles
- Consistency with existing decisions
- Team capability and expertise
3. **Risk Acceptance**:
- Known risks being accepted
- Why these risks are acceptable
- Mitigation strategies for risks
4. **Decision Criteria**:
- Weighted criteria used for decision
- How each alternative scored
- Stakeholder input and consensus
### Phase 4: Consequences Analysis
Document the implications of this decision:
1. **Positive Consequences**:
- Performance improvements
- Reduced complexity
- Better developer experience
- Cost savings
- Improved scalability
- Enhanced security
2. **Negative Consequences**:
- Technical debt introduced
- Migration effort required
- Learning curve for team
- Increased operational complexity
- Cost increases
- Vendor lock-in
3. **Neutral Consequences**:
- Changes to development workflow
- Tool or process changes
- Documentation needs
- Training requirements
4. **Impact Assessment**:
- **Immediate Impact** (next sprint): [Changes needed right away]
- **Short-term Impact** (1-3 months): [Effects in near future]
- **Long-term Impact** (6+ months): [Strategic implications]
5. **Dependencies**:
- Other decisions that depend on this one
- Decisions this depends on
- Systems or components affected
### Phase 5: ADR Structure Creation
Create the ADR document following standard format:
**ADR Numbering**:
- Find existing ADRs in `docs/adr/`
- Determine next sequential number
- Format: `ADR-NNNN-slug.md` (e.g., `ADR-0042-use-postgresql-for-primary-database.md`)
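A sketch of how the next number and slug could be derived in shell (the decision title used here is only an example):
```bash
# Find the highest existing ADR number, increment it, and build a slug
last=$(ls docs/adr/ADR-*.md 2>/dev/null | sed 's/.*ADR-\([0-9]\{4\}\).*/\1/' | sort -n | tail -1)
next=$(printf "%04d" $(( 10#${last:-0} + 1 )))
slug=$(echo "Use PostgreSQL for primary database" \
  | tr '[:upper:]' '[:lower:]' | tr -cs 'a-z0-9' '-' | sed 's/^-//;s/-$//')
echo "docs/adr/ADR-${next}-${slug}.md"
```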
**Standard ADR Format**:
```markdown
# ADR-[NUMBER]: [Decision Title]
**Status**: [Proposed / Accepted / Deprecated / Superseded]
**Date**: [YYYY-MM-DD]
**Deciders**: [List of people involved in the decision]
**Technical Story**: [Ticket/issue URL if applicable]
## Context and Problem Statement
[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in the form of a question.]
[Explain the forces at play: technical, business, political, social, project local, etc.]
### Decision Drivers
* [driver 1, e.g., a force, facing concern, …]
* [driver 2, e.g., a force, facing concern, …]
* [driver 3, e.g., a force, facing concern, …]
## Considered Options
* [option 1]
* [option 2]
* [option 3]
## Decision Outcome
Chosen option: "[option 1]", because [justification, e.g., it is the only option that meets a knock-out decision driver | it best resolves a key force | … | it comes out best overall (see below)].
### Consequences
* Good, because [positive consequence 1]
* Good, because [positive consequence 2]
* Bad, because [negative consequence 1]
* Bad, because [negative consequence 2]
* Neutral, because [neutral consequence]
### Confirmation
[How/when will we know if this decision was correct? What metrics or outcomes will we use to evaluate?]
## Pros and Cons of the Options
### [option 1]
[Brief description of option 1]
* Good, because [argument a]
* Good, because [argument b]
* Bad, because [argument c]
* Bad, because [argument d]
### [option 2]
[Brief description of option 2]
* Good, because [argument a]
* Good, because [argument b]
* Bad, because [argument c]
* Bad, because [argument d]
### [option 3]
[Brief description of option 3]
* Good, because [argument a]
* Good, because [argument b]
* Bad, because [argument c]
* Bad, because [argument d]
## More Information
[Any additional information, references, links, or context that might be helpful.]
### Related Decisions
* [ADR-XXXX]: [Related decision]
* [ADR-YYYY]: [Related decision]
### References
* [Link to documentation]
* [Link to research]
* [Link to examples]
```
### Phase 6: Documentation and Storage
Save the ADR document:
1. **Ensure Directory Exists**:
- Check if `docs/adr/` directory exists
- Create if it doesn't exist
- Maintain README.md in `docs/adr/` with ADR index
2. **Generate File Name**:
- Format: `ADR-NNNN-slug.md`
- Number: Next sequential number (4 digits with leading zeros)
- Slug: Lowercase, hyphen-separated from decision title
- Example: `ADR-0015-migrate-to-microservices.md`
3. **Write ADR File**:
- Save to `docs/adr/ADR-NNNN-slug.md`
- Ensure proper formatting
- Include all required sections
4. **Update ADR Index**:
- Update `docs/adr/README.md` with new ADR entry
- Include: number, title, status, date
- Maintain chronological order
5. **Link Related ADRs**:
- Update related ADRs to reference this new ADR
- Create bidirectional links
- Document superseded relationships
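A minimal sketch of the storage steps above (the file name and index entry are illustrative):
```bash
# Ensure the ADR directory and index exist, then append the new index entry
mkdir -p docs/adr
[[ -f docs/adr/README.md ]] || printf '# Architecture Decision Records\n\n' > docs/adr/README.md
adr_file="ADR-0043-use-postgresql-for-primary-database.md"   # hypothetical file name
# ...write the full ADR content to docs/adr/$adr_file here...
echo "- [ADR-0043](${adr_file}) | Use PostgreSQL for primary database | Proposed | $(date +%F)" >> docs/adr/README.md
```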
## Output Format
Provide the complete ADR document and confirmation of storage:
```markdown
# ADR Created Successfully
**File**: `docs/adr/ADR-[NUMBER]-[slug].md`
**Status**: [Status]
**Date**: [Date]
---
[Full ADR content in standard format]
---
## ADR Saved
The architectural decision record has been saved to:
`docs/adr/ADR-[NUMBER]-[slug].md`
The ADR index has been updated in:
`docs/adr/README.md`
### Next Steps
1. **Review**: Share this ADR with stakeholders for review
2. **Update Status**: Change status from "Proposed" to "Accepted" once approved
3. **Implementation**: Begin implementing based on this decision
4. **Monitor**: Track the consequences and validate assumptions
5. **Update**: Revise if circumstances change or new information emerges
### Related ADRs
[List any related ADRs that should be reviewed together]
### Communication
Share this ADR with:
- Development team
- Architecture review board
- Product management
- Operations team
- [Other relevant stakeholders]
```
## Agent Invocation
This operation MUST invoke the **10x-fullstack-engineer** agent for expert architectural decision analysis.
**Agent context to provide**:
- Decision to be documented
- Gathered context and constraints
- Alternative approaches identified
- Current architecture state
- Related ADRs and decisions
**Agent responsibilities**:
- Apply 15+ years of architectural decision-making experience
- Identify additional alternatives to consider
- Analyze trade-offs comprehensively
- Provide industry best practices and examples
- Validate decision rationale
- Highlight potential blind spots
- Suggest consequences that may not be obvious
- Ensure decision is well-documented
**Agent invocation approach**:
Present the decision context and explicitly request:
"Using your 15+ years of full-stack architecture experience, help document this architectural decision. Analyze the alternatives, validate the rationale, identify consequences (both obvious and subtle), and ensure this ADR captures the full context for future reference. Draw on your experience with similar decisions in production systems."
## ADR Templates
### Template 1: Technology Selection
```markdown
# ADR-[NUMBER]: Choose [Technology] for [Purpose]
**Status**: Proposed
**Date**: [Date]
**Deciders**: [Names]
## Context and Problem Statement
We need to select [technology category] for [specific use case]. Current approach [describe current state or lack thereof]. This decision affects [scope of impact].
### Decision Drivers
* Performance requirements: [specifics]
* Scalability needs: [specifics]
* Team expertise: [current skills]
* Budget constraints: [limitations]
* Time to implement: [timeline]
## Considered Options
* [Technology 1]
* [Technology 2]
* [Technology 3]
* Status quo (if applicable)
## Decision Outcome
Chosen option: "[Technology]", because it best meets our requirements for [primary reasons].
### Consequences
* Good, because [benefit 1]
* Good, because [benefit 2]
* Bad, because [drawback 1]
* Bad, because [drawback 2]
### Confirmation
We will validate this decision by [metrics/outcomes] after [timeframe].
## Pros and Cons of the Options
### [Technology 1]
[Description]
* Good, because [performance/scalability/cost benefit]
* Good, because [team knows it / easy to learn]
* Bad, because [complexity / cost / limitation]
* Bad, because [vendor lock-in / compatibility issue]
[Repeat for each option]
## More Information
### References
* [Official documentation]
* [Case studies]
* [Comparison articles]
### Related Decisions
* [ADR-XXXX]: [Related decision]
```
### Template 2: Architecture Pattern
```markdown
# ADR-[NUMBER]: Adopt [Pattern] for [Component/System]
**Status**: Proposed
**Date**: [Date]
**Deciders**: [Names]
## Context and Problem Statement
We need to address [architectural challenge] in [system/component]. Current architecture [describe limitations]. This pattern will affect [scope].
### Decision Drivers
* Scalability requirements
* Maintainability concerns
* Team experience
* Performance needs
* Development velocity
## Considered Options
* [Pattern 1]: [Brief description]
* [Pattern 2]: [Brief description]
* [Pattern 3]: [Brief description]
## Decision Outcome
Chosen option: "[Pattern]", because [architectural benefits and trade-off justification].
### Consequences
* Good, because [improved architecture quality]
* Good, because [better scalability/maintainability]
* Bad, because [increased complexity in area]
* Bad, because [migration effort required]
## Implementation Notes
* Phase 1: [Initial steps]
* Phase 2: [Migration approach]
* Phase 3: [Completion]
## Pros and Cons of the Options
[Detailed analysis of each pattern option]
## More Information
### Examples
* [Company/project using this pattern]
* [Success story and lessons learned]
### Related Decisions
* [ADR-XXXX]: [Related architectural decision]
```
### Template 3: Migration Decision
```markdown
# ADR-[NUMBER]: Migrate from [Old] to [New]
**Status**: Proposed
**Date**: [Date]
**Deciders**: [Names]
## Context and Problem Statement
Current [system/technology] has [limitations/problems]. We need to migrate to [new approach] to address [specific issues].
### Decision Drivers
* Current pain points: [list]
* Future requirements: [list]
* Technical debt: [assessment]
* Cost considerations
* Risk tolerance
## Considered Options
* Migrate to [Option 1]
* Migrate to [Option 2]
* Stay with current approach (improved)
* Hybrid approach
## Decision Outcome
Chosen option: "Migrate to [New]", because [clear justification for migration].
### Migration Strategy
* Approach: [Big bang / Phased / Strangler pattern]
* Timeline: [Duration]
* Risk mitigation: [Strategies]
* Rollback plan: [If things go wrong]
### Consequences
* Good, because [benefits of new approach]
* Good, because [problems solved]
* Bad, because [migration cost and effort]
* Bad, because [temporary complexity]
* Neutral, because [team retraining needed]
### Confirmation
Migration success will be measured by:
* [Metric 1]: [Target]
* [Metric 2]: [Target]
* [Metric 3]: [Target]
## Pros and Cons of the Options
[Detailed analysis including migration effort and risk for each option]
## More Information
### Migration Plan
[Link to detailed migration plan]
### Related Decisions
* [ADR-XXXX]: [Original decision being superseded]
```
## Error Handling
### Missing Decision
If no decision is provided:
```
Error: No decision specified.
Please provide the architectural decision to document.
Format: /architect adr decision:"what-was-decided" [context:"background"] [alternatives:"options"]
Examples:
/architect adr decision:"use PostgreSQL for primary database" alternatives:"MySQL, MongoDB"
/architect adr decision:"adopt microservices architecture" context:"scaling challenges with monolith"
/architect adr decision:"implement CQRS pattern for read-heavy workflows"
```
### Invalid Status
If status is not a valid ADR status:
```
Error: Invalid status: [status]
Valid ADR statuses:
- proposed Decision is proposed and under review
- accepted Decision has been approved and is in effect
- deprecated Decision is no longer recommended but still in use
- superseded Decision has been replaced by a newer ADR
Example: /architect adr decision:"use Redis for caching" status:"accepted"
```
### Directory Creation Failed
If cannot create ADR directory:
```
Error: Unable to create ADR directory at docs/adr/
This may be due to:
- Insufficient permissions
- Read-only filesystem
- Invalid path
Please ensure the directory can be created or specify an alternate location.
```
### File Write Failed
If cannot write ADR file:
```
Error: Unable to write ADR file
Attempted to write to: docs/adr/ADR-[NUMBER]-[slug].md
This may be due to:
- Insufficient permissions
- Disk space issues
- File already exists
Please check permissions and try again.
```
## Examples
**Example 1 - Database Technology Selection**:
```
/architect adr decision:"use PostgreSQL with JSONB for flexible schema requirements" context:"need relational integrity plus document flexibility for user-defined fields" alternatives:"MongoDB for pure document model, MySQL with JSON columns, DynamoDB for serverless" status:"accepted"
```
**Example 2 - Architecture Pattern**:
```
/architect adr decision:"migrate from monolith to microservices architecture" context:"scaling bottlenecks and deployment coupling slowing feature delivery" alternatives:"modular monolith with clear boundaries, service-oriented architecture, serverless functions" status:"proposed"
```
**Example 3 - Frontend Framework**:
```
/architect adr decision:"adopt React with TypeScript for frontend" context:"rebuilding legacy jQuery application" alternatives:"Vue.js, Angular, Svelte, continue with jQuery" status:"accepted"
```
**Example 4 - Authentication Strategy**:
```
/architect adr decision:"implement JWT-based authentication with refresh tokens" alternatives:"session-based auth, OAuth 2.0 only, SAML for enterprise SSO" status:"accepted"
```
**Example 5 - Caching Strategy**:
```
/architect adr decision:"implement multi-tier caching with Redis and CDN" context:"database load is causing performance issues under traffic spikes" alternatives:"database query caching only, in-memory application cache, no caching" status:"accepted"
```
**Example 6 - Deployment Strategy**:
```
/architect adr decision:"use blue-green deployment for zero-downtime releases" alternatives:"rolling deployment, canary releases, recreate deployment" status:"proposed"
```
**Example 7 - Superseding Previous Decision**:
```
/architect adr decision:"supersede ADR-0023: migrate from REST to GraphQL for public API" context:"GraphQL complexity and client confusion outweigh benefits" alternatives:"improve REST API versioning, hybrid approach, maintain status quo" status:"accepted"
```
**Example 8 - Minimal ADR (will prompt for more detail)**:
```
/architect adr decision:"implement event sourcing for audit trail"
```
This will trigger the agent to ask clarifying questions about context, alternatives, and rationale.
## Best Practices
### When to Create an ADR
Create an ADR for decisions that:
- Affect system architecture or structure
- Have significant long-term consequences
- Involve trade-offs between multiple approaches
- Impact multiple teams or components
- Require significant effort to reverse
- Set precedent for future decisions
### When NOT to Create an ADR
Don't create ADRs for:
- Minor implementation details
- Obvious technology choices with no alternatives
- Temporary workarounds
- Decisions easily reversed
- Team process decisions (use different document)
### ADR Writing Tips
1. **Be Specific**: Don't just say "improve performance" - specify metrics and targets
2. **Include Context**: Future readers need to understand why this mattered
3. **Document Alternatives**: Show you considered options, not just the chosen one
4. **Acknowledge Trade-offs**: No decision is perfect - document the downsides
5. **Keep It Concise**: Aim for 2-3 pages; link to external docs for details
6. **Update Status**: Keep status current as decisions evolve
7. **Link Related ADRs**: Show how decisions build on each other
8. **Use Examples**: Concrete examples clarify abstract decisions
9. **Define Success**: How will you know if this was the right decision?
10. **Review Regularly**: Revisit ADRs periodically to validate or supersede

1059
commands/architect/assess.md Normal file
File diff suppressed because it is too large
1107
commands/architect/design.md Normal file
File diff suppressed because it is too large
commands/architect/review.md Normal file
@@ -0,0 +1,996 @@
# Architecture Review Operation
You are executing the **review** operation using the 10x-fullstack-engineer agent to assess existing architecture quality, security, performance, and maintainability.
## Parameters
**Received**: `$ARGUMENTS` (after removing 'review' operation name)
Expected format: `[path:"directory"] [focus:"security|performance|scalability|maintainability"] [depth:"shallow|deep"]`
Parse the arguments to extract:
- **path** (optional): Specific directory or component to review (defaults to entire codebase)
- **focus** (optional): Primary concern area - security, performance, scalability, maintainability, or "all"
- **depth** (optional): Review depth - "shallow" for quick assessment, "deep" for comprehensive analysis (default: "deep")
## Workflow
### Phase 1: Context Discovery
Discover and understand the existing architecture:
1. **Directory Structure Analysis**:
- Examine project organization
- Identify major components and layers
- Detect framework and patterns used
- Map file relationships and dependencies
2. **Technology Stack Identification**:
- Frontend: Framework, state management, build tools
- Backend: Language, framework, libraries
- Database: Type, ORM/query builder, migrations
- Infrastructure: Deployment, orchestration, monitoring
- Dependencies: Third-party packages and versions
3. **Configuration Review**:
- Environment configuration
- Build and deployment configurations
- Database connection and pooling
- Caching configuration
- Logging and monitoring setup
4. **Documentation Assessment**:
- README quality and completeness
- API documentation
- Architecture diagrams if available
- ADRs in `docs/adr/`
- Code comments and inline documentation
5. **Testing Infrastructure**:
- Unit test coverage
- Integration test presence
- E2E test setup
- Testing frameworks and patterns
Use available tools:
- `Glob` to find relevant files by patterns
- `Read` to examine key architectural files
- `Grep` to search for patterns, anti-patterns, and security issues
- `Bash` to run analysis scripts (e.g., `analyze-dependencies.sh`, `complexity-metrics.py`)
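For example, the discovery pass might shell out to the bundled utility scripts and a few quick listings (output destinations are illustrative):
```bash
# Dependency and complexity snapshots via the bundled utility scripts
./.scripts/analyze-dependencies.sh . json > /tmp/deps.json
python3 .scripts/complexity-metrics.py . --format json > /tmp/complexity.json
# Quick stack detection and existing architecture documentation
ls package.json requirements.txt go.mod Cargo.toml composer.json 2>/dev/null
ls docs/adr/ 2>/dev/null
```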
### Phase 2: Layer-by-Layer Analysis
Analyze each architectural layer systematically:
#### Database Layer Review
**Schema Quality**:
- Table design and normalization
- Index coverage for common queries
- Foreign key relationships and referential integrity
- Constraint usage (unique, not null, check)
- Data types appropriateness
**Performance**:
- Index effectiveness (check for missing or unused indexes)
- Query patterns (N+1 queries, table scans)
- Connection pooling configuration
- Transaction isolation levels
- Read replica usage if applicable
**Scalability**:
- Sharding readiness
- Data volume handling
- Migration patterns
- Backup and recovery strategy
**Security**:
- SQL injection protection
- Encryption at rest
- Access control and permissions
- Audit logging
- PII handling
**Issues to Flag**:
- Missing indexes on frequently queried columns
- Lack of foreign key constraints
- Unoptimized queries (SELECT *, missing WHERE clauses)
- Missing migration strategy
- Hardcoded credentials
- Insufficient connection pooling
#### Backend Layer Review
**API Design Quality**:
- RESTful principles adherence
- Consistent naming conventions
- Versioning strategy
- Error response formats
- HTTP status code usage
- Request/response validation
**Code Organization**:
- Separation of concerns
- Layer isolation (controller/service/repository)
- Dependency injection usage
- Module boundaries
- Code duplication
**Business Logic**:
- Complexity and readability
- Error handling completeness
- Input validation and sanitization
- Transaction management
- Domain modeling quality
**Authentication & Authorization**:
- Token management (JWT, OAuth)
- Session handling
- Authorization checks at appropriate layers
- RBAC/ABAC implementation
- Password hashing (bcrypt, argon2)
**Performance**:
- Response time profiling
- Database query efficiency
- Caching effectiveness
- Async/await usage
- Connection pooling
- Rate limiting
**Security**:
- Input validation and sanitization
- SQL injection prevention
- XSS prevention
- CSRF protection
- Secrets management
- Security headers
- Dependency vulnerabilities
**Maintainability**:
- Code complexity metrics
- Test coverage
- Code comments
- Consistent error handling
- Logging completeness
- Dead code elimination
**Issues to Flag**:
- Synchronous blocking operations in async contexts
- Missing error handling
- Hardcoded secrets or credentials
- Insufficient input validation
- Missing authentication/authorization checks
- Poor error messages
- Excessive code complexity
- Lack of logging
- Dependency vulnerabilities
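A couple of rough grep heuristics for the flags above; the patterns are illustrative, will produce false positives, and every hit should be confirmed by reading the code:
```bash
# Possible hardcoded secrets (rough pattern)
grep -rnE "(password|secret|api_key|token)[[:space:]]*[:=][[:space:]]*['\"][A-Za-z0-9_-]{8,}" src/ 2>/dev/null
# Ad-hoc logging that may bypass the application logger
grep -rnE "console\.log\(|print\(" src/ 2>/dev/null | wc -l
```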
#### Frontend Layer Review
**Component Architecture**:
- Component size and complexity
- Reusability and composition
- Smart vs presentational separation
- Component communication patterns
- Prop drilling issues
**State Management**:
- State organization and structure
- Global vs local state balance
- State update patterns
- Performance implications
- Redux/MobX/Context usage quality
**Performance**:
- Bundle size analysis
- Code splitting effectiveness
- Lazy loading usage
- Rendering optimization (memoization, virtualization)
- Image optimization
- Web Vitals compliance
**User Experience**:
- Loading states
- Error boundaries
- Accessibility (WCAG compliance)
- Responsive design
- Progressive enhancement
- Offline support
**Security**:
- XSS prevention
- Content Security Policy
- Secure cookies
- Token storage
- Sensitive data exposure
**Build & Deployment**:
- Build configuration
- Asset optimization
- Source maps
- Environment configuration
- CI/CD integration
**Issues to Flag**:
- Large bundle sizes (> 500KB)
- Missing code splitting
- Prop drilling through multiple levels
- Unnecessary re-renders
- Missing loading/error states
- Accessibility violations
- Insecure token storage (localStorage for sensitive tokens)
- Missing error boundaries
- Large components (> 300 lines)
- Unused dependencies
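Two quick shell checks for the size-related flags above; the `dist/` and `src/components/` paths are assumptions about the project layout:
```bash
# JavaScript bundles larger than ~500KB in the build output
find dist -name "*.js" -size +500k -exec ls -lh {} \; 2>/dev/null
# Components exceeding ~300 lines
wc -l src/components/*.tsx 2>/dev/null | awk '$1 > 300 && $2 != "total" { print $2 ": " $1 " lines" }'
```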
#### Infrastructure Layer Review
**Deployment Architecture**:
- Containerization quality
- Orchestration configuration
- Service discovery
- Load balancing
- Auto-scaling configuration
**Scalability**:
- Horizontal scaling readiness
- Stateless service design
- Session management
- Database scaling strategy
- CDN usage
**Monitoring & Observability**:
- Application monitoring
- Infrastructure monitoring
- Log aggregation
- Distributed tracing
- Alerting configuration
- SLO/SLA definition
**CI/CD Pipeline**:
- Build automation
- Test automation
- Deployment automation
- Rollback procedures
- Blue-green or canary deployment
**Security**:
- Network segmentation
- Firewall rules
- WAF configuration
- DDoS protection
- Encryption in transit and at rest
- Secrets management
- Vulnerability scanning
**Disaster Recovery**:
- Backup strategy
- Recovery procedures
- RTO and RPO targets
- Failover mechanisms
**Issues to Flag**:
- Single point of failure
- Missing monitoring/alerting
- No rollback strategy
- Insufficient logging
- Missing backups
- Insecure network configuration
- Hardcoded secrets in deployment configs
- No health checks
- Missing auto-scaling
- Lack of disaster recovery plan
### Phase 3: Cross-Cutting Concerns Analysis
#### Security Audit
**Authentication**:
- Strong password requirements
- Multi-factor authentication
- Token expiration and rotation
- Session management
**Authorization**:
- Proper access control checks
- Principle of least privilege
- Resource-level permissions
**Data Protection**:
- Encryption at rest and in transit
- PII handling and anonymization
- Data retention policies
- GDPR/CCPA compliance
**Dependency Security**:
- Known vulnerabilities in dependencies
- Outdated packages
- License compliance
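Typical commands for this check, depending on the package manager in use (these assume the corresponding toolchain is installed):
```bash
npm audit --json       # Node: known vulnerabilities in the dependency tree
npm outdated           # Node: packages behind their latest versions
pip list --outdated    # Python: stale packages
pip-audit              # Python: known vulnerabilities (requires pip-audit to be installed)
```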
**Common Vulnerabilities**:
- OWASP Top 10 coverage
- Injection attacks
- Broken authentication
- Sensitive data exposure
- XML external entities
- Broken access control
- Security misconfiguration
- Cross-site scripting
- Insecure deserialization
- Insufficient logging
#### Performance Analysis
**Response Times**:
- API endpoint latency
- Database query performance
- External API call times
- Cache hit rates
**Resource Utilization**:
- CPU usage patterns
- Memory consumption
- Database connections
- Network bandwidth
**Bottlenecks**:
- Slow database queries
- Synchronous blocking calls
- Unoptimized algorithms
- Missing caching
**Frontend Performance**:
- Page load times
- Time to interactive
- Bundle sizes
- Asset optimization
#### Scalability Assessment
**Current Limits**:
- Concurrent user capacity
- Request throughput
- Data volume limits
- Connection pool sizes
**Scaling Strategy**:
- Horizontal scaling readiness
- Database scaling approach
- Stateless design
- Caching layers
**Potential Bottlenecks**:
- Database write contention
- Shared state
- Single-threaded processing
- Synchronous dependencies
#### Maintainability Review
**Code Quality**:
- Cyclomatic complexity
- Code duplication
- Consistent naming conventions
- Code organization
**Testing**:
- Test coverage percentage
- Test quality and effectiveness
- Testing pyramid balance
- Flaky tests
**Documentation**:
- README completeness
- API documentation
- Architecture diagrams
- Onboarding guides
- Runbooks
**Technical Debt**:
- TODO comments
- Deprecated code
- Workarounds and hacks
- Outdated dependencies
### Phase 4: Issue Identification and Scoring
For each issue found, document:
**Issue Template**:
```
**Issue**: [Brief description]
**Category**: [Security/Performance/Scalability/Maintainability]
**Severity**: [Critical/High/Medium/Low]
**Location**: [File and line number or component]
**Impact**: [Detailed explanation of consequences]
**Recommendation**: [How to fix]
**Effort**: [Estimated effort to fix]
**Priority**: [Immediate/High/Medium/Low]
```
**Severity Levels**:
- **Critical**: Security vulnerability, data loss risk, production outage risk
- **High**: Significant performance impact, major security concern, scalability blocker
- **Medium**: Performance degradation, maintainability issues, minor security concerns
- **Low**: Code quality issues, minor optimizations, documentation gaps
**Scoring System** (0-10 scale):
Score each dimension:
- **Security**: 0 (critical vulnerabilities) to 10 (best practices throughout)
- **Performance**: 0 (unacceptably slow) to 10 (optimized)
- **Scalability**: 0 (won't scale) to 10 (proven scalable architecture)
- **Maintainability**: 0 (unmaintainable) to 10 (excellent code quality)
- **Reliability**: 0 (frequent failures) to 10 (highly reliable)
**Overall Architecture Health**: Average of all dimensions
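The overall score is simply the arithmetic mean of the five dimension scores; for example (scores are illustrative):
```bash
security=6; performance=7; scalability=5; maintainability=6; reliability=7
echo "scale=1; ($security + $performance + $scalability + $maintainability + $reliability) / 5" | bc
# prints 6.2
```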
### Phase 5: Recommendations and Roadmap
Provide actionable recommendations prioritized by impact and effort:
**Quick Wins** (High Impact, Low Effort):
- Issues that can be fixed quickly with significant benefit
- Typically security fixes, configuration changes, missing indexes
**Important Improvements** (High Impact, Medium Effort):
- Architectural changes with significant value
- Performance optimizations requiring code changes
- Security hardening requiring moderate work
**Strategic Initiatives** (High Impact, High Effort):
- Major architectural refactoring
- Technology migrations
- Comprehensive test suite development
**Technical Debt Paydown** (Medium Impact, Variable Effort):
- Code quality improvements
- Documentation updates
- Dependency updates
- Test coverage improvements
**Nice-to-Haves** (Low-Medium Impact, Any Effort):
- Minor optimizations
- Code style improvements
- Additional monitoring
## Output Format
Provide a comprehensive architecture review report:
```markdown
# Architecture Review Report
**Review Date**: [Date]
**Scope**: [Full system / specific component]
**Focus**: [All / Security / Performance / Scalability / Maintainability]
**Depth**: [Shallow / Deep]
**Reviewer**: 10x-fullstack-engineer agent
## Executive Summary
[2-3 paragraph summary of findings, overall health, and key recommendations]
**Overall Architecture Health**: [Score]/10
**Key Findings**:
- [Most critical finding]
- [Second most critical finding]
- [Third most critical finding]
**Recommended Priority Actions**:
1. [Top priority action]
2. [Second priority action]
3. [Third priority action]
## Architecture Health Scores
| Dimension | Score | Status | Trend |
|-----------|-------|--------|-------|
| Security | [0-10] | [Critical/Poor/Fair/Good/Excellent] | [↑↓→] |
| Performance | [0-10] | [Critical/Poor/Fair/Good/Excellent] | [↑↓→] |
| Scalability | [0-10] | [Critical/Poor/Fair/Good/Excellent] | [↑↓→] |
| Maintainability | [0-10] | [Critical/Poor/Fair/Good/Excellent] | [↑↓→] |
| Reliability | [0-10] | [Critical/Poor/Fair/Good/Excellent] | [↑↓→] |
| **Overall** | **[0-10]** | **[Status]** | **[Trend]** |
**Score Interpretation**:
- 9-10: Excellent - Industry best practices
- 7-8: Good - Minor improvements needed
- 5-6: Fair - Moderate improvements needed
- 3-4: Poor - Significant issues to address
- 0-2: Critical - Urgent action required
## System Overview
### Technology Stack
**Frontend**: [Technologies]
**Backend**: [Technologies]
**Database**: [Technologies]
**Infrastructure**: [Technologies]
**Monitoring**: [Technologies]
### Architecture Pattern
[Monolith / Microservices / Serverless / Hybrid]
### Key Characteristics
- [Characteristic 1]
- [Characteristic 2]
- [Characteristic 3]
## Detailed Findings
### Security Analysis (Score: [X]/10)
**Strengths**:
- [Positive security practices]
- [What's done well]
**Issues Identified**:
**CRITICAL Issues**:
1. **[Issue Name]**
- **Location**: [File/component]
- **Impact**: [Security risk description]
- **Recommendation**: [How to fix]
- **Effort**: [Time estimate]
**HIGH Severity Issues**:
1. **[Issue Name]**
- **Location**: [File/component]
- **Impact**: [Security risk description]
- **Recommendation**: [How to fix]
- **Effort**: [Time estimate]
**MEDIUM Severity Issues**:
[List of medium issues with brief descriptions]
**LOW Severity Issues**:
[List of low issues with brief descriptions]
**Security Best Practices Compliance**:
- ✅ [Practice followed]
- ✅ [Practice followed]
- ❌ [Practice missing]
- ❌ [Practice missing]
**Recommendations**:
1. [Top security recommendation]
2. [Second security recommendation]
3. [Third security recommendation]
### Performance Analysis (Score: [X]/10)
**Strengths**:
- [What performs well]
- [Good performance practices]
**Performance Metrics** (if available):
- API Response Time (p50): [Xms]
- API Response Time (p95): [Xms]
- API Response Time (p99): [Xms]
- Database Query Time (avg): [Xms]
- Page Load Time: [Xs]
- Bundle Size: [XKB]
**Issues Identified**:
**CRITICAL Issues**:
1. **[Performance bottleneck]**
- **Location**: [File/component]
- **Impact**: [Performance impact - response times, throughput]
- **Current**: [Current performance]
- **Target**: [Target performance]
- **Recommendation**: [Optimization approach]
- **Expected Improvement**: [Performance gain estimate]
- **Effort**: [Time estimate]
**HIGH Severity Issues**:
[Similar format as critical]
**MEDIUM Severity Issues**:
[List with brief descriptions]
**Optimization Opportunities**:
- [Opportunity 1]: [Potential gain]
- [Opportunity 2]: [Potential gain]
- [Opportunity 3]: [Potential gain]
**Recommendations**:
1. [Top performance recommendation]
2. [Second performance recommendation]
3. [Third performance recommendation]
### Scalability Analysis (Score: [X]/10)
**Current Scale**:
- Users: [Estimated current users]
- Requests: [Current request volume]
- Data: [Current data volume]
**Scaling Capabilities**:
- **Horizontal Scaling**: [Yes/No/Limited] - [Explanation]
- **Vertical Scaling**: [Current headroom]
- **Database Scaling**: [Current approach]
**Strengths**:
- [Scalable design elements]
- [Good scaling practices]
**Limitations**:
1. **[Scalability bottleneck]**
- **Current Limit**: [When this breaks]
- **Impact**: [What happens at scale]
- **Recommendation**: [How to scale past this]
- **Effort**: [Time estimate]
**Scaling Readiness Assessment**:
- ✅ Stateless application design
- ✅ Connection pooling configured
- ❌ Database sharding not implemented
- ❌ No caching layer
- ✅ Horizontal auto-scaling configured
- ❌ No rate limiting
**Projected Capacity**:
- Maximum concurrent users: [Estimate]
- Maximum requests/second: [Estimate]
- Bottleneck at: [What fails first]
**Recommendations**:
1. [Top scalability recommendation]
2. [Second scalability recommendation]
3. [Third scalability recommendation]
### Maintainability Analysis (Score: [X]/10)
**Code Quality Metrics** (if available):
- Test Coverage: [X]%
- Average Cyclomatic Complexity: [X]
- Code Duplication: [X]%
- Lines of Code: [X]
- Technical Debt Ratio: [X]%
**Strengths**:
- [Good maintainability practices]
- [What makes code maintainable]
**Issues Identified**:
**HIGH Impact Issues**:
1. **[Maintainability issue]**
- **Location**: [Component/file]
- **Impact**: [How this affects maintenance]
- **Recommendation**: [Improvement approach]
- **Effort**: [Time estimate]
**MEDIUM Impact Issues**:
[List with brief descriptions]
**Technical Debt Items**:
- [Debt item 1]: [Impact]
- [Debt item 2]: [Impact]
- [Debt item 3]: [Impact]
**Documentation Assessment**:
- ✅ [Documentation present]
- ✅ [Documentation present]
- ❌ [Documentation missing]
- ❌ [Documentation missing]
**Testing Assessment**:
- Unit Tests: [X]% coverage - [Quality assessment]
- Integration Tests: [Present/Missing] - [Assessment]
- E2E Tests: [Present/Missing] - [Assessment]
- Test Quality: [Assessment]
**Recommendations**:
1. [Top maintainability recommendation]
2. [Second maintainability recommendation]
3. [Third maintainability recommendation]
### Reliability Analysis (Score: [X]/10)
**Strengths**:
- [Reliability features]
- [Good practices]
**Issues Identified**:
1. **[Reliability concern]**
- **Impact**: [Potential for failure]
- **Likelihood**: [How likely]
- **Recommendation**: [Mitigation]
- **Effort**: [Time estimate]
**Monitoring & Observability**:
- Application Monitoring: [Present/Missing]
- Error Tracking: [Present/Missing]
- Logging: [Assessment]
- Alerting: [Assessment]
- Health Checks: [Present/Missing]
**Error Handling**:
- Error handling coverage: [Assessment]
- Graceful degradation: [Yes/No]
- Circuit breakers: [Present/Missing]
- Retry logic: [Present/Missing]
**Disaster Recovery**:
- Backup strategy: [Assessment]
- Recovery procedures: [Documented/Missing]
- RTO target: [X hours/unknown]
- RPO target: [X hours/unknown]
**Recommendations**:
1. [Top reliability recommendation]
2. [Second reliability recommendation]
3. [Third reliability recommendation]
## Architecture Patterns Analysis
### Positive Patterns Identified
- **[Pattern Name]**: [Where used] - [Benefits]
- **[Pattern Name]**: [Where used] - [Benefits]
### Anti-Patterns Identified
- **[Anti-Pattern Name]**: [Where found] - [Issues] - [Recommendation]
- **[Anti-Pattern Name]**: [Where found] - [Issues] - [Recommendation]
### Recommended Patterns to Adopt
- **[Pattern Name]**: [Use case] - [Benefits] - [Implementation approach]
- **[Pattern Name]**: [Use case] - [Benefits] - [Implementation approach]
## Dependency Analysis
### Security Vulnerabilities
| Package | Severity | Vulnerability | Recommendation |
|---------|----------|---------------|----------------|
| [package] | Critical | [CVE/description] | Update to [version] |
| [package] | High | [CVE/description] | Update to [version] |
### Outdated Dependencies
| Package | Current | Latest | Breaking Changes |
|---------|---------|--------|------------------|
| [package] | [version] | [version] | Yes/No |
### Unused Dependencies
- [package]: [reason it's unused]
- [package]: [reason it's unused]
## Recommendations Roadmap
### Immediate Actions (This Sprint)
**Priority**: CRITICAL - Address immediately
1. **[Action Item]**
- **Category**: [Security/Performance/etc.]
- **Impact**: [What improves]
- **Effort**: [Time estimate]
- **Owner**: [Team/person]
2. **[Action Item]**
[Same format]
### Short-Term Improvements (Next 1-2 Months)
**Priority**: HIGH - Schedule soon
1. **[Action Item]**
[Same format as above]
### Medium-Term Initiatives (Next 3-6 Months)
**Priority**: MEDIUM - Plan and schedule
1. **[Action Item]**
[Same format]
### Long-Term Strategic Changes (6+ Months)
**Priority**: STRATEGIC - Begin planning
1. **[Action Item]**
[Same format]
## Cost-Benefit Analysis
| Recommendation | Impact | Effort | Cost | ROI | Priority |
|----------------|--------|--------|------|-----|----------|
| [Item 1] | High | Low | $X | High | 1 |
| [Item 2] | High | Medium | $X | Medium | 2 |
| [Item 3] | Medium | Low | $X | High | 3 |
## Risk Assessment
### Current Risks
1. **[Risk Description]**
- **Likelihood**: High/Medium/Low
- **Impact**: Critical/High/Medium/Low
- **Mitigation**: [Recommendation]
- **Timeline**: [When to address]
### Risks If Recommendations Not Implemented
1. **[Risk Description]**
- **Likelihood**: [Assessment]
- **Impact**: [Assessment]
- **Timeline**: [When risk materializes]
## Comparison to Industry Standards
| Aspect | Current State | Industry Standard | Gap |
|--------|---------------|-------------------|-----|
| Security | [Assessment] | [Standard] | [Gap] |
| Performance | [Assessment] | [Standard] | [Gap] |
| Scalability | [Assessment] | [Standard] | [Gap] |
| Test Coverage | [X]% | 80%+ | [Gap] |
| Monitoring | [Assessment] | [Standard] | [Gap] |
## Conclusion
[Summary of overall architecture state, key findings, and recommended next steps]
**Overall Assessment**: [Narrative assessment of architecture health]
**Critical Success Factors**:
1. [What needs to happen for success]
2. [Key factor 2]
3. [Key factor 3]
**Next Steps**:
1. [Immediate next step]
2. [Following step]
3. [Third step]
## Appendices
### Appendix A: Detailed Issue List
[Comprehensive list of all issues with full details]
### Appendix B: Performance Profiling Results
[Detailed performance data if available]
### Appendix C: Security Audit Details
[Comprehensive security findings]
### Appendix D: Code Quality Metrics
[Detailed code quality measurements]
### Appendix E: References
- [Related ADRs]
- [Industry standards referenced]
- [Tools used for analysis]
```
## Agent Invocation
This operation MUST invoke the **10x-fullstack-engineer** agent for expert architecture review.
**Agent context to provide**:
- Parsed parameters (path, focus, depth)
- Discovered technology stack
- Current architecture patterns
- Issues found during analysis
- Performance metrics if available
- Security concerns identified
**Agent responsibilities**:
- Apply 15+ years of architectural review experience
- Identify subtle issues and anti-patterns
- Assess architecture health across all dimensions
- Provide actionable recommendations
- Prioritize findings by impact and effort
- Suggest industry best practices
- Compare to similar production systems
**Agent invocation approach**:
Present comprehensive architecture analysis and explicitly request:
"Using your 15+ years of full-stack architecture experience, review this system architecture. Assess security, performance, scalability, maintainability, and reliability. Provide scored assessment, identify critical issues, and recommend prioritized improvements. Consider both immediate risks and long-term technical debt."
## Error Handling
### Path Not Found
If specified path doesn't exist:
```
Error: Path not found: [path]
Available paths to review:
- [directory 1]
- [directory 2]
- [directory 3]
Would you like to:
a) Review the entire codebase (no path specified)
b) Specify a different path
c) List available directories
Please specify a valid path or choose an option.
```
### Insufficient Permissions
If cannot read files:
```
Error: Insufficient permissions to read files in [path]
I need read access to:
- Source code files
- Configuration files
- Documentation
Please ensure the files are readable or specify a different path.
```
### Unknown Focus Area
If focus parameter is invalid:
```
Error: Unknown focus area: [focus]
Valid focus areas:
- security Focus on security vulnerabilities and best practices
- performance Focus on response times, throughput, and optimization
- scalability Focus on scaling capabilities and limitations
- maintainability Focus on code quality, testing, and documentation
- all Comprehensive review across all areas (default)
Example: /architect review focus:"security" depth:"deep"
```
### Empty Codebase
If no code found to review:
```
Error: No code found to review in [path]
The specified path appears empty or contains no reviewable files.
Please specify a path containing:
- Source code files
- Configuration files
- Application logic
Or I can search for code in the current directory.
```
## Examples
**Example 1 - Comprehensive System Review**:
```
/architect review
```
Reviews entire codebase across all dimensions with deep analysis.
**Example 2 - Security-Focused Review**:
```
/architect review focus:"security" depth:"deep"
```
Deep security audit covering OWASP Top 10, dependency vulnerabilities, and security best practices.
**Example 3 - Quick Performance Assessment**:
```
/architect review focus:"performance" depth:"shallow"
```
Quick performance review identifying obvious bottlenecks and optimization opportunities.
**Example 4 - Specific Component Review**:
```
/architect review path:"src/services/payment" focus:"security"
```
Focused security review of payment service component.
**Example 5 - Pre-Production Review**:
```
/architect review focus:"all" depth:"deep"
```
Comprehensive production-readiness review before deployment.
**Example 6 - Scalability Assessment**:
```
/architect review focus:"scalability" depth:"deep"
```
Detailed analysis of scaling capabilities and limitations for capacity planning.
**Example 7 - Code Quality Review**:
```
/architect review path:"src/api" focus:"maintainability"
```
Maintainability review of API layer for technical debt and refactoring opportunities.

187
commands/architect/skill.md Normal file
View File

@@ -0,0 +1,187 @@
---
description: Comprehensive system architecture design, review, and documentation with ADR creation
---
# Architecture Skill Router
You are routing architecture operations using the **10x-fullstack-engineer** agent for expert architectural guidance.
## Request Parsing
**Received**: `$ARGUMENTS`
Parse the first word to determine the operation:
- `design` → Read and execute `.claude/commands/architect/design.md`
- `review` → Read and execute `.claude/commands/architect/review.md`
- `adr` → Read and execute `.claude/commands/architect/adr.md`
- `assess` → Read and execute `.claude/commands/architect/assess.md`
**Base directory**: `/home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/architect`
Pass all remaining arguments (after the operation name) to the selected operation file.
## Operation Overview
### design - Design New Architecture
Create comprehensive system architecture for new features or projects. Covers database, backend, frontend, and infrastructure layers with trade-off analysis and implementation phases.
**When to use**: New features, new projects, major architectural changes, greenfield development
**Typical parameters**: `requirements:"description" [scope:"area"] [constraints:"limitations"] [scale:"load"]`
### review - Review Existing Architecture
Analyze existing architecture for quality, security, performance, scalability, and maintainability issues. Provides scored assessment and actionable recommendations.
**When to use**: Architecture health checks, pre-production reviews, security audits, refactoring planning
**Typical parameters**: `[path:"directory"] [focus:"security|performance|scalability"] [depth:"shallow|deep"]`
### adr - Create Architectural Decision Record
Document significant architectural decisions with context, alternatives, and rationale in standard ADR format.
**When to use**: After major design decisions, technology selections, pattern adoptions, architectural pivots
**Typical parameters**: `decision:"what-was-decided" [context:"background"] [alternatives:"other-options"] [status:"proposed|accepted|superseded"]`
### assess - Architecture Health Assessment
Comprehensive assessment across technical debt, security, performance, scalability, maintainability, and cost dimensions with scoring and trend analysis.
**When to use**: Quarterly reviews, baseline establishment, improvement tracking, executive reporting
**Typical parameters**: `[scope:"system|service|component"] [focus:"dimension"] [baseline:"ADR-number|date"]`
## Usage Examples
**Example 1 - Design Real-Time Notification System**:
```
/architect design requirements:"real-time notification system with WebSockets, push notifications, and email delivery" scale:"10,000 concurrent users" constraints:"must integrate with existing REST API, AWS infrastructure"
```
**Example 2 - Review Security Architecture**:
```
/architect review focus:"security" depth:"deep"
```
**Example 3 - Document Microservices Decision**:
```
/architect adr decision:"migrate from monolith to microservices architecture" context:"scaling challenges and deployment bottlenecks" alternatives:"modular monolith, service-oriented architecture" status:"accepted"
```
**Example 4 - Assess Architecture Health**:
```
/architect assess scope:"system" baseline:"2024-Q3"
```
**Example 5 - Design Multi-Tenant SaaS**:
```
/architect design requirements:"multi-tenant SaaS platform with real-time collaboration, file storage, and analytics" scale:"enterprise-level, 100k+ users" constraints:"TypeScript, Node.js, PostgreSQL, horizontal scaling"
```
**Example 6 - Review Performance Architecture**:
```
/architect review path:"src/services" focus:"performance" depth:"deep"
```
**Example 7 - Document Database Selection**:
```
/architect adr decision:"use PostgreSQL with JSONB for flexible schema" context:"need relational integrity plus document flexibility" alternatives:"MongoDB, DynamoDB, MySQL" status:"accepted"
```
**Example 8 - Focused Tech Debt Assessment**:
```
/architect assess scope:"service" focus:"tech-debt"
```
## Error Handling
### Unknown Operation
If the first argument doesn't match `design`, `review`, `adr`, or `assess`:
```
Unknown operation: "{operation}"
Available operations:
- design Design new system architecture
- review Review existing architecture
- adr Create architectural decision record
- assess Assess architecture health
Example: /architect design requirements:"real-time notifications" scale:"10k users"
```
### Missing Operation
If no operation is specified:
```
No operation specified. Please provide an operation as the first argument.
Available operations:
- design Design new system architecture for features/projects
- review Review existing architecture for quality/security
- adr Create architectural decision records
- assess Assess architecture health with scoring
Examples:
/architect design requirements:"feature description" scale:"expected load"
/architect review focus:"security" depth:"deep"
/architect adr decision:"technology choice" alternatives:"other options"
/architect assess scope:"system" baseline:"previous assessment"
```
### Invalid Arguments Format
If arguments are malformed, guide the user:
```
Invalid arguments format. Each operation expects specific parameters.
Design operation format:
requirements:"description" [scope:"area"] [constraints:"limitations"] [scale:"load"]
Review operation format:
[path:"directory"] [focus:"security|performance|scalability"] [depth:"shallow|deep"]
ADR operation format:
decision:"what-was-decided" [context:"background"] [alternatives:"options"] [status:"proposed|accepted"]
Assess operation format:
[scope:"system|service|component"] [focus:"dimension"] [baseline:"reference"]
See /architect for examples.
```
## Agent Integration
All operations MUST invoke the **10x-fullstack-engineer** agent for:
- 15+ years of architectural expertise
- Pattern recognition and best practices
- Trade-off analysis and decision guidance
- Production system experience
- Technology stack recommendations
- Scalability and performance insights
- Security and reliability patterns
Ensure the agent receives complete context including:
- Current operation and parameters
- Relevant codebase information
- Existing architecture if available
- Business and technical constraints
- Scale and performance requirements
## Routing Process
1. **Parse** `$ARGUMENTS` to extract operation name
2. **Validate** operation is one of: design, review, adr, assess
3. **Construct** file path: `{base-directory}/{operation}.md`
4. **Read** the operation file contents
5. **Execute** instructions with remaining arguments
6. **Invoke** 10x-fullstack-engineer agent with full context
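A conceptual sketch of this dispatch, shown as shell for clarity; in practice the router itself reads and follows the operation file rather than running a script:
```bash
read -r operation rest <<< "$ARGUMENTS"
base_dir=".claude/commands/architect"    # per the routing table above
case "$operation" in
  design|review|adr|assess)
    op_file="${base_dir}/${operation}.md"
    echo "Execute the instructions in $op_file with parameters: $rest"
    ;;
  "") echo "No operation specified." ;;
  *)  echo "Unknown operation: \"$operation\"" ;;
esac
```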
## Notes
- Sub-operation files have NO frontmatter (not directly invokable)
- Only this router `skill.md` is visible in the slash command list
- All operations integrate with 10x-fullstack-engineer agent
- Scripts in .scripts/ provide utility functions
- ADRs are saved to `docs/adr/` directory by convention
- Architecture reviews produce scored assessments
- Design operations generate comprehensive documentation