Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:20:21 +08:00
commit bbbaf7acad
63 changed files with 38552 additions and 0 deletions

View File

@@ -0,0 +1,387 @@
#!/bin/bash
# Purpose: Analyze project dependencies for security, versioning, and usage
# Version: 1.0.0
# Usage: ./analyze-dependencies.sh [path]
# Returns: JSON formatted dependency analysis
# Exit codes: 0=success, 1=error, 2=invalid input

# Abort on command failure, unset variables, and failures inside pipelines.
set -euo pipefail

# Configuration
# NOTE(review): SCRIPT_DIR is not referenced anywhere else in this file —
# confirm whether it is needed before removing it.
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly PROJECT_DIR="${1:-.}"      # directory to analyze (default: cwd)
readonly OUTPUT_FORMAT="${2:-json}" # "json" or anything else for plain text

# Color codes for output (stored as literal \033 sequences; expanded by
# the logging functions below)
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly GREEN='\033[0;32m'
readonly NC='\033[0m' # No Color
# Logging functions
log_info() {
  # Green informational message on stderr; %b expands the \033 escapes
  # stored in the color constants (equivalent to the original `echo -e`).
  printf '%b\n' "${GREEN}[INFO]${NC} $*" >&2
}
log_warn() {
  # Yellow warning message on stderr; %b expands the stored \033 escapes.
  printf '%b\n' "${YELLOW}[WARN]${NC} $*" >&2
}
log_error() {
  # Red error message on stderr; %b expands the stored \033 escapes.
  printf '%b\n' "${RED}[ERROR]${NC} $*" >&2
}
# Validate input
validate_input() {
  # Abort with exit code 2 when the target directory does not exist.
  # Reads global: PROJECT_DIR.
  [[ -d "$PROJECT_DIR" ]] && return 0
  log_error "Directory not found: $PROJECT_DIR"
  exit 2
}
# Detect package manager and dependency files
detect_package_manager() {
  # Identify the project's package manager by probing for well-known
  # manifest files under PROJECT_DIR.  Prints "manager|file" on stdout;
  # "unknown|none" (plus a warning on stderr) when nothing matches.
  local pkg_manager="unknown"
  local dep_file="none"
  local entry
  # Probe order matters: the first match wins.
  local -a candidates=(
    "npm:package.json"
    "pip:requirements.txt"
    "pipenv:Pipfile"
    "poetry:pyproject.toml"
    "bundler:Gemfile"
    "go:go.mod"
    "cargo:Cargo.toml"
    "composer:composer.json"
  )
  for entry in "${candidates[@]}"; do
    if [[ -f "$PROJECT_DIR/${entry#*:}" ]]; then
      pkg_manager="${entry%%:*}"
      dep_file="${entry#*:}"
      break
    fi
  done
  if [[ "$pkg_manager" == "unknown" ]]; then
    log_warn "No recognized dependency file found"
  fi
  echo "$pkg_manager|$dep_file"
}
# Count dependencies
# Count dependencies
count_dependencies() {
  # Count direct and development dependencies from the manifest file.
  # $1 = package manager name, $2 = manifest filename (relative to
  # PROJECT_DIR).  Prints "direct|dev" on stdout.
  #
  # Bug fixed: `grep -c ... || echo 0` produced "0\n0" when there were
  # zero matches, because grep -c prints "0" AND exits non-zero, so the
  # `echo 0` fallback ran as well and corrupted the pipe-delimited result.
  local pkg_manager="$1"
  local dep_file="$2"
  local direct_count=0
  local dev_count=0
  case "$pkg_manager" in
    npm)
      if command -v jq &> /dev/null; then
        direct_count=$(jq -r '.dependencies // {} | length' "$PROJECT_DIR/$dep_file" 2>/dev/null) || direct_count=0
        dev_count=$(jq -r '.devDependencies // {} | length' "$PROJECT_DIR/$dep_file" 2>/dev/null) || dev_count=0
      else
        # Crude fallback without jq: count quoted lines in the manifest.
        direct_count=$(grep -c '"' "$PROJECT_DIR/$dep_file" 2>/dev/null || true)
        direct_count=${direct_count:-0}
      fi
      ;;
    pip)
      # Non-comment, non-empty lines of requirements.txt.
      direct_count=$(grep -v '^#' "$PROJECT_DIR/$dep_file" 2>/dev/null | grep -c . || true)
      direct_count=${direct_count:-0}
      ;;
    go)
      # Lines mentioning "require" in go.mod (rough heuristic).
      direct_count=$(grep -c 'require' "$PROJECT_DIR/$dep_file" 2>/dev/null || true)
      direct_count=${direct_count:-0}
      ;;
    *)
      direct_count=0
      ;;
  esac
  echo "$direct_count|$dev_count"
}
# Check for outdated dependencies (simplified - would need package manager specific commands)
check_outdated() {
  # Placeholder outdated-package check: a real implementation would run
  # `npm outdated --json` / `pip list --outdated` and parse the output.
  # $1 = package manager name; prints the outdated count (always 0 here).
  local manager="$1"
  local stale=0
  if [[ "$manager" == "npm" ]]; then
    if command -v npm &> /dev/null && [[ -f "$PROJECT_DIR/package-lock.json" ]]; then
      log_info "Checking for outdated npm packages..."
      stale=0 # Placeholder
    fi
  elif [[ "$manager" == "pip" ]]; then
    if command -v pip &> /dev/null; then
      log_info "Checking for outdated pip packages..."
      stale=0 # Placeholder
    fi
  fi
  echo "$stale"
}
# Check for security vulnerabilities (simplified)
check_vulnerabilities() {
  # Placeholder vulnerability scan: a real implementation would call
  # `npm audit --json` or `safety check` and bucket findings by severity.
  # $1 = package manager name; prints "critical|high|medium|low".
  local manager="$1"
  local vuln_count=0
  local critical=0 high=0 medium=0 low=0
  if [[ "$manager" == "npm" ]]; then
    if command -v npm &> /dev/null && [[ -f "$PROJECT_DIR/package-lock.json" ]]; then
      log_info "Checking for npm security vulnerabilities..."
      vuln_count=0 # Placeholder
    fi
  elif [[ "$manager" == "pip" ]]; then
    if command -v safety &> /dev/null; then
      log_info "Checking for Python security vulnerabilities..."
      vuln_count=0 # Placeholder
    fi
  fi
  echo "$critical|$high|$medium|$low"
}
# Analyze dependency tree depth (simplified)
analyze_tree_depth() {
  # Placeholder dependency-tree depth estimate.  $1 = package manager
  # name; prints a fixed depth of 3 for npm projects with a lockfile,
  # otherwise 0 (a real version would parse the lockfile).
  local manager="$1"
  local depth=0
  if [[ "$manager" == "npm" && -f "$PROJECT_DIR/package-lock.json" ]]; then
    depth=3 # Placeholder - would calculate from lockfile
  fi
  echo "$depth"
}
# Find unused dependencies (simplified)
find_unused() {
  # Placeholder unused-dependency check: real detection needs source
  # scanning (e.g. depcheck).  $1 = package manager name; prints a count.
  local manager="$1"
  local count=0
  if [[ "$manager" == "npm" ]]; then
    log_info "Analyzing for unused npm packages..."
    count=0 # Placeholder
  fi
  echo "$count"
}
# Check for duplicate dependencies
check_duplicates() {
  # Placeholder duplicate-dependency check: a real version would inspect
  # the lockfile for version conflicts.  $1 = package manager name.
  local manager="$1"
  local dupes=0
  if [[ "$manager" == "npm" && -f "$PROJECT_DIR/package-lock.json" ]]; then
    log_info "Checking for duplicate packages..."
    dupes=0 # Placeholder
  fi
  echo "$dupes"
}
# Generate dependency analysis report
generate_report() {
  # Render the final report to stdout, JSON or plain text depending on
  # the global OUTPUT_FORMAT ("json" selects JSON; anything else, text).
  #
  # Arguments:
  #   $1 pkg_manager     detected package manager name
  #   $2 dep_file        dependency manifest filename
  #   $3 dep_counts      "direct|dev" from count_dependencies
  #   $4 outdated        outdated package count
  #   $5 vulnerabilities "critical|high|medium|low" from check_vulnerabilities
  #   $6 tree_depth      maximum dependency tree depth
  #   $7 unused          unused dependency count
  #   $8 duplicates      duplicate dependency count
  local pkg_manager="$1"
  local dep_file="$2"
  local dep_counts="$3"
  local outdated="$4"
  local vulnerabilities="$5"
  local tree_depth="$6"
  local unused="$7"
  local duplicates="$8"
  # Unpack the pipe-delimited aggregate fields into scalars.
  IFS='|' read -r direct_deps dev_deps <<< "$dep_counts"
  IFS='|' read -r crit_vulns high_vulns med_vulns low_vulns <<< "$vulnerabilities"
  local total_deps=$((direct_deps + dev_deps))
  local total_vulns=$((crit_vulns + high_vulns + med_vulns + low_vulns))
  if [[ "$OUTPUT_FORMAT" == "json" ]]; then
    # Heredoc body is the JSON output itself — do not reformat it.
    cat <<EOF
{
"package_manager": "$pkg_manager",
"dependency_file": "$dep_file",
"analysis_date": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"dependencies": {
"total": $total_deps,
"direct": $direct_deps,
"development": $dev_deps,
"outdated": $outdated,
"unused": $unused,
"duplicates": $duplicates
},
"vulnerabilities": {
"total": $total_vulns,
"critical": $crit_vulns,
"high": $high_vulns,
"medium": $med_vulns,
"low": $low_vulns
},
"tree_depth": $tree_depth,
"health_score": $(calculate_health_score "$total_vulns" "$outdated" "$unused" "$duplicates"),
"recommendations": $(generate_recommendations "$total_vulns" "$outdated" "$unused" "$duplicates")
}
EOF
  else
    # Plain-text report for human consumption.
    cat <<EOF
==============================================
Dependency Analysis Report
==============================================
Package Manager: $pkg_manager
Dependency File: $dep_file
Analysis Date: $(date)
Dependencies:
Total Dependencies: $total_deps
Direct Dependencies: $direct_deps
Development Dependencies: $dev_deps
Outdated: $outdated
Unused: $unused
Duplicates: $duplicates
Security Vulnerabilities:
Total: $total_vulns
Critical: $crit_vulns
High: $high_vulns
Medium: $med_vulns
Low: $low_vulns
Dependency Tree:
Maximum Depth: $tree_depth
Health Score: $(calculate_health_score "$total_vulns" "$outdated" "$unused" "$duplicates")/10
==============================================
EOF
  fi
}
# Calculate health score (0-10)
calculate_health_score() {
  # Health score on a 0-10 scale: start at 10 and subtract one point per
  # vulnerability, per 5 outdated, per 3 unused, and per 2 duplicate
  # packages (integer division), clamping at zero.
  # $1=vulns $2=outdated $3=unused $4=duplicates
  local vulns="$1" outdated="$2" unused="$3" duplicates="$4"
  local penalty=$(( vulns + outdated / 5 + unused / 3 + duplicates / 2 ))
  local score=$(( 10 - penalty ))
  if (( score < 0 )); then
    score=0
  fi
  echo "$score"
}
# Generate recommendations
generate_recommendations() {
  # Emit a JSON array of remediation suggestions based on the counts.
  # $1=vulns $2=outdated $3=unused $4=duplicates
  local vulns="$1" outdated="$2" unused="$3" duplicates="$4"
  local -a items=()
  if (( vulns > 0 )); then
    items+=('{"priority":"critical","action":"Update packages with security vulnerabilities immediately"}')
  fi
  if (( outdated > 10 )); then
    items+=('{"priority":"high","action":"Review and update outdated dependencies"}')
  fi
  if (( unused > 5 )); then
    items+=('{"priority":"medium","action":"Remove unused dependencies to reduce bundle size"}')
  fi
  if (( duplicates > 0 )); then
    items+=('{"priority":"medium","action":"Resolve duplicate dependencies with version conflicts"}')
  fi
  # Join the entries with commas inside the array brackets; local IFS
  # keeps the change scoped to this function.
  local IFS=','
  echo "[${items[*]-}]"
}
# Main execution
# Main execution
main() {
  # Orchestrates the analysis: validate input, detect the package
  # manager, gather metrics, and print the report.
  # Fixed: working variables are now `local` so nothing leaks into the
  # global scope.
  local pkg_manager dep_file
  local dep_counts outdated vulnerabilities tree_depth unused duplicates

  log_info "Starting dependency analysis..."
  validate_input

  # Detect package manager ("manager|file" on stdout)
  IFS='|' read -r pkg_manager dep_file <<< "$(detect_package_manager)"
  if [[ "$pkg_manager" == "unknown" ]]; then
    log_error "Could not detect package manager"
    exit 1
  fi
  log_info "Detected package manager: $pkg_manager"

  # Gather metrics (each helper prints its result on stdout)
  dep_counts=$(count_dependencies "$pkg_manager" "$dep_file")
  outdated=$(check_outdated "$pkg_manager")
  vulnerabilities=$(check_vulnerabilities "$pkg_manager")
  tree_depth=$(analyze_tree_depth "$pkg_manager")
  unused=$(find_unused "$pkg_manager")
  duplicates=$(check_duplicates "$pkg_manager")

  # Generate report
  generate_report "$pkg_manager" "$dep_file" "$dep_counts" "$outdated" "$vulnerabilities" "$tree_depth" "$unused" "$duplicates"
  log_info "Analysis complete"
  exit 0
}
# Run main function
main "$@"

View File

@@ -0,0 +1,367 @@
#!/usr/bin/env python3
"""
Purpose: Calculate code complexity metrics for architecture assessment
Version: 1.0.0
Usage: python3 complexity-metrics.py [path] [--format json|text]
Returns: Complexity metrics including cyclomatic complexity, maintainability index
Exit codes: 0=success, 1=error, 2=invalid input
Dependencies: radon (install with: pip install radon)
If radon is not available, provides simplified metrics
"""
import argparse
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Tuple
class ComplexityAnalyzer:
    """Analyzes code complexity across a codebase.

    Uses the optional ``radon`` package for cyclomatic complexity and the
    maintainability index when it is importable; otherwise falls back to
    keyword-counting heuristics so a (rougher) report is still produced.
    """

    def __init__(self, root_path: str):
        """Prepare the result skeleton for the tree rooted at ``root_path``."""
        self.root_path = Path(root_path)
        self.results = {
            # Timezone-aware "now"; datetime.utcnow() is deprecated
            # (Python 3.12+).  Normalized to the same trailing-"Z" format
            # the previous code produced.
            "analysis_date": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
            "root_path": str(self.root_path),
            "files_analyzed": 0,
            "total_lines": 0,
            "total_functions": 0,
            "complexity": {
                "average": 0.0,
                "max": 0,
                "distribution": {"simple": 0, "moderate": 0, "complex": 0, "very_complex": 0}
            },
            "maintainability": {
                "average": 0.0,
                "distribution": {"high": 0, "medium": 0, "low": 0}
            },
            # NOTE(review): per-file details are never appended anywhere in
            # this file; the key is kept for output compatibility.
            "files": []
        }
        self.has_radon = self._check_radon()

    def _check_radon(self) -> bool:
        """Return True if the optional radon dependency is importable."""
        try:
            import radon  # noqa: F401 - availability probe only
            return True
        except ImportError:
            print("Warning: radon not installed. Using simplified metrics.", file=sys.stderr)
            print("Install with: pip install radon", file=sys.stderr)
            return False

    def analyze(self) -> Dict[str, Any]:
        """Perform complexity analysis and return the results dict.

        Raises:
            FileNotFoundError: if the root path does not exist.
        """
        if not self.root_path.exists():
            raise FileNotFoundError(f"Path not found: {self.root_path}")
        for file_path in self._find_source_files():
            self._analyze_file(file_path)
        self._calculate_summary()
        return self.results

    def _find_source_files(self) -> List[Path]:
        """Find all source files under the root, skipping vendored/build dirs."""
        extensions = {'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go', '.rb', '.php', '.c', '.cpp', '.cs'}
        excluded_dirs = {'node_modules', 'venv', 'env', '.venv', 'dist', 'build', '.git', 'vendor', '__pycache__'}
        source_files: List[Path] = []
        for ext in extensions:
            source_files.extend(self.root_path.rglob(f"*{ext}"))
        source_files = [f for f in source_files
                        if not any(excluded in f.parts for excluded in excluded_dirs)]
        # Sort so repeated runs visit files in a deterministic order
        # (iterating the extension set above yields an arbitrary order).
        return sorted(source_files)

    def _analyze_file(self, file_path: Path):
        """Analyze a single file, accumulating into ``self.results``."""
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
            lines = content.split('\n')
            self.results["total_lines"] += len(lines)
            self.results["files_analyzed"] += 1
            if self.has_radon:
                self._analyze_with_radon(file_path, content)
            else:
                self._analyze_simplified(file_path, content, lines)
        except Exception as e:
            # Unreadable or unparsable files are skipped, not fatal.
            print(f"Warning: Could not analyze {file_path}: {e}", file=sys.stderr)

    def _analyze_with_radon(self, file_path: Path, content: str):
        """Analyze one file with radon (cyclomatic complexity + MI).

        NOTE(review): radon parses Python source; non-Python files found by
        ``_find_source_files`` will typically fail here and only emit the
        warning below — confirm whether that is intended.
        """
        from radon.complexity import cc_visit
        from radon.metrics import mi_visit
        try:
            # Cyclomatic complexity, one result per function/method.
            complexity_results = cc_visit(content, no_assert=True)
            for result in complexity_results:
                self.results["total_functions"] += 1
                complexity = result.complexity
                # Classify into the standard CC buckets.
                if complexity <= 5:
                    self.results["complexity"]["distribution"]["simple"] += 1
                elif complexity <= 10:
                    self.results["complexity"]["distribution"]["moderate"] += 1
                elif complexity <= 20:
                    self.results["complexity"]["distribution"]["complex"] += 1
                else:
                    self.results["complexity"]["distribution"]["very_complex"] += 1
                # Track maximum complexity across the whole codebase.
                if complexity > self.results["complexity"]["max"]:
                    self.results["complexity"]["max"] = complexity
            # Maintainability index (per logical block; average per file).
            mi_score = mi_visit(content, multi=True)
            if mi_score:
                avg_mi = sum(mi_score) / len(mi_score)
                if avg_mi >= 70:
                    self.results["maintainability"]["distribution"]["high"] += 1
                elif avg_mi >= 50:
                    self.results["maintainability"]["distribution"]["medium"] += 1
                else:
                    self.results["maintainability"]["distribution"]["low"] += 1
        except Exception as e:
            print(f"Warning: Radon analysis failed for {file_path}: {e}", file=sys.stderr)

    def _analyze_simplified(self, file_path: Path, content: str, lines: List[str]):
        """Heuristic fallback used when radon is unavailable.

        Counts declaration keywords as "functions" and control-flow keywords
        as "complexity"; both are rough, language-agnostic approximations.
        """
        function_keywords = ['def ', 'function ', 'func ', 'fn ', 'sub ', 'public ', 'private ', 'protected ']
        function_count = sum(1 for line in lines
                             if any(keyword in line.lower() for keyword in function_keywords))
        self.results["total_functions"] += function_count
        # Branch/loop keywords approximate decision points; note that '?',
        # '&&' and '||' match anywhere in a line, including inside strings.
        complexity_keywords = ['if ', 'else', 'elif', 'for ', 'while ', 'switch', 'case ', 'catch', '?', '&&', '||']
        total_complexity = sum(1 for line in lines
                               if any(keyword in line for keyword in complexity_keywords))
        if function_count > 0:
            avg_complexity = total_complexity / function_count
            # Bucket every function in the file by the file-level average.
            if avg_complexity <= 5:
                self.results["complexity"]["distribution"]["simple"] += function_count
            elif avg_complexity <= 10:
                self.results["complexity"]["distribution"]["moderate"] += function_count
            elif avg_complexity <= 20:
                self.results["complexity"]["distribution"]["complex"] += function_count
            else:
                self.results["complexity"]["distribution"]["very_complex"] += function_count
        # Maintainability proxy: average function length in lines.  The
        # max(..., 1) guard keeps this defined for files with no functions.
        avg_lines_per_func = len(lines) / max(function_count, 1)
        if avg_lines_per_func <= 20:
            self.results["maintainability"]["distribution"]["high"] += 1
        elif avg_lines_per_func <= 50:
            self.results["maintainability"]["distribution"]["medium"] += 1
        else:
            self.results["maintainability"]["distribution"]["low"] += 1

    def _calculate_summary(self):
        """Derive averages, health score, and recommendations from the tallies."""
        total_funcs = self.results["total_functions"]
        if total_funcs > 0:
            # Approximate average complexity from bucket midpoints.
            dist = self.results["complexity"]["distribution"]
            weighted_sum = (dist["simple"] * 3 + dist["moderate"] * 7.5 +
                            dist["complex"] * 15 + dist["very_complex"] * 25)
            self.results["complexity"]["average"] = round(weighted_sum / total_funcs, 2)
        # Approximate average maintainability from bucket midpoints.
        mi_dist = self.results["maintainability"]["distribution"]
        total_mi = sum(mi_dist.values())
        if total_mi > 0:
            weighted_mi = (mi_dist["high"] * 85 + mi_dist["medium"] * 60 + mi_dist["low"] * 30)
            self.results["maintainability"]["average"] = round(weighted_mi / total_mi, 2)
        # Always set these keys so downstream consumers (e.g. the text
        # formatter) can rely on their presence even for empty codebases.
        self.results["health_score"] = self._calculate_health_score()
        self.results["recommendations"] = self._generate_recommendations()

    def _calculate_health_score(self) -> float:
        """Calculate overall code health score (0-10)."""
        score = 10.0
        # Deduct for high average complexity.
        avg_complexity = self.results["complexity"]["average"]
        if avg_complexity > 20:
            score -= 4
        elif avg_complexity > 10:
            score -= 2
        elif avg_complexity > 5:
            score -= 1
        # Deduct for a high share of very complex functions.
        very_complex = self.results["complexity"]["distribution"]["very_complex"]
        total_funcs = self.results["total_functions"]
        if total_funcs > 0:
            very_complex_ratio = very_complex / total_funcs
            if very_complex_ratio > 0.2:
                score -= 3
            elif very_complex_ratio > 0.1:
                score -= 2
            elif very_complex_ratio > 0.05:
                score -= 1
        # Deduct for a high share of low-maintainability files.
        low_mi = self.results["maintainability"]["distribution"]["low"]
        total_files = self.results["files_analyzed"]
        if total_files > 0:
            low_mi_ratio = low_mi / total_files
            if low_mi_ratio > 0.3:
                score -= 2
            elif low_mi_ratio > 0.2:
                score -= 1
        return max(0.0, min(10.0, round(score, 1)))

    def _generate_recommendations(self) -> List[Dict[str, str]]:
        """Generate prioritized recommendations based on the analysis."""
        recommendations = []
        avg_complexity = self.results["complexity"]["average"]
        if avg_complexity > 10:
            recommendations.append({
                "priority": "high",
                "action": f"Reduce average cyclomatic complexity from {avg_complexity} to below 10",
                "impact": "Improves code readability and testability"
            })
        very_complex = self.results["complexity"]["distribution"]["very_complex"]
        if very_complex > 0:
            recommendations.append({
                "priority": "high",
                "action": f"Refactor {very_complex} very complex functions (complexity > 20)",
                "impact": "Reduces bug risk and maintenance burden"
            })
        low_mi = self.results["maintainability"]["distribution"]["low"]
        if low_mi > 0:
            recommendations.append({
                "priority": "medium",
                "action": f"Improve maintainability of {low_mi} low-scored files",
                "impact": "Easier code changes and onboarding"
            })
        total_funcs = self.results["total_functions"]
        total_lines = self.results["total_lines"]
        if total_funcs > 0:
            avg_lines_per_func = total_lines / total_funcs
            if avg_lines_per_func > 50:
                recommendations.append({
                    "priority": "medium",
                    "action": f"Break down large functions (avg {avg_lines_per_func:.0f} lines/function)",
                    "impact": "Improves code organization and reusability"
                })
        return recommendations
def format_output(results: Dict[str, Any], output_format: str) -> str:
    """Format analysis results.

    Args:
        results: dict produced by ComplexityAnalyzer.analyze(); the text
            branch expects health_score and recommendations to be present.
        output_format: "json" for a pretty-printed dump, anything else for
            a human-readable text report.

    Returns:
        The formatted report as a single string.
    """
    if output_format == "json":
        return json.dumps(results, indent=2)
    # Text format
    output = []
    output.append("\n" + "=" * 60)
    output.append("Code Complexity Metrics Report")
    output.append("=" * 60)
    output.append(f"\nAnalysis Date: {results['analysis_date']}")
    output.append(f"Root Path: {results['root_path']}")
    output.append(f"Files Analyzed: {results['files_analyzed']}")
    output.append(f"Total Lines: {results['total_lines']:,}")
    output.append(f"Total Functions: {results['total_functions']:,}")
    output.append("\n--- Cyclomatic Complexity ---")
    output.append(f"Average Complexity: {results['complexity']['average']}")
    output.append(f"Maximum Complexity: {results['complexity']['max']}")
    output.append("\nDistribution:")
    dist = results['complexity']['distribution']
    total = sum(dist.values())
    # Guard against division by zero when no functions were found.
    if total > 0:
        output.append(f"  Simple (1-5):       {dist['simple']:4d} ({dist['simple']/total*100:5.1f}%)")
        output.append(f"  Moderate (6-10):    {dist['moderate']:4d} ({dist['moderate']/total*100:5.1f}%)")
        output.append(f"  Complex (11-20):    {dist['complex']:4d} ({dist['complex']/total*100:5.1f}%)")
        output.append(f"  Very Complex (>20): {dist['very_complex']:4d} ({dist['very_complex']/total*100:5.1f}%)")
    output.append("\n--- Maintainability Index ---")
    output.append(f"Average Score: {results['maintainability']['average']}")
    output.append("\nDistribution:")
    mi_dist = results['maintainability']['distribution']
    total_mi = sum(mi_dist.values())
    # Same zero-file guard for the maintainability distribution.
    if total_mi > 0:
        output.append(f"  High (70-100):   {mi_dist['high']:4d} ({mi_dist['high']/total_mi*100:5.1f}%)")
        output.append(f"  Medium (50-69):  {mi_dist['medium']:4d} ({mi_dist['medium']/total_mi*100:5.1f}%)")
        output.append(f"  Low (0-49):      {mi_dist['low']:4d} ({mi_dist['low']/total_mi*100:5.1f}%)")
    output.append(f"\n--- Health Score: {results['health_score']}/10 ---")
    if results['recommendations']:
        output.append("\n--- Recommendations ---")
        for i, rec in enumerate(results['recommendations'], 1):
            output.append(f"\n{i}. [{rec['priority'].upper()}] {rec['action']}")
            output.append(f"   Impact: {rec['impact']}")
    output.append("\n" + "=" * 60 + "\n")
    return "\n".join(output)
def main():
    """CLI entry point: parse arguments, run the analysis, print the report."""
    parser = argparse.ArgumentParser(
        description="Analyze code complexity metrics for architecture assessment"
    )
    parser.add_argument(
        "path",
        nargs="?",
        default=".",
        help="Path to analyze (default: current directory)"
    )
    parser.add_argument(
        "--format",
        choices=["json", "text"],
        default="json",
        help="Output format (default: json)"
    )
    options = parser.parse_args()

    try:
        report = ComplexityAnalyzer(options.path).analyze()
        print(format_output(report, options.format))
        sys.exit(0)
    except FileNotFoundError as exc:
        # Exit code 2: invalid input path.
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(2)
    except Exception as exc:
        # Exit code 1: any other failure during analysis/formatting.
        print(f"Error during analysis: {exc}", file=sys.stderr)
        sys.exit(1)
# Run the CLI only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,449 @@
#!/bin/bash
# Purpose: Generate ASCII architecture diagrams from system descriptions
# Version: 1.0.0
# Usage: ./diagram-generator.sh <type> [options]
# Types: layered, microservices, database, network, component
# Returns: ASCII diagram
# Exit codes: 0=success, 1=error, 2=invalid input

# Abort on command failure, unset variables, and failures inside pipelines.
set -euo pipefail

# Configuration
readonly SCRIPT_NAME="$(basename "$0")"
readonly DIAGRAM_TYPE="${1:-}" # first positional argument selects the diagram

# Box drawing characters (multibyte UTF-8)
readonly TL="┌" # Top-left
readonly TR="┐" # Top-right
readonly BL="└" # Bottom-left
readonly BR="┘" # Bottom-right
readonly H="─" # Horizontal
readonly V="│" # Vertical
readonly VR="├" # Vertical-right
readonly VL="┤" # Vertical-left
readonly HU="┴" # Horizontal-up
readonly HD="┬" # Horizontal-down
readonly X="┼" # Cross

# Arrow characters
# NOTE(review): the static diagrams below embed arrow/box glyphs literally,
# so several of these constants appear unused in this file (presumably kept
# for draw_box-style dynamic rendering) — confirm before removing.
readonly ARROW_DOWN="▼"
readonly ARROW_UP="▲"
readonly ARROW_LEFT="◄"
readonly ARROW_RIGHT="►"
readonly ARROW_BIDIRECT="◄►"

# Color codes (literal \033 sequences; expanded where printed)
readonly BLUE='\033[0;34m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'
# Usage information
usage() {
  # Print CLI help to stdout; the heredoc body is user-facing text and
  # must stay verbatim.
  cat <<EOF
Usage: $SCRIPT_NAME <type> [options]
Diagram Types:
layered Generate layered architecture diagram
microservices Generate microservices architecture diagram
database Generate database architecture diagram
network Generate network topology diagram
component Generate component interaction diagram
dataflow Generate data flow diagram
Options:
--title TEXT Set diagram title (default: architecture type)
--color Enable colored output
--help Show this help message
Examples:
$SCRIPT_NAME layered --title "Web Application Architecture"
$SCRIPT_NAME microservices --color
$SCRIPT_NAME database --title "E-commerce Database"
Exit Codes:
0 - Success
1 - Error during execution
2 - Invalid input
EOF
}
# Parse options
# Parse options
parse_options() {
  # Parse trailing CLI options into globals used by the generators.
  # Globals written: DIAGRAM_TITLE, USE_COLOR.
  # Non-option arguments (e.g. the diagram type itself) are skipped.
  DIAGRAM_TITLE=""
  USE_COLOR=false
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --title)
        # Bug fixed: a bare `--title` with no value used to reference the
        # unset "$2" (an unbound-variable error under `set -u`) and then
        # run a failing `shift 2` (fatal under `set -e`, otherwise an
        # infinite loop).  Fail fast with exit code 2 instead.
        if [[ $# -lt 2 ]]; then
          echo "Error: --title requires an argument" >&2
          exit 2
        fi
        DIAGRAM_TITLE="$2"
        shift 2
        ;;
      --color)
        USE_COLOR=true
        shift
        ;;
      --help)
        usage
        exit 0
        ;;
      *)
        shift
        ;;
    esac
  done
}
# Draw a box
# Draw a box
draw_box() {
  # Render a rectangular box of width x height with horizontally centered
  # text on the middle row.
  # $1=width (columns)  $2=height (rows)  $3=label text
  # $4=optional color escape (defaults to NC)
  local width="$1"
  local height="$2"
  local text="$3"
  local color="${4:-$NC}"
  local i

  # Bug fixed: the horizontal rule was built with `tr ' ' "$H"`, but tr is
  # byte-oriented and "$H" (─) is a 3-byte UTF-8 character, so the output
  # was corrupted.  Build the rule by repetition instead.
  local rule=""
  for ((i = 0; i < width - 2; i++)); do
    rule+="$H"
  done

  # printf '%b' expands the \033 escapes stored in the color constants;
  # the original plain `echo -n` printed them literally.
  printf '%b%s%s%s%b\n' "$color" "$TL" "$rule" "$TR" "$NC"

  # Padding for centered text (2 columns are taken by the side borders).
  local text_len=${#text}
  local padding=$(( (width - text_len - 2) / 2 ))
  local padding_right=$(( width - text_len - padding - 2 ))

  # Middle rows; the row at height/2 carries the label when one is given.
  for ((i = 1; i < height - 1; i++)); do
    if [[ $i -eq $((height / 2)) ]] && [[ -n "$text" ]]; then
      printf '%b%s%b%*s%s%*s%b%s%b\n' \
        "$color" "$V" "$NC" "$padding" '' "$text" "$padding_right" '' "$color" "$V" "$NC"
    else
      printf '%b%s%b%*s%b%s%b\n' \
        "$color" "$V" "$NC" "$((width - 2))" '' "$color" "$V" "$NC"
    fi
  done

  printf '%b%s%s%s%b\n' "$color" "$BL" "$rule" "$BR" "$NC"
}
# Generate layered architecture diagram
generate_layered() {
  # Print a static layered-architecture diagram; only the title line and
  # its underline are dynamic.  Heredoc art must stay verbatim.
  local title="${DIAGRAM_TITLE:-Layered Architecture}"
  # NOTE(review): width is not used in this function — confirm before removing.
  local width=60
  cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌────────────────────────────────────────────────────────────┐
│ Presentation Layer │
│ (UI, Controllers, Views) │
└─────────────────────────┬──────────────────────────────────┘
┌────────────────────────────────────────────────────────────┐
│ Business Layer │
│ (Business Logic, Services, DTOs) │
└─────────────────────────┬──────────────────────────────────┘
┌────────────────────────────────────────────────────────────┐
│ Persistence Layer │
│ (Data Access, Repositories, ORMs) │
└─────────────────────────┬──────────────────────────────────┘
┌────────────────────────────────────────────────────────────┐
│ Database Layer │
│ (PostgreSQL, MongoDB, etc.) │
└────────────────────────────────────────────────────────────┘
Data Flow: Top → Down (Request) | Bottom → Top (Response)
EOF
}
# Generate microservices architecture diagram
generate_microservices() {
  # Print a static microservices diagram; only the title is dynamic.
  # Heredoc art must stay verbatim.
  local title="${DIAGRAM_TITLE:-Microservices Architecture}"
  cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌─────────────┐
│ API │
│ Gateway │
└──────┬──────┘
┌──────────────────┼──────────────────┐
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│ User │ │ Product │ │ Order │
│ Service │ │ Service │ │ Service │
└────┬─────┘ └────┬─────┘ └────┬─────┘
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│ Users │ │ Products │ │ Orders │
│ DB │ │ DB │ │ DB │
└──────────┘ └──────────┘ └──────────┘
┌────────────────────────────────────┐
│ Message Queue (RabbitMQ) │
│ Event Distribution │
└────────────────────────────────────┘
┌────────────────────────────────────┐
│ Service Discovery │
│ (Consul/Eureka) │
└────────────────────────────────────┘
EOF
}
# Generate database architecture diagram
generate_database() {
  # Print a static database-architecture diagram; only the title is
  # dynamic.  Heredoc art must stay verbatim.
  local title="${DIAGRAM_TITLE:-Database Architecture}"
  cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌──────────────────┐
│ Application │
│ Tier │
└────────┬─────────┘
┌─────────────┼─────────────┐
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│ Read │ │ Write │ │ Cache │
│ Pool │ │ Pool │ │ Layer │
└────┬─────┘ └────┬─────┘ └──────────┘
│ │ (Redis)
│ │
▼ ▼
┌──────────────────────────────┐
│ Load Balancer │
│ (Connection Pool) │
└──────────┬───────────────────┘
┌──────────┼──────────┐
│ │ │
▼ ▼ ▼
┌────────┐ ┌────────┐ ┌────────┐
│ Read │ │Primary │ │ Read │
│Replica │ │Database│ │Replica │
│ 1 │ │ Master │ │ 2 │
└────────┘ └───┬────┘ └────────┘
│ Replication
┌──────────┐
│ Backup │
│ Storage │
└──────────┘
EOF
}
# Generate network topology diagram
generate_network() {
  # Print a static network-topology diagram; only the title is dynamic.
  # Heredoc art must stay verbatim.
  local title="${DIAGRAM_TITLE:-Network Topology}"
  cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
Internet
┌──────────┴──────────┐
│ │
▼ ▼
┌──────────┐ ┌──────────┐
│ CDN │ │ WAF │
│ (Static) │ │(Security)│
└──────────┘ └────┬─────┘
┌──────┴──────┐
│ │
▼ ▼
┌──────────┐ ┌──────────┐
│ Load │ │ Load │
│ Balancer │ │ Balancer │
│ (AZ1) │ │ (AZ2) │
└────┬─────┘ └────┬─────┘
│ │
┌────────────────────┼─────────────┼────────────────────┐
│ │ │ │
▼ ▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐
│ App │ │ App │ │ App │ │ App │
│ Server 1 │ │ Server 2 │ │ Server 3 │ │ Server 4 │
│ (AZ1) │ │ (AZ1) │ │ (AZ2) │ │ (AZ2) │
└────┬─────┘ └────┬─────┘ └────┬─────┘ └────┬─────┘
│ │ │ │
└───────────────────┼─────────────┼────────────────────┘
│ │
▼ ▼
┌──────────┐ ┌──────────┐
│ Database │ │ Database │
│ Primary │ │ Standby │
│ (AZ1) │ │ (AZ2) │
└──────────┘ └──────────┘
Availability Zones: AZ1 (Primary), AZ2 (Secondary)
EOF
}
# Generate component interaction diagram
generate_component() {
  # Print a static component-interaction diagram; only the title is
  # dynamic.  Heredoc art must stay verbatim.
  local title="${DIAGRAM_TITLE:-Component Interaction}"
  cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
┌──────────────┐
│ Client │
│ (Browser) │
└───────┬──────┘
│ HTTP/HTTPS
┌──────────────────────────────────────────┐
│ Frontend (React/Vue) │
│ ┌────────────┐ ┌────────────┐ │
│ │ Components │◄────►│ State │ │
│ └────────────┘ │ Management │ │
│ └────────────┘ │
└───────────┬──────────────────────────────┘
│ REST/GraphQL
┌──────────────────────────────────────────┐
│ Backend API (Node.js/Python) │
│ ┌─────────┐ ┌──────────┐ ┌────────┐ │
│ │ Auth │ │ Business │ │ Data │ │
│ │ Service │──│ Logic │──│ Access │ │
│ └─────────┘ └──────────┘ └────┬───┘ │
└────────────────────────────────────┬─────┘
┌────────────────┼────────────────┐
│ │ │
▼ ▼ ▼
┌──────────┐ ┌──────────┐ ┌──────────┐
│PostgreSQL│ │ Redis │ │ S3 │
│ Database │ │ Cache │ │ Storage │
└──────────┘ └──────────┘ └──────────┘
Data Flow:
→ Request (Client to Server)
← Response (Server to Client)
◄► Bidirectional Communication
EOF
}
# Generate data flow diagram
generate_dataflow() {
  # Print a static data-flow diagram with a numbered step legend; only
  # the title is dynamic.  Heredoc art must stay verbatim.
  local title="${DIAGRAM_TITLE:-Data Flow Diagram}"
  cat <<EOF
$title
$( printf '=%.0s' $(seq 1 ${#title}) )
External Systems Application Data Storage
───────────────── ───────────── ──────────────
┌────────┐ ┌────────┐
│ User │───────(1)────►┌──────────┐ │Primary │
│ Input │ User Request │ API │────(2)───────────►│Database│
└────────┘ │ Gateway │ Query/Write └────┬───┘
└────┬─────┘ │
│ │
│ (3) Process │
▼ │
┌────────┐ ┌──────────┐ │
│External│◄───(4)─────│ Business │◄────(5)─────────────────┘
│ APIs │ Fetch Data│ Logic │ Read Data
└────────┘ └────┬─────┘
│ (6) Cache
┌──────────┐
│ Cache │
│ Layer │
└────┬─────┘
│ (7) Response
┌────────┐ ┌──────────┐
│ User │◄───(8)─────│ Response │
│ Output │ JSON/HTML │Formatter │
└────────┘ └──────────┘
Flow Steps:
(1) User sends request
(2) Gateway queries database
(3) Business logic processes
(4) External API calls
(5) Database read operations
(6) Cache result
(7) Format response
(8) Return to user
EOF
}
# Main execution
# Main execution
main() {
  # Show usage when invoked without arguments or with an explicit --help.
  if (( $# == 0 )) || [[ "$1" == "--help" ]]; then
    usage
    exit 0
  fi

  # Populate DIAGRAM_TITLE / USE_COLOR from the remaining arguments.
  parse_options "$@"

  # Dispatch to the generator matching the requested diagram type.
  case "$DIAGRAM_TYPE" in
    layered|microservices|database|network|component|dataflow)
      "generate_${DIAGRAM_TYPE}"
      ;;
    *)
      echo "Error: Unknown diagram type: $DIAGRAM_TYPE" >&2
      echo "Run '$SCRIPT_NAME --help' for usage information" >&2
      exit 2
      ;;
  esac
  exit 0
}
# Run main function
main "$@"