Initial commit
scripts/apply_best_practices.py (new file, 506 lines)
@@ -0,0 +1,506 @@
#!/usr/bin/env python3
"""
Apply Bubble Tea best practices validation.
Validates code against 11 expert tips from tip-bubbltea-apps.md.
"""

import os
import re
import json
from pathlib import Path
from typing import Dict, List, Any, Tuple


# Path to tips reference
TIPS_FILE = Path("/Users/williamvansickleiii/charmtuitemplate/charm-tui-template/tip-bubbltea-apps.md")


def apply_best_practices(code_path: str, tips_file: str = None) -> Dict[str, Any]:
    """
    Validate Bubble Tea code against best practices from tip-bubbltea-apps.md.

    Args:
        code_path: Path to Go file or directory
        tips_file: Optional path to the tips file (accepted for CLI compatibility; not currently used by the checks)

    Returns:
        Dictionary containing:
        - compliance: Status for each of 11 tips
        - overall_score: 0-100
        - recommendations: List of improvements
        - validation: Validation report
    """
    path = Path(code_path)

    if not path.exists():
        return {
            "error": f"Path not found: {code_path}",
            "validation": {"status": "error", "summary": "Invalid path"}
        }

    # Collect all .go files
    go_files = []
    if path.is_file():
        if path.suffix == '.go':
            go_files = [path]
    else:
        go_files = list(path.glob('**/*.go'))

    if not go_files:
        return {
            "error": "No .go files found",
            "validation": {"status": "error", "summary": "No Go files"}
        }

    # Read all Go code
    all_content = ""
    for go_file in go_files:
        try:
            all_content += go_file.read_text() + "\n"
        except Exception:
            pass

    # Check each tip
    compliance = {}

    compliance["tip_1_fast_event_loop"] = _check_tip_1_fast_event_loop(all_content, go_files)
    compliance["tip_2_debug_dumping"] = _check_tip_2_debug_dumping(all_content, go_files)
    compliance["tip_3_live_reload"] = _check_tip_3_live_reload(path)
    compliance["tip_4_receiver_methods"] = _check_tip_4_receiver_methods(all_content, go_files)
    compliance["tip_5_message_ordering"] = _check_tip_5_message_ordering(all_content, go_files)
    compliance["tip_6_model_tree"] = _check_tip_6_model_tree(all_content, go_files)
    compliance["tip_7_layout_arithmetic"] = _check_tip_7_layout_arithmetic(all_content, go_files)
    compliance["tip_8_terminal_recovery"] = _check_tip_8_terminal_recovery(all_content, go_files)
    compliance["tip_9_teatest"] = _check_tip_9_teatest(path)
    compliance["tip_10_vhs"] = _check_tip_10_vhs(path)
    compliance["tip_11_resources"] = {"status": "info", "score": 100, "message": "Check leg100.github.io for more tips"}

    # Calculate overall score
    scores = [tip["score"] for tip in compliance.values()]
    overall_score = int(sum(scores) / len(scores))
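    # Illustration with hypothetical scores: nine tips at 100 and two at 0 would give
    # overall_score = int(900 / 11) = 81.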

    # Generate recommendations
    recommendations = []
    for tip_name, tip_data in compliance.items():
        if tip_data["status"] == "fail":
            recommendations.append(tip_data.get("recommendation", f"Implement {tip_name}"))

    # Summary
    if overall_score >= 90:
        summary = f"✅ Excellent! Score: {overall_score}/100. Following best practices."
    elif overall_score >= 70:
        summary = f"✓ Good. Score: {overall_score}/100. Some improvements possible."
    elif overall_score >= 50:
        summary = f"⚠️ Fair. Score: {overall_score}/100. Several best practices missing."
    else:
        summary = f"❌ Poor. Score: {overall_score}/100. Many best practices not followed."

    # Validation
    validation = {
        "status": "pass" if overall_score >= 70 else "warning" if overall_score >= 50 else "fail",
        "summary": summary,
        "checks": {
            "fast_event_loop": compliance["tip_1_fast_event_loop"]["status"] == "pass",
            "has_debugging": compliance["tip_2_debug_dumping"]["status"] == "pass",
            "proper_layout": compliance["tip_7_layout_arithmetic"]["status"] == "pass",
            "has_recovery": compliance["tip_8_terminal_recovery"]["status"] == "pass"
        }
    }

    return {
        "compliance": compliance,
        "overall_score": overall_score,
        "recommendations": recommendations,
        "summary": summary,
        "files_analyzed": len(go_files),
        "validation": validation
    }


def _check_tip_1_fast_event_loop(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 1: Keep the event loop fast."""
    # Check for blocking operations in Update() or View()
    blocking_patterns = [
        r'\btime\.Sleep\s*\(',
        r'\bhttp\.(Get|Post|Do)\s*\(',
        r'\bos\.Open\s*\(',
        r'\bio\.ReadAll\s*\(',
        r'\bexec\.Command\([^)]+\)\.Run\(\)',
    ]

    has_blocking = any(re.search(pattern, content) for pattern in blocking_patterns)
    has_tea_cmd = bool(re.search(r'tea\.Cmd', content))

    if has_blocking and not has_tea_cmd:
        return {
            "status": "fail",
            "score": 0,
            "message": "Blocking operations found in event loop without tea.Cmd",
            "recommendation": "Move blocking operations to tea.Cmd goroutines",
            "explanation": "Blocking ops in Update()/View() freeze the UI. Use tea.Cmd for I/O."
        }
    elif has_blocking and has_tea_cmd:
        return {
            "status": "warning",
            "score": 50,
            "message": "Blocking operations present but tea.Cmd is used",
            "recommendation": "Verify all blocking ops are in tea.Cmd, not Update()/View()",
            "explanation": "Review code to ensure blocking operations are properly wrapped"
        }
    else:
        return {
            "status": "pass",
            "score": 100,
            "message": "No blocking operations detected in event loop",
            "explanation": "Event loop appears to be non-blocking"
        }


def _check_tip_2_debug_dumping(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 2: Dump messages to a file for debugging."""
    has_spew = bool(re.search(r'github\.com/davecgh/go-spew', content))
    has_debug_write = bool(re.search(r'(dump|debug|log)\s+io\.Writer', content))
    has_fmt_fprintf = bool(re.search(r'fmt\.Fprintf', content))

    if has_spew or has_debug_write:
        return {
            "status": "pass",
            "score": 100,
            "message": "Debug message dumping capability detected",
            "explanation": "Using spew or debug writer for message inspection"
        }
    elif has_fmt_fprintf:
        return {
            "status": "warning",
            "score": 60,
            "message": "Basic logging present, but no structured message dumping",
            "recommendation": "Add spew.Fdump for detailed message inspection",
            "explanation": "fmt.Fprintf works but spew provides better message structure"
        }
    else:
        return {
            "status": "fail",
            "score": 0,
            "message": "No debug message dumping detected",
            "recommendation": "Add message dumping with go-spew:\n" +
                              "import \"github.com/davecgh/go-spew/spew\"\n" +
                              "type model struct { dump io.Writer }\n" +
                              "func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {\n" +
                              "    if m.dump != nil { spew.Fdump(m.dump, msg) }\n" +
                              "    // ... rest of Update()\n" +
                              "}",
            "explanation": "Message dumping helps debug complex message flows"
        }


def _check_tip_3_live_reload(path: Path) -> Dict[str, Any]:
    """Tip 3: Live reload code changes."""
    # Check for air config or similar
    has_air_config = (path / ".air.toml").exists()
    has_makefile_watch = False

    if (path / "Makefile").exists():
        makefile = (path / "Makefile").read_text()
        has_makefile_watch = bool(re.search(r'watch:|live:', makefile))

    if has_air_config:
        return {
            "status": "pass",
            "score": 100,
            "message": "Live reload configured with air",
            "explanation": "Found .air.toml configuration"
        }
    elif has_makefile_watch:
        return {
            "status": "pass",
            "score": 100,
            "message": "Live reload configured in Makefile",
            "explanation": "Found watch/live target in Makefile"
        }
    else:
        return {
            "status": "info",
            "score": 100,
            "message": "No live reload detected (optional)",
            "recommendation": "Consider adding air for live reload during development",
            "explanation": "Live reload improves development speed but is optional"
        }


def _check_tip_4_receiver_methods(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 4: Use pointer vs value receivers judiciously."""
    # Check Update() receiver type (should be value receiver)
    update_value_receiver = bool(re.search(r'func\s+\(m\s+\w+\)\s+Update\s*\(', content))
    update_pointer_receiver = bool(re.search(r'func\s+\(m\s+\*\w+\)\s+Update\s*\(', content))

    if update_pointer_receiver:
        return {
            "status": "warning",
            "score": 60,
            "message": "Update() uses pointer receiver (uncommon pattern)",
            "recommendation": "Consider value receiver for Update() (standard pattern)",
            "explanation": "Value receiver is standard for Update() in Bubble Tea"
        }
    elif update_value_receiver:
        return {
            "status": "pass",
            "score": 100,
            "message": "Update() uses value receiver (correct)",
            "explanation": "Following standard Bubble Tea pattern"
        }
    else:
        return {
            "status": "info",
            "score": 100,
            "message": "No Update() method found or unable to detect",
            "explanation": "Could not determine receiver type"
        }


def _check_tip_5_message_ordering(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 5: Messages from concurrent commands are not guaranteed to arrive in order."""
    has_batch = bool(re.search(r'tea\.Batch\s*\(', content))
    has_concurrent_cmds = bool(re.search(r'go\s+func\s*\(', content))
    has_state_tracking = bool(re.search(r'type\s+\w*State\s+(int|string)', content)) or \
        bool(re.search(r'operations\s+map\[string\]', content))

    if (has_batch or has_concurrent_cmds) and not has_state_tracking:
        return {
            "status": "warning",
            "score": 50,
            "message": "Concurrent commands without explicit state tracking",
            "recommendation": "Add state machine to track concurrent operations",
            "explanation": "tea.Batch messages arrive in unpredictable order"
        }
    elif has_batch or has_concurrent_cmds:
        return {
            "status": "pass",
            "score": 100,
            "message": "Concurrent commands with state tracking",
            "explanation": "Proper handling of message ordering"
        }
    else:
        return {
            "status": "pass",
            "score": 100,
            "message": "No concurrent commands detected",
            "explanation": "Message ordering is deterministic"
        }


def _check_tip_6_model_tree(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 6: Build a tree of models for complex apps."""
    # Count model fields
    model_match = re.search(r'type\s+(\w*[Mm]odel)\s+struct\s*\{([^}]+)\}', content, re.DOTALL)
    if not model_match:
        return {
            "status": "info",
            "score": 100,
            "message": "No model struct found",
            "explanation": "Could not analyze model structure"
        }

    model_body = model_match.group(2)
    field_count = len([line for line in model_body.split('\n') if line.strip() and not line.strip().startswith('//')])

    # Check for child models
    has_child_models = bool(re.search(r'\w+Model\s+\w+Model', content))

    if field_count > 20 and not has_child_models:
        return {
            "status": "warning",
            "score": 40,
            "message": f"Large model ({field_count} fields) without child models",
            "recommendation": "Consider refactoring to model tree pattern",
            "explanation": "Large models are hard to maintain. Split into child models."
        }
    elif field_count > 15 and not has_child_models:
        return {
            "status": "info",
            "score": 70,
            "message": f"Medium model ({field_count} fields)",
            "recommendation": "Consider model tree if complexity increases",
            "explanation": "Model is getting large, monitor complexity"
        }
    elif has_child_models:
        return {
            "status": "pass",
            "score": 100,
            "message": "Using model tree pattern with child models",
            "explanation": "Good architecture for complex apps"
        }
    else:
        return {
            "status": "pass",
            "score": 100,
            "message": f"Simple model ({field_count} fields)",
            "explanation": "Model size is appropriate"
        }


def _check_tip_7_layout_arithmetic(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 7: Layout arithmetic is error-prone."""
    uses_lipgloss = bool(re.search(r'github\.com/charmbracelet/lipgloss', content))
    has_lipgloss_helpers = bool(re.search(r'lipgloss\.(Height|Width|GetVertical|GetHorizontal)', content))
    has_hardcoded_dimensions = bool(re.search(r'\.(Width|Height)\s*\(\s*\d{2,}\s*\)', content))

    if uses_lipgloss and has_lipgloss_helpers and not has_hardcoded_dimensions:
        return {
            "status": "pass",
            "score": 100,
            "message": "Using lipgloss helpers for dynamic layout",
            "explanation": "Correct use of lipgloss.Height()/Width()"
        }
    elif uses_lipgloss and has_hardcoded_dimensions:
        return {
            "status": "warning",
            "score": 40,
            "message": "Hardcoded dimensions detected",
            "recommendation": "Use lipgloss.Height() and lipgloss.Width() for calculations",
            "explanation": "Hardcoded dimensions don't adapt to terminal size"
        }
    elif uses_lipgloss:
        return {
            "status": "warning",
            "score": 60,
            "message": "Using lipgloss but unclear if using helpers",
            "recommendation": "Use lipgloss.Height() and lipgloss.Width() for layout",
            "explanation": "Avoid manual height/width calculations"
        }
    else:
        return {
            "status": "info",
            "score": 100,
            "message": "Not using lipgloss",
            "explanation": "Layout tip applies when using lipgloss"
        }


def _check_tip_8_terminal_recovery(content: str, files: List[Path]) -> Dict[str, Any]:
    """Tip 8: Recover your terminal after panics."""
    has_defer_recover = bool(re.search(r'defer\s+func\s*\(\s*\)\s*\{[^}]*recover\(\)', content, re.DOTALL))
    has_main = bool(re.search(r'func\s+main\s*\(\s*\)', content))
    has_disable_mouse = bool(re.search(r'tea\.DisableMouseAllMotion', content))

    if has_main and has_defer_recover and has_disable_mouse:
        return {
            "status": "pass",
            "score": 100,
            "message": "Panic recovery with terminal cleanup",
            "explanation": "Proper defer recover() with DisableMouseAllMotion"
        }
    elif has_main and has_defer_recover:
        return {
            "status": "warning",
            "score": 70,
            "message": "Panic recovery but missing DisableMouseAllMotion",
            "recommendation": "Add tea.DisableMouseAllMotion() in panic handler",
            "explanation": "Need to clean up mouse mode on panic"
        }
    elif has_main:
        return {
            "status": "fail",
            "score": 0,
            "message": "Missing panic recovery in main()",
            "recommendation": "Add defer recover() with terminal cleanup",
            "explanation": "Panics can leave terminal in broken state"
        }
    else:
        return {
            "status": "info",
            "score": 100,
            "message": "No main() found (library code?)",
            "explanation": "Recovery applies to main applications"
        }


def _check_tip_9_teatest(path: Path) -> Dict[str, Any]:
    """Tip 9: Use teatest for end-to-end tests."""
    # Look for test files using teatest
    test_files = list(path.glob('**/*_test.go'))
    has_teatest = False

    for test_file in test_files:
        try:
            content = test_file.read_text()
            if 'teatest' in content or 'tea/teatest' in content:
                has_teatest = True
                break
        except Exception:
            pass

    if has_teatest:
        return {
            "status": "pass",
            "score": 100,
            "message": "Using teatest for testing",
            "explanation": "Found teatest in test files"
        }
    elif test_files:
        return {
            "status": "warning",
            "score": 60,
            "message": "Has tests but not using teatest",
            "recommendation": "Consider using teatest for TUI integration tests",
            "explanation": "teatest enables end-to-end TUI testing"
        }
    else:
        return {
            "status": "fail",
            "score": 0,
            "message": "No tests found",
            "recommendation": "Add teatest tests for key interactions",
            "explanation": "Testing improves reliability"
        }


def _check_tip_10_vhs(path: Path) -> Dict[str, Any]:
    """Tip 10: Use VHS to record demos."""
    # Look for .tape files (VHS)
    vhs_files = list(path.glob('**/*.tape'))

    if vhs_files:
        return {
            "status": "pass",
            "score": 100,
            "message": f"Found {len(vhs_files)} VHS demo file(s)",
            "explanation": "Using VHS for documentation"
        }
    else:
        return {
            "status": "info",
            "score": 100,
            "message": "No VHS demos found (optional)",
            "recommendation": "Consider adding VHS demos for documentation",
            "explanation": "VHS creates great animated demos but is optional"
        }


def validate_best_practices(result: Dict[str, Any]) -> Dict[str, Any]:
    """Validate best practices result."""
    if 'error' in result:
        return {"status": "error", "summary": result['error']}

    overall_score = result.get('overall_score', 0)
    status = "pass" if overall_score >= 70 else "warning" if overall_score >= 50 else "fail"

    return {
        "status": status,
        "summary": result.get('summary', 'Best practices check complete'),
        "score": overall_score,
        "valid": True
    }


if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: apply_best_practices.py <code_path> [tips_file]")
        sys.exit(1)

    code_path = sys.argv[1]
    tips_file = sys.argv[2] if len(sys.argv) > 2 else None

    result = apply_best_practices(code_path, tips_file)
    print(json.dumps(result, indent=2))
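# Example invocation (project path is hypothetical, shown for illustration only):
#   python3 scripts/apply_best_practices.py ./cmd/myapp
# The JSON report (compliance, overall_score, recommendations, validation) is printed to stdout.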
scripts/comprehensive_bubbletea_analysis.py (new file, 433 lines)
@@ -0,0 +1,433 @@
#!/usr/bin/env python3
"""
Comprehensive Bubble Tea application analysis.
Orchestrates all analysis functions for a complete health check.
"""

import sys
import json
from pathlib import Path
from typing import Dict, List, Any

# Import all analysis functions
sys.path.insert(0, str(Path(__file__).parent))

from diagnose_issue import diagnose_issue
from apply_best_practices import apply_best_practices
from debug_performance import debug_performance
from suggest_architecture import suggest_architecture
from fix_layout_issues import fix_layout_issues


def comprehensive_bubbletea_analysis(code_path: str, detail_level: str = "standard") -> Dict[str, Any]:
    """
    Perform complete health check of Bubble Tea application.

    Args:
        code_path: Path to Go file or directory containing Bubble Tea code
        detail_level: "quick", "standard", or "deep"

    Returns:
        Dictionary containing:
        - overall_health: 0-100 score
        - sections: Results from each analysis function
        - summary: Executive summary
        - priority_fixes: Ordered list of critical/high-priority issues
        - estimated_fix_time: Time estimate for addressing issues
        - validation: Overall validation report
    """
    path = Path(code_path)

    if not path.exists():
        return {
            "error": f"Path not found: {code_path}",
            "validation": {"status": "error", "summary": "Invalid path"}
        }

    print(f"\n{'='*70}")
    print(f"COMPREHENSIVE BUBBLE TEA ANALYSIS")
    print(f"{'='*70}")
    print(f"Analyzing: {path}")
    print(f"Detail level: {detail_level}\n")

    sections = {}

    # Section 1: Issue Diagnosis
    print("🔍 [1/5] Diagnosing issues...")
    try:
        sections['issues'] = diagnose_issue(str(path))
        print(f"   ✓ Found {len(sections['issues'].get('issues', []))} issue(s)")
    except Exception as e:
        sections['issues'] = {"error": str(e)}
        print(f"   ✗ Error: {e}")

    # Section 2: Best Practices Compliance
    print("📋 [2/5] Checking best practices...")
    try:
        sections['best_practices'] = apply_best_practices(str(path))
        score = sections['best_practices'].get('overall_score', 0)
        print(f"   ✓ Score: {score}/100")
    except Exception as e:
        sections['best_practices'] = {"error": str(e)}
        print(f"   ✗ Error: {e}")

    # Section 3: Performance Analysis
    print("⚡ [3/5] Analyzing performance...")
    try:
        sections['performance'] = debug_performance(str(path))
        bottleneck_count = len(sections['performance'].get('bottlenecks', []))
        print(f"   ✓ Found {bottleneck_count} bottleneck(s)")
    except Exception as e:
        sections['performance'] = {"error": str(e)}
        print(f"   ✗ Error: {e}")

    # Section 4: Architecture Recommendations
    if detail_level in ["standard", "deep"]:
        print("🏗️ [4/5] Analyzing architecture...")
        try:
            sections['architecture'] = suggest_architecture(str(path))
            current = sections['architecture'].get('current_pattern', 'unknown')
            recommended = sections['architecture'].get('recommended_pattern', 'unknown')
            print(f"   ✓ Current: {current}, Recommended: {recommended}")
        except Exception as e:
            sections['architecture'] = {"error": str(e)}
            print(f"   ✗ Error: {e}")
    else:
        print("🏗️ [4/5] Skipping architecture (quick mode)")
        sections['architecture'] = {"skipped": "quick mode"}

    # Section 5: Layout Validation
    print("📐 [5/5] Checking layout...")
    try:
        sections['layout'] = fix_layout_issues(str(path))
        issue_count = len(sections['layout'].get('layout_issues', []))
        print(f"   ✓ Found {issue_count} layout issue(s)")
    except Exception as e:
        sections['layout'] = {"error": str(e)}
        print(f"   ✗ Error: {e}")

    print()

    # Calculate overall health
    overall_health = _calculate_overall_health(sections)

    # Extract priority fixes
    priority_fixes = _extract_priority_fixes(sections)

    # Estimate fix time
    estimated_fix_time = _estimate_fix_time(priority_fixes)

    # Generate summary
    summary = _generate_summary(overall_health, sections, priority_fixes)

    # Overall validation
    validation = {
        "status": _determine_status(overall_health),
        "summary": summary,
        "overall_health": overall_health,
        "sections_completed": len([s for s in sections.values() if 'error' not in s and 'skipped' not in s]),
        "total_sections": 5
    }

    # Print summary
    _print_summary_report(overall_health, summary, priority_fixes, estimated_fix_time)

    return {
        "overall_health": overall_health,
        "sections": sections,
        "summary": summary,
        "priority_fixes": priority_fixes,
        "estimated_fix_time": estimated_fix_time,
        "validation": validation,
        "detail_level": detail_level,
        "analyzed_path": str(path)
    }


def _calculate_overall_health(sections: Dict[str, Any]) -> int:
    """Calculate overall health score (0-100)."""

    scores = []
    weights = {
        'issues': 0.25,
        'best_practices': 0.25,
        'performance': 0.20,
        'architecture': 0.15,
        'layout': 0.15
    }

    # Issues score (health_score from diagnose_issue)
    if 'issues' in sections and 'health_score' in sections['issues']:
        scores.append((sections['issues']['health_score'], weights['issues']))

    # Best practices score
    if 'best_practices' in sections and 'overall_score' in sections['best_practices']:
        scores.append((sections['best_practices']['overall_score'], weights['best_practices']))

    # Performance score (derive from bottlenecks)
    if 'performance' in sections and 'bottlenecks' in sections['performance']:
        bottlenecks = sections['performance']['bottlenecks']
        critical = sum(1 for b in bottlenecks if b['severity'] == 'CRITICAL')
        high = sum(1 for b in bottlenecks if b['severity'] == 'HIGH')
        perf_score = max(0, 100 - (critical * 20) - (high * 10))
        scores.append((perf_score, weights['performance']))

    # Architecture score (based on complexity vs pattern appropriateness)
    if 'architecture' in sections and 'complexity_score' in sections['architecture']:
        arch_data = sections['architecture']
        # Good if recommended == current, or if complexity is low
        if arch_data.get('recommended_pattern') == arch_data.get('current_pattern'):
            arch_score = 100
        elif arch_data.get('complexity_score', 0) < 40:
            arch_score = 80  # Simple app, pattern less critical
        else:
            arch_score = 60  # Should refactor
        scores.append((arch_score, weights['architecture']))

    # Layout score (inverse of issues)
    if 'layout' in sections and 'layout_issues' in sections['layout']:
        layout_issues = sections['layout']['layout_issues']
        critical = sum(1 for i in layout_issues if i['severity'] == 'CRITICAL')
        warning = sum(1 for i in layout_issues if i['severity'] == 'WARNING')
        layout_score = max(0, 100 - (critical * 15) - (warning * 5))
        scores.append((layout_score, weights['layout']))

    # Weighted average
    if not scores:
        return 50  # No data

    weighted_sum = sum(score * weight for score, weight in scores)
    total_weight = sum(weight for _, weight in scores)
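    # Worked example with hypothetical inputs: issues=80, best_practices=90, performance=70,
    # layout=100 and no architecture data -> weighted_sum = 80*0.25 + 90*0.25 + 70*0.20
    # + 100*0.15 = 71.5, total_weight = 0.85, and the result is int(71.5 / 0.85) = 84.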

    return int(weighted_sum / total_weight)


def _extract_priority_fixes(sections: Dict[str, Any]) -> List[Dict[str, str]]:
    """Extract priority fixes across all sections."""

    fixes = []

    # Critical issues
    if 'issues' in sections and 'issues' in sections['issues']:
        critical = [i for i in sections['issues']['issues'] if i['severity'] == 'CRITICAL']
        for issue in critical:
            fixes.append({
                "priority": "CRITICAL",
                "source": "Issues",
                "description": f"{issue['issue']} ({issue['location']})",
                "fix": issue.get('fix', 'See issue details')
            })

    # Critical performance bottlenecks
    if 'performance' in sections and 'bottlenecks' in sections['performance']:
        critical = [b for b in sections['performance']['bottlenecks'] if b['severity'] == 'CRITICAL']
        for bottleneck in critical:
            fixes.append({
                "priority": "CRITICAL",
                "source": "Performance",
                "description": f"{bottleneck['issue']} ({bottleneck['location']})",
                "fix": bottleneck.get('fix', 'See bottleneck details')
            })

    # Critical layout issues
    if 'layout' in sections and 'layout_issues' in sections['layout']:
        critical = [i for i in sections['layout']['layout_issues'] if i['severity'] == 'CRITICAL']
        for issue in critical:
            fixes.append({
                "priority": "CRITICAL",
                "source": "Layout",
                "description": f"{issue['issue']} ({issue['location']})",
                "fix": issue.get('explanation', 'See layout details')
            })

    # Best practice failures
    if 'best_practices' in sections and 'compliance' in sections['best_practices']:
        compliance = sections['best_practices']['compliance']
        failures = [tip for tip, data in compliance.items() if data['status'] == 'fail']
        for tip in failures[:3]:  # Top 3
            fixes.append({
                "priority": "WARNING",
                "source": "Best Practices",
                "description": f"Missing {tip.replace('_', ' ')}",
                "fix": compliance[tip].get('recommendation', 'See best practices')
            })

    # Architecture recommendations (if significant refactoring needed)
    if 'architecture' in sections and 'complexity_score' in sections['architecture']:
        arch_data = sections['architecture']
        if arch_data.get('complexity_score', 0) > 70:
            if arch_data.get('recommended_pattern') != arch_data.get('current_pattern'):
                fixes.append({
                    "priority": "INFO",
                    "source": "Architecture",
                    "description": f"Consider refactoring to {arch_data.get('recommended_pattern')}",
                    "fix": f"See architecture recommendations for {len(arch_data.get('refactoring_steps', []))} steps"
                })

    return fixes


def _estimate_fix_time(priority_fixes: List[Dict[str, str]]) -> str:
    """Estimate time to address priority fixes."""

    critical_count = sum(1 for f in priority_fixes if f['priority'] == 'CRITICAL')
    warning_count = sum(1 for f in priority_fixes if f['priority'] == 'WARNING')
    info_count = sum(1 for f in priority_fixes if f['priority'] == 'INFO')

    # Time estimates (in hours)
    critical_time = critical_count * 0.5   # 30 min each
    warning_time = warning_count * 0.25    # 15 min each
    info_time = info_count * 1.0           # 1 hour each (refactoring)

    total_hours = critical_time + warning_time + info_time
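    # Worked example (hypothetical counts): 2 critical + 3 warnings + 1 info
    # -> 2*0.5 + 3*0.25 + 1*1.0 = 2.75 hours, which lands in the "2-4 hours" bucket below.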

    if total_hours == 0:
        return "No fixes needed"
    elif total_hours < 1:
        return f"{int(total_hours * 60)} minutes"
    elif total_hours < 2:
        return "1-2 hours"
    elif total_hours < 4:
        return "2-4 hours"
    elif total_hours < 8:
        return "4-8 hours"
    else:
        return f"{int(total_hours)} hours (1-2 days)"


def _generate_summary(health: int, sections: Dict[str, Any], fixes: List[Dict[str, str]]) -> str:
    """Generate executive summary."""

    if health >= 90:
        health_desc = "Excellent"
        emoji = "✅"
    elif health >= 75:
        health_desc = "Good"
        emoji = "✓"
    elif health >= 60:
        health_desc = "Fair"
        emoji = "⚠️"
    elif health >= 40:
        health_desc = "Poor"
        emoji = "❌"
    else:
        health_desc = "Critical"
        emoji = "🚨"

    critical_count = sum(1 for f in fixes if f['priority'] == 'CRITICAL')

    if health >= 80:
        summary = f"{emoji} {health_desc} health ({health}/100). Application follows most best practices."
    elif health >= 60:
        summary = f"{emoji} {health_desc} health ({health}/100). Some improvements recommended."
    elif health >= 40:
        summary = f"{emoji} {health_desc} health ({health}/100). Several issues need attention."
    else:
        summary = f"{emoji} {health_desc} health ({health}/100). Multiple critical issues require immediate fixes."

    if critical_count > 0:
        summary += f" {critical_count} critical issue(s) found."

    return summary


def _determine_status(health: int) -> str:
    """Determine overall status from health score."""
    if health >= 80:
        return "pass"
    elif health >= 60:
        return "warning"
    else:
        return "critical"


def _print_summary_report(health: int, summary: str, fixes: List[Dict[str, str]], fix_time: str):
    """Print formatted summary report."""

    print(f"{'='*70}")
    print(f"ANALYSIS COMPLETE")
    print(f"{'='*70}\n")

    print(f"Overall Health: {health}/100")
    print(f"Summary: {summary}\n")

    if fixes:
        print(f"Priority Fixes ({len(fixes)}):")
        print(f"{'-'*70}")

        # Group by priority
        critical = [f for f in fixes if f['priority'] == 'CRITICAL']
        warnings = [f for f in fixes if f['priority'] == 'WARNING']
        info = [f for f in fixes if f['priority'] == 'INFO']

        if critical:
            print(f"\n🔴 CRITICAL ({len(critical)}):")
            for i, fix in enumerate(critical, 1):
                print(f"   {i}. [{fix['source']}] {fix['description']}")

        if warnings:
            print(f"\n⚠️ WARNINGS ({len(warnings)}):")
            for i, fix in enumerate(warnings, 1):
                print(f"   {i}. [{fix['source']}] {fix['description']}")

        if info:
            print(f"\n💡 INFO ({len(info)}):")
            for i, fix in enumerate(info, 1):
                print(f"   {i}. [{fix['source']}] {fix['description']}")

    else:
        print("✅ No priority fixes needed!")

    print(f"\n{'-'*70}")
    print(f"Estimated Fix Time: {fix_time}")
    print(f"{'='*70}\n")


def validate_comprehensive_analysis(result: Dict[str, Any]) -> Dict[str, Any]:
    """Validate comprehensive analysis result."""
    if 'error' in result:
        return {"status": "error", "summary": result['error']}

    validation = result.get('validation', {})
    status = validation.get('status', 'unknown')
    summary = validation.get('summary', 'Analysis complete')

    checks = [
        (result.get('overall_health') is not None, "Health score calculated"),
        (result.get('sections') is not None, "Sections analyzed"),
        (result.get('priority_fixes') is not None, "Priority fixes extracted"),
        (result.get('summary') is not None, "Summary generated"),
    ]

    all_pass = all(check[0] for check in checks)

    return {
        "status": status,
        "summary": summary,
        "checks": {check[1]: check[0] for check in checks},
        "valid": all_pass
    }


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: comprehensive_bubbletea_analysis.py <code_path> [detail_level]")
        print("  detail_level: quick, standard (default), or deep")
        sys.exit(1)

    code_path = sys.argv[1]
    detail_level = sys.argv[2] if len(sys.argv) > 2 else "standard"

    if detail_level not in ["quick", "standard", "deep"]:
        print(f"Invalid detail_level: {detail_level}")
        print("Must be: quick, standard, or deep")
        sys.exit(1)

    result = comprehensive_bubbletea_analysis(code_path, detail_level)

    # Save to file
    output_file = Path(code_path).parent / "bubbletea_analysis_report.json"
    with open(output_file, 'w') as f:
        json.dump(result, f, indent=2)

    print(f"Full report saved to: {output_file}\n")
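# Example invocation (project path is hypothetical, shown for illustration only):
#   python3 scripts/comprehensive_bubbletea_analysis.py ./cmd/myapp deep
# Progress and a summary are printed to stdout, and the full JSON report is written to
# bubbletea_analysis_report.json in the parent directory of the given path.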
scripts/debug_performance.py (new file, 731 lines; truncated below)
@@ -0,0 +1,731 @@
#!/usr/bin/env python3
"""
Debug performance issues in Bubble Tea applications.
Identifies bottlenecks in Update(), View(), and concurrent operations.
"""

import os
import re
import json
from pathlib import Path
from typing import Dict, List, Any, Tuple, Optional


def debug_performance(code_path: str, profile_data: str = "") -> Dict[str, Any]:
    """
    Identify performance bottlenecks in a Bubble Tea application.

    Args:
        code_path: Path to Go file or directory
        profile_data: Optional profiling data such as pprof output or benchmark results (currently echoed into the report rather than parsed)

    Returns:
        Dictionary containing:
        - bottlenecks: List of performance issues with locations and fixes
        - metrics: Performance metrics (if available)
        - recommendations: Prioritized optimization suggestions
        - validation: Validation report
    """
    path = Path(code_path)

    if not path.exists():
        return {
            "error": f"Path not found: {code_path}",
            "validation": {"status": "error", "summary": "Invalid path"}
        }

    # Collect all .go files
    go_files = []
    if path.is_file():
        if path.suffix == '.go':
            go_files = [path]
    else:
        go_files = list(path.glob('**/*.go'))

    if not go_files:
        return {
            "error": "No .go files found",
            "validation": {"status": "error", "summary": "No Go files"}
        }

    # Analyze performance for each file
    all_bottlenecks = []
    for go_file in go_files:
        bottlenecks = _analyze_performance(go_file)
        all_bottlenecks.extend(bottlenecks)

    # Sort by severity
    severity_order = {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3}
    all_bottlenecks.sort(key=lambda x: severity_order.get(x['severity'], 999))

    # Generate recommendations
    recommendations = _generate_performance_recommendations(all_bottlenecks)

    # Estimate metrics
    metrics = _estimate_metrics(all_bottlenecks, go_files)

    # Summary
    critical_count = sum(1 for b in all_bottlenecks if b['severity'] == 'CRITICAL')
    high_count = sum(1 for b in all_bottlenecks if b['severity'] == 'HIGH')

    if critical_count > 0:
        summary = f"⚠️ Found {critical_count} critical performance issue(s)"
    elif high_count > 0:
        summary = f"⚠️ Found {high_count} high-priority performance issue(s)"
    elif all_bottlenecks:
        summary = f"Found {len(all_bottlenecks)} potential optimization(s)"
    else:
        summary = "✅ No major performance issues detected"

    # Validation
    validation = {
        "status": "critical" if critical_count > 0 else "warning" if high_count > 0 else "pass",
        "summary": summary,
        "checks": {
            "fast_update": critical_count == 0,
            "fast_view": high_count == 0,
            "no_memory_leaks": not any(b['category'] == 'memory' for b in all_bottlenecks),
            "efficient_rendering": not any(b['category'] == 'rendering' for b in all_bottlenecks)
        }
    }

    return {
        "bottlenecks": all_bottlenecks,
        "metrics": metrics,
        "recommendations": recommendations,
        "summary": summary,
        "profile_data": profile_data if profile_data else None,
        "validation": validation
    }

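# Programmatic usage sketch for debug_performance() above (hypothetical path, for illustration):
#   report = debug_performance("./internal/tui")
#   report["bottlenecks"] is sorted by severity, and report["validation"]["status"]
#   is "critical", "warning", or "pass".
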
def _analyze_performance(file_path: Path) -> List[Dict[str, Any]]:
    """Analyze a single Go file for performance issues."""
    bottlenecks = []

    try:
        content = file_path.read_text()
    except Exception:
        return []

    lines = content.split('\n')
    rel_path = file_path.name

    # Performance checks
    bottlenecks.extend(_check_update_performance(content, lines, rel_path))
    bottlenecks.extend(_check_view_performance(content, lines, rel_path))
    bottlenecks.extend(_check_string_operations(content, lines, rel_path))
    bottlenecks.extend(_check_regex_performance(content, lines, rel_path))
    bottlenecks.extend(_check_loop_efficiency(content, lines, rel_path))
    bottlenecks.extend(_check_allocation_patterns(content, lines, rel_path))
    bottlenecks.extend(_check_concurrent_operations(content, lines, rel_path))
    bottlenecks.extend(_check_io_operations(content, lines, rel_path))

    return bottlenecks


def _check_update_performance(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
    """Check Update() function for performance issues."""
    bottlenecks = []

    # Find Update() function
    update_start = -1
    update_end = -1
    brace_count = 0

    for i, line in enumerate(lines):
        if re.search(r'func\s+\([^)]+\)\s+Update\s*\(', line):
            update_start = i
            brace_count = line.count('{') - line.count('}')
        elif update_start >= 0:
            brace_count += line.count('{') - line.count('}')
            if brace_count == 0:
                update_end = i
                break

    if update_start < 0:
        return bottlenecks

    update_lines = lines[update_start:update_end+1] if update_end > 0 else lines[update_start:]
    update_code = '\n'.join(update_lines)

    # Check 1: Blocking I/O in Update()
    blocking_patterns = [
        (r'\bhttp\.(Get|Post|Do)\s*\(', "HTTP request", "CRITICAL"),
        (r'\btime\.Sleep\s*\(', "Sleep call", "CRITICAL"),
        (r'\bos\.(Open|Read|Write)', "File I/O", "CRITICAL"),
        (r'\bio\.ReadAll\s*\(', "ReadAll", "CRITICAL"),
        (r'\bexec\.Command\([^)]+\)\.Run\(\)', "Command execution", "CRITICAL"),
        (r'\bdb\.(Query|Exec)', "Database operation", "CRITICAL"),
    ]
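    # Note: the 16 ms budget cited in the messages below corresponds to roughly one frame
    # at 60 FPS (1000 ms / 60 ≈ 16.7 ms); blocking longer than that shows up as UI stutter.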
|
||||
|
||||
for pattern, operation, severity in blocking_patterns:
|
||||
matches = re.finditer(pattern, update_code)
|
||||
for match in matches:
|
||||
# Find line number within Update()
|
||||
line_offset = update_code[:match.start()].count('\n')
|
||||
actual_line = update_start + line_offset
|
||||
|
||||
bottlenecks.append({
|
||||
"severity": severity,
|
||||
"category": "performance",
|
||||
"issue": f"Blocking {operation} in Update()",
|
||||
"location": f"{file_path}:{actual_line+1}",
|
||||
"time_impact": "Blocks event loop (16ms+ delay)",
|
||||
"explanation": f"{operation} blocks the event loop, freezing the UI",
|
||||
"fix": f"Move to tea.Cmd goroutine:\n\n" +
|
||||
f"func fetch{operation.replace(' ', '')}() tea.Msg {{\n" +
|
||||
f" // Runs in background, doesn't block\n" +
|
||||
f" result, err := /* your {operation.lower()} */\n" +
|
||||
f" return resultMsg{{data: result, err: err}}\n" +
|
||||
f"}}\n\n" +
|
||||
f"// In Update():\n" +
|
||||
f"case tea.KeyMsg:\n" +
|
||||
f" if key.String() == \"r\" {{\n" +
|
||||
f" return m, fetch{operation.replace(' ', '')} // Non-blocking\n" +
|
||||
f" }}",
|
||||
"code_example": f"return m, fetch{operation.replace(' ', '')}"
|
||||
})
|
||||
|
||||
# Check 2: Heavy computation in Update()
|
||||
computation_patterns = [
|
||||
(r'for\s+.*range\s+\w+\s*\{[^}]{100,}\}', "Large loop", "HIGH"),
|
||||
(r'json\.(Marshal|Unmarshal)', "JSON processing", "MEDIUM"),
|
||||
(r'regexp\.MustCompile\s*\(', "Regex compilation", "HIGH"),
|
||||
]
|
||||
|
||||
for pattern, operation, severity in computation_patterns:
|
||||
matches = re.finditer(pattern, update_code, re.DOTALL)
|
||||
for match in matches:
|
||||
line_offset = update_code[:match.start()].count('\n')
|
||||
actual_line = update_start + line_offset
|
||||
|
||||
bottlenecks.append({
|
||||
"severity": severity,
|
||||
"category": "performance",
|
||||
"issue": f"Heavy {operation} in Update()",
|
||||
"location": f"{file_path}:{actual_line+1}",
|
||||
"time_impact": "May exceed 16ms budget",
|
||||
"explanation": f"{operation} can be expensive, consider optimizing",
|
||||
"fix": "Optimize:\n" +
|
||||
"- Cache compiled regexes (compile once, reuse)\n" +
|
||||
"- Move heavy processing to tea.Cmd\n" +
|
||||
"- Use incremental updates instead of full recalculation",
|
||||
"code_example": "var cachedRegex = regexp.MustCompile(`pattern`) // Outside Update()"
|
||||
})
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_view_performance(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check View() function for performance issues."""
|
||||
bottlenecks = []
|
||||
|
||||
# Find View() function
|
||||
view_start = -1
|
||||
view_end = -1
|
||||
brace_count = 0
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
if re.search(r'func\s+\([^)]+\)\s+View\s*\(', line):
|
||||
view_start = i
|
||||
brace_count = line.count('{') - line.count('}')
|
||||
elif view_start >= 0:
|
||||
brace_count += line.count('{') - line.count('}')
|
||||
if brace_count == 0:
|
||||
view_end = i
|
||||
break
|
||||
|
||||
if view_start < 0:
|
||||
return bottlenecks
|
||||
|
||||
view_lines = lines[view_start:view_end+1] if view_end > 0 else lines[view_start:]
|
||||
view_code = '\n'.join(view_lines)
|
||||
|
||||
# Check 1: String concatenation with +
|
||||
string_concat_pattern = r'(\w+\s*\+\s*"[^"]*"\s*\+\s*\w+|\w+\s*\+=\s*"[^"]*")'
|
||||
if re.search(string_concat_pattern, view_code):
|
||||
matches = list(re.finditer(string_concat_pattern, view_code))
|
||||
if len(matches) > 5: # Multiple concatenations
|
||||
bottlenecks.append({
|
||||
"severity": "HIGH",
|
||||
"category": "rendering",
|
||||
"issue": f"String concatenation with + operator ({len(matches)} occurrences)",
|
||||
"location": f"{file_path}:{view_start+1} (View function)",
|
||||
"time_impact": "Allocates many temporary strings",
|
||||
"explanation": "Using + for strings creates many allocations. Use strings.Builder.",
|
||||
"fix": "Replace with strings.Builder:\n\n" +
|
||||
"import \"strings\"\n\n" +
|
||||
"func (m model) View() string {\n" +
|
||||
" var b strings.Builder\n" +
|
||||
" b.WriteString(\"header\")\n" +
|
||||
" b.WriteString(m.content)\n" +
|
||||
" b.WriteString(\"footer\")\n" +
|
||||
" return b.String()\n" +
|
||||
"}",
|
||||
"code_example": "var b strings.Builder; b.WriteString(...)"
|
||||
})
|
||||
|
||||
# Check 2: Recompiling lipgloss styles
|
||||
style_in_view = re.findall(r'lipgloss\.NewStyle\(\)', view_code)
|
||||
if len(style_in_view) > 3:
|
||||
bottlenecks.append({
|
||||
"severity": "MEDIUM",
|
||||
"category": "rendering",
|
||||
"issue": f"Creating lipgloss styles in View() ({len(style_in_view)} times)",
|
||||
"location": f"{file_path}:{view_start+1} (View function)",
|
||||
"time_impact": "Recreates styles on every render",
|
||||
"explanation": "Style creation is relatively expensive. Cache styles in model.",
|
||||
"fix": "Cache styles in model:\n\n" +
|
||||
"type model struct {\n" +
|
||||
" // ... other fields\n" +
|
||||
" headerStyle lipgloss.Style\n" +
|
||||
" contentStyle lipgloss.Style\n" +
|
||||
"}\n\n" +
|
||||
"func initialModel() model {\n" +
|
||||
" return model{\n" +
|
||||
" headerStyle: lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color(\"#FF00FF\")),\n" +
|
||||
" contentStyle: lipgloss.NewStyle().Padding(1),\n" +
|
||||
" }\n" +
|
||||
"}\n\n" +
|
||||
"func (m model) View() string {\n" +
|
||||
" return m.headerStyle.Render(\"Header\") + m.contentStyle.Render(m.content)\n" +
|
||||
"}",
|
||||
"code_example": "m.headerStyle.Render(...) // Use cached style"
|
||||
})
|
||||
|
||||
# Check 3: Reading files in View()
|
||||
if re.search(r'\b(os\.ReadFile|ioutil\.ReadFile|os\.Open)', view_code):
|
||||
bottlenecks.append({
|
||||
"severity": "CRITICAL",
|
||||
"category": "rendering",
|
||||
"issue": "File I/O in View() function",
|
||||
"location": f"{file_path}:{view_start+1} (View function)",
|
||||
"time_impact": "Massive delay (1-100ms per render)",
|
||||
"explanation": "View() is called frequently. File I/O blocks rendering.",
|
||||
"fix": "Load file in Update(), cache in model:\n\n" +
|
||||
"type model struct {\n" +
|
||||
" fileContent string\n" +
|
||||
"}\n\n" +
|
||||
"func loadFile() tea.Msg {\n" +
|
||||
" content, err := os.ReadFile(\"file.txt\")\n" +
|
||||
" return fileLoadedMsg{content: string(content), err: err}\n" +
|
||||
"}\n\n" +
|
||||
"// In Update():\n" +
|
||||
"case fileLoadedMsg:\n" +
|
||||
" m.fileContent = msg.content\n\n" +
|
||||
"// In View():\n" +
|
||||
"return m.fileContent // Just return cached data",
|
||||
"code_example": "return m.cachedContent // No I/O in View()"
|
||||
})
|
||||
|
||||
# Check 4: Expensive lipgloss operations
|
||||
join_vertical_count = len(re.findall(r'lipgloss\.JoinVertical', view_code))
|
||||
if join_vertical_count > 10:
|
||||
bottlenecks.append({
|
||||
"severity": "LOW",
|
||||
"category": "rendering",
|
||||
"issue": f"Many lipgloss.JoinVertical calls ({join_vertical_count})",
|
||||
"location": f"{file_path}:{view_start+1} (View function)",
|
||||
"time_impact": "Accumulates string operations",
|
||||
"explanation": "Many join operations can add up. Consider batching.",
|
||||
"fix": "Batch related joins:\n\n" +
|
||||
"// Instead of many small joins:\n" +
|
||||
"// line1 := lipgloss.JoinHorizontal(...)\n" +
|
||||
"// line2 := lipgloss.JoinHorizontal(...)\n" +
|
||||
"// ...\n\n" +
|
||||
"// Build all lines first, join once:\n" +
|
||||
"lines := []string{\n" +
|
||||
" lipgloss.JoinHorizontal(...),\n" +
|
||||
" lipgloss.JoinHorizontal(...),\n" +
|
||||
" lipgloss.JoinHorizontal(...),\n" +
|
||||
"}\n" +
|
||||
"return lipgloss.JoinVertical(lipgloss.Left, lines...)",
|
||||
"code_example": "lipgloss.JoinVertical(lipgloss.Left, lines...)"
|
||||
})
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_string_operations(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for inefficient string operations."""
|
||||
bottlenecks = []
|
||||
|
||||
# Check for fmt.Sprintf in loops
|
||||
for i, line in enumerate(lines):
|
||||
if 'for' in line:
|
||||
# Check next 20 lines for fmt.Sprintf
|
||||
for j in range(i, min(i+20, len(lines))):
|
||||
if 'fmt.Sprintf' in lines[j] and 'result' in lines[j]:
|
||||
bottlenecks.append({
|
||||
"severity": "MEDIUM",
|
||||
"category": "performance",
|
||||
"issue": "fmt.Sprintf in loop",
|
||||
"location": f"{file_path}:{j+1}",
|
||||
"time_impact": "Allocations on every iteration",
|
||||
"explanation": "fmt.Sprintf allocates. Use strings.Builder or fmt.Fprintf.",
|
||||
"fix": "Use strings.Builder:\n\n" +
|
||||
"var b strings.Builder\n" +
|
||||
"for _, item := range items {\n" +
|
||||
" fmt.Fprintf(&b, \"Item: %s\\n\", item)\n" +
|
||||
"}\n" +
|
||||
"result := b.String()",
|
||||
"code_example": "fmt.Fprintf(&builder, ...)"
|
||||
})
|
||||
break
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_regex_performance(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for regex performance issues."""
|
||||
bottlenecks = []
|
||||
|
||||
# Check for regexp.MustCompile in functions (not at package level)
|
||||
in_function = False
|
||||
for i, line in enumerate(lines):
|
||||
if re.match(r'^\s*func\s+', line):
|
||||
in_function = True
|
||||
elif in_function and re.match(r'^\s*$', line):
|
||||
in_function = False
|
||||
|
||||
if in_function and 'regexp.MustCompile' in line:
|
||||
bottlenecks.append({
|
||||
"severity": "HIGH",
|
||||
"category": "performance",
|
||||
"issue": "Compiling regex in function",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"time_impact": "Compiles on every call (1-10ms)",
|
||||
"explanation": "Regex compilation is expensive. Compile once at package level.",
|
||||
"fix": "Move to package level:\n\n" +
|
||||
"// At package level (outside functions)\n" +
|
||||
"var (\n" +
|
||||
" emailRegex = regexp.MustCompile(`^[a-z]+@[a-z]+\\.[a-z]+$`)\n" +
|
||||
" phoneRegex = regexp.MustCompile(`^\\d{3}-\\d{3}-\\d{4}$`)\n" +
|
||||
")\n\n" +
|
||||
"// In function\n" +
|
||||
"func validate(email string) bool {\n" +
|
||||
" return emailRegex.MatchString(email) // Reuse compiled regex\n" +
|
||||
"}",
|
||||
"code_example": "var emailRegex = regexp.MustCompile(...) // Package level"
|
||||
})
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_loop_efficiency(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for inefficient loops."""
|
||||
bottlenecks = []
|
||||
|
||||
# Check for nested loops over large data
|
||||
for i, line in enumerate(lines):
|
||||
if re.search(r'for\s+.*range', line):
|
||||
# Look for nested loop within 30 lines
|
||||
for j in range(i+1, min(i+30, len(lines))):
|
||||
if re.search(r'for\s+.*range', lines[j]):
|
||||
# Check indentation (nested)
|
||||
if len(lines[j]) - len(lines[j].lstrip()) > len(line) - len(line.lstrip()):
|
||||
bottlenecks.append({
|
||||
"severity": "MEDIUM",
|
||||
"category": "performance",
|
||||
"issue": "Nested loops detected",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"time_impact": "O(n²) complexity",
|
||||
"explanation": "Nested loops can be slow. Consider optimization.",
|
||||
"fix": "Optimization strategies:\n" +
|
||||
"1. Use map/set for O(1) lookups instead of nested loop\n" +
|
||||
"2. Break early when possible\n" +
|
||||
"3. Process data once, cache results\n" +
|
||||
"4. Use channels/goroutines for parallel processing\n\n" +
|
||||
"Example with map:\n" +
|
||||
"// Instead of:\n" +
|
||||
"for _, a := range listA {\n" +
|
||||
" for _, b := range listB {\n" +
|
||||
" if a.id == b.id { found = true }\n" +
|
||||
" }\n" +
|
||||
"}\n\n" +
|
||||
"// Use map:\n" +
|
||||
"mapB := make(map[string]bool)\n" +
|
||||
"for _, b := range listB {\n" +
|
||||
" mapB[b.id] = true\n" +
|
||||
"}\n" +
|
||||
"for _, a := range listA {\n" +
|
||||
" if mapB[a.id] { found = true }\n" +
|
||||
"}",
|
||||
"code_example": "Use map for O(1) lookup"
|
||||
})
|
||||
break
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_allocation_patterns(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for excessive allocations."""
|
||||
bottlenecks = []
|
||||
|
||||
# Check for slice append in loops without pre-allocation
|
||||
for i, line in enumerate(lines):
|
||||
if re.search(r'for\s+.*range', line):
|
||||
# Check next 20 lines for append without make
|
||||
has_append = False
|
||||
for j in range(i, min(i+20, len(lines))):
|
||||
if 'append(' in lines[j]:
|
||||
has_append = True
|
||||
break
|
||||
|
||||
# Check if slice was pre-allocated
|
||||
has_make = False
|
||||
for j in range(max(0, i-10), i):
|
||||
if 'make(' in lines[j] and 'len(' in lines[j]:
|
||||
has_make = True
|
||||
break
|
||||
|
||||
if has_append and not has_make:
|
||||
bottlenecks.append({
|
||||
"severity": "LOW",
|
||||
"category": "memory",
|
||||
"issue": "Slice append in loop without pre-allocation",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"time_impact": "Multiple reallocations",
|
||||
"explanation": "Appending without pre-allocation causes slice to grow, reallocate.",
|
||||
"fix": "Pre-allocate slice:\n\n" +
|
||||
"// Instead of:\n" +
|
||||
"var results []string\n" +
|
||||
"for _, item := range items {\n" +
|
||||
" results = append(results, process(item))\n" +
|
||||
"}\n\n" +
|
||||
"// Pre-allocate:\n" +
|
||||
"results := make([]string, 0, len(items)) // Pre-allocate capacity\n" +
|
||||
"for _, item := range items {\n" +
|
||||
" results = append(results, process(item)) // No reallocation\n" +
|
||||
"}",
|
||||
"code_example": "results := make([]string, 0, len(items))"
|
||||
})
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_concurrent_operations(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for concurrency issues."""
|
||||
bottlenecks = []
|
||||
|
||||
# Check for goroutine leaks
|
||||
has_goroutines = bool(re.search(r'\bgo\s+func', content))
|
||||
has_context = bool(re.search(r'context\.', content))
|
||||
has_waitgroup = bool(re.search(r'sync\.WaitGroup', content))
|
||||
|
||||
if has_goroutines and not (has_context or has_waitgroup):
|
||||
bottlenecks.append({
|
||||
"severity": "HIGH",
|
||||
"category": "memory",
|
||||
"issue": "Goroutines without lifecycle management",
|
||||
"location": file_path,
|
||||
"time_impact": "Goroutine leaks consume memory",
|
||||
"explanation": "Goroutines need proper cleanup to prevent leaks.",
|
||||
"fix": "Use context for cancellation:\n\n" +
|
||||
"type model struct {\n" +
|
||||
" ctx context.Context\n" +
|
||||
" cancel context.CancelFunc\n" +
|
||||
"}\n\n" +
|
||||
"func initialModel() model {\n" +
|
||||
" ctx, cancel := context.WithCancel(context.Background())\n" +
|
||||
" return model{ctx: ctx, cancel: cancel}\n" +
|
||||
"}\n\n" +
|
||||
"func worker(ctx context.Context) tea.Msg {\n" +
|
||||
" for {\n" +
|
||||
" select {\n" +
|
||||
" case <-ctx.Done():\n" +
|
||||
" return nil // Stop goroutine\n" +
|
||||
" case <-time.After(time.Second):\n" +
|
||||
" // Do work\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}\n\n" +
|
||||
"// In Update() on quit:\n" +
|
||||
"m.cancel() // Stops all goroutines",
|
||||
"code_example": "ctx, cancel := context.WithCancel(context.Background())"
|
||||
})
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _check_io_operations(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for I/O operations that should be async."""
|
||||
bottlenecks = []
|
||||
|
||||
# Check for synchronous file reads
|
||||
file_ops = [
|
||||
(r'os\.ReadFile', "os.ReadFile"),
|
||||
(r'ioutil\.ReadFile', "ioutil.ReadFile"),
|
||||
(r'os\.Open', "os.Open"),
|
||||
(r'io\.ReadAll', "io.ReadAll"),
|
||||
]
|
||||
|
||||
for pattern, op_name in file_ops:
|
||||
matches = list(re.finditer(pattern, content))
|
||||
if matches:
|
||||
# Check if in tea.Cmd (good) or in Update/View (bad)
|
||||
for match in matches:
|
||||
# Find which function this is in
|
||||
line_num = content[:match.start()].count('\n')
|
||||
context_lines = content.split('\n')[max(0, line_num-10):line_num+1]
|
||||
context_text = '\n'.join(context_lines)
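# Heuristic note: only the 10 lines above the match are inspected, so the enclosing
# function signature must appear nearby for in_cmd / in_update / in_view to be detected.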
|
||||
|
||||
in_cmd = bool(re.search(r'func\s+\w+\(\s*\)\s+tea\.Msg', context_text))
|
||||
in_update = bool(re.search(r'func\s+\([^)]+\)\s+Update', context_text))
|
||||
in_view = bool(re.search(r'func\s+\([^)]+\)\s+View', context_text))
|
||||
|
||||
if (in_update or in_view) and not in_cmd:
|
||||
severity = "CRITICAL" if in_view else "HIGH"
|
||||
func_name = "View()" if in_view else "Update()"
|
||||
|
||||
bottlenecks.append({
|
||||
"severity": severity,
|
||||
"category": "io",
|
||||
"issue": f"Synchronous {op_name} in {func_name}",
|
||||
"location": f"{file_path}:{line_num+1}",
|
||||
"time_impact": "1-100ms per call",
|
||||
"explanation": f"{op_name} blocks the event loop",
|
||||
"fix": f"Move to tea.Cmd:\n\n" +
|
||||
f"func loadFileCmd() tea.Msg {{\n" +
|
||||
f" data, err := {op_name}(\"file.txt\")\n" +
|
||||
f" return fileLoadedMsg{{data: data, err: err}}\n" +
|
||||
f"}}\n\n" +
|
||||
f"// In Update():\n" +
|
||||
f"case tea.KeyMsg:\n" +
|
||||
f" if key.String() == \"o\" {{\n" +
|
||||
f" return m, loadFileCmd // Non-blocking\n" +
|
||||
f" }}",
|
||||
"code_example": "return m, loadFileCmd // Async I/O"
|
||||
})
|
||||
|
||||
return bottlenecks
|
||||
|
||||
|
||||
def _generate_performance_recommendations(bottlenecks: List[Dict[str, Any]]) -> List[str]:
|
||||
"""Generate prioritized performance recommendations."""
|
||||
recommendations = []
|
||||
|
||||
# Group by category
|
||||
categories = {}
|
||||
for b in bottlenecks:
|
||||
cat = b['category']
|
||||
if cat not in categories:
|
||||
categories[cat] = []
|
||||
categories[cat].append(b)
|
||||
|
||||
# Priority recommendations
|
||||
if 'performance' in categories:
|
||||
critical = [b for b in categories['performance'] if b['severity'] == 'CRITICAL']
|
||||
if critical:
|
||||
recommendations.append(
|
||||
f"🔴 CRITICAL: Move {len(critical)} blocking operation(s) to tea.Cmd goroutines"
|
||||
)
|
||||
|
||||
if 'rendering' in categories:
|
||||
recommendations.append(
|
||||
f"⚡ Optimize View() rendering: Found {len(categories['rendering'])} issue(s)"
|
||||
)
|
||||
|
||||
if 'memory' in categories:
|
||||
recommendations.append(
|
||||
f"💾 Fix memory issues: Found {len(categories['memory'])} potential leak(s)"
|
||||
)
|
||||
|
||||
if 'io' in categories:
|
||||
recommendations.append(
|
||||
f"💿 Make I/O async: Found {len(categories['io'])} synchronous I/O call(s)"
|
||||
)
|
||||
|
||||
# General recommendations
|
||||
recommendations.extend([
|
||||
"Profile with pprof to get precise measurements",
|
||||
"Use benchmarks to validate optimizations",
|
||||
"Monitor with runtime.ReadMemStats() for memory usage",
|
||||
"Test with large datasets to reveal performance issues"
|
||||
])
|
||||
|
||||
return recommendations
|
||||
|
||||
|
||||
def _estimate_metrics(bottlenecks: List[Dict[str, Any]], files: List[Path]) -> Dict[str, Any]:
|
||||
"""Estimate performance metrics based on analysis."""
|
||||
|
||||
# Estimate Update() time
|
||||
critical_in_update = sum(1 for b in bottlenecks
|
||||
if 'Update()' in b.get('issue', '') and b['severity'] == 'CRITICAL')
|
||||
high_in_update = sum(1 for b in bottlenecks
|
||||
if 'Update()' in b.get('issue', '') and b['severity'] == 'HIGH')
|
||||
|
||||
estimated_update_time = "2-5ms (good)"
|
||||
if critical_in_update > 0:
|
||||
estimated_update_time = "50-200ms (critical - UI freezing)"
|
||||
elif high_in_update > 0:
|
||||
estimated_update_time = "20-50ms (slow - noticeable lag)"
|
||||
|
||||
# Estimate View() time
|
||||
critical_in_view = sum(1 for b in bottlenecks
|
||||
if 'View()' in b.get('issue', '') and b['severity'] == 'CRITICAL')
|
||||
high_in_view = sum(1 for b in bottlenecks
|
||||
if 'View()' in b.get('issue', '') and b['severity'] == 'HIGH')
|
||||
|
||||
estimated_view_time = "1-3ms (good)"
|
||||
if critical_in_view > 0:
|
||||
estimated_view_time = "100-500ms (critical - very slow)"
|
||||
elif high_in_view > 0:
|
||||
estimated_view_time = "10-30ms (slow)"
|
||||
|
||||
# Memory estimate
|
||||
goroutine_leaks = sum(1 for b in bottlenecks if 'leak' in b.get('issue', '').lower() or 'goroutine' in b.get('issue', '').lower())
|
||||
memory_status = "stable"
|
||||
if goroutine_leaks > 0:
|
||||
memory_status = "growing (leaks detected)"
|
||||
|
||||
return {
|
||||
"estimated_update_time": estimated_update_time,
|
||||
"estimated_view_time": estimated_view_time,
|
||||
"memory_status": memory_status,
|
||||
"total_bottlenecks": len(bottlenecks),
|
||||
"critical_issues": sum(1 for b in bottlenecks if b['severity'] == 'CRITICAL'),
|
||||
"files_analyzed": len(files),
|
||||
"note": "Run actual profiling (pprof, benchmarks) for precise measurements"
|
||||
}
|
||||
|
||||
|
||||
def validate_performance_debug(result: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Validate performance debug result."""
|
||||
if 'error' in result:
|
||||
return {"status": "error", "summary": result['error']}
|
||||
|
||||
validation = result.get('validation', {})
|
||||
status = validation.get('status', 'unknown')
|
||||
summary = validation.get('summary', 'Performance analysis complete')
|
||||
|
||||
checks = [
|
||||
(result.get('bottlenecks') is not None, "Has bottlenecks list"),
|
||||
(result.get('metrics') is not None, "Has metrics"),
|
||||
(result.get('recommendations') is not None, "Has recommendations"),
|
||||
]
|
||||
|
||||
all_pass = all(check[0] for check in checks)
|
||||
|
||||
return {
|
||||
"status": status,
|
||||
"summary": summary,
|
||||
"checks": {check[1]: check[0] for check in checks},
|
||||
"valid": all_pass
|
||||
}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: debug_performance.py <code_path> [profile_data]")
|
||||
sys.exit(1)
|
||||
|
||||
code_path = sys.argv[1]
|
||||
profile_data = sys.argv[2] if len(sys.argv) > 2 else ""
|
||||
|
||||
result = debug_performance(code_path, profile_data)
|
||||
print(json.dumps(result, indent=2))
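# Illustrative usage (the paths below are hypothetical):
#   python3 scripts/debug_performance.py ./cmd/myapp trace.out
# Prints a JSON report with "bottlenecks", "metrics" and "recommendations" keys.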
|
||||
441
scripts/diagnose_issue.py
Normal file
@@ -0,0 +1,441 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Diagnose issues in existing Bubble Tea applications.
|
||||
Identifies common problems: slow event loop, layout issues, memory leaks, etc.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any
|
||||
|
||||
|
||||
def diagnose_issue(code_path: str, description: str = "") -> Dict[str, Any]:
|
||||
"""
|
||||
Analyze Bubble Tea code to identify common issues.
|
||||
|
||||
Args:
|
||||
code_path: Path to Go file or directory containing Bubble Tea code
|
||||
description: Optional user description of the problem
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- issues: List of identified issues with severity, location, fix
|
||||
- summary: High-level summary
|
||||
- health_score: 0-100 score (higher is better)
|
||||
- validation: Validation report
|
||||
"""
|
||||
path = Path(code_path)
|
||||
|
||||
if not path.exists():
|
||||
return {
|
||||
"error": f"Path not found: {code_path}",
|
||||
"validation": {"status": "error", "summary": "Invalid path"}
|
||||
}
|
||||
|
||||
# Collect all .go files
|
||||
go_files = []
|
||||
if path.is_file():
|
||||
if path.suffix == '.go':
|
||||
go_files = [path]
|
||||
else:
|
||||
go_files = list(path.glob('**/*.go'))
|
||||
|
||||
if not go_files:
|
||||
return {
|
||||
"error": "No .go files found",
|
||||
"validation": {"status": "error", "summary": "No Go files"}
|
||||
}
|
||||
|
||||
# Analyze all files
|
||||
all_issues = []
|
||||
for go_file in go_files:
|
||||
issues = _analyze_go_file(go_file)
|
||||
all_issues.extend(issues)
|
||||
|
||||
# Calculate health score
|
||||
critical_count = sum(1 for i in all_issues if i['severity'] == 'CRITICAL')
|
||||
warning_count = sum(1 for i in all_issues if i['severity'] == 'WARNING')
|
||||
info_count = sum(1 for i in all_issues if i['severity'] == 'INFO')
|
||||
|
||||
health_score = max(0, 100 - (critical_count * 20) - (warning_count * 5) - (info_count * 1))
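# Worked example: 2 critical, 3 warnings, 4 info -> max(0, 100 - 40 - 15 - 4) = 41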
|
||||
|
||||
# Generate summary
|
||||
if critical_count == 0 and warning_count == 0:
|
||||
summary = "✅ No critical issues found. Application appears healthy."
|
||||
elif critical_count > 0:
|
||||
summary = f"❌ Found {critical_count} critical issue(s) requiring immediate attention"
|
||||
else:
|
||||
summary = f"⚠️ Found {warning_count} warning(s) that should be addressed"
|
||||
|
||||
# Validation
|
||||
validation = {
|
||||
"status": "critical" if critical_count > 0 else "warning" if warning_count > 0 else "pass",
|
||||
"summary": summary,
|
||||
"checks": {
|
||||
"has_blocking_operations": critical_count > 0,
|
||||
"has_layout_issues": any(i['category'] == 'layout' for i in all_issues),
|
||||
"has_performance_issues": any(i['category'] == 'performance' for i in all_issues),
|
||||
"has_architecture_issues": any(i['category'] == 'architecture' for i in all_issues)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
"issues": all_issues,
|
||||
"summary": summary,
|
||||
"health_score": health_score,
|
||||
"statistics": {
|
||||
"total_issues": len(all_issues),
|
||||
"critical": critical_count,
|
||||
"warnings": warning_count,
|
||||
"info": info_count,
|
||||
"files_analyzed": len(go_files)
|
||||
},
|
||||
"validation": validation,
|
||||
"user_description": description
|
||||
}
|
||||
|
||||
|
||||
def _analyze_go_file(file_path: Path) -> List[Dict[str, Any]]:
|
||||
"""Analyze a single Go file for issues."""
|
||||
issues = []
|
||||
|
||||
try:
|
||||
content = file_path.read_text()
|
||||
except Exception as e:
|
||||
return [{
|
||||
"severity": "WARNING",
|
||||
"category": "system",
|
||||
"issue": f"Could not read file: {e}",
|
||||
"location": str(file_path),
|
||||
"explanation": "File access error",
|
||||
"fix": "Check file permissions"
|
||||
}]
|
||||
|
||||
lines = content.split('\n')
|
||||
rel_path = file_path.name
|
||||
|
||||
# Check 1: Blocking operations in Update() or View()
|
||||
issues.extend(_check_blocking_operations(content, lines, rel_path))
|
||||
|
||||
# Check 2: Hardcoded dimensions
|
||||
issues.extend(_check_hardcoded_dimensions(content, lines, rel_path))
|
||||
|
||||
# Check 3: Missing terminal recovery
|
||||
issues.extend(_check_terminal_recovery(content, lines, rel_path))
|
||||
|
||||
# Check 4: Message ordering assumptions
|
||||
issues.extend(_check_message_ordering(content, lines, rel_path))
|
||||
|
||||
# Check 5: Model complexity
|
||||
issues.extend(_check_model_complexity(content, lines, rel_path))
|
||||
|
||||
# Check 6: Memory leaks (goroutine leaks)
|
||||
issues.extend(_check_goroutine_leaks(content, lines, rel_path))
|
||||
|
||||
# Check 7: Layout arithmetic issues
|
||||
issues.extend(_check_layout_arithmetic(content, lines, rel_path))
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_blocking_operations(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for blocking operations in Update() or View()."""
|
||||
issues = []
|
||||
|
||||
# Find Update() and View() function boundaries
|
||||
in_update = False
|
||||
in_view = False
|
||||
func_start_line = 0
|
||||
|
||||
blocking_patterns = [
|
||||
(r'\btime\.Sleep\s*\(', "time.Sleep"),
|
||||
(r'\bhttp\.(Get|Post|Do)\s*\(', "HTTP request"),
|
||||
(r'\bos\.Open\s*\(', "File I/O"),
|
||||
(r'\bio\.ReadAll\s*\(', "Blocking read"),
|
||||
(r'\bexec\.Command\([^)]+\)\.Run\(\)', "Command execution"),
|
||||
(r'\bdb\.Query\s*\(', "Database query"),
|
||||
]
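# Illustrative matches (hypothetical Go snippets): "time.Sleep(time.Second)" and
# "resp, err := http.Get(url)" are both flagged when they appear inside Update() or View().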
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
# Track function boundaries
|
||||
if re.search(r'func\s+\([^)]+\)\s+Update\s*\(', line):
|
||||
in_update = True
|
||||
func_start_line = i
|
||||
elif re.search(r'func\s+\([^)]+\)\s+View\s*\(', line):
|
||||
in_view = True
|
||||
func_start_line = i
|
||||
elif in_update or in_view:
|
||||
if line.strip().startswith('func '):
|
||||
in_update = False
|
||||
in_view = False
|
||||
|
||||
# Check for blocking operations
|
||||
if in_update or in_view:
|
||||
for pattern, operation in blocking_patterns:
|
||||
if re.search(pattern, line):
|
||||
func_type = "Update()" if in_update else "View()"
|
||||
issues.append({
|
||||
"severity": "CRITICAL",
|
||||
"category": "performance",
|
||||
"issue": f"Blocking {operation} in {func_type}",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"code_snippet": line.strip(),
|
||||
"explanation": f"{operation} blocks the event loop, causing UI to freeze",
|
||||
"fix": f"Move {operation} to tea.Cmd goroutine:\n\n" +
|
||||
f"func load{operation.replace(' ', '')}() tea.Msg {{\n" +
|
||||
f" // Your {operation} here\n" +
|
||||
f" return resultMsg{{}}\n" +
|
||||
f"}}\n\n" +
|
||||
f"// In Update():\n" +
|
||||
f"return m, load{operation.replace(' ', '')}"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_hardcoded_dimensions(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for hardcoded terminal dimensions."""
|
||||
issues = []
|
||||
|
||||
# Look for hardcoded width/height values
|
||||
patterns = [
|
||||
(r'\.Width\s*\(\s*(\d{2,})\s*\)', "width"),
|
||||
(r'\.Height\s*\(\s*(\d{2,})\s*\)', "height"),
|
||||
(r'MaxWidth\s*:\s*(\d{2,})', "MaxWidth"),
|
||||
(r'MaxHeight\s*:\s*(\d{2,})', "MaxHeight"),
|
||||
]
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
for pattern, dimension in patterns:
|
||||
matches = re.finditer(pattern, line)
|
||||
for match in matches:
|
||||
value = match.group(1)
|
||||
if int(value) >= 20: # Likely a terminal dimension, not small padding
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"category": "layout",
|
||||
"issue": f"Hardcoded {dimension} value: {value}",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"code_snippet": line.strip(),
|
||||
"explanation": "Hardcoded dimensions don't adapt to terminal size",
|
||||
"fix": f"Use dynamic terminal size from tea.WindowSizeMsg:\n\n" +
|
||||
f"type model struct {{\n" +
|
||||
f" termWidth int\n" +
|
||||
f" termHeight int\n" +
|
||||
f"}}\n\n" +
|
||||
f"func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {{\n" +
|
||||
f" switch msg := msg.(type) {{\n" +
|
||||
f" case tea.WindowSizeMsg:\n" +
|
||||
f" m.termWidth = msg.Width\n" +
|
||||
f" m.termHeight = msg.Height\n" +
|
||||
f" }}\n" +
|
||||
f" return m, nil\n" +
|
||||
f"}}"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_terminal_recovery(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for panic recovery and terminal cleanup."""
|
||||
issues = []
|
||||
|
||||
has_defer_recover = bool(re.search(r'defer\s+func\s*\(\s*\)\s*\{[^}]*recover\(\)', content, re.DOTALL))
|
||||
has_main = bool(re.search(r'func\s+main\s*\(\s*\)', content))
|
||||
|
||||
if has_main and not has_defer_recover:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"category": "reliability",
|
||||
"issue": "Missing panic recovery in main()",
|
||||
"location": file_path,
|
||||
"explanation": "Panics can leave terminal in broken state (mouse mode enabled, cursor hidden)",
|
||||
"fix": "Add defer recovery:\n\n" +
|
||||
"func main() {\n" +
|
||||
" defer func() {\n" +
|
||||
" if r := recover(); r != nil {\n" +
|
||||
" tea.DisableMouseAllMotion()\n" +
|
||||
" tea.ShowCursor()\n" +
|
||||
" fmt.Println(\"Panic:\", r)\n" +
|
||||
" os.Exit(1)\n" +
|
||||
" }\n" +
|
||||
" }()\n\n" +
|
||||
" // Your program logic\n" +
|
||||
"}"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_message_ordering(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for assumptions about message ordering from concurrent commands."""
|
||||
issues = []
|
||||
|
||||
# Look for concurrent command patterns without order handling
|
||||
has_batch = bool(re.search(r'tea\.Batch\s*\(', content))
|
||||
has_state_machine = bool(re.search(r'type\s+\w+State\s+(int|string)', content))
|
||||
|
||||
if has_batch and not has_state_machine:
|
||||
issues.append({
|
||||
"severity": "INFO",
|
||||
"category": "architecture",
|
||||
"issue": "Using tea.Batch without explicit state tracking",
|
||||
"location": file_path,
|
||||
"explanation": "Messages from tea.Batch arrive in unpredictable order",
|
||||
"fix": "Use state machine to track operations:\n\n" +
|
||||
"type model struct {\n" +
|
||||
" operations map[string]bool // Track active operations\n" +
|
||||
"}\n\n" +
|
||||
"type opStartMsg struct { id string }\n" +
|
||||
"type opDoneMsg struct { id string, result string }\n\n" +
|
||||
"func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {\n" +
|
||||
" switch msg := msg.(type) {\n" +
|
||||
" case opStartMsg:\n" +
|
||||
" m.operations[msg.id] = true\n" +
|
||||
" case opDoneMsg:\n" +
|
||||
" delete(m.operations, msg.id)\n" +
|
||||
" }\n" +
|
||||
" return m, nil\n" +
|
||||
"}"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_model_complexity(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check if model is too complex and should use model tree."""
|
||||
issues = []
|
||||
|
||||
# Count fields in model struct
|
||||
model_match = re.search(r'type\s+(\w*[Mm]odel)\s+struct\s*\{([^}]+)\}', content, re.DOTALL)
|
||||
if model_match:
|
||||
model_body = model_match.group(2)
|
||||
field_count = len([line for line in model_body.split('\n') if line.strip() and not line.strip().startswith('//')])
|
||||
|
||||
if field_count > 15:
|
||||
issues.append({
|
||||
"severity": "INFO",
|
||||
"category": "architecture",
|
||||
"issue": f"Model has {field_count} fields (complex)",
|
||||
"location": file_path,
|
||||
"explanation": "Large models are hard to maintain. Consider model tree pattern.",
|
||||
"fix": "Refactor to model tree:\n\n" +
|
||||
"type appModel struct {\n" +
|
||||
" activeView int\n" +
|
||||
" listView listModel\n" +
|
||||
" detailView detailModel\n" +
|
||||
"}\n\n" +
|
||||
"func (m appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {\n" +
|
||||
" switch m.activeView {\n" +
|
||||
" case 0:\n" +
|
||||
" m.listView, cmd = m.listView.Update(msg)\n" +
|
||||
" case 1:\n" +
|
||||
" m.detailView, cmd = m.detailView.Update(msg)\n" +
|
||||
" }\n" +
|
||||
" return m, cmd\n" +
|
||||
"}"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_goroutine_leaks(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for potential goroutine leaks."""
|
||||
issues = []
|
||||
|
||||
# Look for goroutines without cleanup
|
||||
has_go_statements = bool(re.search(r'\bgo\s+', content))
|
||||
has_context_cancel = bool(re.search(r'ctx,\s*cancel\s*:=\s*context\.', content))
|
||||
|
||||
if has_go_statements and not has_context_cancel:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"category": "reliability",
|
||||
"issue": "Goroutines without context cancellation",
|
||||
"location": file_path,
|
||||
"explanation": "Goroutines may leak if not properly cancelled",
|
||||
"fix": "Use context for goroutine lifecycle:\n\n" +
|
||||
"type model struct {\n" +
|
||||
" ctx context.Context\n" +
|
||||
" cancel context.CancelFunc\n" +
|
||||
"}\n\n" +
|
||||
"func initialModel() model {\n" +
|
||||
" ctx, cancel := context.WithCancel(context.Background())\n" +
|
||||
" return model{ctx: ctx, cancel: cancel}\n" +
|
||||
"}\n\n" +
|
||||
"// In Update() on quit:\n" +
|
||||
"m.cancel() // Stops all goroutines"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _check_layout_arithmetic(content: str, lines: List[str], file_path: str) -> List[Dict[str, Any]]:
|
||||
"""Check for layout arithmetic issues."""
|
||||
issues = []
|
||||
|
||||
# Look for manual height/width calculations instead of lipgloss helpers
|
||||
uses_lipgloss = bool(re.search(r'"github\.com/charmbracelet/lipgloss"', content))
|
||||
has_manual_calc = bool(re.search(r'(height|width)\s*[-+]\s*\d+', content, re.IGNORECASE))
|
||||
has_lipgloss_helpers = bool(re.search(r'lipgloss\.(Height|Width|GetVertical|GetHorizontal)', content))
|
||||
|
||||
if uses_lipgloss and has_manual_calc and not has_lipgloss_helpers:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"category": "layout",
|
||||
"issue": "Manual layout calculations without lipgloss helpers",
|
||||
"location": file_path,
|
||||
"explanation": "Manual calculations are error-prone. Use lipgloss.Height() and lipgloss.Width()",
|
||||
"fix": "Use lipgloss helpers:\n\n" +
|
||||
"// ❌ BAD:\n" +
|
||||
"availableHeight := termHeight - 5 // Magic number!\n\n" +
|
||||
"// ✅ GOOD:\n" +
|
||||
"headerHeight := lipgloss.Height(header)\n" +
|
||||
"footerHeight := lipgloss.Height(footer)\n" +
|
||||
"availableHeight := termHeight - headerHeight - footerHeight"
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
# Validation function
|
||||
def validate_diagnosis(result: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Validate diagnosis result."""
|
||||
if 'error' in result:
|
||||
return {"status": "error", "summary": result['error']}
|
||||
|
||||
validation = result.get('validation', {})
|
||||
status = validation.get('status', 'unknown')
|
||||
summary = validation.get('summary', 'Diagnosis complete')
|
||||
|
||||
checks = [
|
||||
(result.get('issues') is not None, "Has issues list"),
|
||||
(result.get('health_score') is not None, "Has health score"),
|
||||
(result.get('summary') is not None, "Has summary"),
|
||||
(isinstance(result.get('issues', []), list), "Issues analyzed"),
|
||||
]
|
||||
|
||||
all_pass = all(check[0] for check in checks)
|
||||
|
||||
return {
|
||||
"status": status,
|
||||
"summary": summary,
|
||||
"checks": {check[1]: check[0] for check in checks},
|
||||
"valid": all_pass
|
||||
}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: diagnose_issue.py <code_path> [description]")
|
||||
sys.exit(1)
|
||||
|
||||
code_path = sys.argv[1]
|
||||
description = sys.argv[2] if len(sys.argv) > 2 else ""
|
||||
|
||||
result = diagnose_issue(code_path, description)
|
||||
print(json.dumps(result, indent=2))
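# Illustrative usage (path and description are hypothetical):
#   python3 scripts/diagnose_issue.py ./internal/tui "UI freezes when opening large files"
# Prints a JSON report with "issues", "health_score", "statistics" and "validation" keys.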
|
||||
578
scripts/fix_layout_issues.py
Normal file
@@ -0,0 +1,578 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix Lipgloss layout issues in Bubble Tea applications.
|
||||
Identifies hardcoded dimensions, incorrect calculations, overflow issues, etc.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Tuple, Optional
|
||||
|
||||
|
||||
def fix_layout_issues(code_path: str, description: str = "") -> Dict[str, Any]:
|
||||
"""
|
||||
Diagnose and fix common Lipgloss layout problems.
|
||||
|
||||
Args:
|
||||
code_path: Path to Go file or directory
|
||||
description: Optional user description of layout issue
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- layout_issues: List of identified layout problems with fixes
|
||||
- lipgloss_improvements: General recommendations
|
||||
- code_fixes: Concrete code changes to apply
|
||||
- validation: Validation report
|
||||
"""
|
||||
path = Path(code_path)
|
||||
|
||||
if not path.exists():
|
||||
return {
|
||||
"error": f"Path not found: {code_path}",
|
||||
"validation": {"status": "error", "summary": "Invalid path"}
|
||||
}
|
||||
|
||||
# Collect all .go files
|
||||
go_files = []
|
||||
if path.is_file():
|
||||
if path.suffix == '.go':
|
||||
go_files = [path]
|
||||
else:
|
||||
go_files = list(path.glob('**/*.go'))
|
||||
|
||||
if not go_files:
|
||||
return {
|
||||
"error": "No .go files found",
|
||||
"validation": {"status": "error", "summary": "No Go files"}
|
||||
}
|
||||
|
||||
# Analyze all files for layout issues
|
||||
all_layout_issues = []
|
||||
all_code_fixes = []
|
||||
|
||||
for go_file in go_files:
|
||||
issues, fixes = _analyze_layout_issues(go_file)
|
||||
all_layout_issues.extend(issues)
|
||||
all_code_fixes.extend(fixes)
|
||||
|
||||
# Generate improvement recommendations
|
||||
lipgloss_improvements = _generate_improvements(all_layout_issues)
|
||||
|
||||
# Summary
|
||||
critical_count = sum(1 for i in all_layout_issues if i['severity'] == 'CRITICAL')
|
||||
warning_count = sum(1 for i in all_layout_issues if i['severity'] == 'WARNING')
|
||||
|
||||
if critical_count > 0:
|
||||
summary = f"🚨 Found {critical_count} critical layout issue(s)"
|
||||
elif warning_count > 0:
|
||||
summary = f"⚠️ Found {warning_count} layout issue(s) to address"
|
||||
elif all_layout_issues:
|
||||
summary = f"Found {len(all_layout_issues)} minor layout improvement(s)"
|
||||
else:
|
||||
summary = "✅ No major layout issues detected"
|
||||
|
||||
# Validation
|
||||
validation = {
|
||||
"status": "critical" if critical_count > 0 else "warning" if warning_count > 0 else "pass",
|
||||
"summary": summary,
|
||||
"checks": {
|
||||
"no_hardcoded_dimensions": not any(i['type'] == 'hardcoded_dimensions' for i in all_layout_issues),
|
||||
"proper_height_calc": not any(i['type'] == 'incorrect_height' for i in all_layout_issues),
|
||||
"handles_padding": not any(i['type'] == 'missing_padding_calc' for i in all_layout_issues),
|
||||
"handles_overflow": not any(i['type'] == 'overflow' for i in all_layout_issues)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
"layout_issues": all_layout_issues,
|
||||
"lipgloss_improvements": lipgloss_improvements,
|
||||
"code_fixes": all_code_fixes,
|
||||
"summary": summary,
|
||||
"user_description": description,
|
||||
"files_analyzed": len(go_files),
|
||||
"validation": validation
|
||||
}
|
||||
|
||||
|
||||
def _analyze_layout_issues(file_path: Path) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Analyze a single Go file for layout issues."""
|
||||
layout_issues = []
|
||||
code_fixes = []
|
||||
|
||||
try:
|
||||
content = file_path.read_text()
|
||||
except Exception as e:
|
||||
return layout_issues, code_fixes
|
||||
|
||||
lines = content.split('\n')
|
||||
rel_path = file_path.name
|
||||
|
||||
# Check if file uses lipgloss
|
||||
uses_lipgloss = bool(re.search(r'"github\.com/charmbracelet/lipgloss"', content))
|
||||
|
||||
if not uses_lipgloss:
|
||||
return layout_issues, code_fixes
|
||||
|
||||
# Issue checks
|
||||
issues, fixes = _check_hardcoded_dimensions(content, lines, rel_path)
|
||||
layout_issues.extend(issues)
|
||||
code_fixes.extend(fixes)
|
||||
|
||||
issues, fixes = _check_incorrect_height_calculations(content, lines, rel_path)
|
||||
layout_issues.extend(issues)
|
||||
code_fixes.extend(fixes)
|
||||
|
||||
issues, fixes = _check_missing_padding_accounting(content, lines, rel_path)
|
||||
layout_issues.extend(issues)
|
||||
code_fixes.extend(fixes)
|
||||
|
||||
issues, fixes = _check_overflow_issues(content, lines, rel_path)
|
||||
layout_issues.extend(issues)
|
||||
code_fixes.extend(fixes)
|
||||
|
||||
issues, fixes = _check_terminal_resize_handling(content, lines, rel_path)
|
||||
layout_issues.extend(issues)
|
||||
code_fixes.extend(fixes)
|
||||
|
||||
issues, fixes = _check_border_accounting(content, lines, rel_path)
|
||||
layout_issues.extend(issues)
|
||||
code_fixes.extend(fixes)
|
||||
|
||||
return layout_issues, code_fixes
|
||||
|
||||
|
||||
def _check_hardcoded_dimensions(content: str, lines: List[str], file_path: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Check for hardcoded width/height values."""
|
||||
issues = []
|
||||
fixes = []
|
||||
|
||||
# Pattern: .Width(80), .Height(24), etc.
|
||||
dimension_pattern = r'\.(Width|Height|MaxWidth|MaxHeight)\s*\(\s*(\d{2,})\s*\)'
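# Illustrative: ".Width(80)" and ".MaxHeight(24)" match; ".Width(4)" does not, since the
# \d{2,} pattern skips single-digit values (small padding-like numbers).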
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
matches = re.finditer(dimension_pattern, line)
|
||||
for match in matches:
|
||||
dimension_type = match.group(1)
|
||||
value = int(match.group(2))
|
||||
|
||||
# Likely a terminal dimension if >= 20
|
||||
if value >= 20:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"type": "hardcoded_dimensions",
|
||||
"issue": f"Hardcoded {dimension_type}: {value}",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"current_code": line.strip(),
|
||||
"explanation": f"Hardcoded {dimension_type} of {value} won't adapt to different terminal sizes",
|
||||
"impact": "Layout breaks on smaller/larger terminals"
|
||||
})
|
||||
|
||||
# Generate fix
|
||||
if dimension_type in ["Width", "MaxWidth"]:
|
||||
fixed_code = re.sub(
|
||||
rf'\.{dimension_type}\s*\(\s*{value}\s*\)',
|
||||
f'.{dimension_type}(m.termWidth)',
|
||||
line.strip()
|
||||
)
|
||||
else: # Height, MaxHeight
|
||||
fixed_code = re.sub(
|
||||
rf'\.{dimension_type}\s*\(\s*{value}\s*\)',
|
||||
f'.{dimension_type}(m.termHeight)',
|
||||
line.strip()
|
||||
)
|
||||
|
||||
fixes.append({
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"original": line.strip(),
|
||||
"fixed": fixed_code,
|
||||
"explanation": f"Use dynamic terminal size from model (m.termWidth/m.termHeight)",
|
||||
"requires": [
|
||||
"Add termWidth and termHeight fields to model",
|
||||
"Handle tea.WindowSizeMsg in Update()"
|
||||
],
|
||||
"code_example": '''// In model:
|
||||
type model struct {
|
||||
termWidth int
|
||||
termHeight int
|
||||
}
|
||||
|
||||
// In Update():
|
||||
case tea.WindowSizeMsg:
|
||||
m.termWidth = msg.Width
|
||||
m.termHeight = msg.Height'''
|
||||
})
|
||||
|
||||
return issues, fixes
|
||||
|
||||
|
||||
def _check_incorrect_height_calculations(content: str, lines: List[str], file_path: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Check for manual height calculations instead of lipgloss.Height()."""
|
||||
issues = []
|
||||
fixes = []
|
||||
|
||||
# Check View() function for manual calculations
|
||||
view_start = -1
|
||||
for i, line in enumerate(lines):
|
||||
if re.search(r'func\s+\([^)]+\)\s+View\s*\(', line):
|
||||
view_start = i
|
||||
break
|
||||
|
||||
if view_start < 0:
|
||||
return issues, fixes
|
||||
|
||||
# Look for manual arithmetic like "height - 5", "24 - headerHeight"
|
||||
manual_calc_pattern = r'(height|Height|termHeight)\s*[-+]\s*\d+'
|
||||
|
||||
for i in range(view_start, min(view_start + 200, len(lines))):
|
||||
if re.search(manual_calc_pattern, lines[i], re.IGNORECASE):
|
||||
# Check if lipgloss.Height() is used in the vicinity
|
||||
context = '\n'.join(lines[max(0, i-5):i+5])
|
||||
uses_lipgloss_height = bool(re.search(r'lipgloss\.Height\s*\(', context))
|
||||
|
||||
if not uses_lipgloss_height:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"type": "incorrect_height",
|
||||
"issue": "Manual height calculation without lipgloss.Height()",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"current_code": lines[i].strip(),
|
||||
"explanation": "Manual calculations don't account for actual rendered height",
|
||||
"impact": "Incorrect spacing, overflow, or clipping"
|
||||
})
|
||||
|
||||
# Generate fix (the concrete rewrite is shown in code_example below; no automated string patch is attempted)
|
||||
|
||||
fixes.append({
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"original": lines[i].strip(),
|
||||
"fixed": "Use lipgloss.Height() to get actual rendered height",
|
||||
"explanation": "lipgloss.Height() accounts for padding, borders, margins",
|
||||
"code_example": '''// ❌ BAD:
|
||||
availableHeight := termHeight - 5 // Magic number!
|
||||
|
||||
// ✅ GOOD:
|
||||
headerHeight := lipgloss.Height(m.renderHeader())
|
||||
footerHeight := lipgloss.Height(m.renderFooter())
|
||||
availableHeight := m.termHeight - headerHeight - footerHeight'''
|
||||
})
|
||||
|
||||
return issues, fixes
|
||||
|
||||
|
||||
def _check_missing_padding_accounting(content: str, lines: List[str], file_path: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Check for nested styles without padding/margin accounting."""
|
||||
issues = []
|
||||
fixes = []
|
||||
|
||||
# Look for nested styles with padding
|
||||
# Pattern: Style().Padding(X).Width(Y).Render(content)
|
||||
nested_style_pattern = r'\.Padding\s*\([^)]+\).*\.Width\s*\(\s*(\w+)\s*\).*\.Render\s*\('
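# Illustrative one-line match (hypothetical): lipgloss.NewStyle().Padding(2).Width(w).Render(text)
# The pattern only fires when Padding, Width and Render all appear on the same source line.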
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
matches = re.finditer(nested_style_pattern, line)
|
||||
for match in matches:
|
||||
width_var = match.group(1)
|
||||
|
||||
# Check if GetHorizontalPadding is used
|
||||
context = '\n'.join(lines[max(0, i-10):min(i+10, len(lines))])
|
||||
uses_get_padding = bool(re.search(r'GetHorizontalPadding\s*\(\s*\)', context))
|
||||
|
||||
if not uses_get_padding and width_var != 'm.termWidth':
|
||||
issues.append({
|
||||
"severity": "CRITICAL",
|
||||
"type": "missing_padding_calc",
|
||||
"issue": "Padding not accounted for in nested width calculation",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"current_code": line.strip(),
|
||||
"explanation": "Setting Width() then Padding() makes content area smaller than expected",
|
||||
"impact": "Content gets clipped or wrapped incorrectly"
|
||||
})
|
||||
|
||||
fixes.append({
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"original": line.strip(),
|
||||
"fixed": "Account for padding using GetHorizontalPadding()",
|
||||
"explanation": "Padding reduces available content area",
|
||||
"code_example": '''// ❌ BAD:
|
||||
style := lipgloss.NewStyle().
|
||||
Padding(2).
|
||||
Width(80).
|
||||
Render(text) // Text area is 76, not 80!
|
||||
|
||||
// ✅ GOOD:
|
||||
style := lipgloss.NewStyle().Padding(2)
|
||||
contentWidth := 80 - style.GetHorizontalPadding()
|
||||
content := lipgloss.NewStyle().Width(contentWidth).Render(text)
|
||||
result := style.Width(80).Render(content)'''
|
||||
})
|
||||
|
||||
return issues, fixes
|
||||
|
||||
|
||||
def _check_overflow_issues(content: str, lines: List[str], file_path: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Check for potential text overflow."""
|
||||
issues = []
|
||||
fixes = []
|
||||
|
||||
# Check for long strings without wrapping
|
||||
has_wordwrap = bool(re.search(r'"github\.com/muesli/reflow/wordwrap"', content))
|
||||
has_wrap_or_truncate = bool(re.search(r'(wordwrap|truncate|Truncate)', content, re.IGNORECASE))
|
||||
|
||||
# Look for string rendering without width constraints
|
||||
render_pattern = r'\.Render\s*\(\s*(\w+)\s*\)'
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
matches = re.finditer(render_pattern, line)
|
||||
for match in matches:
|
||||
var_name = match.group(1)
|
||||
|
||||
# Check if there's width control
|
||||
has_width_control = bool(re.search(r'\.Width\s*\(', line))
|
||||
|
||||
if not has_width_control and not has_wrap_or_truncate and len(line) > 40:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"type": "overflow",
|
||||
"issue": f"Rendering '{var_name}' without width constraint",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"current_code": line.strip(),
|
||||
"explanation": "Long content can exceed terminal width",
|
||||
"impact": "Text wraps unexpectedly or overflows"
|
||||
})
|
||||
|
||||
fixes.append({
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"original": line.strip(),
|
||||
"fixed": "Add wordwrap or width constraint",
|
||||
"explanation": "Constrain content to terminal width",
|
||||
"code_example": '''// Option 1: Use wordwrap
|
||||
import "github.com/muesli/reflow/wordwrap"
|
||||
|
||||
content := wordwrap.String(longText, m.termWidth)
|
||||
|
||||
// Option 2: Use lipgloss Width + truncate
|
||||
style := lipgloss.NewStyle().Width(m.termWidth)
|
||||
content := style.Render(longText)
|
||||
|
||||
// Option 3: Manual truncate
|
||||
import "github.com/muesli/reflow/truncate"
|
||||
|
||||
content := truncate.StringWithTail(longText, uint(m.termWidth), "...")'''
|
||||
})
|
||||
|
||||
return issues, fixes
|
||||
|
||||
|
||||
def _check_terminal_resize_handling(content: str, lines: List[str], file_path: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Check for proper terminal resize handling."""
|
||||
issues = []
|
||||
fixes = []
|
||||
|
||||
# Check if WindowSizeMsg is handled
|
||||
handles_resize = bool(re.search(r'case\s+tea\.WindowSizeMsg:', content))
|
||||
|
||||
# Check if model stores term dimensions
|
||||
has_term_fields = bool(re.search(r'(termWidth|termHeight|width|height)\s+int', content))
|
||||
|
||||
if not handles_resize and uses_lipgloss(content):
|
||||
issues.append({
|
||||
"severity": "CRITICAL",
|
||||
"type": "missing_resize_handling",
|
||||
"issue": "No tea.WindowSizeMsg handling detected",
|
||||
"location": file_path,
|
||||
"explanation": "Layout won't adapt when terminal is resized",
|
||||
"impact": "Content clipped or misaligned after resize"
|
||||
})
|
||||
|
||||
fixes.append({
|
||||
"location": file_path,
|
||||
"original": "N/A",
|
||||
"fixed": "Add WindowSizeMsg handler",
|
||||
"explanation": "Store terminal dimensions and update on resize",
|
||||
"code_example": '''// In model:
|
||||
type model struct {
|
||||
termWidth int
|
||||
termHeight int
|
||||
}
|
||||
|
||||
// In Update():
|
||||
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
switch msg := msg.(type) {
|
||||
case tea.WindowSizeMsg:
|
||||
m.termWidth = msg.Width
|
||||
m.termHeight = msg.Height
|
||||
|
||||
// Update child components with new size
|
||||
m.viewport.Width = msg.Width
|
||||
m.viewport.Height = msg.Height - 2 // Leave room for header
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// In View():
|
||||
func (m model) View() string {
|
||||
// Use m.termWidth and m.termHeight for dynamic layout
|
||||
content := lipgloss.NewStyle().
|
||||
Width(m.termWidth).
|
||||
Height(m.termHeight).
|
||||
Render(m.content)
|
||||
return content
|
||||
}'''
|
||||
})
|
||||
|
||||
elif handles_resize and not has_term_fields:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"type": "resize_not_stored",
|
||||
"issue": "WindowSizeMsg handled but dimensions not stored",
|
||||
"location": file_path,
|
||||
"explanation": "Handling resize but not storing dimensions for later use",
|
||||
"impact": "Can't use current terminal size in View()"
|
||||
})
|
||||
|
||||
return issues, fixes
|
||||
|
||||
|
||||
def _check_border_accounting(content: str, lines: List[str], file_path: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""Check for border accounting in layout calculations."""
|
||||
issues = []
|
||||
fixes = []
|
||||
|
||||
# Check for borders without proper accounting
|
||||
has_border = bool(re.search(r'\.Border\s*\(', content))
|
||||
has_border_width_calc = bool(re.search(r'GetHorizontalBorderSize|GetVerticalBorderSize', content))
|
||||
|
||||
if has_border and not has_border_width_calc:
|
||||
# Find border usage lines
|
||||
for i, line in enumerate(lines):
|
||||
if '.Border(' in line:
|
||||
issues.append({
|
||||
"severity": "WARNING",
|
||||
"type": "missing_border_calc",
|
||||
"issue": "Border used without accounting for border size",
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"current_code": line.strip(),
|
||||
"explanation": "Borders take space (2 chars horizontal, 2 chars vertical)",
|
||||
"impact": "Content area smaller than expected"
|
||||
})
|
||||
|
||||
fixes.append({
|
||||
"location": f"{file_path}:{i+1}",
|
||||
"original": line.strip(),
|
||||
"fixed": "Account for border size",
|
||||
"explanation": "Use GetHorizontalBorderSize() and GetVerticalBorderSize()",
|
||||
"code_example": '''// With border:
|
||||
style := lipgloss.NewStyle().
|
||||
Border(lipgloss.RoundedBorder()).
|
||||
Width(80)
|
||||
|
||||
// Calculate content area:
|
||||
contentWidth := 80 - style.GetHorizontalBorderSize()
|
||||
contentHeight := 24 - style.GetVerticalBorderSize()
|
||||
|
||||
// Use for inner content:
|
||||
innerContent := lipgloss.NewStyle().
|
||||
Width(contentWidth).
|
||||
Height(contentHeight).
|
||||
Render(text)
|
||||
|
||||
result := style.Render(innerContent)'''
|
||||
})
|
||||
|
||||
return issues, fixes
|
||||
|
||||
|
||||
def uses_lipgloss(content: str) -> bool:
|
||||
"""Check if file uses lipgloss."""
|
||||
return bool(re.search(r'"github\.com/charmbracelet/lipgloss"', content))
|
||||
|
||||
|
||||
def _generate_improvements(issues: List[Dict[str, Any]]) -> List[str]:
|
||||
"""Generate general improvement recommendations."""
|
||||
improvements = []
|
||||
|
||||
issue_types = set(issue['type'] for issue in issues)
|
||||
|
||||
if 'hardcoded_dimensions' in issue_types:
|
||||
improvements.append(
|
||||
"🎯 Use dynamic terminal sizing: Store termWidth/termHeight in model, update from tea.WindowSizeMsg"
|
||||
)
|
||||
|
||||
if 'incorrect_height' in issue_types:
|
||||
improvements.append(
|
||||
"📏 Use lipgloss.Height() and lipgloss.Width() for accurate measurements"
|
||||
)
|
||||
|
||||
if 'missing_padding_calc' in issue_types:
|
||||
improvements.append(
|
||||
"📐 Account for padding with GetHorizontalPadding() and GetVerticalPadding()"
|
||||
)
|
||||
|
||||
if 'overflow' in issue_types:
|
||||
improvements.append(
|
||||
"📝 Use wordwrap or truncate to prevent text overflow"
|
||||
)
|
||||
|
||||
if 'missing_resize_handling' in issue_types:
|
||||
improvements.append(
|
||||
"🔄 Handle tea.WindowSizeMsg to support terminal resizing"
|
||||
)
|
||||
|
||||
if 'missing_border_calc' in issue_types:
|
||||
improvements.append(
|
||||
"🔲 Account for borders with GetHorizontalBorderSize() and GetVerticalBorderSize()"
|
||||
)
|
||||
|
||||
# General best practices
|
||||
improvements.extend([
|
||||
"✨ Test your TUI at various terminal sizes (80x24, 120x40, 200x50)",
|
||||
"🔍 Use lipgloss debugging: Print style.String() to see computed dimensions",
|
||||
"📦 Cache computed styles in model to avoid recreation on every render",
|
||||
"🎨 Use PlaceHorizontal/PlaceVertical for alignment instead of manual padding"
|
||||
])
|
||||
|
||||
return improvements
|
||||
|
||||
|
||||
def validate_layout_fixes(result: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Validate layout fixes result."""
|
||||
if 'error' in result:
|
||||
return {"status": "error", "summary": result['error']}
|
||||
|
||||
validation = result.get('validation', {})
|
||||
status = validation.get('status', 'unknown')
|
||||
summary = validation.get('summary', 'Layout analysis complete')
|
||||
|
||||
checks = [
|
||||
(result.get('layout_issues') is not None, "Has issues list"),
|
||||
(result.get('lipgloss_improvements') is not None, "Has improvements"),
|
||||
(result.get('code_fixes') is not None, "Has code fixes"),
|
||||
]
|
||||
|
||||
all_pass = all(check[0] for check in checks)
|
||||
|
||||
return {
|
||||
"status": status,
|
||||
"summary": summary,
|
||||
"checks": {check[1]: check[0] for check in checks},
|
||||
"valid": all_pass
|
||||
}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: fix_layout_issues.py <code_path> [description]")
|
||||
sys.exit(1)
|
||||
|
||||
code_path = sys.argv[1]
|
||||
description = sys.argv[2] if len(sys.argv) > 2 else ""
|
||||
|
||||
result = fix_layout_issues(code_path, description)
|
||||
print(json.dumps(result, indent=2))
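# Illustrative usage (path and description are hypothetical):
#   python3 scripts/fix_layout_issues.py ./internal/tui "footer overlaps the table on resize"
# Prints a JSON report with "layout_issues", "code_fixes" and "lipgloss_improvements" keys.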
|
||||
736
scripts/suggest_architecture.py
Normal file
@@ -0,0 +1,736 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Suggest architectural improvements for Bubble Tea applications.
|
||||
Analyzes complexity and recommends patterns like model trees, composable views, etc.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Tuple, Optional
|
||||
|
||||
|
||||
def suggest_architecture(code_path: str, complexity_level: str = "auto") -> Dict[str, Any]:
|
||||
"""
|
||||
Analyze code and suggest architectural improvements.
|
||||
|
||||
Args:
|
||||
code_path: Path to Go file or directory
|
||||
complexity_level: "auto" (detect), "simple", "medium", "complex"
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- current_pattern: Detected architectural pattern
|
||||
- complexity_score: 0-100 (higher = more complex)
|
||||
- recommended_pattern: Suggested pattern for improvement
|
||||
- refactoring_steps: List of steps to implement
|
||||
- code_templates: Example code for new pattern
|
||||
- validation: Validation report
|
||||
"""
|
||||
path = Path(code_path)
|
||||
|
||||
if not path.exists():
|
||||
return {
|
||||
"error": f"Path not found: {code_path}",
|
||||
"validation": {"status": "error", "summary": "Invalid path"}
|
||||
}
|
||||
|
||||
# Collect all .go files
|
||||
go_files = []
|
||||
if path.is_file():
|
||||
if path.suffix == '.go':
|
||||
go_files = [path]
|
||||
else:
|
||||
go_files = list(path.glob('**/*.go'))
|
||||
|
||||
if not go_files:
|
||||
return {
|
||||
"error": "No .go files found",
|
||||
"validation": {"status": "error", "summary": "No Go files"}
|
||||
}
|
||||
|
||||
# Read all code
|
||||
all_content = ""
|
||||
for go_file in go_files:
|
||||
try:
|
||||
all_content += go_file.read_text() + "\n"
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Analyze current architecture
|
||||
current_pattern = _detect_current_pattern(all_content)
|
||||
complexity_score = _calculate_complexity(all_content, go_files)
|
||||
|
||||
# Auto-detect complexity level if needed
|
||||
if complexity_level == "auto":
|
||||
if complexity_score < 30:
|
||||
complexity_level = "simple"
|
||||
elif complexity_score < 70:
|
||||
complexity_level = "medium"
|
||||
else:
|
||||
complexity_level = "complex"
|
||||
|
||||
# Generate recommendations
|
||||
recommended_pattern = _recommend_pattern(current_pattern, complexity_score, complexity_level)
|
||||
refactoring_steps = _generate_refactoring_steps(current_pattern, recommended_pattern, all_content)
|
||||
code_templates = _generate_code_templates(recommended_pattern, all_content)
|
||||
|
||||
# Summary
|
||||
if recommended_pattern == current_pattern:
|
||||
summary = f"✅ Current architecture ({current_pattern}) is appropriate for complexity level"
|
||||
else:
|
||||
summary = f"💡 Recommend refactoring from {current_pattern} to {recommended_pattern}"
|
||||
|
||||
# Validation
|
||||
validation = {
|
||||
"status": "pass" if recommended_pattern == current_pattern else "info",
|
||||
"summary": summary,
|
||||
"checks": {
|
||||
"complexity_analyzed": complexity_score >= 0,
|
||||
"pattern_detected": current_pattern != "unknown",
|
||||
"has_recommendations": len(refactoring_steps) > 0,
|
||||
"has_templates": len(code_templates) > 0
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
"current_pattern": current_pattern,
|
||||
"complexity_score": complexity_score,
|
||||
"complexity_level": complexity_level,
|
||||
"recommended_pattern": recommended_pattern,
|
||||
"refactoring_steps": refactoring_steps,
|
||||
"code_templates": code_templates,
|
||||
"summary": summary,
|
||||
"analysis": {
|
||||
"files_analyzed": len(go_files),
|
||||
"model_count": _count_models(all_content),
|
||||
"view_functions": _count_view_functions(all_content),
|
||||
"state_fields": _count_state_fields(all_content)
|
||||
},
|
||||
"validation": validation
|
||||
}
|
||||
|
||||
|
||||
def _detect_current_pattern(content: str) -> str:
|
||||
"""Detect the current architectural pattern."""
|
||||
|
||||
# Check for various patterns
|
||||
patterns_detected = []
|
||||
|
||||
# Pattern 1: Flat Model (single model struct, no child models)
|
||||
has_model = bool(re.search(r'type\s+\w*[Mm]odel\s+struct', content))
|
||||
has_child_models = bool(re.search(r'\w+Model\s+\w+Model', content))
|
||||
|
||||
if has_model and not has_child_models:
|
||||
patterns_detected.append("flat_model")
|
||||
|
||||
# Pattern 2: Model Tree (parent model with child models)
|
||||
if has_child_models:
|
||||
patterns_detected.append("model_tree")
|
||||
|
||||
# Pattern 3: Multi-view (multiple view rendering based on state)
|
||||
has_view_switcher = bool(re.search(r'switch\s+m\.\w*(view|mode|screen|state)', content, re.IGNORECASE))
|
||||
if has_view_switcher:
|
||||
patterns_detected.append("multi_view")
|
||||
|
||||
# Pattern 4: Component-based (using Bubble Tea components like list, viewport, etc.)
|
||||
bubbletea_components = [
|
||||
'list.Model',
|
||||
'viewport.Model',
|
||||
'textinput.Model',
|
||||
'textarea.Model',
|
||||
'table.Model',
|
||||
'progress.Model',
|
||||
'spinner.Model'
|
||||
]
|
||||
component_count = sum(1 for comp in bubbletea_components if comp in content)
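# e.g. a program embedding list.Model, viewport.Model and textinput.Model yields component_count == 3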
|
||||
|
||||
if component_count >= 3:
|
||||
patterns_detected.append("component_based")
|
||||
elif component_count >= 1:
|
||||
patterns_detected.append("uses_components")
|
||||
|
||||
# Pattern 5: State Machine (explicit state enums/constants)
|
||||
has_state_enum = bool(re.search(r'type\s+\w*State\s+(int|string)', content))
|
||||
has_iota_states = bool(re.search(r'const\s+\(\s*\w+State\s+\w*State\s+=\s+iota', content))
|
||||
|
||||
if has_state_enum or has_iota_states:
|
||||
patterns_detected.append("state_machine")
|
||||
|
||||
# Pattern 6: Event-driven (heavy use of custom messages)
|
||||
custom_msg_count = len(re.findall(r'type\s+\w+Msg\s+struct', content))
|
||||
if custom_msg_count >= 5:
|
||||
patterns_detected.append("event_driven")
|
||||
|
||||
# Return the most dominant pattern
|
||||
if "model_tree" in patterns_detected:
|
||||
return "model_tree"
|
||||
elif "state_machine" in patterns_detected and "multi_view" in patterns_detected:
|
||||
return "state_machine_multi_view"
|
||||
elif "component_based" in patterns_detected:
|
||||
return "component_based"
|
||||
elif "multi_view" in patterns_detected:
|
||||
return "multi_view"
|
||||
elif "flat_model" in patterns_detected:
|
||||
return "flat_model"
|
||||
elif has_model:
|
||||
return "basic_model"
|
||||
else:
|
||||
return "unknown"
|
||||
|
||||
|
||||
def _calculate_complexity(content: str, files: List[Path]) -> int:
|
||||
"""Calculate complexity score (0-100)."""
|
||||
|
||||
score = 0
|
||||
|
||||
# Factor 1: Number of files (10 points max)
|
||||
file_count = len(files)
|
||||
score += min(10, file_count * 2)
|
||||
|
||||
# Factor 2: Model field count (20 points max)
|
||||
model_match = re.search(r'type\s+(\w*[Mm]odel)\s+struct\s*\{([^}]+)\}', content, re.DOTALL)
|
||||
if model_match:
|
||||
model_body = model_match.group(2)
|
||||
field_count = len([line for line in model_body.split('\n')
|
||||
if line.strip() and not line.strip().startswith('//')])
|
||||
score += min(20, field_count)
|
||||
|
||||
# Factor 3: Number of Update() branches (20 points max)
|
||||
update_match = re.search(r'func\s+\([^)]+\)\s+Update\s*\([^)]+\)\s*\([^)]+\)\s*\{(.+?)^func\s',
|
||||
content, re.DOTALL | re.MULTILINE)
|
||||
if update_match:
|
||||
update_body = update_match.group(1)
|
||||
case_count = len(re.findall(r'case\s+', update_body))
|
||||
score += min(20, case_count * 2)
|
||||
|
||||
# Factor 4: View() complexity (15 points max)
|
||||
view_match = re.search(r'func\s+\([^)]+\)\s+View\s*\(\s*\)\s+string\s*\{(.+?)^func\s',
|
||||
content, re.DOTALL | re.MULTILINE)
|
||||
if view_match:
|
||||
view_body = view_match.group(1)
|
||||
view_lines = len(view_body.split('\n'))
|
||||
score += min(15, view_lines // 2)
|
||||
|
||||
# Factor 5: Custom message types (10 points max)
|
||||
custom_msg_count = len(re.findall(r'type\s+\w+Msg\s+struct', content))
|
||||
score += min(10, custom_msg_count * 2)
|
||||
|
||||
# Factor 6: Number of views/screens (15 points max)
|
||||
view_count = len(re.findall(r'func\s+\([^)]+\)\s+render\w+', content, re.IGNORECASE))
|
||||
score += min(15, view_count * 3)
|
||||
|
||||
# Factor 7: Use of channels/goroutines (10 points max)
|
||||
has_channels = len(re.findall(r'make\s*\(\s*chan\s+', content))
|
||||
has_goroutines = len(re.findall(r'\bgo\s+func', content))
|
||||
score += min(10, (has_channels + has_goroutines) * 2)
|
||||
|
||||
return min(100, score)
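# Worked example of the capping above: 3 files (6) + 18 model fields (18) + 12 Update cases (20, capped)
# + a 40-line View body (15, capped) + 4 custom messages (8) + 2 render helpers (6) + 1 goroutine (2) = 75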
|
||||
|
||||
|
||||
def _recommend_pattern(current: str, complexity: int, level: str) -> str:
|
||||
"""Recommend architectural pattern based on current state and complexity."""
|
||||
|
||||
# Simple apps (< 30 complexity)
|
||||
if complexity < 30:
|
||||
if current in ["unknown", "basic_model"]:
|
||||
return "flat_model" # Simple flat model is fine
|
||||
return current # Keep current pattern
|
||||
|
||||
# Medium complexity (30-70)
|
||||
elif complexity < 70:
|
||||
if current == "flat_model":
|
||||
return "multi_view" # Evolve to multi-view
|
||||
elif current == "basic_model":
|
||||
return "component_based" # Start using components
|
||||
return current
|
||||
|
||||
# High complexity (70+)
|
||||
else:
|
||||
if current in ["flat_model", "multi_view"]:
|
||||
return "model_tree" # Need hierarchy
|
||||
elif current == "component_based":
|
||||
return "model_tree_with_components" # Combine patterns
|
||||
return current
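# Example: a flat_model app that reaches complexity 75 is steered to model_tree, while a
# component_based app at the same score is steered to model_tree_with_components.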
|
||||
|
||||
|
||||
def _count_models(content: str) -> int:
|
||||
"""Count model structs."""
|
||||
return len(re.findall(r'type\s+\w*[Mm]odel\s+struct', content))
|
||||
|
||||
|
||||
def _count_view_functions(content: str) -> int:
|
||||
"""Count view rendering functions."""
|
||||
return len(re.findall(r'func\s+\([^)]+\)\s+(View|render\w+)', content, re.IGNORECASE))
|
||||
|
||||
|
||||
def _count_state_fields(content: str) -> int:
|
||||
"""Count state fields in model."""
|
||||
model_match = re.search(r'type\s+(\w*[Mm]odel)\s+struct\s*\{([^}]+)\}', content, re.DOTALL)
|
||||
if not model_match:
|
||||
return 0
|
||||
|
||||
model_body = model_match.group(2)
|
||||
return len([line for line in model_body.split('\n')
|
||||
if line.strip() and not line.strip().startswith('//')])
|
||||
|
||||
|
||||


def _generate_refactoring_steps(current: str, recommended: str, content: str) -> List[str]:
    """Generate step-by-step refactoring guide."""

    if current == recommended:
        return ["No refactoring needed - current architecture is appropriate"]

    steps = []

    # Flat Model → Multi-view
    if current == "flat_model" and recommended == "multi_view":
        steps = [
            "1. Add view state enum to model",
            "2. Create separate render functions for each view",
            "3. Add view switching logic in Update()",
            "4. Implement switch statement in View() to route to render functions",
            "5. Add keyboard shortcuts for view navigation"
        ]

    # Flat Model → Model Tree
    elif current == "flat_model" and recommended == "model_tree":
        steps = [
            "1. Identify logical groupings of fields in current model",
            "2. Create child model structs for each grouping",
            "3. Add Init() methods to child models",
            "4. Create parent model with child model fields",
            "5. Implement message routing in parent's Update()",
            "6. Delegate rendering to child models in View()",
            "7. Test each child model independently"
        ]

    # Multi-view → Model Tree
    elif current == "multi_view" and recommended == "model_tree":
        steps = [
            "1. Convert each view into a separate child model",
            "2. Extract view-specific state into child models",
            "3. Create parent router model with activeView field",
            "4. Implement message routing based on activeView",
            "5. Move view rendering logic into child models",
            "6. Add inter-model communication via custom messages"
        ]

    # Component-based → Model Tree with Components
    elif current == "component_based" and recommended == "model_tree_with_components":
        steps = [
            "1. Group related components into logical views",
            "2. Create view models that own related components",
            "3. Create parent model to manage view models",
            "4. Implement message routing to active view",
            "5. Keep component updates within their view models",
            "6. Compose final view from view model renders"
        ]

    # Basic Model → Component-based
    elif current == "basic_model" and recommended == "component_based":
        steps = [
            "1. Identify UI patterns that match Bubble Tea components",
            "2. Replace custom text input with textinput.Model",
            "3. Replace custom list with list.Model",
            "4. Replace custom scrolling with viewport.Model",
            "5. Update Init() to initialize components",
            "6. Route messages to components in Update()",
            "7. Compose View() using component.View() calls"
        ]

    # Generic fallback
    else:
        steps = [
            f"1. Analyze current {current} pattern",
            f"2. Study {recommended} pattern examples",
            "3. Plan gradual migration strategy",
            "4. Implement incrementally with tests",
            "5. Validate each step before proceeding"
        ]

    return steps


def _generate_code_templates(pattern: str, existing_code: str) -> Dict[str, str]:
    """Generate code templates for recommended pattern."""

    templates = {}

    if pattern == "model_tree":
        templates["parent_model"] = '''// Parent model manages child models
type appModel struct {
    activeView int

    // Child models
    listView listViewModel
    detailView detailViewModel
    searchView searchViewModel
}

func (m appModel) Init() tea.Cmd {
    return tea.Batch(
        m.listView.Init(),
        m.detailView.Init(),
        m.searchView.Init(),
    )
}

func (m appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    var cmd tea.Cmd

    // Global navigation
    if key, ok := msg.(tea.KeyMsg); ok {
        switch key.String() {
        case "1":
            m.activeView = 0
            return m, nil
        case "2":
            m.activeView = 1
            return m, nil
        case "3":
            m.activeView = 2
            return m, nil
        }
    }

    // Route to active child
    switch m.activeView {
    case 0:
        m.listView, cmd = m.listView.Update(msg)
    case 1:
        m.detailView, cmd = m.detailView.Update(msg)
    case 2:
        m.searchView, cmd = m.searchView.Update(msg)
    }

    return m, cmd
}

func (m appModel) View() string {
    switch m.activeView {
    case 0:
        return m.listView.View()
    case 1:
        return m.detailView.View()
    case 2:
        return m.searchView.View()
    }
    return ""
}'''

        templates["child_model"] = '''// Child model handles its own state and rendering
type listViewModel struct {
    items []string
    cursor int
    selected map[int]bool
}

func (m listViewModel) Init() tea.Cmd {
    return nil
}

func (m listViewModel) Update(msg tea.Msg) (listViewModel, tea.Cmd) {
    switch msg := msg.(type) {
    case tea.KeyMsg:
        switch msg.String() {
        case "up", "k":
            if m.cursor > 0 {
                m.cursor--
            }
        case "down", "j":
            if m.cursor < len(m.items)-1 {
                m.cursor++
            }
        case " ":
            m.selected[m.cursor] = !m.selected[m.cursor]
        }
    }
    return m, nil
}

func (m listViewModel) View() string {
    s := "Select items:\\n\\n"
    for i, item := range m.items {
        cursor := " "
        if m.cursor == i {
            cursor = ">"
        }
        checked := " "
        if m.selected[i] {
            checked = "x"
        }
        s += fmt.Sprintf("%s [%s] %s\\n", cursor, checked, item)
    }
    return s
}'''

        templates["message_passing"] = '''// Custom message for inter-model communication
type itemSelectedMsg struct {
    itemID string
}

// In listViewModel:
func (m listViewModel) Update(msg tea.Msg) (listViewModel, tea.Cmd) {
    switch msg := msg.(type) {
    case tea.KeyMsg:
        if msg.String() == "enter" {
            // Send message to parent (who routes to detail view)
            return m, func() tea.Msg {
                return itemSelectedMsg{itemID: m.items[m.cursor]}
            }
        }
    }
    return m, nil
}

// In appModel:
func (m appModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    switch msg := msg.(type) {
    case itemSelectedMsg:
        // Load the selected item, then switch to the detail view
        m.detailView.LoadItem(msg.itemID)
        m.activeView = 1 // Switch to detail
        return m, nil
    }

    // Route to children...
    return m, nil
}'''

    elif pattern == "multi_view":
        templates["view_state"] = '''type viewState int

const (
    listView viewState = iota
    detailView
    searchView
)

type model struct {
    currentView viewState

    // View-specific state
    listItems []string
    listCursor int
    detailItem string
    searchQuery string
}'''

        templates["view_switching"] = '''func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    switch msg := msg.(type) {
    case tea.KeyMsg:
        // Global navigation
        switch msg.String() {
        case "1":
            m.currentView = listView
            return m, nil
        case "2":
            m.currentView = detailView
            return m, nil
        case "3":
            m.currentView = searchView
            return m, nil
        }

        // View-specific handling
        switch m.currentView {
        case listView:
            return m.updateListView(msg)
        case detailView:
            return m.updateDetailView(msg)
        case searchView:
            return m.updateSearchView(msg)
        }
    }
    return m, nil
}

func (m model) View() string {
    switch m.currentView {
    case listView:
        return m.renderListView()
    case detailView:
        return m.renderDetailView()
    case searchView:
        return m.renderSearchView()
    }
    return ""
}'''

    elif pattern == "component_based":
        templates["using_components"] = '''import (
    "github.com/charmbracelet/bubbles/list"
    "github.com/charmbracelet/bubbles/textinput"
    "github.com/charmbracelet/bubbles/viewport"
    tea "github.com/charmbracelet/bubbletea"
    "github.com/charmbracelet/lipgloss"
)

type model struct {
    list list.Model
    search textinput.Model
    viewer viewport.Model
    activeComponent int
}

func initialModel() model {
    // Initialize components
    items := []list.Item{
        item{title: "Item 1", desc: "Description"},
        item{title: "Item 2", desc: "Description"},
    }

    l := list.New(items, list.NewDefaultDelegate(), 20, 10)
    l.Title = "Items"

    ti := textinput.New()
    ti.Placeholder = "Search..."
    ti.Focus()

    vp := viewport.New(80, 20)

    return model{
        list: l,
        search: ti,
        viewer: vp,
        activeComponent: 0,
    }
}

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    var cmd tea.Cmd

    // Route to active component
    switch m.activeComponent {
    case 0:
        m.list, cmd = m.list.Update(msg)
    case 1:
        m.search, cmd = m.search.Update(msg)
    case 2:
        m.viewer, cmd = m.viewer.Update(msg)
    }

    return m, cmd
}

func (m model) View() string {
    return lipgloss.JoinVertical(
        lipgloss.Left,
        m.search.View(),
        m.list.View(),
        m.viewer.View(),
    )
}'''

    elif pattern == "state_machine_multi_view":
        templates["state_machine"] = '''type appState int

const (
    loadingState appState = iota
    listState
    detailState
    errorState
)

type model struct {
    state appState
    prevState appState

    // State data
    items []string
    selected string
    error error
}

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    switch msg := msg.(type) {
    case itemsLoadedMsg:
        m.items = msg.items
        m.state = listState
        return m, nil

    case itemSelectedMsg:
        m.selected = msg.item
        m.state = detailState
        return m, loadItemDetails

    case errorMsg:
        m.prevState = m.state
        m.state = errorState
        m.error = msg.err
        return m, nil

    case tea.KeyMsg:
        if msg.String() == "esc" && m.state == errorState {
            m.state = m.prevState // Return to previous state
            return m, nil
        }
    }

    // State-specific update
    switch m.state {
    case listState:
        return m.updateList(msg)
    case detailState:
        return m.updateDetail(msg)
    }

    return m, nil
}

func (m model) View() string {
    switch m.state {
    case loadingState:
        return "Loading..."
    case listState:
        return m.renderList()
    case detailState:
        return m.renderDetail()
    case errorState:
        return fmt.Sprintf("Error: %v\\nPress ESC to continue", m.error)
    }
    return ""
}'''

    return templates


def validate_architecture_suggestion(result: Dict[str, Any]) -> Dict[str, Any]:
    """Validate architecture suggestion result."""
    if 'error' in result:
        return {"status": "error", "summary": result['error']}

    validation = result.get('validation', {})
    status = validation.get('status', 'unknown')
    summary = validation.get('summary', 'Architecture analysis complete')

    checks = [
        (result.get('current_pattern') is not None, "Pattern detected"),
        (result.get('complexity_score') is not None, "Complexity calculated"),
        (result.get('recommended_pattern') is not None, "Recommendation generated"),
        (len(result.get('refactoring_steps', [])) > 0, "Has refactoring steps"),
    ]

    all_pass = all(check[0] for check in checks)

    return {
        "status": status,
        "summary": summary,
        "checks": {check[1]: check[0] for check in checks},
        "valid": all_pass
    }


if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: suggest_architecture.py <code_path> [complexity_level]")
        sys.exit(1)

    code_path = sys.argv[1]
    complexity_level = sys.argv[2] if len(sys.argv) > 2 else "auto"

    result = suggest_architecture(code_path, complexity_level)
    print(json.dumps(result, indent=2))
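
# Example invocation (illustrative; the target path and resulting values are
# hypothetical and depend on the code being analyzed):
#   $ python3 suggest_architecture.py ./cmd/myapp auto
# The printed JSON includes "current_pattern", "complexity_score",
# "recommended_pattern", and "refactoring_steps", which is exactly what
# validate_architecture_suggestion() above checks for.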
1
scripts/utils/__init__.py
Normal file
@@ -0,0 +1 @@
# Utility modules for Bubble Tea maintenance agent
328
scripts/utils/go_parser.py
Normal file
@@ -0,0 +1,328 @@
#!/usr/bin/env python3
"""
Go code parser utilities for Bubble Tea maintenance agent.
Extracts models, functions, types, and code structure.
"""

import re
from typing import Any, Dict, List, Optional, Tuple
from pathlib import Path


def extract_model_struct(content: str) -> Optional[Dict[str, Any]]:
    """Extract the main model struct from Go code."""

    # Pattern: type XxxModel struct { ... }
    pattern = r'type\s+(\w*[Mm]odel)\s+struct\s*\{([^}]+)\}'
    match = re.search(pattern, content, re.DOTALL)

    if not match:
        return None

    model_name = match.group(1)
    model_body = match.group(2)

    # Parse fields
    fields = []
    for line in model_body.split('\n'):
        line = line.strip()
        if not line or line.startswith('//'):
            continue

        # Parse field: name type [tag]
        field_match = re.match(r'(\w+)\s+([^\s`]+)(?:\s+`([^`]+)`)?', line)
        if field_match:
            fields.append({
                "name": field_match.group(1),
                "type": field_match.group(2),
                "tag": field_match.group(3) if field_match.group(3) else None
            })

    return {
        "name": model_name,
        "fields": fields,
        "field_count": len(fields),
        "raw_body": model_body
    }
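
# Minimal usage sketch (hypothetical Go snippet, shown for illustration):
#
#   snippet = "type appModel struct {\n    cursor int\n    items  []string\n}"
#   info = extract_model_struct(snippet)
#   # info["name"] == "appModel" and info["field_count"] == 2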


def extract_update_function(content: str) -> Optional[Dict[str, Any]]:
    """Extract the Update() function."""

    # Find Update function
    pattern = r'func\s+\((\w+)\s+(\*?)(\w+)\)\s+Update\s*\([^)]*\)\s*\([^)]*\)\s*\{(.+?)(?=\nfunc\s|\Z)'
    match = re.search(pattern, content, re.DOTALL | re.MULTILINE)

    if not match:
        return None

    receiver_name = match.group(1)
    is_pointer = match.group(2) == '*'
    receiver_type = match.group(3)
    function_body = match.group(4)

    # Count cases in switch statements
    case_count = len(re.findall(r'\bcase\s+', function_body))

    # Find message types handled
    handled_messages = re.findall(r'case\s+(\w+\.?\w*):', function_body)

    return {
        "receiver_name": receiver_name,
        "receiver_type": receiver_type,
        "is_pointer_receiver": is_pointer,
        "body_lines": len(function_body.split('\n')),
        "case_count": case_count,
        "handled_messages": list(set(handled_messages)),
        "raw_body": function_body
    }


def extract_view_function(content: str) -> Optional[Dict[str, Any]]:
    """Extract the View() function."""

    pattern = r'func\s+\((\w+)\s+(\*?)(\w+)\)\s+View\s*\(\s*\)\s+string\s*\{(.+?)(?=\nfunc\s|\Z)'
    match = re.search(pattern, content, re.DOTALL | re.MULTILINE)

    if not match:
        return None

    receiver_name = match.group(1)
    is_pointer = match.group(2) == '*'
    receiver_type = match.group(3)
    function_body = match.group(4)

    # Analyze complexity
    string_concat_count = len(re.findall(r'\+\s*"', function_body))
    lipgloss_calls = len(re.findall(r'lipgloss\.', function_body))

    return {
        "receiver_name": receiver_name,
        "receiver_type": receiver_type,
        "is_pointer_receiver": is_pointer,
        "body_lines": len(function_body.split('\n')),
        "string_concatenations": string_concat_count,
        "lipgloss_calls": lipgloss_calls,
        "raw_body": function_body
    }


def extract_init_function(content: str) -> Optional[Dict[str, Any]]:
    """Extract the Init() function."""

    pattern = r'func\s+\((\w+)\s+(\*?)(\w+)\)\s+Init\s*\(\s*\)\s+tea\.Cmd\s*\{(.+?)(?=\nfunc\s|\Z)'
    match = re.search(pattern, content, re.DOTALL | re.MULTILINE)

    if not match:
        return None

    receiver_name = match.group(1)
    is_pointer = match.group(2) == '*'
    receiver_type = match.group(3)
    function_body = match.group(4)

    return {
        "receiver_name": receiver_name,
        "receiver_type": receiver_type,
        "is_pointer_receiver": is_pointer,
        "body_lines": len(function_body.split('\n')),
        "raw_body": function_body
    }


def extract_custom_messages(content: str) -> List[Dict[str, Any]]:
    """Extract custom message type definitions."""

    # Pattern: type xxxMsg struct { ... }
    pattern = r'type\s+(\w+Msg)\s+struct\s*\{([^}]*)\}'
    matches = re.finditer(pattern, content, re.DOTALL)

    messages = []
    for match in matches:
        msg_name = match.group(1)
        msg_body = match.group(2)

        # Parse fields
        fields = []
        for line in msg_body.split('\n'):
            line = line.strip()
            if not line or line.startswith('//'):
                continue

            field_match = re.match(r'(\w+)\s+([^\s]+)', line)
            if field_match:
                fields.append({
                    "name": field_match.group(1),
                    "type": field_match.group(2)
                })

        messages.append({
            "name": msg_name,
            "fields": fields,
            "field_count": len(fields)
        })

    return messages


def extract_tea_commands(content: str) -> List[Dict[str, Any]]:
    """Extract tea.Cmd functions."""

    # Pattern: func xxxCmd() tea.Msg { ... }
    pattern = r'func\s+(\w+)\s*\(\s*\)\s+tea\.Msg\s*\{(.+?)^\}'
    matches = re.finditer(pattern, content, re.DOTALL | re.MULTILINE)

    commands = []
    for match in matches:
        cmd_name = match.group(1)
        cmd_body = match.group(2)

        # Check for blocking operations
        has_http = bool(re.search(r'\bhttp\.(Get|Post|Do)', cmd_body))
        has_sleep = bool(re.search(r'time\.Sleep', cmd_body))
        has_io = bool(re.search(r'\bos\.(Open|Read|Write)', cmd_body))

        commands.append({
            "name": cmd_name,
            "body_lines": len(cmd_body.split('\n')),
            "has_http": has_http,
            "has_sleep": has_sleep,
            "has_io": has_io,
            "is_blocking": has_http or has_io  # sleep is expected in commands
        })

    return commands
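
# Usage sketch (hypothetical Go source, for illustration only):
#
#   src = 'func fetchUsers() tea.Msg {\n\tresp, _ := http.Get("https://example.com")\n\t_ = resp\n\treturn usersMsg{}\n}\n'
#   extract_tea_commands(src)
#   # -> one entry named "fetchUsers" with has_http=True, hence is_blocking=True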


def extract_imports(content: str) -> List[str]:
    """Extract import statements."""

    imports = []

    # Single import
    single_pattern = r'import\s+"([^"]+)"'
    imports.extend(re.findall(single_pattern, content))

    # Multi-line import block
    block_pattern = r'import\s+\(([^)]+)\)'
    block_matches = re.finditer(block_pattern, content, re.DOTALL)
    for match in block_matches:
        block_content = match.group(1)
        # Extract quoted imports
        quoted = re.findall(r'"([^"]+)"', block_content)
        imports.extend(quoted)

    return list(set(imports))


def find_bubbletea_components(content: str) -> List[Dict[str, Any]]:
    """Find usage of Bubble Tea components (list, viewport, etc.)."""

    components = []

    component_patterns = {
        "list": r'list\.Model',
        "viewport": r'viewport\.Model',
        "textinput": r'textinput\.Model',
        "textarea": r'textarea\.Model',
        "table": r'table\.Model',
        "progress": r'progress\.Model',
        "spinner": r'spinner\.Model',
        "timer": r'timer\.Model',
        "stopwatch": r'stopwatch\.Model',
        "filepicker": r'filepicker\.Model',
        "paginator": r'paginator\.Model',
    }

    for comp_name, pattern in component_patterns.items():
        if re.search(pattern, content):
            # Count occurrences
            count = len(re.findall(pattern, content))
            components.append({
                "component": comp_name,
                "occurrences": count
            })

    return components
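
# Usage sketch:
#
#   src = "type model struct { list list.Model; input textinput.Model }"
#   find_bubbletea_components(src)
#   # -> [{"component": "list", "occurrences": 1},
#   #     {"component": "textinput", "occurrences": 1}]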


def analyze_code_structure(file_path: Path) -> Dict[str, Any]:
    """Comprehensive code structure analysis."""

    try:
        content = file_path.read_text()
    except Exception as e:
        return {"error": str(e)}

    return {
        "model": extract_model_struct(content),
        "update": extract_update_function(content),
        "view": extract_view_function(content),
        "init": extract_init_function(content),
        "custom_messages": extract_custom_messages(content),
        "tea_commands": extract_tea_commands(content),
        "imports": extract_imports(content),
        "components": find_bubbletea_components(content),
        "file_size": len(content),
        "line_count": len(content.split('\n')),
        "uses_lipgloss": '"github.com/charmbracelet/lipgloss"' in content,
        "uses_bubbletea": '"github.com/charmbracelet/bubbletea"' in content
    }


def find_function_by_name(content: str, func_name: str) -> Optional[str]:
    """Find a specific function by name and return its body."""

    pattern = rf'func\s+(?:\([^)]+\)\s+)?{func_name}\s*\([^)]*\)[^{{]*\{{(.+?)(?=\nfunc\s|\Z)'
    match = re.search(pattern, content, re.DOTALL | re.MULTILINE)

    if match:
        return match.group(1)
    return None


def extract_state_machine_states(content: str) -> Optional[Dict[str, Any]]:
    """Extract state machine enum if present."""

    # Pattern: type xxxState int; const ( state1 state2 = iota ... )
    state_type_pattern = r'type\s+(\w+State)\s+(int|string)'
    state_type_match = re.search(state_type_pattern, content)

    if not state_type_match:
        return None

    state_type = state_type_match.group(1)

    # Find const block with iota
    const_pattern = r'const\s+\(([^)]+)\)'
    const_matches = re.finditer(const_pattern, content, re.DOTALL)

    states = []
    for const_match in const_matches:
        const_body = const_match.group(1)
        if state_type in const_body and 'iota' in const_body:
            # Extract state names
            state_names = re.findall(rf'(\w+)\s+{state_type}', const_body)
            states = state_names
            break

    return {
        "type": state_type,
        "states": states,
        "count": len(states)
    }


# Example usage and testing
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: go_parser.py <go_file>")
        sys.exit(1)

    file_path = Path(sys.argv[1])
    result = analyze_code_structure(file_path)

    import json
    print(json.dumps(result, indent=2))
1
scripts/utils/validators/__init__.py
Normal file
@@ -0,0 +1 @@
# Validator modules for Bubble Tea maintenance agent
349
scripts/utils/validators/common.py
Normal file
@@ -0,0 +1,349 @@
#!/usr/bin/env python3
"""
Common validation utilities for Bubble Tea maintenance agent.
"""

from typing import Dict, List, Any, Optional


def validate_result_structure(result: Dict[str, Any], required_keys: List[str]) -> Dict[str, Any]:
    """
    Validate that a result dictionary has required keys.

    Args:
        result: Result dictionary to validate
        required_keys: List of required key names

    Returns:
        Validation dict with status, summary, and checks
    """
    if 'error' in result:
        return {
            "status": "error",
            "summary": result['error'],
            "valid": False
        }

    checks = {}
    for key in required_keys:
        checks[f"has_{key}"] = key in result and result[key] is not None

    all_pass = all(checks.values())

    status = "pass" if all_pass else "fail"
    summary = "Validation passed" if all_pass else f"Missing required keys: {[k for k, v in checks.items() if not v]}"

    return {
        "status": status,
        "summary": summary,
        "checks": checks,
        "valid": all_pass
    }
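
# Usage sketch (hypothetical report values):
#
#   report = {"compliance": {}, "overall_score": 84}
#   validate_result_structure(report, ["compliance", "overall_score"])
#   # -> {"status": "pass", "summary": "Validation passed", ...}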


def validate_issue_list(issues: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Validate a list of issues has proper structure.

    Expected issue structure:
    - severity: CRITICAL, HIGH, WARNING, or INFO
    - category: performance, layout, reliability, etc.
    - issue: Description
    - location: File path and line number
    - explanation: Why it's a problem
    - fix: How to fix it
    """
    if not isinstance(issues, list):
        return {
            "status": "error",
            "summary": "Issues must be a list",
            "valid": False
        }

    required_fields = ["severity", "issue", "location", "explanation"]
    valid_severities = ["CRITICAL", "HIGH", "MEDIUM", "WARNING", "LOW", "INFO"]

    checks = {
        "is_list": True,
        "all_have_severity": True,
        "valid_severity_values": True,
        "all_have_issue": True,
        "all_have_location": True,
        "all_have_explanation": True
    }

    for issue in issues:
        if not isinstance(issue, dict):
            checks["is_list"] = False
            continue

        if "severity" not in issue:
            checks["all_have_severity"] = False
        elif issue["severity"] not in valid_severities:
            checks["valid_severity_values"] = False

        if "issue" not in issue or not issue["issue"]:
            checks["all_have_issue"] = False

        if "location" not in issue or not issue["location"]:
            checks["all_have_location"] = False

        if "explanation" not in issue or not issue["explanation"]:
            checks["all_have_explanation"] = False

    all_pass = all(checks.values())
    status = "pass" if all_pass else "warning"

    failed = [k for k, v in checks.items() if not v]
    summary = "All issues properly structured" if all_pass else f"Issues have problems: {failed}"

    return {
        "status": status,
        "summary": summary,
        "checks": checks,
        "valid": all_pass,
        "issue_count": len(issues)
    }


def validate_score(score: int, min_val: int = 0, max_val: int = 100) -> bool:
    """Validate a numeric score is in range."""
    return isinstance(score, (int, float)) and min_val <= score <= max_val


def validate_health_score(health_score: int) -> Dict[str, Any]:
    """Validate health score and categorize."""
    if not validate_score(health_score):
        return {
            "status": "error",
            "summary": "Invalid health score",
            "valid": False
        }

    if health_score >= 90:
        category = "excellent"
        status = "pass"
    elif health_score >= 75:
        category = "good"
        status = "pass"
    elif health_score >= 60:
        category = "fair"
        status = "warning"
    elif health_score >= 40:
        category = "poor"
        status = "warning"
    else:
        category = "critical"
        status = "critical"

    return {
        "status": status,
        "summary": f"{category.capitalize()} health ({health_score}/100)",
        "category": category,
        "valid": True,
        "score": health_score
    }


def validate_file_path(file_path: str) -> bool:
    """Validate file path format."""
    from pathlib import Path
    try:
        path = Path(file_path)
        return path.exists()
    except Exception:
        return False


def validate_best_practices_compliance(compliance: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
    """Validate best practices compliance structure."""
    if not isinstance(compliance, dict):
        return {
            "status": "error",
            "summary": "Compliance must be a dictionary",
            "valid": False
        }

    required_tip_fields = ["status", "score", "message"]
    valid_statuses = ["pass", "fail", "warning", "info"]

    checks = {
        "has_tips": len(compliance) > 0,
        "all_tips_valid": True,
        "valid_statuses": True,
        "valid_scores": True
    }

    for tip_name, tip_data in compliance.items():
        if not isinstance(tip_data, dict):
            checks["all_tips_valid"] = False
            continue

        for field in required_tip_fields:
            if field not in tip_data:
                checks["all_tips_valid"] = False

        if tip_data.get("status") not in valid_statuses:
            checks["valid_statuses"] = False

        if not validate_score(tip_data.get("score", -1)):
            checks["valid_scores"] = False

    all_pass = all(checks.values())
    status = "pass" if all_pass else "warning"

    return {
        "status": status,
        "summary": f"Validated {len(compliance)} tips",
        "checks": checks,
        "valid": all_pass,
        "tip_count": len(compliance)
    }


def validate_bottlenecks(bottlenecks: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Validate performance bottleneck list."""
    if not isinstance(bottlenecks, list):
        return {
            "status": "error",
            "summary": "Bottlenecks must be a list",
            "valid": False
        }

    required_fields = ["severity", "category", "issue", "location", "explanation", "fix"]
    valid_severities = ["CRITICAL", "HIGH", "MEDIUM", "LOW"]
    valid_categories = ["performance", "memory", "io", "rendering"]

    checks = {
        "is_list": True,
        "all_have_severity": True,
        "valid_severities": True,
        "all_have_category": True,
        "valid_categories": True,
        "all_have_fix": True
    }

    for bottleneck in bottlenecks:
        if not isinstance(bottleneck, dict):
            checks["is_list"] = False
            continue

        if "severity" not in bottleneck:
            checks["all_have_severity"] = False
        elif bottleneck["severity"] not in valid_severities:
            checks["valid_severities"] = False

        if "category" not in bottleneck:
            checks["all_have_category"] = False
        elif bottleneck["category"] not in valid_categories:
            checks["valid_categories"] = False

        if "fix" not in bottleneck or not bottleneck["fix"]:
            checks["all_have_fix"] = False

    all_pass = all(checks.values())
    status = "pass" if all_pass else "warning"

    return {
        "status": status,
        "summary": f"Validated {len(bottlenecks)} bottlenecks",
        "checks": checks,
        "valid": all_pass,
        "bottleneck_count": len(bottlenecks)
    }


def validate_architecture_analysis(result: Dict[str, Any]) -> Dict[str, Any]:
    """Validate architecture analysis result."""
    required_keys = ["current_pattern", "complexity_score", "recommended_pattern", "refactoring_steps"]

    checks = {}
    for key in required_keys:
        checks[f"has_{key}"] = key in result and result[key] is not None

    # Validate complexity score
    if "complexity_score" in result:
        checks["valid_complexity_score"] = validate_score(result["complexity_score"])
    else:
        checks["valid_complexity_score"] = False

    # Validate refactoring steps
    if "refactoring_steps" in result:
        checks["has_refactoring_steps"] = isinstance(result["refactoring_steps"], list) and len(result["refactoring_steps"]) > 0
    else:
        checks["has_refactoring_steps"] = False

    all_pass = all(checks.values())
    status = "pass" if all_pass else "warning"

    return {
        "status": status,
        "summary": "Architecture analysis validated" if all_pass else "Architecture analysis incomplete",
        "checks": checks,
        "valid": all_pass
    }


def validate_layout_fixes(fixes: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Validate layout fix list."""
    if not isinstance(fixes, list):
        return {
            "status": "error",
            "summary": "Fixes must be a list",
            "valid": False
        }

    required_fields = ["location", "original", "fixed", "explanation"]

    checks = {
        "is_list": True,
        "all_have_location": True,
        "all_have_explanation": True,
        "all_have_fix": True
    }

    for fix in fixes:
        if not isinstance(fix, dict):
            checks["is_list"] = False
            continue

        if "location" not in fix or not fix["location"]:
            checks["all_have_location"] = False

        if "explanation" not in fix or not fix["explanation"]:
            checks["all_have_explanation"] = False

        if "fixed" not in fix or not fix["fixed"]:
            checks["all_have_fix"] = False

    all_pass = all(checks.values())
    status = "pass" if all_pass else "warning"

    return {
        "status": status,
        "summary": f"Validated {len(fixes)} fixes",
        "checks": checks,
        "valid": all_pass,
        "fix_count": len(fixes)
    }


# Example usage
if __name__ == "__main__":
    # Test validation functions
    test_issues = [
        {
            "severity": "CRITICAL",
            "category": "performance",
            "issue": "Blocking operation",
            "location": "main.go:45",
            "explanation": "HTTP call blocks event loop",
            "fix": "Move to tea.Cmd"
        }
    ]

    result = validate_issue_list(test_issues)
    print(f"Issue validation: {result}")

    health_result = validate_health_score(75)
    print(f"Health validation: {health_result}")