Initial commit

Zhongwei Li
2025-11-30 08:50:59 +08:00
commit cee24bf043
13 changed files with 3252 additions and 0 deletions


@@ -0,0 +1,280 @@
#!/usr/bin/env python3
"""
TDD Compliance Checker
Analyzes code to detect if Test-Driven Development was followed.
Identifies code smells and patterns that indicate tests-after-code.
"""
import os
import re
import sys
import json
from pathlib import Path
from typing import Dict, List
class TDDComplianceChecker:
"""Checks code for TDD compliance indicators."""
# Code smell patterns that suggest tests-after-code
CODE_SMELLS = {
'nested_conditionals': r'if\s+.*:\s*\n\s+if\s+.*:|if\s+.*:\s*\n\s+elif\s+',
'long_methods': None, # Checked by line count
'complex_conditions': r'if\s+.*\s+(and|or)\s+.*\s+(and|or)\s+',
'multiple_responsibilities': None, # Checked by method analysis
'missing_abstractions': r'if\s+isinstance\(',
'god_class': None, # Checked by class analysis
}
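# Illustrative matches (hypothetical snippets): 'complex_conditions' flags lines like
#   if a and b or c and d:
# and 'missing_abstractions' flags isinstance() dispatch such as
#   if isinstance(shape, Circle): ...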
def __init__(self, path: str):
self.path = Path(path)
self.issues = []
self.metrics = {
'files_analyzed': 0,
'test_files_found': 0,
'code_smells': 0,
'tdd_score': 0.0
}
def analyze(self) -> Dict:
"""Run full TDD compliance analysis."""
if self.path.is_file():
self._analyze_file(self.path)
else:
self._analyze_directory(self.path)
self._calculate_tdd_score()
return {
'issues': self.issues,
'metrics': self.metrics,
'compliance': self._get_compliance_level()
}
def _analyze_directory(self, directory: Path):
"""Recursively analyze all source files in directory."""
# Common source file extensions
extensions = {'.py', '.js', '.ts', '.java', '.go', '.rb', '.php', '.c', '.cpp', '.cs'}
for file_path in directory.rglob('*'):
if file_path.suffix in extensions and file_path.is_file():
# Skip test files in analysis (we'll check they exist separately)
if not self._is_test_file(file_path):
self._analyze_file(file_path)
def _analyze_file(self, file_path: Path):
"""Analyze a single source file for TDD compliance."""
self.metrics['files_analyzed'] += 1
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
lines = content.split('\n')
# Check for code smells
self._check_nested_conditionals(file_path, content)
self._check_long_methods(file_path, lines)
self._check_complex_conditions(file_path, content)
self._check_missing_abstractions(file_path, content)
# Check if corresponding test file exists
self._check_test_coverage(file_path)
except Exception as e:
self.issues.append({
'file': str(file_path),
'type': 'error',
'message': f'Failed to analyze file: {str(e)}'
})
def _check_nested_conditionals(self, file_path: Path, content: str):
"""Detect deeply nested conditional statements."""
pattern = self.CODE_SMELLS['nested_conditionals']
matches = re.finditer(pattern, content)
for match in matches:
line_num = content[:match.start()].count('\n') + 1
self.issues.append({
'file': str(file_path),
'line': line_num,
'type': 'code_smell',
'severity': 'high',
'smell': 'nested_conditionals',
'message': 'Nested conditional statements detected. TDD typically produces flatter, more testable code structures.'
})
self.metrics['code_smells'] += 1
def _check_long_methods(self, file_path: Path, lines: List[str]):
"""Detect methods/functions that are too long."""
# Simple heuristic: methods longer than 20 lines
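# Note: the length check below only fires once a later line at or below the
# definition's indent appears, so a method that runs to the end of the file is not flagged.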
in_method = False
method_start = 0
method_name = ''
indent_level = 0
for i, line in enumerate(lines):
stripped = line.lstrip()
# Detect method/function definitions (language-agnostic patterns)
if any(keyword in stripped for keyword in ['def ', 'function ', 'func ', 'public ', 'private ', 'protected ']):
if '{' in stripped or ':' in stripped:
in_method = True
method_start = i + 1
method_name = stripped.split('(')[0].split()[-1]
indent_level = len(line) - len(stripped)
# Check if method ended
elif in_method:
current_indent = len(line) - len(line.lstrip())
if stripped and current_indent <= indent_level and stripped not in ['}', 'end']:
method_length = i - method_start
if method_length > 20:
self.issues.append({
'file': str(file_path),
'line': method_start,
'type': 'code_smell',
'severity': 'medium',
'smell': 'long_method',
'message': f'Method "{method_name}" is {method_length} lines long. TDD encourages smaller, focused methods.'
})
self.metrics['code_smells'] += 1
in_method = False
def _check_complex_conditions(self, file_path: Path, content: str):
"""Detect overly complex conditional expressions."""
pattern = self.CODE_SMELLS['complex_conditions']
matches = re.finditer(pattern, content)
for match in matches:
line_num = content[:match.start()].count('\n') + 1
self.issues.append({
'file': str(file_path),
'line': line_num,
'type': 'code_smell',
'severity': 'medium',
'smell': 'complex_conditions',
'message': 'Complex boolean conditions detected. TDD promotes simpler, more testable conditions.'
})
self.metrics['code_smells'] += 1
def _check_missing_abstractions(self, file_path: Path, content: str):
"""Detect type checking that suggests missing abstractions."""
pattern = self.CODE_SMELLS['missing_abstractions']
matches = re.finditer(pattern, content)
for match in matches:
line_num = content[:match.start()].count('\n') + 1
self.issues.append({
'file': str(file_path),
'line': line_num,
'type': 'code_smell',
'severity': 'medium',
'smell': 'missing_abstractions',
'message': 'Type checking detected. TDD encourages polymorphism over type checking.'
})
self.metrics['code_smells'] += 1
def _check_test_coverage(self, file_path: Path):
"""Check if a corresponding test file exists."""
test_file = self._find_test_file(file_path)
if test_file and test_file.exists():
self.metrics['test_files_found'] += 1
else:
self.issues.append({
'file': str(file_path),
'type': 'missing_test',
'severity': 'critical',
'message': f'No corresponding test file found. Expected: {test_file}'
})
def _find_test_file(self, source_file: Path) -> Path:
"""Find the expected test file location for a source file."""
# Common test file patterns
test_patterns = [
lambda p: p.parent / f'test_{p.name}',
lambda p: p.parent / f'{p.stem}_test{p.suffix}',
lambda p: p.parent / 'tests' / f'test_{p.name}',
lambda p: p.parent.parent / 'tests' / p.parent.name / f'test_{p.name}',
lambda p: p.parent.parent / 'test' / p.parent.name / f'test_{p.name}',
]
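# e.g. for a hypothetical src/billing/invoice.py the candidates are, in order:
#   src/billing/test_invoice.py, src/billing/invoice_test.py,
#   src/billing/tests/test_invoice.py, src/tests/billing/test_invoice.py,
#   src/test/billing/test_invoice.py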
for pattern in test_patterns:
test_file = pattern(source_file)
if test_file.exists():
return test_file
# Fall back to the most common pattern as the expected location
return source_file.parent / f'test_{source_file.name}'
def _is_test_file(self, file_path: Path) -> bool:
"""Check if a file is a test file."""
name = file_path.name.lower()
return any([
name.startswith('test_'),
name.endswith('_test.py'),
name.endswith('_test.js'),
name.endswith('.test.js'),
name.endswith('.spec.js'),
'test' in file_path.parts,
'tests' in file_path.parts,
])
def _calculate_tdd_score(self):
"""Calculate an overall TDD compliance score (0-100)."""
if self.metrics['files_analyzed'] == 0:
self.metrics['tdd_score'] = 0.0
return
# Factors that contribute to score
test_coverage_ratio = self.metrics['test_files_found'] / self.metrics['files_analyzed']
smell_penalty = min(self.metrics['code_smells'] * 5, 50) # Max 50 point penalty
# Score calculation
score = (test_coverage_ratio * 100) - smell_penalty
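# Example: 8 of 10 analyzed files with matching tests and 4 smells gives
# (0.8 * 100) - (4 * 5) = 60.0, which the bands below label 'fair'.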
self.metrics['tdd_score'] = max(0.0, min(100.0, score))
def _get_compliance_level(self) -> str:
"""Get human-readable compliance level."""
score = self.metrics['tdd_score']
if score >= 90:
return 'excellent'
elif score >= 75:
return 'good'
elif score >= 50:
return 'fair'
elif score >= 25:
return 'poor'
else:
return 'critical'
def main():
"""Main entry point for the TDD compliance checker."""
if len(sys.argv) < 2:
print("Usage: check_tdd_compliance.py <path>")
print(" path: File or directory to analyze")
sys.exit(1)
path = sys.argv[1]
if not os.path.exists(path):
print(f"Error: Path '{path}' does not exist")
sys.exit(1)
checker = TDDComplianceChecker(path)
results = checker.analyze()
# Output results as JSON
print(json.dumps(results, indent=2))
# Exit with appropriate code
if results['compliance'] in ['critical', 'poor']:
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
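# Illustrative usage (the path is hypothetical):
#   python3 check_tdd_compliance.py src/ > tdd_report.json
# The exit code is 1 for 'poor' or 'critical' compliance, so the checker can gate a CI job or a git hook.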


@@ -0,0 +1,112 @@
#!/bin/bash
#
# Setup TDD Hooks
#
# Installs git hooks and Claude Code hooks for TDD enforcement.
# This script should be run once per project to enable TDD reinforcement.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SKILL_DIR="$(dirname "$SCRIPT_DIR")"
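# Assumes this script lives in <skill>/scripts/, so the skill root holding assets/ and scripts/ is one level up.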
PROJECT_ROOT="${1:-.}"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo "🔧 Setting up TDD hooks for project: $PROJECT_ROOT"
# Check if git repository
if [ ! -d "$PROJECT_ROOT/.git" ]; then
echo -e "${YELLOW}Warning: Not a git repository. Skipping git hooks.${NC}"
GIT_HOOKS=false
else
GIT_HOOKS=true
fi
# Check if Claude Code project
if [ ! -d "$PROJECT_ROOT/.claude" ]; then
echo -e "${YELLOW}Warning: Not a Claude Code project. Creating .claude directory.${NC}"
mkdir -p "$PROJECT_ROOT/.claude"
fi
# Install git pre-commit hook
if [ "$GIT_HOOKS" = true ]; then
echo "📝 Installing git pre-commit hook..."
HOOKS_DIR="$PROJECT_ROOT/.git/hooks"
mkdir -p "$HOOKS_DIR"
# Copy pre-commit hook template
cp "$SKILL_DIR/assets/hook-templates/pre-commit.sh" "$HOOKS_DIR/pre-commit"
chmod +x "$HOOKS_DIR/pre-commit"
echo -e "${GREEN}✅ Git pre-commit hook installed${NC}"
fi
# Install Claude Code user-prompt-submit hook
echo "📝 Installing Claude Code user-prompt-submit hook..."
CLAUDE_HOOKS_DIR="$PROJECT_ROOT/.claude/hooks"
mkdir -p "$CLAUDE_HOOKS_DIR"
# Copy user-prompt-submit hook template
cp "$SKILL_DIR/assets/hook-templates/user-prompt-submit.sh" "$CLAUDE_HOOKS_DIR/user-prompt-submit"
chmod +x "$CLAUDE_HOOKS_DIR/user-prompt-submit"
echo -e "${GREEN}✅ Claude Code user-prompt-submit hook installed${NC}"
# Create or update CLAUDE.md to mention TDD
CLAUDE_MD="$PROJECT_ROOT/.claude/CLAUDE.md"
if [ ! -f "$CLAUDE_MD" ]; then
echo "📝 Creating CLAUDE.md with TDD requirement..."
cat > "$CLAUDE_MD" << 'EOF'
# Project Guidelines
## Development Methodology
**This project uses Test-Driven Development (TDD).**
All code must be developed following the Red-Green-Refactor cycle:
1. 🔴 Red: Write a failing test first
2. 🟢 Green: Write minimal code to make the test pass
3. 🔵 Refactor: Improve code while keeping tests green
The `tdd-methodology-expert` skill is automatically loaded for this project.
EOF
echo -e "${GREEN}✅ CLAUDE.md created with TDD requirement${NC}"
else
# Check if TDD is already mentioned
if ! grep -qE "TDD|Test-Driven Development" "$CLAUDE_MD"; then
echo "📝 Updating CLAUDE.md with TDD requirement..."
echo "" >> "$CLAUDE_MD"
echo "## Development Methodology" >> "$CLAUDE_MD"
echo "" >> "$CLAUDE_MD"
echo "**This project uses Test-Driven Development (TDD).**" >> "$CLAUDE_MD"
echo "" >> "$CLAUDE_MD"
echo "All code must be developed following the Red-Green-Refactor cycle." >> "$CLAUDE_MD"
echo "The \`tdd-methodology-expert\` skill is automatically loaded for this project." >> "$CLAUDE_MD"
echo -e "${GREEN}✅ CLAUDE.md updated with TDD requirement${NC}"
else
echo -e "${GREEN}✅ CLAUDE.md already mentions TDD${NC}"
fi
fi
# Make scripts executable
chmod +x "$SKILL_DIR/scripts/"*.py
echo ""
echo -e "${GREEN}✅ TDD hooks setup complete!${NC}"
echo ""
echo "The following hooks have been installed:"
if [ "$GIT_HOOKS" = true ]; then
echo " • Git pre-commit hook: Validates TDD compliance before commits"
fi
echo " • Claude Code user-prompt-submit hook: Reinforces TDD in every interaction"
echo ""
echo "To verify installation, run:"
if [ "$GIT_HOOKS" = true ]; then
echo " git hook run pre-commit"
fi
echo " cat $PROJECT_ROOT/.claude/hooks/user-prompt-submit"


@@ -0,0 +1,274 @@
#!/usr/bin/env python3
"""
Test Validator
Validates that tests exist, are properly structured, and follow TDD principles.
"""
import os
import re
import sys
import json
from pathlib import Path
from typing import Dict, List
class TestValidator:
"""Validates test files for TDD compliance."""
def __init__(self, path: str):
self.path = Path(path)
self.results = {
'valid': True,
'test_files': [],
'issues': [],
'stats': {
'total_tests': 0,
'test_files_found': 0,
'well_structured': 0
}
}
def validate(self) -> Dict:
"""Run full test validation."""
test_files = self._find_test_files()
if not test_files:
self.results['valid'] = False
self.results['issues'].append({
'type': 'no_tests',
'severity': 'critical',
'message': 'No test files found. TDD requires writing tests first.'
})
return self.results
for test_file in test_files:
self._validate_test_file(test_file)
return self.results
def _find_test_files(self) -> List[Path]:
"""Find all test files in the path."""
test_files = []
if self.path.is_file():
if self._is_test_file(self.path):
test_files.append(self.path)
else:
for file_path in self.path.rglob('*'):
if file_path.is_file() and self._is_test_file(file_path):
test_files.append(file_path)
return test_files
def _is_test_file(self, file_path: Path) -> bool:
"""Check if a file is a test file."""
name = file_path.name.lower()
return any([
name.startswith('test_'),
name.endswith('_test.py'),
name.endswith('_test.js'),
name.endswith('.test.js'),
name.endswith('.test.ts'),
name.endswith('.spec.js'),
name.endswith('.spec.ts'),
name.endswith('Test.java'),
name.endswith('_test.go'),
'test' in file_path.parts,
'tests' in file_path.parts,
])
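# Note: the directory checks above treat any file under test/ or tests/ as a test file, including helpers and fixtures.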
def _validate_test_file(self, test_file: Path):
"""Validate a single test file."""
self.results['stats']['test_files_found'] += 1
file_result = {
'file': str(test_file),
'tests_found': 0,
'issues': []
}
try:
with open(test_file, 'r', encoding='utf-8') as f:
content = f.read()
# Count test cases
test_count = self._count_tests(content, test_file.suffix)
file_result['tests_found'] = test_count
self.results['stats']['total_tests'] += test_count
if test_count == 0:
file_result['issues'].append({
'type': 'empty_test_file',
'severity': 'high',
'message': 'Test file contains no test cases'
})
self.results['valid'] = False
# Check test structure
structure_issues = self._check_test_structure(content, test_file)
file_result['issues'].extend(structure_issues)
if not structure_issues:
self.results['stats']['well_structured'] += 1
# Check for TDD patterns
tdd_issues = self._check_tdd_patterns(content, test_file)
file_result['issues'].extend(tdd_issues)
except Exception as e:
file_result['issues'].append({
'type': 'error',
'severity': 'high',
'message': f'Failed to validate test file: {str(e)}'
})
self.results['valid'] = False
self.results['test_files'].append(file_result)
# Aggregate issues
for issue in file_result['issues']:
if issue['severity'] in ['critical', 'high']:
self.results['valid'] = False
self.results['issues'].append({
'file': str(test_file),
**issue
})
def _count_tests(self, content: str, extension: str) -> int:
"""Count the number of test cases in the file."""
count = 0
# Language-specific test detection patterns
patterns = {
'.py': [r'def test_\w+', r'@pytest\.mark\.'],
'.js': [r'\b(test|it)\s*\(', r'\bdescribe\s*\('],  # \b avoids substring hits such as unit(
'.ts': [r'\b(test|it)\s*\(', r'\bdescribe\s*\('],
'.java': [r'@Test', r'public\s+void\s+test\w+'],
'.go': [r'func\s+Test\w+'],
'.rb': [r'\b(it|test)\s+["\']', r'\bdescribe\s+["\']'],
'.php': [r'public\s+function\s+test\w+', r'@test'],
}
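# Example: a pytest module with three 'def test_' functions and one '@pytest.mark.parametrize'
# decorator counts as 4 here; the number is a rough signal, not an exact test-case total.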
if extension in patterns:
for pattern in patterns[extension]:
count += len(re.findall(pattern, content))
return count
def _check_test_structure(self, content: str, test_file: Path) -> List[Dict]:
"""Check if tests follow good structure patterns."""
issues = []
# Check for Arrange-Act-Assert pattern (AAA)
lines = content.split('\n')
# Look for test functions
test_functions = self._extract_test_functions(content, test_file.suffix)
for func_name, func_body in test_functions:
# Check if test is too long (suggests poor structure)
func_lines = func_body.split('\n')
if len(func_lines) > 30:
issues.append({
'type': 'long_test',
'severity': 'medium',
'test': func_name,
'message': f'Test "{func_name}" is {len(func_lines)} lines long. Consider breaking it down.'
})
# Check for multiple assertions in one test (might indicate poor isolation)
assertion_count = len(re.findall(r'assert|expect|should', func_body, re.IGNORECASE))
if assertion_count > 5:
issues.append({
'type': 'multiple_assertions',
'severity': 'low',
'test': func_name,
'message': f'Test "{func_name}" has {assertion_count} assertions. Consider splitting into focused tests.'
})
return issues
def _extract_test_functions(self, content: str, extension: str) -> List[tuple]:
"""Extract test function names and bodies."""
functions = []
# Simple extraction for Python
if extension == '.py':
pattern = r'def (test_\w+)\s*\([^)]*\):\s*\n((?: .*\n)*)'
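# The body capture expects space-indented lines directly under the def, so tab-indented bodies are not picked up.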
matches = re.finditer(pattern, content)
for match in matches:
functions.append((match.group(1), match.group(2)))
# Simple extraction for JavaScript/TypeScript
elif extension in ['.js', '.ts']:
pattern = r'(test|it)\s*\([\'"]([^\'"]+)[\'"].*?\{([^}]*)\}'
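# [^}]* stops at the first closing brace, so bodies with nested callbacks or blocks are truncated there.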
matches = re.finditer(pattern, content, re.DOTALL)
for match in matches:
functions.append((match.group(2), match.group(3)))
return functions
def _check_tdd_patterns(self, content: str, test_file: Path) -> List[Dict]:
"""Check for patterns that indicate TDD was followed."""
issues = []
# Check for test-first indicators
# Red-Green-Refactor should result in:
# 1. Tests that clearly express intent
# 2. Minimal production code to make tests pass
# 3. Clear test names that describe behavior
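# e.g. a name like test_rejects_expired_token_when_clock_skew_exceeds_limit reads as behavior,
# while a short name such as test_1 gets flagged below (illustrative names).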
test_functions = self._extract_test_functions(content, test_file.suffix)
for func_name, func_body in test_functions:
# Check for descriptive test names
if len(func_name) < 10 or not any(word in func_name.lower() for word in ['should', 'when', 'given', 'test']):
issues.append({
'type': 'poor_test_name',
'severity': 'low',
'test': func_name,
'message': f'Test name "{func_name}" is not descriptive. TDD encourages behavior-focused names.'
})
# Check for setup/teardown patterns
if not any(keyword in content for keyword in ['setUp', 'beforeEach', 'before', 'setup', 'fixture']):
# Only flag if multiple tests exist
if len(test_functions) > 3:
issues.append({
'type': 'missing_setup',
'severity': 'low',
'message': 'No setup/fixture detected. Consider DRY principle in test arrangement.'
})
break # Only report once per file
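# Note: this break also ends the loop, so any remaining tests in the file skip the name check above.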
return issues
def main():
"""Main entry point for the test validator."""
if len(sys.argv) < 2:
print("Usage: validate_tests.py <path>")
print(" path: File or directory containing tests to validate")
sys.exit(1)
path = sys.argv[1]
if not os.path.exists(path):
print(f"Error: Path '{path}' does not exist")
sys.exit(1)
validator = TestValidator(path)
results = validator.validate()
# Output results as JSON
print(json.dumps(results, indent=2))
# Exit with appropriate code
if not results['valid']:
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
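# Illustrative usage (the path is hypothetical):
#   python3 validate_tests.py tests/
# 'valid' flips to false, and the process exits 1, once any critical or high severity issue is recorded.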