Initial commit
This commit is contained in:
@@ -0,0 +1,5 @@
|
||||
"""
|
||||
Bulletproof React Analyzers
|
||||
|
||||
Specialized analyzers for different aspects of Bulletproof React compliance.
|
||||
"""
|
||||
@@ -0,0 +1,72 @@
|
||||
"""
|
||||
API Layer Analyzer
|
||||
|
||||
Analyzes API organization against Bulletproof React patterns:
|
||||
- Centralized API client
|
||||
- Type-safe request declarations
|
||||
- Colocated in features/
|
||||
- Data fetching hooks
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
import re
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """Analyze API layer architecture.

    Checks for a centralized API client (src/lib/ or an api-client.* file)
    and for fetch() calls scattered outside api-related modules.

    Args:
        codebase_path: Root of the React codebase.
        metadata: Project metadata from the discovery phase (unused here;
            kept for analyzer-interface consistency).

    Returns:
        List of finding dicts with severity and migration guidance.
    """
    findings: List[Dict] = []
    src_dir = codebase_path / 'src'

    if not src_dir.exists():
        return findings

    # Check for centralized API client
    has_api_config = (src_dir / 'lib').exists() or any(src_dir.rglob('api-client.*'))
    if not has_api_config:
        findings.append({
            'severity': 'medium',
            'category': 'api',
            'title': 'No centralized API client detected',
            'current_state': 'No api-client configuration found in src/lib/',
            'target_state': 'Create single configured API client instance',
            'migration_steps': [
                'Create src/lib/api-client.ts with axios or fetch wrapper',
                'Configure base URL, headers, interceptors',
                'Export configured client',
                'Use in all API calls'
            ],
            'effort': 'low',
        })

    # Check for scattered fetch calls.
    # BUG FIX: pathlib globs do not support brace expansion, so the original
    # pattern '*.{ts,tsx,js,jsx}' matched nothing; glob each suffix instead.
    fetch_call = re.compile(r'\bfetch\s*\(')  # hoisted out of the loop
    scattered_fetches = []
    for pattern in ('*.ts', '*.tsx', '*.js', '*.jsx'):
        for file in src_dir.rglob(pattern):
            if 'test' in str(file) or 'spec' in str(file):
                continue
            try:
                content = file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip, best-effort scan
            if fetch_call.search(content) and 'api' not in str(file).lower():
                scattered_fetches.append(str(file.relative_to(src_dir)))

    if len(scattered_fetches) > 3:
        findings.append({
            'severity': 'high',
            'category': 'api',
            'title': f'Scattered fetch calls in {len(scattered_fetches)} files',
            'current_state': 'fetch() calls throughout components',
            'target_state': 'Centralize API calls in feature api/ directories',
            'migration_steps': [
                'Create api/ directory in each feature',
                'Move API calls to dedicated functions',
                'Create custom hooks wrapping API calls',
                'Use React Query or SWR for data fetching'
            ],
            'effort': 'high',
            'affected_files': scattered_fetches[:5],
        })

    return findings
|
||||
@@ -0,0 +1,323 @@
|
||||
"""
|
||||
Component Architecture Analyzer
|
||||
|
||||
Analyzes React component design against Bulletproof React principles:
|
||||
- Component colocation (near where they're used)
|
||||
- Limited props (< 7-10)
|
||||
- Reasonable component size (< 300 LOC)
|
||||
- No nested render functions
|
||||
- Proper composition over excessive props
|
||||
- Consistent naming (kebab-case files)
|
||||
"""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """
    Run every component-architecture check for Bulletproof React compliance.

    Args:
        codebase_path: Path to React codebase
        metadata: Project metadata from discovery phase

    Returns:
        List of findings with severity and migration guidance
    """
    src_dir = codebase_path / 'src'
    if not src_dir.exists():
        return []

    # Each check inspects src/ independently; concatenate their findings.
    checks = (
        check_component_sizes,
        check_component_props,
        check_nested_render_functions,
        check_file_naming_conventions,
        check_component_colocation,
    )
    findings: List[Dict] = []
    for run_check in checks:
        findings += run_check(src_dir)
    return findings
|
||||
|
||||
|
||||
def check_component_sizes(src_dir: Path) -> List[Dict]:
    """Check for overly large components (> 300 non-blank, non-comment lines).

    Args:
        src_dir: Path to the src/ directory of the codebase.

    Returns:
        Findings for the 10 largest offending components, worst first.
    """
    findings: List[Dict] = []
    exclude_dirs = {'node_modules', 'dist', 'build', '.next', 'coverage'}

    large_components = []
    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{tsx,jsx}' pattern matched nothing; glob each suffix separately.
    for pattern in ('*.tsx', '*.jsx'):
        for component_file in src_dir.rglob(pattern):
            if any(excluded in component_file.parts for excluded in exclude_dirs):
                continue
            try:
                lines = component_file.read_text(encoding='utf-8', errors='ignore').splitlines()
            except OSError:
                continue  # unreadable file: skip, best-effort scan
            # Count only non-blank lines that are not '//' line comments.
            loc = sum(1 for line in lines if line.strip() and not line.strip().startswith('//'))

            if loc > 300:
                large_components.append({
                    'file': str(component_file.relative_to(src_dir)),
                    'lines': loc,
                    'severity': 'critical' if loc > 500 else 'high' if loc > 400 else 'medium'
                })

    if large_components:
        # Report the worst offenders
        large_components.sort(key=lambda x: x['lines'], reverse=True)

        for comp in large_components[:10]:  # Top 10 largest
            findings.append({
                'severity': comp['severity'],
                'category': 'components',
                'title': f'Large component ({comp["lines"]} LOC)',
                'current_state': f'{comp["file"]} has {comp["lines"]} lines',
                'target_state': 'Components should be < 300 lines. Large components are hard to understand and test.',
                'migration_steps': [
                    'Identify distinct responsibilities in the component',
                    'Extract smaller components for each UI section',
                    'Move business logic to custom hooks',
                    'Extract complex rendering logic to separate components',
                    'Consider splitting into multiple feature components'
                ],
                'effort': 'high' if comp['lines'] > 400 else 'medium',
                'file': comp['file'],
            })

    return findings
|
||||
|
||||
|
||||
def check_component_props(src_dir: Path) -> List[Dict]:
    """Check for components with excessive props (> 10 destructured props).

    Args:
        src_dir: Path to the src/ directory.

    Returns:
        One finding per component whose destructured props exceed 10.
    """
    findings: List[Dict] = []
    exclude_dirs = {'node_modules', 'dist', 'build', '.next', 'coverage'}

    # Pattern matches: function Component({ prop1, prop2, ... })
    # and: const Component = ({ prop1, prop2, ... }) =>
    # Compiled once here instead of per-file.
    props_pattern = re.compile(
        r'(?:function|const)\s+(\w+)\s*(?:=\s*)?\(\s*\{([^}]+)\}',
        re.MULTILINE
    )

    components_with_many_props = []
    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{tsx,jsx}' pattern matched nothing; glob each suffix separately.
    for pattern in ('*.tsx', '*.jsx'):
        for component_file in src_dir.rglob(pattern):
            if any(excluded in component_file.parts for excluded in exclude_dirs):
                continue
            try:
                content = component_file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip, best-effort scan

            for component_name, props_str in props_pattern.findall(content):
                # Count props (split by comma), ignoring rest/spread entries.
                props = [p.strip() for p in props_str.split(',') if p.strip()]
                actual_props = [p for p in props if not p.startswith('...')]
                prop_count = len(actual_props)

                if prop_count > 10:
                    components_with_many_props.append({
                        'file': str(component_file.relative_to(src_dir)),
                        'component': component_name,
                        'prop_count': prop_count,
                    })

    for comp in components_with_many_props:
        findings.append({
            'severity': 'critical' if comp['prop_count'] > 15 else 'high',
            'category': 'components',
            'title': f'Component with {comp["prop_count"]} props: {comp["component"]}',
            'current_state': f'{comp["file"]} has {comp["prop_count"]} props',
            'target_state': 'Components should accept < 7-10 props. Too many props indicates insufficient composition.',
            'migration_steps': [
                'Group related props into configuration objects',
                'Use composition (children prop) instead of render props',
                'Extract sub-components with their own props',
                'Consider using Context for deeply shared state',
                'Use compound component pattern for complex UIs'
            ],
            'effort': 'medium',
            'file': comp['file'],
        })

    return findings
|
||||
|
||||
|
||||
def check_nested_render_functions(src_dir: Path) -> List[Dict]:
    """Check for nested render functions defined inside components.

    Args:
        src_dir: Path to the src/ directory.

    Returns:
        One finding per file that defines render* helper functions.
    """
    findings: List[Dict] = []
    exclude_dirs = {'node_modules', 'dist', 'build', '.next', 'coverage'}

    # Matches: const renderSomething = () => { ... }
    # and:     function renderSomething() { ... }
    # Compiled once here instead of per-file.
    nested_render_pattern = re.compile(
        r'(?:const|function)\s+(render\w+)\s*[=:]?\s*\([^)]*\)\s*(?:=>)?\s*\{'
    )

    # Map relative file path -> 1-based line numbers of render functions.
    files_with_nested: Dict[str, List[int]] = {}
    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{tsx,jsx}' pattern matched nothing; glob each suffix separately.
    for pattern in ('*.tsx', '*.jsx'):
        for component_file in src_dir.rglob(pattern):
            if any(excluded in component_file.parts for excluded in exclude_dirs):
                continue
            try:
                content = component_file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip, best-effort scan

            rel_path = str(component_file.relative_to(src_dir))
            for line_num, line in enumerate(content.split('\n'), start=1):
                if nested_render_pattern.search(line):
                    files_with_nested.setdefault(rel_path, []).append(line_num)

    for file, lines in files_with_nested.items():
        findings.append({
            'severity': 'medium',
            'category': 'components',
            'title': f'Nested render functions detected ({len(lines)} instances)',
            'current_state': f'{file} contains render functions inside component',
            'target_state': 'Extract nested render functions into separate components for better reusability and testing.',
            'migration_steps': [
                'Identify each render function and its dependencies',
                'Extract to separate component file',
                'Pass necessary props to new component',
                'Update tests to test new component in isolation',
                'Remove render function from parent component'
            ],
            'effort': 'low',
            'file': file,
            'affected_lines': lines[:5],  # Show first 5
        })

    return findings
|
||||
|
||||
|
||||
def check_file_naming_conventions(src_dir: Path) -> List[Dict]:
    """Check for consistent kebab-case file naming.

    Args:
        src_dir: Path to the src/ directory.

    Returns:
        A single finding when more than 5 files break the convention.
    """
    findings: List[Dict] = []
    exclude_dirs = {'node_modules', 'dist', 'build', '.next', 'coverage'}

    # Allow: kebab-case.tsx, lowercase.tsx
    # Disallow: PascalCase.tsx, camelCase.tsx, snake_case.tsx
    # Compiled once here instead of per-file.
    kebab_pattern = re.compile(r'^[a-z][a-z0-9]*(-[a-z0-9]+)*$')

    non_kebab_files = []
    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{ts,tsx,js,jsx}' pattern matched nothing; glob each suffix.
    for pattern in ('*.ts', '*.tsx', '*.js', '*.jsx'):
        for file_path in src_dir.rglob(pattern):
            if any(excluded in file_path.parts for excluded in exclude_dirs):
                continue

            filename = file_path.stem  # filename without extension

            # Allow common exceptions such as index.ts and App.tsx.
            if not kebab_pattern.match(filename) and filename not in ('index', 'App'):
                non_kebab_files.append(str(file_path.relative_to(src_dir)))

    if len(non_kebab_files) > 5:  # Only report if it's a pattern (>5 files)
        findings.append({
            'severity': 'low',
            'category': 'components',
            'title': f'Inconsistent file naming ({len(non_kebab_files)} files)',
            'current_state': f'{len(non_kebab_files)} files not using kebab-case naming',
            'target_state': 'Bulletproof React recommends kebab-case for all files (e.g., user-profile.tsx)',
            'migration_steps': [
                'Rename files to kebab-case format',
                'Update all import statements',
                'Run tests to ensure nothing broke',
                'Add ESLint rule to enforce kebab-case (unicorn/filename-case)'
            ],
            'effort': 'low',
            'affected_files': non_kebab_files[:10],  # Show first 10
        })

    return findings
|
||||
|
||||
|
||||
def check_component_colocation(src_dir: Path) -> List[Dict]:
    """Check if components are colocated near where they're used.

    A component in shared src/components/ that is imported by exactly one
    file, inside exactly one feature, should live in that feature instead.

    Args:
        src_dir: Path to the src/ directory.

    Returns:
        Findings (at most 5) for single-use shared components.
    """
    findings: List[Dict] = []

    components_dir = src_dir / 'components'
    if not components_dir.exists():
        return findings

    # Find components in shared components/ that are only used once.
    single_use_components = []
    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{tsx,jsx}' pattern matched nothing; glob each suffix separately.
    for pattern in ('*.tsx', '*.jsx'):
        for component_file in components_dir.rglob(pattern):
            component_name = component_file.stem

            # BUG FIX: the original regex ended with "['\"]/|@/" — the '|'
            # applied to the whole pattern, so ANY file containing '@/'
            # counted as a usage. Match import statements that actually
            # reference this component name (name escaped) instead.
            import_pattern = re.compile(
                rf'import\b[^\n]*\b{re.escape(component_name)}\b[^\n]*\bfrom\b'
            )
            usage_count = 0
            used_in_feature = None

            for search_pattern in ('*.ts', '*.tsx', '*.js', '*.jsx'):
                for search_file in src_dir.rglob(search_pattern):
                    if search_file == component_file:
                        continue
                    try:
                        content = search_file.read_text(encoding='utf-8', errors='ignore')
                    except OSError:
                        continue  # unreadable file: skip, best-effort scan
                    if not import_pattern.search(content):
                        continue
                    usage_count += 1

                    # Track which feature (if any) the importing file lives in.
                    if 'features' in search_file.parts:
                        features_index = search_file.parts.index('features')
                        if features_index + 1 < len(search_file.parts):
                            feature_name = search_file.parts[features_index + 1]
                            if used_in_feature is None:
                                used_in_feature = feature_name
                            elif used_in_feature != feature_name:
                                used_in_feature = 'multiple'

            # If used only in one feature, it should be colocated there.
            if usage_count == 1 and used_in_feature and used_in_feature != 'multiple':
                single_use_components.append({
                    'file': str(component_file.relative_to(src_dir)),
                    'component': component_name,
                    'feature': used_in_feature,
                })

    for comp in single_use_components[:5]:  # Top 5
        findings.append({
            'severity': 'low',
            'category': 'components',
            'title': f'Component used in only one feature: {comp["component"]}',
            'current_state': f'{comp["file"]} is in shared components/ but only used in {comp["feature"]} feature',
            'target_state': 'Components used by only one feature should be colocated in that feature directory.',
            'migration_steps': [
                f'Move {comp["file"]} to src/features/{comp["feature"]}/components/',
                'Update import in the feature',
                'Run tests to verify',
                'Remove from shared components/'
            ],
            'effort': 'low',
            'file': comp['file'],
        })

    return findings
|
||||
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
Error Handling Analyzer
|
||||
|
||||
Analyzes error handling patterns:
|
||||
- Error boundaries present
|
||||
- API error interceptors
|
||||
- Error tracking (Sentry)
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
import re
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """Analyze error handling patterns.

    Flags a missing ErrorBoundary component and a missing error-tracking
    service (based on discovery-phase tech_stack metadata).

    Args:
        codebase_path: Root of the React codebase.
        metadata: Discovery metadata; reads metadata['tech_stack'].

    Returns:
        List of finding dicts.
    """
    results: List[Dict] = []
    src_dir = codebase_path / 'src'
    tech_stack = metadata.get('tech_stack', {})

    if not src_dir.exists():
        return results

    # Accept either kebab-case or PascalCase boundary file names.
    boundary_files = [
        *src_dir.rglob('**/error-boundary.*'),
        *src_dir.rglob('**/ErrorBoundary.*'),
    ]

    if not boundary_files:
        results.append({
            'severity': 'high',
            'category': 'errors',
            'title': 'No error boundaries detected',
            'current_state': 'No ErrorBoundary components found',
            'target_state': 'Implement multiple error boundaries at strategic locations',
            'migration_steps': [
                'Create ErrorBoundary component with componentDidCatch',
                'Wrap route components with ErrorBoundary',
                'Add feature-level error boundaries',
                'Display user-friendly error messages'
            ],
            'effort': 'low',
        })

    if not tech_stack.get('sentry'):
        results.append({
            'severity': 'medium',
            'category': 'errors',
            'title': 'No error tracking service detected',
            'current_state': 'No Sentry or similar error tracking',
            'target_state': 'Use Sentry for production error monitoring',
            'migration_steps': [
                'Sign up for Sentry',
                'Install @sentry/react',
                'Configure Sentry.init() in app entry',
                'Add user context and tags',
                'Set up error alerts'
            ],
            'effort': 'low',
        })

    return results
|
||||
@@ -0,0 +1,76 @@
|
||||
"""
|
||||
Performance Patterns Analyzer
|
||||
|
||||
Analyzes React performance optimizations:
|
||||
- Code splitting at routes
|
||||
- Memoization patterns
|
||||
- Image optimization
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
import re
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """Analyze performance patterns (code splitting and image sizes).

    Args:
        codebase_path: Root of the React codebase.
        metadata: Discovery-phase metadata (unused here; kept for
            analyzer-interface consistency).

    Returns:
        List of finding dicts.
    """
    findings: List[Dict] = []
    src_dir = codebase_path / 'src'

    if not src_dir.exists():
        return findings

    # Check for lazy loading.
    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{ts,tsx,js,jsx}' pattern matched nothing; glob each suffix.
    has_lazy_loading = False
    for pattern in ('*.ts', '*.tsx', '*.js', '*.jsx'):
        for file in src_dir.rglob(pattern):
            try:
                content = file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip, best-effort scan
            if 'React.lazy' in content or 'lazy(' in content:
                has_lazy_loading = True
                break
        if has_lazy_loading:
            break

    if not has_lazy_loading:
        findings.append({
            'severity': 'medium',
            'category': 'performance',
            'title': 'No code splitting detected',
            'current_state': 'No React.lazy() usage found',
            'target_state': 'Use code splitting for routes and large components',
            'migration_steps': [
                'Wrap route components with React.lazy()',
                'Add Suspense boundaries with loading states',
                'Split large features into separate chunks',
                'Analyze bundle size with build tools'
            ],
            'effort': 'low',
        })

    # Check for large images.
    # BUG FIX: same brace-expansion issue with '*.{jpg,jpeg,png,gif}'.
    assets_dir = codebase_path / 'public' / 'assets'
    if assets_dir.exists():
        large_images = []
        for pattern in ('*.jpg', '*.jpeg', '*.png', '*.gif'):
            for img in assets_dir.rglob(pattern):
                size_mb = img.stat().st_size / (1024 * 1024)
                if size_mb > 0.5:  # Larger than 500KB
                    large_images.append((str(img.name), size_mb))

        if large_images:
            findings.append({
                'severity': 'medium',
                'category': 'performance',
                'title': f'{len(large_images)} large images detected',
                'current_state': 'Images larger than 500KB',
                'target_state': 'Optimize images with modern formats and lazy loading',
                'migration_steps': [
                    'Convert to WebP format',
                    'Add lazy loading with loading="lazy"',
                    'Use srcset for responsive images',
                    'Compress images with tools like sharp'
                ],
                'effort': 'low',
            })

    return findings
|
||||
@@ -0,0 +1,369 @@
|
||||
"""
|
||||
Project Structure Analyzer
|
||||
|
||||
Analyzes React project structure against Bulletproof React patterns:
|
||||
- Feature-based organization (src/features/)
|
||||
- Unidirectional dependencies (shared → features → app)
|
||||
- No cross-feature imports
|
||||
- Proper folder hierarchy
|
||||
"""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Set
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """
    Analyze project structure for Bulletproof React compliance.

    Args:
        codebase_path: Path to React codebase
        metadata: Project metadata from discovery phase

    Returns:
        List of findings with severity and migration guidance
    """
    src_dir = codebase_path / 'src'
    if not src_dir.exists():
        # Without src/ nothing else can be inspected; report and stop.
        return [{
            'severity': 'critical',
            'category': 'structure',
            'title': 'Missing src/ directory',
            'current_state': 'No src/ directory found',
            'target_state': 'All source code should be in src/ directory',
            'migration_steps': [
                'Create src/ directory',
                'Move all source files to src/',
                'Update import paths',
                'Update build configuration'
            ],
            'effort': 'medium',
        }]

    # Run every structural check and concatenate the findings.
    checks = (
        check_bulletproof_structure,
        check_cross_feature_imports,
        analyze_features_directory,
        check_shared_code_organization,
        check_architectural_violations,
    )
    findings: List[Dict] = []
    for run_check in checks:
        findings += run_check(src_dir)
    return findings
|
||||
|
||||
|
||||
def check_bulletproof_structure(src_dir: Path) -> List[Dict]:
    """Check for presence of the Bulletproof React folder layout.

    Required directories (app/, features/) yield individual findings;
    missing recommended directories are rolled into a single finding.
    """
    findings: List[Dict] = []

    # Required top-level directories for Bulletproof React
    required = (
        ('app', 'Application layer (routes, app.tsx, provider.tsx, router.tsx)'),
        ('features', 'Feature modules (80%+ of code should be here)'),
    )
    # Recommended directories
    recommended = (
        ('components', 'Shared components used across multiple features'),
        ('hooks', 'Shared custom hooks'),
        ('lib', 'Third-party library configurations'),
        ('utils', 'Shared utility functions'),
        ('types', 'Shared TypeScript types'),
    )

    for name, description in required:
        if (src_dir / name).exists():
            continue
        is_features = name == 'features'
        findings.append({
            'severity': 'critical' if is_features else 'high',
            'category': 'structure',
            'title': f'Missing {name}/ directory',
            'current_state': f'No {name}/ directory found',
            'target_state': f'{name}/ directory should exist: {description}',
            'migration_steps': [
                f'Create src/{name}/ directory',
                f'Organize code according to Bulletproof React {name} pattern',
                'Update imports to use new structure'
            ],
            'effort': 'high' if is_features else 'medium',
        })

    missing_names = [name for name, _ in recommended if not (src_dir / name).exists()]
    if missing_names:
        findings.append({
            'severity': 'medium',
            'category': 'structure',
            'title': 'Missing recommended directories',
            'current_state': f'Missing: {", ".join(missing_names)}',
            'target_state': 'Bulletproof React recommends these directories for shared code',
            'migration_steps': [
                'Create missing directories as needed',
                'Move shared code to appropriate directories',
                'Ensure proper separation between shared and feature-specific code'
            ],
            'effort': 'low',
        })

    return findings
|
||||
|
||||
|
||||
def _feature_of_import(imp):
    """Return the feature name an import path targets, or None.

    Handles alias imports ('@/features/<name>/...') and relative imports
    whose path walks through a 'features' segment ('../features/<name>/...').
    """
    if imp.startswith('@/features/'):
        return imp.split('@/features/')[1].split('/')[0]
    if imp.startswith('../'):
        parts = imp.split('/')
        if 'features' in parts:
            idx = parts.index('features')
            if idx + 1 < len(parts):
                return parts[idx + 1]
    return None


def check_cross_feature_imports(src_dir: Path) -> List[Dict]:
    """Detect cross-feature imports (architectural violation).

    Args:
        src_dir: Path to the src/ directory.

    Returns:
        One finding per (from-feature → to-feature) pair found.
    """
    findings: List[Dict] = []
    features_dir = src_dir / 'features'

    if not features_dir.exists():
        return findings

    # Get all feature directories
    feature_dirs = [d for d in features_dir.iterdir() if d.is_dir() and not d.name.startswith('.')]
    feature_names = {d.name for d in feature_dirs}  # O(1) membership test

    # Compiled once instead of per-file.
    import_pattern = re.compile(r'from\s+[\'"]([^\'\"]+)[\'"]')

    violations = []
    for feature_dir in feature_dirs:
        current_feature = feature_dir.name
        # BUG FIX: pathlib globs have no brace expansion, so the original
        # '*.{ts,tsx,js,jsx}' pattern matched nothing; glob each suffix.
        for pattern in ('*.ts', '*.tsx', '*.js', '*.jsx'):
            for file_path in feature_dir.rglob(pattern):
                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                except OSError:
                    continue  # unreadable file: skip, best-effort scan

                for imp in import_pattern.findall(content):
                    imported_feature = _feature_of_import(imp)
                    if imported_feature is None:
                        continue
                    # Only flag imports that cross into a *different* feature.
                    if imported_feature != current_feature and imported_feature in feature_names:
                        violations.append({
                            'file': str(file_path.relative_to(src_dir)),
                            'from_feature': current_feature,
                            'to_feature': imported_feature,
                            'import': imp
                        })

    if violations:
        # Group violations by (from → to) feature pair.
        grouped: Dict[str, List[str]] = {}
        for v in violations:
            grouped.setdefault(f"{v['from_feature']} → {v['to_feature']}", []).append(v['file'])

        for import_path, files in grouped.items():
            findings.append({
                'severity': 'high',
                'category': 'structure',
                'title': f'Cross-feature import: {import_path}',
                'current_state': f'{len(files)} file(s) import from another feature',
                'target_state': 'Features should be independent. Shared code belongs in src/components/, src/hooks/, or src/utils/',
                'migration_steps': [
                    'Identify what code is being shared between features',
                    'Move truly shared code to src/components/, src/hooks/, or src/utils/',
                    'If code is feature-specific, duplicate it or refactor feature boundaries',
                    'Update imports to use shared code location'
                ],
                'effort': 'medium',
                'affected_files': files[:5],  # Show first 5 files
            })

    return findings
|
||||
|
||||
|
||||
def analyze_features_directory(src_dir: Path) -> List[Dict]:
    """Analyze the internal organization of src/features/.

    Reports an empty features/ directory, or individual features that pile
    more than 5 source files into the feature root without subdirectories.
    """
    features_dir = src_dir / 'features'
    if not features_dir.exists():
        return []

    feature_dirs = [
        entry for entry in features_dir.iterdir()
        if entry.is_dir() and not entry.name.startswith('.')
    ]

    if not feature_dirs:
        return [{
            'severity': 'high',
            'category': 'structure',
            'title': 'Empty features/ directory',
            'current_state': 'features/ directory exists but contains no features',
            'target_state': '80%+ of application code should be organized in feature modules',
            'migration_steps': [
                'Identify distinct features in your application',
                'Create a directory for each feature in src/features/',
                'Move feature-specific code to appropriate feature directories',
                'Organize each feature with api/, components/, hooks/, stores/, types/, utils/ as needed'
            ],
            'effort': 'high',
        }]

    findings: List[Dict] = []
    source_suffixes = {'.ts', '.tsx', '.js', '.jsx'}
    expected_subdirs = ('api', 'components', 'hooks', 'stores', 'types', 'utils')

    # Flag features that dump many files into their root with no structure.
    for feature_dir in feature_dirs:
        feature_name = feature_dir.name
        organized = any((feature_dir / sub).exists() for sub in expected_subdirs)
        loose_files = [
            entry for entry in feature_dir.iterdir()
            if entry.is_file() and entry.suffix in source_suffixes
        ]
        if organized or len(loose_files) <= 5:
            continue
        findings.append({
            'severity': 'medium',
            'category': 'structure',
            'title': f'Feature "{feature_name}" lacks internal organization',
            'current_state': f'{len(loose_files)} files in feature root without subdirectories',
            'target_state': 'Features should be organized with api/, components/, hooks/, stores/, types/, utils/ subdirectories',
            'migration_steps': [
                f'Create subdirectories in src/features/{feature_name}/',
                'Move API calls to api/',
                'Move components to components/',
                'Move hooks to hooks/',
                'Move types to types/',
                'Move utilities to utils/'
            ],
            'effort': 'low',
        })

    return findings
|
||||
|
||||
|
||||
def check_shared_code_organization(src_dir: Path) -> List[Dict]:
    """Check if shared code is properly organized.

    Compares the number of components in shared src/components/ against
    components inside feature modules; flags when more than 40% are shared.

    Args:
        src_dir: Path to the src/ directory.

    Returns:
        At most one finding about excessive shared components.
    """
    findings: List[Dict] = []

    components_dir = src_dir / 'components'
    features_dir = src_dir / 'features'

    if not components_dir.exists():
        return findings

    # BUG FIX: pathlib globs have no brace expansion, so the original
    # '*.{tsx,jsx}' patterns matched nothing; count each suffix separately.
    component_patterns = ('*.tsx', '*.jsx')

    # Count shared components.
    shared_count = sum(
        1 for pattern in component_patterns for _ in components_dir.rglob(pattern)
    )

    # Count feature components (any components/ dir inside features/).
    feature_count = 0
    if features_dir.exists():
        feature_count = sum(
            1 for pattern in component_patterns
            for _ in features_dir.rglob(f'components/**/{pattern}')
        )

    total_components = shared_count + feature_count

    if total_components > 0:
        shared_percentage = (shared_count / total_components) * 100

        # Bulletproof React recommends 80%+ code in features
        if shared_percentage > 40:
            findings.append({
                'severity': 'medium',
                'category': 'structure',
                'title': 'Too many shared components',
                'current_state': f'{shared_percentage:.1f}% of components are in src/components/ (shared)',
                'target_state': 'Most components should be feature-specific. Only truly shared components belong in src/components/',
                'migration_steps': [
                    'Review each component in src/components/',
                    'Identify components used by only one feature',
                    'Move feature-specific components to their feature directories',
                    'Keep only truly shared components in src/components/'
                ],
                'effort': 'medium',
            })

    return findings
|
||||
|
||||
|
||||
def check_architectural_violations(src_dir: Path) -> List[Dict]:
    """Check for common architectural violations.

    Currently detects:
    - Large (>200 line) components in the shared src/components/ directory,
      which usually indicates feature logic leaking into shared code.
    - A src/app/ directory with no centralized routing configuration.

    Args:
        src_dir: Path to the project's src/ directory.

    Returns:
        List of finding dicts (possibly empty).
    """
    findings = []

    # Check for business logic in components/
    components_dir = src_dir / 'components'
    if components_dir.exists():
        large_components = []
        # BUG FIX: pathlib glob has no brace expansion ('*.{tsx,jsx}' matched
        # nothing), so each extension is globbed on its own.
        for pattern in ('*.tsx', '*.jsx'):
            for component_file in components_dir.rglob(pattern):
                try:
                    with open(component_file, 'r', encoding='utf-8', errors='ignore') as f:
                        line_count = len(f.readlines())
                except OSError:
                    # Unreadable file (permissions, broken symlink): skip it.
                    continue
                # >200 lines is the heuristic for "too complex to be shared".
                if line_count > 200:
                    large_components.append((str(component_file.relative_to(src_dir)), line_count))

        if large_components:
            findings.append({
                'severity': 'medium',
                'category': 'structure',
                'title': 'Large components in shared components/',
                'current_state': f'{len(large_components)} component(s) > 200 lines in src/components/',
                'target_state': 'Shared components should be simple and reusable. Complex components likely belong in features/',
                'migration_steps': [
                    'Review large shared components',
                    'Extract business logic to feature-specific hooks or utilities',
                    'Consider moving complex components to features/ if feature-specific',
                    'Keep shared components simple and focused'
                ],
                'effort': 'medium',
                'affected_files': [f[0] for f in large_components[:5]],
            })

    # Check for proper app/ structure
    app_dir = src_dir / 'app'
    if app_dir.exists():
        # Routing may live in a single file or a routes/ directory.
        has_routing = (
            (app_dir / 'router.tsx').exists()
            or (app_dir / 'routes.tsx').exists()
            or (app_dir / 'routes').exists()
        )

        if not has_routing:
            findings.append({
                'severity': 'low',
                'category': 'structure',
                'title': 'Missing routing configuration in app/',
                'current_state': 'No router.tsx or routes/ found in src/app/',
                'target_state': 'Bulletproof React recommends centralizing routing in src/app/router.tsx or src/app/routes/',
                'migration_steps': [
                    'Create src/app/router.tsx or src/app/routes/',
                    'Define all application routes in one place',
                    'Use code splitting for route-level lazy loading'
                ],
                'effort': 'low',
            })

    return findings
|
||||
@@ -0,0 +1,79 @@
|
||||
"""
|
||||
Security Practices Analyzer
|
||||
|
||||
Analyzes React security patterns:
|
||||
- JWT with HttpOnly cookies
|
||||
- Input sanitization
|
||||
- XSS prevention
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
import re
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """Analyze security practices.

    Scans the src/ tree for two common React security problems:
    - Authentication tokens read/written via localStorage (exfiltratable by
      any injected script, i.e. XSS-vulnerable).
    - dangerouslySetInnerHTML usage (an XSS sink if input is unsanitized).

    Args:
        codebase_path: Path to React codebase root.
        metadata: Project metadata from discovery phase (unused here; kept
            for a uniform analyzer interface).

    Returns:
        List of findings with severity and migration guidance.
    """
    findings = []
    src_dir = codebase_path / 'src'

    if not src_dir.exists():
        return findings

    # BUG FIX: pathlib glob has no brace expansion ('*.{ts,tsx,js,jsx}'
    # matched nothing), so each extension is globbed separately.
    source_globs = ('*.ts', '*.tsx', '*.js', '*.jsx')
    markup_globs = ('*.tsx', '*.jsx')

    # Compile once: the pattern is applied to every source file.
    token_pattern = re.compile(
        r'localStorage\.(get|set)Item\s*\(\s*[\'"].*token.*[\'"]\s*\)',
        re.IGNORECASE,
    )

    # Check for localStorage token storage (security risk)
    localstorage_auth = []
    for pattern in source_globs:
        for file in src_dir.rglob(pattern):
            try:
                content = file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip
            if token_pattern.search(content):
                localstorage_auth.append(str(file.relative_to(src_dir)))

    if localstorage_auth:
        findings.append({
            'severity': 'high',
            'category': 'security',
            'title': f'Tokens stored in localStorage ({len(localstorage_auth)} files)',
            'current_state': 'Authentication tokens in localStorage (XSS vulnerable)',
            'target_state': 'Use HttpOnly cookies for JWT storage',
            'migration_steps': [
                'Configure API to set tokens in HttpOnly cookies',
                'Remove localStorage token storage',
                'Use credentials: "include" in fetch requests',
                'Implement CSRF protection'
            ],
            'effort': 'medium',
            'affected_files': localstorage_auth[:3],
        })

    # Check for dangerouslySetInnerHTML
    dangerous_html = []
    for pattern in markup_globs:
        for file in src_dir.rglob(pattern):
            try:
                content = file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip
            if 'dangerouslySetInnerHTML' in content:
                dangerous_html.append(str(file.relative_to(src_dir)))

    if dangerous_html:
        findings.append({
            'severity': 'high',
            'category': 'security',
            'title': f'dangerouslySetInnerHTML usage ({len(dangerous_html)} files)',
            'current_state': 'Using dangerouslySetInnerHTML (XSS risk)',
            'target_state': 'Sanitize HTML input with DOMPurify',
            'migration_steps': [
                'Install dompurify',
                'Sanitize HTML before rendering',
                'Prefer safe alternatives when possible',
                'Add security review for HTML rendering'
            ],
            'effort': 'low',
            'affected_files': dangerous_html[:3],
        })

    return findings
|
||||
@@ -0,0 +1,105 @@
|
||||
"""
|
||||
Standards Compliance Analyzer
|
||||
|
||||
Analyzes project standards:
|
||||
- ESLint configuration
|
||||
- TypeScript strict mode
|
||||
- Prettier setup
|
||||
- Git hooks (Husky)
|
||||
- Naming conventions
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
import json
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """Analyze standards compliance.

    Checks ESLint configuration presence, TypeScript strict mode, Prettier,
    and Husky git hooks.

    Args:
        codebase_path: Path to project root.
        metadata: Project metadata from discovery phase; the 'tech_stack'
            keys 'prettier' and 'husky' are consulted.

    Returns:
        List of findings with severity and migration guidance.
    """
    findings = []
    tech_stack = metadata.get('tech_stack', {})

    # Check ESLint — cover both legacy (.eslintrc*) and flat (eslint.config.*)
    # formats. The original check missed common variants such as .eslintrc,
    # .eslintrc.cjs, .eslintrc.yaml/.yml, and eslint.config.mjs/.cjs.
    eslint_config_names = (
        '.eslintrc',
        '.eslintrc.js',
        '.eslintrc.cjs',
        '.eslintrc.json',
        '.eslintrc.yaml',
        '.eslintrc.yml',
        'eslint.config.js',
        'eslint.config.mjs',
        'eslint.config.cjs',
    )
    eslint_config = any((codebase_path / name).exists() for name in eslint_config_names)

    if not eslint_config:
        findings.append({
            'severity': 'high',
            'category': 'standards',
            'title': 'No ESLint configuration',
            'current_state': 'No .eslintrc or eslint.config found',
            'target_state': 'Configure ESLint with React and TypeScript rules',
            'migration_steps': [
                'Install eslint and plugins',
                'Create .eslintrc.js configuration',
                'Add recommended rules for React and TS',
                'Add lint script to package.json',
                'Fix existing violations'
            ],
            'effort': 'low',
        })

    # Check TypeScript strict mode
    tsconfig = codebase_path / 'tsconfig.json'
    if tsconfig.exists():
        config = None
        try:
            with open(tsconfig, 'r', encoding='utf-8') as f:
                config = json.load(f)
        except (OSError, ValueError):
            # tsconfig.json commonly contains comments/trailing commas (JSONC),
            # which json.load rejects; skip the check rather than crash.
            pass
        if config is not None:
            strict = config.get('compilerOptions', {}).get('strict', False)
            if not strict:
                findings.append({
                    'severity': 'high',
                    'category': 'standards',
                    'title': 'TypeScript strict mode disabled',
                    'current_state': 'strict: false in tsconfig.json',
                    'target_state': 'Enable strict mode for better type safety',
                    'migration_steps': [
                        'Set "strict": true in compilerOptions',
                        'Fix type errors incrementally',
                        'Add explicit return types',
                        'Remove any types'
                    ],
                    'effort': 'high',
                })

    # Check Prettier
    if not tech_stack.get('prettier'):
        findings.append({
            'severity': 'low',
            'category': 'standards',
            'title': 'No Prettier detected',
            'current_state': 'Prettier not in dependencies',
            'target_state': 'Use Prettier for consistent code formatting',
            'migration_steps': [
                'Install prettier',
                'Create .prettierrc configuration',
                'Enable "format on save" in IDE',
                'Run prettier on all files'
            ],
            'effort': 'low',
        })

    # Check Husky
    if not tech_stack.get('husky'):
        findings.append({
            'severity': 'low',
            'category': 'standards',
            'title': 'No git hooks (Husky) detected',
            'current_state': 'No pre-commit hooks',
            'target_state': 'Use Husky for pre-commit linting and testing',
            'migration_steps': [
                'Install husky and lint-staged',
                'Set up pre-commit hook',
                'Run lint and type-check before commits',
                'Prevent bad code from entering repo'
            ],
            'effort': 'low',
        })

    return findings
|
||||
@@ -0,0 +1,199 @@
|
||||
"""
|
||||
State Management Analyzer
|
||||
|
||||
Analyzes React state management against Bulletproof React principles:
|
||||
- Appropriate tool for each state type (component, app, server, form, URL)
|
||||
- State localized when possible
|
||||
- Server cache separated (React Query/SWR)
|
||||
- No global state overuse
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """
    Analyze state management patterns.

    Args:
        codebase_path: Path to React codebase
        metadata: Project metadata from discovery phase

    Returns:
        List of findings with severity and migration guidance
    """
    src_dir = codebase_path / 'src'
    if not src_dir.exists():
        return []

    tech_stack = metadata.get('tech_stack', {})

    # Run each specialized check in order and pool the results.
    findings: List[Dict] = []
    findings += check_state_management_tools(tech_stack)          # global state tooling
    findings += check_data_fetching_library(tech_stack)           # server cache state
    findings += check_form_state_management(src_dir, tech_stack)  # form state
    findings += check_state_patterns(src_dir)                     # anti-patterns
    return findings
|
||||
|
||||
|
||||
def check_state_management_tools(tech_stack: Dict) -> List[Dict]:
    """Check for presence of appropriate state management tools."""
    # Any one of these libraries counts as explicit global state management.
    global_state_libs = ('redux', 'zustand', 'jotai', 'mobx')
    if any(tech_stack.get(lib) for lib in global_state_libs):
        return []

    # No dedicated library found. That can be perfectly fine (Context +
    # hooks), so this is only a low-severity advisory rather than a defect.
    return [{
        'severity': 'low',
        'category': 'state',
        'title': 'No explicit global state management detected',
        'current_state': 'No Redux, Zustand, Jotai, or MobX found',
        'target_state': 'Consider Zustand or Jotai for global state if Context becomes complex. Start with Context + hooks.',
        'migration_steps': [
            'Evaluate if Context API is sufficient for your needs',
            'If Context becomes complex, consider Zustand (simple) or Jotai (atomic)',
            'Avoid Redux unless you need its ecosystem (Redux Toolkit simplifies it)',
            'Keep state as local as possible before going global'
        ],
        'effort': 'low',
    }]
|
||||
|
||||
|
||||
def check_data_fetching_library(tech_stack: Dict) -> List[Dict]:
    """Check for React Query, SWR, or similar for server state."""
    # Server cache state should be owned by a dedicated data-fetching layer.
    data_fetching_libs = ('react-query', 'swr', 'apollo', 'rtk-query')
    if any(tech_stack.get(lib) for lib in data_fetching_libs):
        return []

    # No library at all means server state is almost certainly hand-rolled
    # (manual loading/error flags, no caching), hence high severity.
    return [{
        'severity': 'high',
        'category': 'state',
        'title': 'No data fetching library detected',
        'current_state': 'No React Query, SWR, Apollo Client, or RTK Query found',
        'target_state': 'Use React Query or SWR for server state management (caching, refetching, optimistic updates)',
        'migration_steps': [
            'Install React Query (@tanstack/react-query) or SWR',
            'Wrap app with QueryClientProvider (React Query) or SWRConfig (SWR)',
            'Convert fetch calls to useQuery hooks',
            'Replace manual loading/error states with library patterns',
            'Add staleTime, cacheTime configurations as needed'
        ],
        'effort': 'medium',
    }]
|
||||
|
||||
|
||||
def check_form_state_management(src_dir: Path, tech_stack: Dict) -> List[Dict]:
    """Check for form state management.

    Flags projects that render several <form> elements without a dedicated
    form library (React Hook Form / Formik).

    Args:
        src_dir: Path to the project's src/ directory.
        tech_stack: Detected dependencies keyed by library name.

    Returns:
        List of finding dicts (possibly empty).
    """
    findings = []

    has_form_lib = any([
        tech_stack.get('react-hook-form'),
        tech_stack.get('formik')
    ])

    # Look for form components without form library
    if not has_form_lib:
        form_files = []
        # Compile once; the pattern runs against every component file.
        form_tag = re.compile(r'<form[>\s]', re.IGNORECASE)
        # BUG FIX: pathlib glob has no brace expansion ('*.{tsx,jsx}' matched
        # nothing), so each extension is globbed separately.
        for pattern in ('*.tsx', '*.jsx'):
            for file_path in src_dir.rglob(pattern):
                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                except OSError:
                    continue  # unreadable file: skip
                # Look for <form> tags
                if form_tag.search(content):
                    form_files.append(str(file_path.relative_to(src_dir)))

        if len(form_files) > 3:  # More than 3 forms suggests need for form library
            findings.append({
                'severity': 'medium',
                'category': 'state',
                'title': f'No form library but {len(form_files)} forms detected',
                'current_state': f'{len(form_files)} form components without React Hook Form or Formik',
                'target_state': 'Use React Hook Form for performant form state management',
                'migration_steps': [
                    'Install react-hook-form',
                    'Replace controlled form state with useForm() hook',
                    'Use register() for input registration',
                    'Handle validation with yup or zod schemas',
                    'Reduce re-renders with uncontrolled inputs'
                ],
                'effort': 'medium',
                'affected_files': form_files[:5],
            })

    return findings
|
||||
|
||||
|
||||
def check_state_patterns(src_dir: Path) -> List[Dict]:
    """Check for common state management anti-patterns.

    Flags files that create a Context and hold more than five useState
    values — a likely source of widespread re-renders.

    Args:
        src_dir: Path to the project's src/ directory.

    Returns:
        List of finding dicts, one per oversized Context file.
    """
    findings = []

    # Look for large Context providers (potential performance issue)
    large_contexts = []
    # Compile once; counted in every candidate file.
    use_state_call = re.compile(r'useState\s*\(')
    # BUG FIX: pathlib glob has no brace expansion ('*.{tsx,jsx}' matched
    # nothing), so each extension is globbed separately.
    for pattern in ('*.tsx', '*.jsx'):
        for file_path in src_dir.rglob(pattern):
            try:
                content = file_path.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip

            # Look for Context creation with many values
            if 'createContext' in content:
                # Count useState hooks in the provider
                state_count = len(use_state_call.findall(content))
                # >5 discrete state values in one provider triggers the finding.
                if state_count > 5:
                    large_contexts.append({
                        'file': str(file_path.relative_to(src_dir)),
                        'state_count': state_count
                    })

    for ctx in large_contexts:
        findings.append({
            'severity': 'medium',
            'category': 'state',
            'title': f'Large Context with {ctx["state_count"]} state values',
            'current_state': f'{ctx["file"]} has many state values in one Context',
            'target_state': 'Split large Contexts into smaller, focused Contexts to prevent unnecessary re-renders',
            'migration_steps': [
                'Identify which state values change together',
                'Create separate Contexts for independent state',
                'Use Context composition for related state',
                'Consider Zustand/Jotai for complex global state'
            ],
            'effort': 'medium',
            'file': ctx['file'],
        })

    return findings
|
||||
@@ -0,0 +1,59 @@
|
||||
"""
|
||||
Styling Patterns Analyzer
|
||||
|
||||
Analyzes styling approach against Bulletproof React:
|
||||
- Consistent styling method
|
||||
- Component library usage
|
||||
- Colocated styles
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """Analyze styling patterns."""
    tech_stack = metadata.get('tech_stack', {})

    # Map tech-stack keys to display names; tuple order fixes the order in
    # which detected tools are reported.
    known_tools = (
        ('tailwind', 'Tailwind CSS'),
        ('styled-components', 'styled-components'),
        ('emotion', 'Emotion'),
        ('chakra-ui', 'Chakra UI'),
        ('mui', 'Material UI'),
        ('radix-ui', 'Radix UI'),
    )
    styling_tools = [label for key, label in known_tools if tech_stack.get(key)]

    findings = []
    if not styling_tools:
        # No recognizable styling system at all.
        findings.append({
            'severity': 'low',
            'category': 'styling',
            'title': 'No component library or utility CSS detected',
            'current_state': 'No Tailwind, Chakra UI, Radix UI, or other styling system',
            'target_state': 'Use component library (Chakra, Radix) or utility CSS (Tailwind)',
            'migration_steps': [
                'Choose styling approach based on needs',
                'Install Tailwind CSS (utility-first) or Chakra UI (component library)',
                'Configure theme and design tokens',
                'Migrate components gradually'
            ],
            'effort': 'medium',
        })
    elif len(styling_tools) > 2:
        # More than two coexisting systems suggests drift, not intent.
        findings.append({
            'severity': 'medium',
            'category': 'styling',
            'title': f'Multiple styling approaches ({len(styling_tools)})',
            'current_state': f'Using: {", ".join(styling_tools)}',
            'target_state': 'Standardize on single styling approach',
            'migration_steps': [
                'Choose primary styling system',
                'Create migration plan',
                'Update style guide',
                'Refactor components incrementally'
            ],
            'effort': 'high',
        })

    return findings
|
||||
@@ -0,0 +1,313 @@
|
||||
"""
|
||||
Testing Strategy Analyzer
|
||||
|
||||
Analyzes React testing against Bulletproof React and Connor's standards:
|
||||
- Testing trophy distribution (70% integration, 20% unit, 10% E2E)
|
||||
- 80%+ coverage requirement
|
||||
- Semantic queries (getByRole preferred)
|
||||
- User behavior testing (not implementation details)
|
||||
- Test naming ("should X when Y")
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
def analyze(codebase_path: Path, metadata: Dict) -> List[Dict]:
    """
    Analyze testing strategy and quality.

    Args:
        codebase_path: Path to React codebase
        metadata: Project metadata from discovery phase

    Returns:
        List of findings with severity and migration guidance
    """
    src_dir = codebase_path / 'src'
    if not src_dir.exists():
        return []

    tech_stack = metadata.get('tech_stack', {})

    # Run the specialized checks in order and pool the results.
    findings: List[Dict] = []
    findings += check_testing_framework(tech_stack)       # runner + Testing Library
    findings += check_test_coverage(codebase_path)        # coverage thresholds
    findings += analyze_test_distribution(codebase_path)  # trophy distribution
    findings += check_test_quality(codebase_path)         # anti-patterns
    return findings
|
||||
|
||||
|
||||
def check_testing_framework(tech_stack: Dict) -> List[Dict]:
    """Check for modern testing setup."""
    findings = []

    # A test runner (Vitest or Jest) is the baseline requirement.
    if not (tech_stack.get('vitest') or tech_stack.get('jest')):
        findings.append({
            'severity': 'critical',
            'category': 'testing',
            'title': 'No testing framework detected',
            'current_state': 'No Vitest or Jest found',
            'target_state': 'Use Vitest (modern, fast) or Jest for testing',
            'migration_steps': [
                'Install Vitest (recommended for Vite) or Jest',
                'Install @testing-library/react',
                'Configure test setup file',
                'Add test scripts to package.json',
                'Set up coverage reporting'
            ],
            'effort': 'medium',
        })

    # Testing Library drives user-centric tests (behavior, not implementation).
    if not tech_stack.get('testing-library'):
        findings.append({
            'severity': 'high',
            'category': 'testing',
            'title': 'Testing Library not found',
            'current_state': 'No @testing-library/react detected',
            'target_state': 'Use Testing Library for user-centric testing',
            'migration_steps': [
                'Install @testing-library/react',
                'Install @testing-library/jest-dom for assertions',
                'Use render() and semantic queries (getByRole)',
                'Follow testing-library principles (test behavior, not implementation)'
            ],
            'effort': 'low',
        })

    return findings
|
||||
|
||||
|
||||
def check_test_coverage(codebase_path: Path) -> List[Dict]:
    """Check test coverage if available.

    Reads coverage/coverage-summary.json (Istanbul/c8 format) and grades
    line coverage against the 80% (required) and 90% (aspirational) bars.
    A missing report is itself a finding.

    Args:
        codebase_path: Path to project root.

    Returns:
        List of finding dicts (possibly empty).
    """
    findings = []

    # Look for coverage reports
    coverage_file = codebase_path / 'coverage' / 'coverage-summary.json'

    if not coverage_file.exists():
        findings.append({
            'severity': 'high',
            'category': 'testing',
            'title': 'No coverage report found',
            'current_state': 'Cannot find coverage/coverage-summary.json',
            'target_state': 'Generate coverage reports to track test coverage',
            'migration_steps': [
                'Configure coverage in vitest.config.ts or jest.config.js',
                'Add --coverage flag to test script',
                'Set coverage thresholds (lines: 80, branches: 75)',
                'Add coverage/ to .gitignore',
                'Review coverage reports regularly'
            ],
            'effort': 'low',
        })
        return findings

    try:
        with open(coverage_file, 'r', encoding='utf-8') as f:
            coverage_data = json.load(f)
    except (OSError, ValueError):
        # Unreadable or malformed report: nothing to grade. (The original
        # bare except also silently hid errors in the grading logic below.)
        return findings

    total_coverage = coverage_data.get('total', {})
    line_coverage = total_coverage.get('lines', {}).get('pct', 0)
    branch_coverage = total_coverage.get('branches', {}).get('pct', 0)

    if line_coverage < 80:
        findings.append({
            'severity': 'high',
            'category': 'testing',
            'title': f'Test coverage below 80% ({line_coverage:.1f}%)',
            'current_state': f'Line coverage: {line_coverage:.1f}%, Branch coverage: {branch_coverage:.1f}%',
            'target_state': 'Maintain 80%+ test coverage on all code',
            'migration_steps': [
                'Identify untested files and functions',
                'Prioritize testing critical paths (authentication, payment, data processing)',
                'Write integration tests first (70% of tests)',
                'Add unit tests for complex business logic',
                'Configure coverage thresholds in test config'
            ],
            'effort': 'high',
        })
    elif line_coverage < 90:
        findings.append({
            'severity': 'medium',
            'category': 'testing',
            'title': f'Test coverage at {line_coverage:.1f}%',
            'current_state': f'Coverage is good but could be excellent (current: {line_coverage:.1f}%)',
            'target_state': 'Aim for 90%+ coverage for production-ready code',
            'migration_steps': [
                'Identify remaining untested code paths',
                'Focus on edge cases and error handling',
                'Ensure all critical features have 100% coverage'
            ],
            'effort': 'medium',
        })

    return findings
|
||||
|
||||
|
||||
def analyze_test_distribution(codebase_path: Path) -> List[Dict]:
    """Analyze testing trophy distribution.

    Classifies test files as E2E, integration (component tests in
    .tsx/.jsx), or unit (pure-logic tests in .ts/.js) and compares the
    split against the Testing Trophy target (70% integration, 20% unit,
    10% E2E).

    Args:
        codebase_path: Path to project root.

    Returns:
        List of finding dicts (possibly empty).
    """
    findings = []

    # Count test files by type
    unit_tests = 0
    integration_tests = 0
    e2e_tests = 0

    test_patterns = {
        'e2e': ['e2e/', '.e2e.test.', '.e2e.spec.', 'playwright/', 'cypress/'],
        'integration': ['.test.tsx', '.test.jsx', '.spec.tsx', '.spec.jsx'],  # Component tests
        'unit': ['.test.ts', '.test.js', '.spec.ts', '.spec.js'],  # Logic tests
    }

    # BUG FIX: pathlib glob has no brace expansion, so the old pattern
    # '*.{test,spec}.{ts,tsx,js,jsx}' matched nothing and this function was
    # a no-op. Enumerate the concrete suffix combinations instead.
    test_globs = [
        f'*.{kind}.{ext}'
        for kind in ('test', 'spec')
        for ext in ('ts', 'tsx', 'js', 'jsx')
    ]

    for glob_pattern in test_globs:
        for test_file in codebase_path.rglob(glob_pattern):
            test_path_str = str(test_file)

            # E2E tests
            # NOTE(review): the 'e2e/'-style markers use forward slashes and
            # will not match Windows path separators — TODO confirm target OS.
            if any(pattern in test_path_str for pattern in test_patterns['e2e']):
                e2e_tests += 1
            # Integration tests (component tests with TSX/JSX)
            elif any(pattern in test_path_str for pattern in test_patterns['integration']):
                integration_tests += 1
            # Unit tests (pure logic, no JSX)
            else:
                unit_tests += 1

    total_tests = unit_tests + integration_tests + e2e_tests
    if total_tests == 0:
        return findings

    int_pct = (integration_tests / total_tests) * 100
    unit_pct = (unit_tests / total_tests) * 100
    e2e_pct = (e2e_tests / total_tests) * 100

    # Testing Trophy: 70% integration, 20% unit, 10% E2E
    if int_pct < 50:  # Should be ~70%
        findings.append({
            'severity': 'medium',
            'category': 'testing',
            'title': 'Testing pyramid instead of testing trophy',
            'current_state': f'Distribution: {int_pct:.0f}% integration, {unit_pct:.0f}% unit, {e2e_pct:.0f}% E2E',
            'target_state': 'Testing Trophy: 70% integration, 20% unit, 10% E2E',
            'migration_steps': [
                'Write more integration tests (component + hooks + context)',
                'Test user workflows, not implementation details',
                'Reduce excessive unit tests of simple functions',
                'Keep E2E tests for critical user journeys only',
                'Use Testing Library for integration tests'
            ],
            'effort': 'medium',
        })

    if unit_pct > 40:  # Should be ~20%
        findings.append({
            'severity': 'low',
            'category': 'testing',
            'title': 'Too many unit tests',
            'current_state': f'{unit_pct:.0f}% unit tests (target: ~20%)',
            'target_state': 'Focus on integration tests that provide more confidence',
            'migration_steps': [
                'Review unit tests - many could be integration tests',
                'Combine related unit tests into integration tests',
                'Keep unit tests only for complex business logic',
                'Test components with their hooks and context'
            ],
            'effort': 'low',
        })

    return findings
|
||||
|
||||
|
||||
def check_test_quality(codebase_path: Path) -> List[Dict]:
    """Check for test quality anti-patterns.

    Scans test files for:
    - getByTestId queries (prefer semantic queries like getByRole)
    - exact-count assertions (brittle, coupled to DOM structure)
    - test names not following the "should X when Y" convention

    Args:
        codebase_path: Path to project root.

    Returns:
        List of finding dicts (possibly empty).
    """
    findings = []

    brittle_test_patterns = []
    bad_query_usage = []
    bad_naming = []

    # Compile once; both patterns run against every test file.
    exact_count_re = re.compile(r'expect\([^)]+\)\.toHaveLength\(\d+\)')
    test_name_re = re.compile(r'(?:it|test)\s*\(\s*[\'"]([^\'"]+)[\'"]')

    # BUG FIX: pathlib glob has no brace expansion, so the old pattern
    # '*.{test,spec}.{ts,tsx,js,jsx}' matched nothing and no test file was
    # ever inspected. Enumerate the concrete suffix combinations instead.
    test_globs = [
        f'*.{kind}.{ext}'
        for kind in ('test', 'spec')
        for ext in ('ts', 'tsx', 'js', 'jsx')
    ]

    for glob_pattern in test_globs:
        for test_file in codebase_path.rglob(glob_pattern):
            try:
                content = test_file.read_text(encoding='utf-8', errors='ignore')
            except OSError:
                continue  # unreadable file: skip

            # Check for brittle tests (testing implementation)
            if 'getByTestId' in content:
                bad_query_usage.append(str(test_file))

            # Check for testing exact counts (brittle)
            if exact_count_re.search(content):
                brittle_test_patterns.append(str(test_file))

            # Check test naming ("should X when Y")
            for name in test_name_re.findall(content):
                if not (name.startswith('should ') or 'when' in name.lower()):
                    bad_naming.append((str(test_file), name))

    if bad_query_usage:
        findings.append({
            'severity': 'medium',
            'category': 'testing',
            'title': f'Using getByTestId in {len(bad_query_usage)} test files',
            'current_state': 'Tests use getByTestId instead of semantic queries',
            'target_state': 'Use semantic queries: getByRole, getByLabelText, getByText',
            'migration_steps': [
                'Replace getByTestId with getByRole (most preferred)',
                'Use getByLabelText for form inputs',
                'Use getByText for user-visible content',
                'Only use getByTestId as last resort',
                'Add eslint-plugin-testing-library for enforcement'
            ],
            'effort': 'medium',
            'affected_files': bad_query_usage[:5],
        })

    if brittle_test_patterns:
        findings.append({
            'severity': 'low',
            'category': 'testing',
            'title': f'Brittle test patterns in {len(brittle_test_patterns)} files',
            'current_state': 'Tests check exact counts and DOM structure',
            'target_state': 'Test user behavior and outcomes, not exact DOM structure',
            'migration_steps': [
                'Avoid testing exact element counts',
                'Focus on user-visible behavior',
                'Test functionality, not implementation',
                'Allow flexibility in DOM structure'
            ],
            'effort': 'low',
        })

    if len(bad_naming) > 5:  # More than 5 tests with poor naming
        findings.append({
            'severity': 'low',
            'category': 'testing',
            'title': f'{len(bad_naming)} tests with unclear naming',
            'current_state': 'Test names don\'t follow "should X when Y" pattern',
            'target_state': 'Use descriptive names: "should display error when API fails"',
            'migration_steps': [
                'Rename tests to describe expected behavior',
                'Use pattern: "should [expected behavior] when [condition]"',
                'Make tests self-documenting',
                'Tests should read like requirements'
            ],
            'effort': 'low',
        })

    return findings
|
||||
Reference in New Issue
Block a user