Initial commit
skills/code-execution/examples/bulk_refactor.py (new file, 23 lines)
@@ -0,0 +1,23 @@
"""
Example: Bulk Refactoring Across Entire Codebase

This example shows how to rename an identifier across all Python files
in a project with maximum efficiency.
"""

from api.code_transform import rename_identifier

# Rename a function across all Python files
result = rename_identifier(
    pattern='.',              # Current directory
    old_name='getUserData',
    new_name='fetchUserData',
    file_pattern='**/*.py',   # All Python files, recursively
    regex=False               # Exact identifier match
)

# The result contains a summary only (not all file contents!)
# Token usage: ~500 tokens total,
# vs ~25,000 tokens with the traditional read-edit-write approach
print(f"Modified {result['files_modified']} files")
print(f"Total replacements: {result['total_replacements']}")
skills/code-execution/examples/codebase_audit.py (new file, 76 lines)
@@ -0,0 +1,76 @@
"""
Example: Comprehensive Codebase Audit

Analyze code quality across an entire project with minimal tokens.
"""

from pathlib import Path

from api.code_analysis import analyze_dependencies, find_unused_imports

# Find all Python files
files = list(Path('.').glob('**/*.py'))
print(f"Analyzing {len(files)} files...")

issues = {
    'high_complexity': [],
    'unused_imports': [],
    'large_files': []
}
total_lines = 0

# Analyze each file (metadata only, not source!)
for file in files:
    file_str = str(file)

    # Get complexity metrics (a single analysis pass per file)
    deps = analyze_dependencies(file_str)
    total_lines += deps.get('lines', 0)

    # Flag high complexity
    if deps.get('complexity', 0) > 15:
        issues['high_complexity'].append({
            'file': file_str,
            'complexity': deps['complexity'],
            'functions': deps['functions'],
            'avg_complexity': deps.get('avg_complexity_per_function', 0)
        })

    # Flag large files
    if deps.get('lines', 0) > 500:
        issues['large_files'].append({
            'file': file_str,
            'lines': deps['lines'],
            'functions': deps['functions']
        })

    # Find unused imports
    unused = find_unused_imports(file_str)
    if unused:
        issues['unused_imports'].append({
            'file': file_str,
            'count': len(unused),
            'imports': unused
        })

# Return a summary (NOT all the data!)
result = {
    'files_audited': len(files),
    'total_lines': total_lines,
    'issues': {
        'high_complexity': len(issues['high_complexity']),
        'unused_imports': len(issues['unused_imports']),
        'large_files': len(issues['large_files'])
    },
    'top_complexity_issues': sorted(
        issues['high_complexity'],
        key=lambda x: x['complexity'],
        reverse=True
    )[:5]  # Only the top 5
}

print("\nAudit complete:")
print(f"  High complexity files: {result['issues']['high_complexity']}")
print(f"  Files with unused imports: {result['issues']['unused_imports']}")
print(f"  Large files (>500 lines): {result['issues']['large_files']}")

# Token usage: ~2,000 tokens for 100 files,
# vs ~150,000 tokens loading all files into context
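Editor's note: as a rough illustration of the metadata helpers this audit leans on, here is a hedged sketch of a find_unused_imports-style check built on the standard ast module. The real api.code_analysis implementation may well differ.

import ast
from pathlib import Path

def find_unused_imports(path):
    # Sketch under assumptions: flags imports whose local binding never
    # appears as a Name node anywhere in the module.
    tree = ast.parse(Path(path).read_text())
    imported = {}  # local binding -> imported module/name
    used = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                # 'import os.path' binds the top-level name 'os'
                imported[(alias.asname or alias.name).split('.')[0]] = alias.name
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                imported[alias.asname or alias.name] = alias.name
        elif isinstance(node, ast.Name):
            used.add(node.id)
    return [name for binding, name in imported.items() if binding not in used]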
skills/code-execution/examples/extract_functions.py (new file, 36 lines)
@@ -0,0 +1,36 @@
"""
Example: Extract Functions to a New File

Shows how to find functions and copy them into a separate file
with minimal token usage. Note that app.py itself is left unchanged.
"""

from api.code_analysis import find_functions
from api.filesystem import copy_lines, paste_code, read_file, write_file

# Find utility functions (returns metadata ONLY, not source code)
functions = find_functions('app.py', pattern='.*_util$', regex=True)

print(f"Found {len(functions)} utility functions")

# Extract import statements from the original file
content = read_file('app.py')
imports = [line for line in content.splitlines()
           if line.strip().startswith(('import ', 'from '))]

# Create a new utils.py seeded with those imports
# (dict.fromkeys dedupes while preserving the original order)
write_file('utils.py', '\n'.join(dict.fromkeys(imports)) + '\n\n')

# Copy each function into utils.py
for func in functions:
    print(f"  Copying {func['name']} (lines {func['start_line']}-{func['end_line']})")
    code = copy_lines('app.py', func['start_line'], func['end_line'])
    paste_code('utils.py', -1, code + '\n\n')  # -1 = append to end

result = {
    'functions_extracted': len(functions),
    'function_names': [f['name'] for f in functions]
}

# Token usage: ~800 tokens,
# vs ~15,000 tokens reading the full file into context
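Editor's note: the example assumes 1-based, inclusive line ranges for copy_lines and that paste_code treats -1 as "append". Below is a minimal sketch of those assumed semantics; these are hypothetical implementations, and the actual api.filesystem contract may differ.

from pathlib import Path

def copy_lines(path, start_line, end_line):
    # Assumed: 1-based, inclusive range, matching the example's usage
    lines = Path(path).read_text().splitlines()
    return '\n'.join(lines[start_line - 1:end_line])

def paste_code(path, line, code):
    target = Path(path)
    lines = target.read_text().splitlines()
    if line == -1:
        lines.append(code)  # assumed: -1 = append to end of file
    else:
        lines.insert(line - 1, code)
    target.write_text('\n'.join(lines) + '\n')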