Initial commit

Zhongwei Li
2025-11-30 08:40:11 +08:00
commit 8b119df38b
11 changed files with 853 additions and 0 deletions

@@ -0,0 +1,108 @@
---
name: code-execution
description: Execute Python code locally with marketplace API access for 90%+ token savings on bulk operations. Activates when the user requests bulk operations (10+ files), complex multi-step workflows, or iterative processing, or mentions efficiency/performance.
---
# Code Execution
Execute Python locally with API access. **90-99% token savings** for bulk operations.
## When to Use
- Bulk operations (10+ files)
- Complex multi-step workflows
- Iterative processing across many files
- User mentions efficiency/performance
## How to Use
Use direct Python imports in Claude Code:
```python
from execution_runtime import fs, code, transform, git

# Code analysis (metadata only!)
functions = code.find_functions('app.py', pattern='handle_.*', regex=True)

# File operations
code_block = fs.copy_lines('source.py', 10, 20)
fs.paste_code('target.py', 50, code_block)

# Bulk transformations
result = transform.rename_identifier('.', 'oldName', 'newName', '**/*.py')

# Git operations
git.git_add(['.'])
git.git_commit('feat: refactor code')
```
**If not installed:** Run `~/.claude/plugins/marketplaces/mhattingpete-claude-skills/execution-runtime/setup.sh`
## Available APIs
- **Filesystem** (`fs`): copy_lines, paste_code, search_replace, batch_copy
- **Code Analysis** (`code`): find_functions, find_classes, analyze_dependencies - returns METADATA only!
- **Transformations** (`transform`): rename_identifier, remove_debug_statements, batch_refactor
- **Git** (`git`): git_status, git_add, git_commit, git_push
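Some of these calls never appear in the examples below. A minimal sketch of how they might be used (argument names and return shapes are assumptions inferred from the names, not confirmed signatures):
```python
from execution_runtime import fs, code, git

# search_replace: assumed to take a path plus old/new text
fs.search_replace('config.py', 'DEBUG = True', 'DEBUG = False')

# find_classes: assumed to mirror find_functions and return metadata only
classes = code.find_classes('app.py', pattern='.*Handler', regex=True)

# git_status: assumed to return a structured summary rather than raw output
status = git.git_status()
```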
## Pattern
1. **Analyze locally** (metadata only, not source)
2. **Process locally** (run all operations inside the execution environment)
3. **Return summary** (not data!)
## Examples
**Bulk refactor (50 files):**
```python
from execution_runtime import transform
result = transform.rename_identifier('.', 'oldName', 'newName', '**/*.py')
# Returns: {'files_modified': 50, 'total_replacements': 247}
```
**Extract functions:**
```python
from execution_runtime import code, fs
functions = code.find_functions('app.py', pattern='.*_util$', regex=True)  # Metadata only!
for func in functions:
code_block = fs.copy_lines('app.py', func['start_line'], func['end_line'])
fs.paste_code('utils.py', -1, code_block)
result = {'functions_moved': len(functions)}
```
**Code audit (100 files):**
```python
from execution_runtime import code
from pathlib import Path
files = list(Path('.').glob('**/*.py'))
issues = []
for file in files:
deps = code.analyze_dependencies(str(file)) # Metadata only!
if deps.get('complexity', 0) > 15:
issues.append({'file': str(file), 'complexity': deps['complexity']})
result = {'files_audited': len(files), 'high_complexity': len(issues)}
```
## Best Practices
✅ Return summaries, not data
✅ Use the `code` analysis API (returns metadata, not source)
✅ Batch operations instead of per-file round trips
✅ Handle errors and return an error count (see the sketch below)
❌ Don't return all code to context
❌ Don't read full source when you only need metadata
❌ Don't process files one by one
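**Error handling sketch** (a minimal illustration, assuming API failures raise ordinary Python exceptions; that behavior is an assumption, not documented):
```python
from execution_runtime import code
from pathlib import Path

analyzed = 0
errors = 0
for file in Path('.').glob('**/*.py'):
    try:
        code.analyze_dependencies(str(file))  # metadata only
        analyzed += 1
    except Exception:
        errors += 1  # count failures instead of surfacing full tracebacks

result = {'files_analyzed': analyzed, 'errors': errors}
```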
## Token Savings
| Files | Traditional (tokens) | Execution (tokens) | Savings |
|-------|----------------------|--------------------|---------|
| 10    | 5K                   | 500                | 90%     |
| 50    | 25K                  | 600                | 97.6%   |
| 100   | 150K                 | 1K                 | 99.3%   |

@@ -0,0 +1,23 @@
"""
Example: Bulk Refactoring Across Entire Codebase
This example shows how to rename an identifier across all Python files
in a project with maximum efficiency.
"""
from api.code_transform import rename_identifier
# Rename function across all Python files
result = rename_identifier(
pattern='.', # Current directory
old_name='getUserData',
new_name='fetchUserData',
file_pattern='**/*.py', # All Python files recursively
regex=False # Exact identifier match
)
# Result contains summary only (not all file contents!)
# Token usage: ~500 tokens total
# vs ~25,000 tokens with the traditional approach
print(f"Modified {result['files_modified']} files")
print(f"Total replacements: {result['total_replacements']}")

@@ -0,0 +1,76 @@
"""
Example: Comprehensive Codebase Audit
Analyze code quality across entire project with minimal tokens.
"""
from api.code_analysis import analyze_dependencies, find_unused_imports
from pathlib import Path
# Find all Python files
files = list(Path('.').glob('**/*.py'))
print(f"Analyzing {len(files)} files...")
issues = {
    'high_complexity': [],
    'unused_imports': [],
    'large_files': []
}
# Analyze each file (metadata only, not source!)
total_lines = 0
for file in files:
    file_str = str(file)

    # Get complexity metrics
    deps = analyze_dependencies(file_str)
    total_lines += deps.get('lines', 0)
# Flag high complexity
if deps.get('complexity', 0) > 15:
issues['high_complexity'].append({
'file': file_str,
'complexity': deps['complexity'],
'functions': deps['functions'],
'avg_complexity': deps.get('avg_complexity_per_function', 0)
})
# Flag large files
if deps.get('lines', 0) > 500:
issues['large_files'].append({
'file': file_str,
'lines': deps['lines'],
'functions': deps['functions']
})
# Find unused imports
unused = find_unused_imports(file_str)
if unused:
issues['unused_imports'].append({
'file': file_str,
'count': len(unused),
'imports': unused
})
# Return summary (NOT all the data!)
result = {
'files_audited': len(files),
    'total_lines': total_lines,
'issues': {
'high_complexity': len(issues['high_complexity']),
'unused_imports': len(issues['unused_imports']),
'large_files': len(issues['large_files'])
},
'top_complexity_issues': sorted(
issues['high_complexity'],
key=lambda x: x['complexity'],
reverse=True
)[:5] # Only top 5
}
print(f"\\nAudit complete:")
print(f" High complexity files: {result['issues']['high_complexity']}")
print(f" Files with unused imports: {result['issues']['unused_imports']}")
print(f" Large files (>500 lines): {result['issues']['large_files']}")
# Token usage: ~2,000 tokens for 100 files
# vs ~150,000 tokens loading all files into context
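
# Optional follow-up sketch (standard library only): persist the full issue
# lists to disk instead of returning them, keeping the in-context summary small.
import json
Path('audit_report.json').write_text(json.dumps(issues, indent=2))
print("Full details written to audit_report.json")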

@@ -0,0 +1,36 @@
"""
Example: Extract Functions to New File
Shows how to find and move functions to a separate file
with minimal token usage.
"""
from api.code_analysis import find_functions
from api.filesystem import copy_lines, paste_code, read_file, write_file
# Find utility functions (returns metadata ONLY, not source code)
functions = find_functions('app.py', pattern='.*_util$', regex=True)
print(f"Found {len(functions)} utility functions")
# Extract imports from original file
content = read_file('app.py')
imports = [line for line in content.splitlines()
if line.strip().startswith(('import ', 'from '))]
# Create new utils.py with the deduplicated imports (order preserved)
write_file('utils.py', '\n'.join(dict.fromkeys(imports)) + '\n\n')
# Copy each function to utils.py
for func in functions:
print(f" Moving {func['name']} (lines {func['start_line']}-{func['end_line']})")
code = copy_lines('app.py', func['start_line'], func['end_line'])
    paste_code('utils.py', -1, code + '\n\n')  # -1 = append to end
result = {
'functions_extracted': len(functions),
'function_names': [f['name'] for f in functions]
}
# Token usage: ~800 tokens
# vs ~15,000 tokens reading full file into context
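
# Optional follow-up sketch: the loop above copies each function but leaves
# the original in app.py. Remove them with the same read_file/write_file
# helpers (signatures assumed from their use above); delete from the bottom
# up so earlier line numbers stay valid.
lines = read_file('app.py').splitlines(keepends=True)
for func in sorted(functions, key=lambda f: f['start_line'], reverse=True):
    del lines[func['start_line'] - 1:func['end_line']]
write_file('app.py', ''.join(lines))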