Initial commit
This commit is contained in:
355
skills/claude-context-manager/scripts/auto_update.py
Executable file
355
skills/claude-context-manager/scripts/auto_update.py
Executable file
@@ -0,0 +1,355 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Auto-Update Context - Intelligent Context Synchronization
|
||||
|
||||
Analyzes code changes and autonomously updates context files.
|
||||
Designed to be run by Claude with minimal supervision.
|
||||
|
||||
Usage:
|
||||
python auto_update.py <directory_path> [--analyze-only] [--verbose]
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Set
|
||||
import subprocess
|
||||
import re
|
||||
|
||||
# Make <repo_root>/lib importable so the optional CCMP integration module
# can be found at import time.
# NOTE(review): parents[6] climbs 7 levels above this file; for a layout of
# skills/claude-context-manager/scripts/auto_update.py the repo root would be
# parents[3] — confirm the intended nesting depth.
repo_root = Path(__file__).resolve().parents[6]  # Go up to repo root
sys.path.insert(0, str(repo_root / "lib"))

# Integration is optional: degrade gracefully when the module is absent.
try:
    from ccmp_integration import CCMPIntegration, is_session_active
    INTEGRATION_AVAILABLE = True
except ImportError:
    INTEGRATION_AVAILABLE = False
|
||||
|
||||
def get_recent_changes(dir_path: Path, since_days: int = 30) -> Dict:
    """Summarize files added/modified/deleted under *dir_path* recently.

    Runs ``git diff --name-status`` against ``HEAD~{since_days*4}`` — a rough
    "~4 commits per day" heuristic, not a true date cutoff.

    Returns a dict with 'files_changed' (flat list) and 'summary' (counts);
    when git succeeds it also includes 'details' (per-status lists). On any
    git failure an empty result is returned instead of raising.
    """
    empty = {'files_changed': [], 'summary': {}}
    try:
        result = subprocess.run(
            ['git', 'diff', '--name-status', f'HEAD~{since_days*4}', 'HEAD', '--', str(dir_path)],
            cwd=dir_path,
            capture_output=True,
            text=True
        )
    except (OSError, subprocess.SubprocessError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). git missing/unrunnable -> "no changes".
        return empty

    if result.returncode != 0:
        return empty

    added, modified, deleted = [], [], []
    for change in result.stdout.strip().split('\n'):
        if not change:
            continue
        parts = change.split('\t', 1)
        if len(parts) != 2:
            continue
        status, filepath = parts

        if status.startswith('A'):
            added.append(filepath)
        elif status.startswith('M'):
            modified.append(filepath)
        elif status.startswith('D'):
            deleted.append(filepath)

    return {
        'files_changed': added + modified + deleted,
        'summary': {
            'added': len(added),
            'modified': len(modified),
            'deleted': len(deleted)
        },
        'details': {
            'added': added,
            'modified': modified,
            'deleted': deleted
        }
    }
|
||||
|
||||
def analyze_code_patterns(dir_path: Path) -> Dict:
    """Scan files directly inside *dir_path* for coarse code patterns.

    Returns a dict with:
      - 'file_types': extension -> file count
      - 'common_imports': module names from import statements (first 5 per file)
      - 'naming_patterns': reserved, currently always empty
      - 'frameworks_detected': framework names inferred from file contents

    Unreadable or undecodable files are skipped rather than aborting the scan.
    """
    patterns = {
        'file_types': {},
        'common_imports': set(),
        'naming_patterns': [],
        'frameworks_detected': set()
    }

    for item in dir_path.iterdir():
        if not item.is_file() or item.name.startswith('.'):
            continue
        ext = item.suffix
        patterns['file_types'][ext] = patterns['file_types'].get(ext, 0) + 1

        # Only source files are inspected for imports/frameworks.
        if ext not in ('.py', '.js', '.ts', '.jsx', '.tsx'):
            continue
        try:
            content = item.read_text()
        except (OSError, UnicodeDecodeError):
            # Narrowed from a bare `except: pass` — skip unreadable files only.
            continue

        lowered = content.lower()  # hoisted: was recomputed per framework check

        if ext == '.py':
            imports = re.findall(r'^\s*(?:from|import)\s+([a-zA-Z_][a-zA-Z0-9_]*)', content, re.MULTILINE)
            patterns['common_imports'].update(imports[:5])  # first 5 found
            if 'fastapi' in lowered:
                patterns['frameworks_detected'].add('FastAPI')
            if 'flask' in lowered:
                patterns['frameworks_detected'].add('Flask')
        else:
            # JavaScript/TypeScript: capture both `from '...'` and `require('...')`.
            imports = re.findall(r'(?:from|require\()\s*[\'"]([^\'\"]+)', content)
            patterns['common_imports'].update(imports[:5])
            if 'react' in lowered:
                patterns['frameworks_detected'].add('React')
            if 'express' in lowered:
                patterns['frameworks_detected'].add('Express')
            if 'vue' in lowered:
                patterns['frameworks_detected'].add('Vue')

    # Sets are not JSON-serializable; expose plain lists to callers.
    patterns['common_imports'] = list(patterns['common_imports'])
    patterns['frameworks_detected'] = list(patterns['frameworks_detected'])

    return patterns
|
||||
|
||||
def read_existing_context(context_file: Path) -> str:
    """Return the contents of *context_file*, or '' when it does not exist."""
    return context_file.read_text() if context_file.exists() else ""
|
||||
|
||||
def needs_update(existing_context: str, current_patterns: Dict, recent_changes: Dict) -> Dict:
    """Decide whether the context file should be refreshed.

    Returns a dict with 'should_update' (bool), 'reasons' (human-readable
    strings) and 'sections_to_update' (section names, may contain duplicates
    when several frameworks are new).
    """
    update_needed = {
        'should_update': False,
        'reasons': [],
        'sections_to_update': []
    }

    # Significant churn in the directory suggests the context is behind.
    summary = recent_changes.get('summary', {})  # tolerate a missing key
    total_changes = sum(summary.get(k, 0) for k in ('added', 'modified', 'deleted'))

    if total_changes > 5:
        update_needed['should_update'] = True
        update_needed['reasons'].append(f'{total_changes} files changed')
        update_needed['sections_to_update'].append('File Types')
        update_needed['sections_to_update'].append('Key Files')

    # A framework the context never mentions is a strong signal.
    for framework in current_patterns.get('frameworks_detected', []):
        if framework not in existing_context:
            update_needed['should_update'] = True
            update_needed['reasons'].append(f'New framework detected: {framework}')
            update_needed['sections_to_update'].append('Important Patterns')

    # '<!-- TODO' necessarily contains 'TODO', so a single check suffices
    # (original tested both redundantly).
    if 'TODO' in existing_context:
        update_needed['should_update'] = True
        update_needed['reasons'].append('Context has TODO markers')
        update_needed['sections_to_update'].append('All incomplete sections')

    # Very short (but non-empty) context is likely an unfilled stub.
    if existing_context and len(existing_context) < 200:
        update_needed['should_update'] = True
        update_needed['reasons'].append('Context is minimal')
        update_needed['sections_to_update'].append('Overview')

    return update_needed
|
||||
|
||||
def generate_updated_sections(existing_context: str, current_patterns: Dict, recent_changes: Dict) -> Dict:
    """Build suggested markdown bodies for context sections worth refreshing.

    Returns a mapping of section name -> markdown text; sections with no
    supporting data are omitted.
    """
    suggestions = {}

    file_types = current_patterns['file_types']
    if file_types:
        suggestions['File Types'] = "\n".join(
            f"- **{ext}** ({count} files): [Describe purpose of these files]"
            for ext, count in sorted(file_types.items())
        )

    frameworks = current_patterns['frameworks_detected']
    if frameworks:
        fw_lines = ["**Frameworks in use:**"] + [f"- {fw}" for fw in frameworks]
        suggestions['Frameworks'] = "\n".join(fw_lines)

    summary = recent_changes['summary']
    if summary:
        activity = ["**Recent activity:**"]
        for key in ('added', 'modified', 'deleted'):
            if summary.get(key):
                activity.append(f"- {summary[key]} files {key}")
        suggestions['Recent Changes'] = "\n".join(activity)

    return suggestions
|
||||
|
||||
def format_update_report(dir_path: Path, update_analysis: Dict, suggestions: Dict, analyze_only: bool) -> str:
    """Render the update analysis as a human-readable report string."""
    rule = "=" * 70
    out = [rule, "CONTEXT UPDATE ANALYSIS", rule]
    out.append(f"\nDirectory: {dir_path}")
    out.append(f"Timestamp: {datetime.now().isoformat()}")
    mode = 'ANALYZE ONLY' if analyze_only else 'UPDATE READY'
    out.append(f"\nMode: {mode}")

    if update_analysis['should_update']:
        out.append("\n✅ UPDATE RECOMMENDED")
        out.append("\nReasons:")
        out.extend(f" • {reason}" for reason in update_analysis['reasons'])

        out.append("\nSections to update:")
        out.extend(f" • {section}" for section in update_analysis['sections_to_update'])

        if suggestions:
            out.append("\n" + rule)
            out.append("SUGGESTED UPDATES")
            out.append(rule)
            for section_name, content in suggestions.items():
                out.append(f"\n## {section_name}\n")
                out.append(content)
    else:
        out.append("\n✓ Context appears current")
        out.append("No immediate updates needed")

    out.append("\n" + rule)
    return "\n".join(out)
|
||||
|
||||
def update_context_file(context_file: Path, suggestions: Dict, existing_context: str) -> bool:
    """Append suggested sections to *context_file*, preserving existing text.

    Only sections whose '## <name>' header is not already present are added,
    after a dated separator. Returns True on success, False if the write
    fails.
    """
    updated_content = existing_context

    # Dated separator so readers can tell auto-appended content apart.
    updated_content += "\n\n---\n*Updated: {}*\n".format(datetime.now().strftime("%Y-%m-%d"))

    for section_name, content in suggestions.items():
        # Match the markdown header, not the bare word: the section name may
        # legitimately appear in prose without the section actually existing
        # (the original substring test skipped such sections incorrectly).
        if f"## {section_name}" not in existing_context:
            updated_content += f"\n## {section_name}\n\n{content}\n"

    try:
        context_file.write_text(updated_content)
        return True
    except OSError as e:
        print(f"Error writing context file: {e}")
        return False
|
||||
|
||||
def main():
    """CLI entry point: analyze a directory and optionally refresh its claude.md.

    Exits 1 on an invalid directory or a failed write, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='Autonomously update context based on code changes'
    )
    parser.add_argument('directory', type=str, help='Directory to analyze')
    parser.add_argument(
        '--analyze-only',
        action='store_true',
        help='Only analyze, do not update'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='Verbose output'
    )
    parser.add_argument(
        '--force',
        action='store_true',
        help='Force update even if no changes detected'
    )

    args = parser.parse_args()
    dir_path = Path(args.directory).resolve()

    if not dir_path.exists() or not dir_path.is_dir():
        print(f"Error: Invalid directory: {dir_path}")
        sys.exit(1)

    context_file = dir_path / 'claude.md'

    # Gather current state. Progress text only in verbose mode (the original
    # printed a stray blank line in non-verbose mode).
    if args.verbose:
        print("Analyzing directory...", end='')
    recent_changes = get_recent_changes(dir_path)
    current_patterns = analyze_code_patterns(dir_path)
    existing_context = read_existing_context(context_file)
    if args.verbose:
        print(" Done.")

    # Determine if update needed
    update_analysis = needs_update(existing_context, current_patterns, recent_changes)

    if args.force:
        update_analysis['should_update'] = True
        update_analysis['reasons'].append('Forced update')

    # Generate suggestions
    suggestions = generate_updated_sections(existing_context, current_patterns, recent_changes)

    # Output report
    report = format_update_report(dir_path, update_analysis, suggestions, args.analyze_only)
    print(report)

    # Perform update if not analyze-only
    if update_analysis['should_update'] and not args.analyze_only:
        print("\nUpdating context file...")
        if update_context_file(context_file, suggestions, existing_context):
            print(f"✅ Updated: {context_file}")

            # BIDIRECTIONAL SYNC: notify an active session, best-effort only.
            if INTEGRATION_AVAILABLE and is_session_active():
                try:
                    integration = CCMPIntegration()
                    session_state = integration.get_state("session-management")

                    if session_state:
                        print(f"\n📝 Active session detected - context update logged")
                        print(f" Session: {session_state.get('branch', 'unknown')}")
                        print(f" Updated: {dir_path.relative_to(repo_root)}/claude.md")

                    # Update context manager state
                    integration.update_state("claude-context-manager", {
                        "last_update": datetime.now().isoformat(),
                        "last_updated_path": str(dir_path.relative_to(repo_root))
                    })
                except Exception as e:
                    # Session logging must never fail the update itself.
                    if args.verbose:
                        print(f" (Session logging failed: {e})")
        else:
            print(f"❌ Failed to update: {context_file}")
            sys.exit(1)

    # Fixed: the original read `sys.exit(0 if should_update else 0)` — both
    # branches were 0, so success is unconditionally exit status 0.
    sys.exit(0)
|
||||
|
||||
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
||||
226
skills/claude-context-manager/scripts/create_index.py
Executable file
226
skills/claude-context-manager/scripts/create_index.py
Executable file
@@ -0,0 +1,226 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Claude.md Index Creator
|
||||
|
||||
Creates or updates a master index of all claude.md files in a repository.
|
||||
Useful for understanding the documentation structure at a glance.
|
||||
|
||||
Usage:
|
||||
python create_index.py <repo_path> [--output FILE] [--format FORMAT]
|
||||
|
||||
Examples:
|
||||
python create_index.py /path/to/repo
|
||||
python create_index.py /path/to/repo --output CLAUDE_INDEX.md
|
||||
python create_index.py /path/to/repo --format tree
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from typing import List, Dict
|
||||
import re
|
||||
|
||||
|
||||
def find_claude_md_files(root_path: Path) -> List[Path]:
    """Recursively collect every claude.md under *root_path*, sorted.

    Hidden directories and common dependency/build directories are pruned
    from the walk in-place so os.walk never descends into them.
    """
    skip = {'node_modules', '__pycache__', 'venv', 'env', 'dist', 'build'}
    found = []
    for dirpath, dirnames, filenames in os.walk(root_path):
        dirnames[:] = [
            d for d in dirnames
            if not (d.startswith('.') or d in skip)
        ]
        if 'claude.md' in filenames:
            found.append(Path(dirpath) / 'claude.md')
    return sorted(found)
|
||||
|
||||
|
||||
def extract_title_and_overview(file_path: Path) -> Dict[str, str]:
    """Extract the first H1 title and the first Overview/Purpose sentence.

    Returns a dict with 'title' and 'overview' keys; placeholders are used
    when the file lacks a title/overview, and a per-file error entry is
    returned instead of raising when the file cannot be read.
    """
    try:
        # Explicit encoding (was plain open(), i.e. locale-dependent).
        content = file_path.read_text(encoding='utf-8')

        lines = content.split('\n')

        # Title: first level-1 markdown header.
        title = None
        for line in lines:
            if line.startswith('# '):
                title = line[2:].strip()
                break

        # Overview: first non-empty, non-comment line after an
        # '# Overview' / '## Purpose'-style header, stopping at the next section.
        overview = None
        in_overview = False
        for line in lines:
            if re.match(r'^##?\s+(Overview|Purpose)', line, re.IGNORECASE):
                in_overview = True
                continue
            if in_overview:
                stripped = line.strip()
                # Skip blanks and HTML comments; anything starting with '#'
                # is either a header (handled below) or skipped.
                if stripped and not stripped.startswith('<!--') and not stripped.startswith('#'):
                    overview = stripped
                    break
                # Stop at next section
                if stripped.startswith('##'):
                    break

        return {
            'title': title or 'Untitled',
            'overview': overview or 'No overview available'
        }
    except Exception as e:
        # Deliberate catch-all: an unreadable file yields a placeholder entry
        # instead of aborting the whole index run.
        return {
            'title': 'Error reading file',
            'overview': str(e)
        }
|
||||
|
||||
|
||||
def create_tree_format(root_path: Path, files: List[Path]) -> str:
    """Render the index as an indented tree of directories with titles."""
    out = ["# Claude.md Index", "", "Repository documentation structure:", ""]

    for file_path in files:
        rel_path = file_path.relative_to(root_path)
        parent = rel_path.parent

        # Indent proportionally to how deep the directory sits.
        pad = " " * len(parent.parts)

        meta = extract_title_and_overview(file_path)

        label = str(parent) if str(parent) != '.' else '(root)'
        out.append(f"{pad}📁 **{label}** ([claude.md]({rel_path}))")
        out.append(f"{pad} {meta['title']}")
        out.append("")

    return "\n".join(out)
|
||||
|
||||
|
||||
def create_table_format(root_path: Path, files: List[Path]) -> str:
    """Render the index as a markdown table (directory / title / overview)."""
    rows = [
        "# Claude.md Index",
        "",
        "| Directory | Title | Overview |",
        "|-----------|-------|----------|"
    ]

    for file_path in files:
        rel_path = file_path.relative_to(root_path)
        parent = rel_path.parent
        label = str(parent) if str(parent) != '.' else '(root)'

        meta = extract_title_and_overview(file_path)

        # Keep table cells readable: clamp long overviews to 80 chars.
        overview = meta['overview']
        if len(overview) > 80:
            overview = overview[:77] + "..."

        # Literal pipes would break the markdown table syntax — escape them.
        title = meta['title'].replace('|', '\\|')
        overview = overview.replace('|', '\\|')

        rows.append(f"| [{label}]({rel_path}) | {title} | {overview} |")

    return "\n".join(rows)
|
||||
|
||||
|
||||
def create_detailed_format(root_path: Path, files: List[Path]) -> str:
    """Render the index as a numbered list with one detailed entry per file."""
    out = ["# Claude.md Index", "", "Complete documentation map for this repository.", ""]

    for i, file_path in enumerate(files, 1):
        rel_path = file_path.relative_to(root_path)
        parent = rel_path.parent
        label = str(parent) if str(parent) != '.' else '(root)'

        meta = extract_title_and_overview(file_path)

        out.extend([
            f"## {i}. {label}",
            "",
            f"**File:** [{rel_path}]({rel_path})",
            "",
            f"**Title:** {meta['title']}",
            "",
            f"**Overview:** {meta['overview']}",
            "",
            "---",
            "",
        ])

    return "\n".join(out)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: scan a repository and write a claude.md index file.

    Exits 1 on an invalid path, 0 otherwise (including when no claude.md
    files are found).
    """
    parser = argparse.ArgumentParser(
        description='Create an index of all claude.md files in a repository'
    )
    parser.add_argument(
        'repo_path',
        type=str,
        help='Path to repository root'
    )
    parser.add_argument(
        '--output',
        type=str,
        default='CLAUDE_INDEX.md',
        help='Output filename (default: CLAUDE_INDEX.md)'
    )
    parser.add_argument(
        '--format',
        type=str,
        choices=['tree', 'table', 'detailed'],
        default='tree',
        help='Index format (default: tree)'
    )

    args = parser.parse_args()

    repo_path = Path(args.repo_path).resolve()

    if not repo_path.exists():
        print(f"Error: Path does not exist: {repo_path}")
        sys.exit(1)

    if not repo_path.is_dir():
        print(f"Error: Path is not a directory: {repo_path}")
        sys.exit(1)

    print(f"Scanning repository: {repo_path}")
    files = find_claude_md_files(repo_path)

    if not files:
        print("No claude.md files found in repository.")
        sys.exit(0)

    print(f"Found {len(files)} claude.md file(s)")

    # Dispatch on the requested format (choices= guarantees the key exists).
    formatters = {
        'tree': create_tree_format,
        'table': create_table_format,
        'detailed': create_detailed_format,
    }
    content = formatters[args.format](repo_path, files)

    # Explicit UTF-8 (was locale-dependent open()): the tree format emits
    # emoji, which would raise UnicodeEncodeError under a non-UTF8 locale.
    output_path = repo_path / args.output
    output_path.write_text(content, encoding='utf-8')

    print(f"✅ Created index: {output_path}")
|
||||
|
||||
|
||||
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
||||
265
skills/claude-context-manager/scripts/generate_claude_md.py
Executable file
265
skills/claude-context-manager/scripts/generate_claude_md.py
Executable file
@@ -0,0 +1,265 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Claude.md Generator
|
||||
|
||||
Analyzes a directory and generates an appropriate claude.md file with context
|
||||
about the directory's purpose, structure, and key files.
|
||||
|
||||
Usage:
|
||||
python generate_claude_md.py <directory_path> [--output FILE] [--analyze-depth N]
|
||||
|
||||
Examples:
|
||||
python generate_claude_md.py /path/to/src
|
||||
python generate_claude_md.py /path/to/tests --output claude.md
|
||||
python generate_claude_md.py /path/to/api --analyze-depth 1
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from collections import defaultdict
|
||||
from typing import List, Dict, Set
|
||||
import subprocess
|
||||
|
||||
# Directory names never analyzed: VCS metadata, dependency caches,
# virtualenvs, and build artifacts.
IGNORE_DIRS = {
    '.git', '.github', 'node_modules', '__pycache__', '.pytest_cache',
    'venv', 'env', '.venv', 'dist', 'build', '.egg-info', 'coverage'
}
|
||||
|
||||
|
||||
def analyze_directory(dir_path: Path, depth: int = 0, max_depth: int = 1) -> Dict:
    """Collect a shallow inventory of *dir_path*.

    Returns a dict with files grouped by extension, non-ignored subdirectory
    names, a total file count, and any well-known "key" files found (READMEs,
    entry points, build manifests).

    Note: *depth*/*max_depth* are accepted for interface compatibility, but
    the scan as written is non-recursive.
    """
    # Well-known files worth calling out individually.
    notable = {
        'README.md', 'README.rst', 'README.txt',
        'main.py', 'app.py', 'index.py', '__init__.py',
        'index.js', 'index.ts', 'main.js', 'main.ts',
        'package.json', 'setup.py', 'pyproject.toml',
        'Cargo.toml', 'pom.xml', 'build.gradle',
        'Makefile', 'CMakeLists.txt'
    }

    analysis = {
        'path': dir_path,
        'files_by_type': defaultdict(list),
        'subdirs': [],
        'total_files': 0,
        'key_files': []
    }

    try:
        entries = list(dir_path.iterdir())
    except PermissionError:
        # Unreadable directory: report it as empty rather than crashing.
        return analysis

    # Hidden entries are skipped, except a couple of conventionally useful ones.
    allowed_hidden = {'.gitignore', '.env.example'}
    for entry in entries:
        if entry.name.startswith('.') and entry.name not in allowed_hidden:
            continue

        if entry.is_file():
            analysis['total_files'] += 1
            analysis['files_by_type'][entry.suffix or 'no_extension'].append(entry.name)
            if entry.name in notable:
                analysis['key_files'].append(entry.name)
        elif entry.is_dir() and entry.name not in IGNORE_DIRS:
            analysis['subdirs'].append(entry.name)

    return analysis
|
||||
|
||||
|
||||
def infer_directory_purpose(dir_name: str, analysis: Dict) -> str:
    """Guess a human-readable purpose for a directory.

    First matches well-known directory-name fragments (note: substring
    matching, so e.g. 'test' also matches 'latest' — dict order decides which
    fragment wins); then falls back to hints from the analyzed file
    inventory; finally defaults to 'implementation'.
    """
    dir_name_lower = dir_name.lower()

    # Common name fragments -> purpose. Insertion order matters for
    # overlapping fragments.
    purposes = {
        'src': 'source code',
        'lib': 'library code',
        'app': 'application code',
        'api': 'API implementation',
        'tests': 'test suite',
        'test': 'test suite',
        'docs': 'documentation',
        'documentation': 'documentation',
        'scripts': 'utility scripts',
        'utils': 'utility functions',
        'helpers': 'helper functions',
        'models': 'data models',
        'views': 'view templates',
        'controllers': 'controllers',
        'routes': 'route definitions',
        'components': 'reusable components',
        'services': 'service layer',
        'middleware': 'middleware functions',
        'config': 'configuration files',
        'public': 'public assets',
        'static': 'static assets',
        'assets': 'static assets',
        'migrations': 'database migrations',
        'fixtures': 'test fixtures',
        'examples': 'example code',
    }

    for pattern, purpose in purposes.items():
        if pattern in dir_name_lower:
            return purpose

    # Infer from the file inventory instead.
    files_by_type = analysis['files_by_type']

    # Filenames like foo.test.py / foo.test.js indicate a test suite.
    # (Replaces a fragile substring search over str(dict).)
    for names in files_by_type.values():
        if any('.test.py' in name or '.test.js' in name for name in names):
            return 'test suite'

    if any('.md' in ext or '.rst' in ext for ext in files_by_type):
        return 'documentation'

    return 'implementation'
|
||||
|
||||
|
||||
def generate_claude_md(dir_path: Path, analyze_depth: int = 1) -> str:
    """Produce skeleton claude.md markdown for *dir_path*.

    The output has a header and an inferred purpose line, optional
    structure / key-file / file-type sections, plus TODO-stubbed sections
    for a human (or Claude) to complete later.
    """
    analysis = analyze_directory(dir_path, max_depth=analyze_depth)
    dir_name = dir_path.name if dir_path.name else 'root'
    purpose = infer_directory_purpose(dir_name, analysis)

    # Header, purpose, and the Overview stub.
    doc = [
        f"# {dir_name}/",
        "",
        f"This directory contains the {purpose}.",
        "",
        "## Overview",
        "",
        "<!-- TODO: Add detailed description of what this directory contains and its role in the project -->",
        "",
    ]

    # Directory structure (only when there are subdirectories).
    subdirs = analysis['subdirs']
    if subdirs:
        doc += ["## Directory Structure", "", "```", f"{dir_name}/"]
        doc += [f"├── {s}/" for s in sorted(subdirs)[:10]]  # cap listing at 10
        if len(subdirs) > 10:
            doc.append(f"└── ... ({len(subdirs) - 10} more)")
        doc += ["```", ""]

    # Key files.
    if analysis['key_files']:
        doc += ["## Key Files", ""]
        doc += [f"- **{k}**: <!-- TODO: Describe purpose -->"
                for k in sorted(analysis['key_files'])]
        doc.append("")

    # File types (extension-less files are omitted).
    if analysis['files_by_type']:
        doc += ["## File Types", ""]
        for ext, names in sorted(analysis['files_by_type'].items()):
            if ext != 'no_extension':
                doc.append(f"- **{ext}** ({len(names)} files): <!-- TODO: Describe purpose -->")
        doc.append("")

    # Fixed trailing stub sections.
    doc += [
        "## Important Patterns",
        "",
        "<!-- TODO: Document key patterns, conventions, or architectural decisions -->",
        "",
        "- Pattern 1: Description",
        "- Pattern 2: Description",
        "",
        "## Dependencies",
        "",
        "<!-- TODO: List key dependencies or relationships with other parts of the codebase -->",
        "",
        "## Usage",
        "",
        "<!-- TODO: Explain how to use or interact with code in this directory -->",
        "",
        "## Notes",
        "",
        "<!-- TODO: Add any additional context, gotchas, or important information -->",
        "",
    ]

    return "\n".join(doc)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: generate a claude.md skeleton for one directory.

    Exits 1 on an invalid path or when the output exists without --force.
    """
    parser = argparse.ArgumentParser(
        description='Generate claude.md file for a directory'
    )
    parser.add_argument(
        'directory',
        type=str,
        help='Path to directory'
    )
    parser.add_argument(
        '--output',
        type=str,
        default='claude.md',
        help='Output filename (default: claude.md)'
    )
    parser.add_argument(
        '--analyze-depth',
        type=int,
        default=1,
        help='How deep to analyze subdirectories (default: 1)'
    )
    parser.add_argument(
        '--force',
        action='store_true',
        help='Overwrite existing claude.md file'
    )

    args = parser.parse_args()

    dir_path = Path(args.directory).resolve()

    if not dir_path.exists():
        print(f"Error: Directory does not exist: {dir_path}")
        sys.exit(1)

    if not dir_path.is_dir():
        print(f"Error: Path is not a directory: {dir_path}")
        sys.exit(1)

    output_path = dir_path / args.output

    # Refuse to clobber an existing file unless explicitly forced.
    if output_path.exists() and not args.force:
        print(f"Error: {output_path} already exists. Use --force to overwrite.")
        sys.exit(1)

    print(f"Analyzing directory: {dir_path}")
    content = generate_claude_md(dir_path, args.analyze_depth)

    # Explicit UTF-8 (was locale-dependent open()) — the generated markdown
    # may contain non-ASCII characters (box-drawing tree glyphs).
    output_path.write_text(content, encoding='utf-8')

    print(f"✅ Generated {output_path}")
    print("\nNext steps:")
    print("1. Review the generated file and fill in TODO sections")
    print("2. Add specific details about the directory's purpose")
    print("3. Document key patterns and conventions")
|
||||
|
||||
|
||||
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
||||
306
skills/claude-context-manager/scripts/monitor.py
Executable file
306
skills/claude-context-manager/scripts/monitor.py
Executable file
@@ -0,0 +1,306 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Context Monitor - Autonomous Context Health Checker
|
||||
|
||||
This script is designed to be run by Claude autonomously to monitor
|
||||
context health and identify what needs attention.
|
||||
|
||||
Outputs structured data that Claude can interpret and act on.
|
||||
|
||||
Usage:
|
||||
python monitor.py <repo_path> [--format json|text]
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from datetime import datetime, timedelta
|
||||
from typing import List, Dict, Optional
|
||||
import subprocess
|
||||
|
||||
def get_git_last_modified(file_path: Path) -> Optional[datetime]:
    """Return the author date of the last commit touching *file_path*.

    Returns None when git is unavailable, the file has no history, or the
    date cannot be parsed.
    """
    try:
        result = subprocess.run(
            ['git', 'log', '-1', '--format=%ai', str(file_path)],
            cwd=file_path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0 and result.stdout.strip():
            # %ai looks like '2024-01-31 12:34:56 +0200'; drop the trailing
            # timezone so fromisoformat yields a naive datetime comparable
            # to datetime.now().
            return datetime.fromisoformat(result.stdout.strip().rsplit(' ', 1)[0])
    except (OSError, subprocess.SubprocessError, ValueError):
        # Narrowed from a bare `except:` — keep the best-effort behavior but
        # stop swallowing KeyboardInterrupt/SystemExit.
        pass
    return None
|
||||
|
||||
def get_directory_last_modified(dir_path: Path) -> Optional[datetime]:
    """Return the author date of the last commit touching anything in *dir_path*.

    Returns None when git is unavailable, the path has no history, or the
    date cannot be parsed.
    """
    try:
        result = subprocess.run(
            ['git', 'log', '-1', '--format=%ai', '--', str(dir_path)],
            cwd=dir_path if dir_path.is_dir() else dir_path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0 and result.stdout.strip():
            # Strip the trailing timezone from git's '%ai' output so the
            # result is a naive datetime (consistent with callers' now()).
            return datetime.fromisoformat(result.stdout.strip().rsplit(' ', 1)[0])
    except (OSError, subprocess.SubprocessError, ValueError):
        # Narrowed from a bare `except:` — don't swallow KeyboardInterrupt.
        pass
    return None
|
||||
|
||||
def count_commits_since(path: Path, since_date: datetime) -> int:
    """Count commits touching *path* since *since_date*.

    Returns 0 when git is unavailable, the path has no history, or the
    output cannot be parsed.
    """
    try:
        result = subprocess.run(
            ['git', 'rev-list', '--count', f'--since={since_date.isoformat()}', 'HEAD', '--', str(path)],
            cwd=path if path.is_dir() else path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0:
            return int(result.stdout.strip())
    except (OSError, subprocess.SubprocessError, ValueError):
        # Narrowed from a bare `except:`; ValueError covers a bad int() parse.
        pass
    return 0
|
||||
|
||||
def calculate_staleness_score(context_age_days: int, commits_since_update: int) -> Dict:
    """Score how stale a context file is and map the score to an action.

    Age contributes up to 3 points (saturating at 90+ days) and commit
    churn up to 3 points (saturating at 30+ commits); the sum selects a
    priority tier and a recommended action.
    """
    # Each component saturates at 3 so neither factor can dominate.
    age_component = min(context_age_days / 30, 3)
    churn_component = min(commits_since_update / 10, 3)
    combined = age_component + churn_component

    # Tier thresholds: >=4 critical, >=2.5 high, >=1.5 medium, else low.
    if combined >= 4:
        priority, action = 'critical', 'UPDATE_NOW'
    elif combined >= 2.5:
        priority, action = 'high', 'UPDATE_SOON'
    elif combined >= 1.5:
        priority, action = 'medium', 'REVIEW'
    else:
        priority, action = 'low', 'MONITOR'

    return {
        'score': round(combined, 2),
        'priority': priority,
        'action': action,
        'age_score': round(age_component, 2),
        'commit_score': round(churn_component, 2)
    }
|
||||
|
||||
def find_claude_md_files(root_path: Path) -> List[Path]:
    """Return every claude.md under *root_path*, sorted by path."""
    skipped = {'node_modules', '__pycache__', 'venv', 'env', 'dist', 'build'}
    found: List[Path] = []
    for dirpath, dirnames, filenames in os.walk(root_path):
        # Prune hidden and generated directories in place so os.walk
        # never descends into them.
        dirnames[:] = [
            name for name in dirnames
            if not name.startswith('.') and name not in skipped
        ]
        if 'claude.md' in filenames:
            found.append(Path(dirpath) / 'claude.md')
    return sorted(found)
|
||||
|
||||
def analyze_context_file(file_path: Path, root_path: Path) -> Dict:
    """Build a staleness report for a single claude.md file.

    Combines the file's own last-modified time (git history, falling back
    to the filesystem mtime for untracked files), the enclosing
    directory's last git activity, and the commit count since the context
    file was last touched.
    """
    now = datetime.now()

    # Prefer git history; untracked files fall back to filesystem mtime.
    last_updated = get_git_last_modified(file_path)
    if not last_updated:
        last_updated = datetime.fromtimestamp(file_path.stat().st_mtime)

    containing_dir = file_path.parent
    dir_activity = get_directory_last_modified(containing_dir)

    age_days = (now - last_updated).days
    churn = count_commits_since(containing_dir, last_updated)
    staleness = calculate_staleness_score(age_days, churn)

    # Paths are reported relative to the repo root for readability.
    display_path = file_path.relative_to(root_path)

    return {
        'path': str(display_path),
        'directory': str(display_path.parent),
        'context_age_days': age_days,
        'context_last_updated': last_updated.isoformat(),
        'directory_last_modified': dir_activity.isoformat() if dir_activity else None,
        'commits_since_update': churn,
        'staleness': staleness,
        'needs_attention': staleness['action'] in ['UPDATE_NOW', 'UPDATE_SOON']
    }
|
||||
|
||||
def monitor_repository(repo_path: Path) -> Dict:
    """Analyze every claude.md in *repo_path* and summarize context health."""
    context_files = find_claude_md_files(repo_path)
    if not context_files:
        return {
            'status': 'no_context_files',
            'message': 'No claude.md files found in repository',
            'files': []
        }

    analyses = [analyze_context_file(path, repo_path) for path in context_files]

    # Bucket the per-file reports by staleness priority.
    buckets = {'critical': [], 'high': [], 'medium': [], 'low': []}
    for report in analyses:
        buckets[report['staleness']['priority']].append(report)

    # Health score is 0-100 (higher is better): mean staleness scaled by 20.
    mean_staleness = sum(r['staleness']['score'] for r in analyses) / len(analyses)
    health = max(0, 100 - (mean_staleness * 20))

    return {
        'status': 'analyzed',
        'timestamp': datetime.now().isoformat(),
        'repository': str(repo_path),
        'summary': {
            'total_files': len(analyses),
            'critical': len(buckets['critical']),
            'high': len(buckets['high']),
            'medium': len(buckets['medium']),
            'low': len(buckets['low']),
            'health_score': round(health, 1)
        },
        'files': buckets,
        'recommendations': generate_recommendations(
            buckets['critical'], buckets['high'], buckets['medium']
        )
    }
|
||||
|
||||
def generate_recommendations(critical: List, high: List, medium: List) -> List[str]:
    """Turn prioritized staleness buckets into action sentences for Claude."""
    recs: List[str] = []

    if critical:
        # Show at most three directories as a sample.
        sample = ', '.join(entry['directory'] for entry in critical[:3])
        recs.append(
            f"IMMEDIATE ACTION: {len(critical)} context file(s) are critically stale. "
            f"Update: {sample}"
        )

    if high:
        sample = ', '.join(entry['directory'] for entry in high[:3])
        recs.append(
            f"HIGH PRIORITY: {len(high)} context file(s) need updating soon. "
            f"Review: {sample}"
        )

    if medium:
        recs.append(
            f"MEDIUM PRIORITY: {len(medium)} context file(s) should be reviewed. "
            f"Consider updating when convenient."
        )

    if not critical and not high:
        recs.append("All context files are reasonably current. Continue monitoring.")

    return recs
|
||||
|
||||
def format_text_output(data: Dict) -> str:
    """Render monitor results as a human-readable report for Claude."""
    divider = "=" * 70
    out = [divider, "CONTEXT HEALTH MONITOR", divider]

    # Short-circuit: nothing to report beyond the message.
    if data['status'] == 'no_context_files':
        out.append(f"\n{data['message']}")
        return "\n".join(out)

    summary = data['summary']
    out.append(f"\nRepository: {data['repository']}")
    out.append(f"Timestamp: {data['timestamp']}")
    out.append(f"\n📊 Health Score: {summary['health_score']}/100")
    out.append(f"\n📁 Context Files: {summary['total_files']}")

    # One line per non-empty priority bucket.
    if summary['critical']:
        out.append(f" 🔴 Critical: {summary['critical']}")
    if summary['high']:
        out.append(f" 🟠 High: {summary['high']}")
    if summary['medium']:
        out.append(f" 🟡 Medium: {summary['medium']}")
    if summary['low']:
        out.append(f" 🟢 Low: {summary['low']}")

    out.append("\n" + divider)
    out.append("RECOMMENDATIONS")
    out.append(divider)
    for idx, rec in enumerate(data['recommendations'], 1):
        out.append(f"\n{idx}. {rec}")

    # Detail section only for files flagged critical/high.
    flagged = data['files']['critical'] + data['files']['high']
    if flagged:
        out.append("\n" + divider)
        out.append("DETAILS - FILES NEEDING ATTENTION")
        out.append(divider)
        for entry in flagged:
            out.append(f"\n📁 {entry['directory']}")
            out.append(f" Path: {entry['path']}")
            out.append(f" Age: {entry['context_age_days']} days")
            out.append(f" Commits since update: {entry['commits_since_update']}")
            out.append(f" Priority: {entry['staleness']['priority'].upper()}")
            out.append(f" Action: {entry['staleness']['action']}")

    out.append("\n" + divider)
    return "\n".join(out)
|
||||
|
||||
def main():
    """CLI entry point.

    Exit status encodes health: 0 = OK, 1 = high-priority staleness
    found, 2 = critical staleness found.
    """
    parser = argparse.ArgumentParser(
        description='Monitor context health and identify stale files'
    )
    parser.add_argument('repo_path', type=str, help='Repository path')
    parser.add_argument(
        '--format',
        choices=['json', 'text'],
        default='text',
        help='Output format (default: text)'
    )
    opts = parser.parse_args()

    target = Path(opts.repo_path).resolve()
    if not target.exists():
        print(f"Error: Repository path does not exist: {target}")
        sys.exit(1)

    report = monitor_repository(target)

    # Render in the requested format.
    if opts.format == 'json':
        print(json.dumps(report, indent=2))
    else:
        print(format_text_output(report))

    # Map severity onto the exit status for shell/CI consumers.
    if report['status'] == 'analyzed':
        summary = report['summary']
        if summary['critical'] > 0:
            sys.exit(2)
        elif summary['high'] > 0:
            sys.exit(1)

    sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
169
skills/claude-context-manager/scripts/scan_repo.py
Executable file
169
skills/claude-context-manager/scripts/scan_repo.py
Executable file
@@ -0,0 +1,169 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Repository Scanner for claude.md Management
|
||||
|
||||
Analyzes repository structure and suggests where claude.md files should exist.
|
||||
Helps identify directories that need documentation.
|
||||
|
||||
Usage:
|
||||
python scan_repo.py <repo_path> [--min-files N] [--show-existing]
|
||||
|
||||
Examples:
|
||||
python scan_repo.py /path/to/repo
|
||||
python scan_repo.py /path/to/repo --min-files 3
|
||||
python scan_repo.py /path/to/repo --show-existing
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Set
|
||||
|
||||
# Directories that never warrant documentation and should not be walked.
IGNORE_DIRS = {
    '.git', '.github', 'node_modules', '__pycache__', '.pytest_cache',
    'venv', 'env', '.venv', 'dist', 'build', '.egg-info', 'coverage',
    '.tox', '.mypy_cache', '.ruff_cache', 'target', 'bin', 'obj'
}

# File extensions that count toward a directory's "significance".
SIGNIFICANT_EXTENSIONS = {
    '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go', '.rs', '.cpp',
    '.c', '.h', '.hpp', '.cs', '.rb', '.php', '.swift', '.kt', '.scala',
    '.sh', '.bash', '.md', '.yaml', '.yml', '.json', '.toml', '.xml'
}
|
||||
|
||||
|
||||
def scan_directory(root_path: Path, min_files: int = 2) -> Dict:
    """Walk *root_path* and report which directories need a claude.md.

    A directory is "significant" (worth documenting) when it contains at
    least *min_files* files with a recognized source/doc extension, not
    counting claude.md itself.

    Args:
        root_path: Root directory to scan.
        min_files: Minimum significant files to warrant a claude.md.

    Returns:
        Dict with 'needs_claude_md' and 'has_claude_md' entry lists plus
        a 'stats' counter block.
    """
    needs: List[Dict] = []
    covered: List[Dict] = []
    stats = {'total_dirs': 0, 'dirs_scanned': 0, 'significant_dirs': 0}

    for dirpath, dirnames, filenames in os.walk(root_path):
        # Prune ignored directories in place so os.walk skips them.
        dirnames[:] = [d for d in dirnames if d not in IGNORE_DIRS]

        stats['total_dirs'] += 1
        current = Path(dirpath)
        documented = 'claude.md' in filenames

        relevant = [
            name for name in filenames
            if Path(name).suffix in SIGNIFICANT_EXTENSIONS and name != 'claude.md'
        ]

        if len(relevant) < min_files:
            # Not significant enough to need documentation.
            continue

        stats['significant_dirs'] += 1
        stats['dirs_scanned'] += 1

        rel = current.relative_to(root_path)
        entry = {
            'path': str(rel) if str(rel) != '.' else '(root)',
            'file_count': len(relevant),
            'file_types': sorted({Path(name).suffix for name in relevant})
        }
        (covered if documented else needs).append(entry)

    return {
        'needs_claude_md': needs,
        'has_claude_md': covered,
        'stats': stats
    }
|
||||
|
||||
|
||||
def print_results(results: Dict, show_existing: bool = False):
    """Pretty-print scan results to stdout."""
    bar = "=" * 70
    stats = results['stats']

    def show(info: Dict) -> None:
        # Shared rendering for a single directory entry.
        print(f"\n📁 {info['path']}")
        print(f" Files: {info['file_count']}")
        print(f" Types: {', '.join(info['file_types'])}")

    print("\n" + bar)
    print("REPOSITORY SCAN RESULTS")
    print(bar)

    print(f"\n📊 Statistics:")
    print(f" Total directories: {stats['total_dirs']}")
    print(f" Significant directories: {stats['significant_dirs']}")
    print(f" Directories with claude.md: {len(results['has_claude_md'])}")
    print(f" Directories needing claude.md: {len(results['needs_claude_md'])}")

    if results['needs_claude_md']:
        print(f"\n❌ Directories that should have claude.md:")
        print("-" * 70)
        for dir_info in results['needs_claude_md']:
            show(dir_info)

    if show_existing and results['has_claude_md']:
        print(f"\n✅ Directories with existing claude.md:")
        print("-" * 70)
        for dir_info in results['has_claude_md']:
            show(dir_info)

    print("\n" + bar)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for the repository scanner."""
    parser = argparse.ArgumentParser(
        description='Scan repository for claude.md file coverage'
    )
    parser.add_argument(
        'repo_path',
        type=str,
        help='Path to repository root'
    )
    parser.add_argument(
        '--min-files',
        type=int,
        default=2,
        help='Minimum significant files to warrant a claude.md (default: 2)'
    )
    parser.add_argument(
        '--show-existing',
        action='store_true',
        help='Show directories that already have claude.md files'
    )
    opts = parser.parse_args()

    root = Path(opts.repo_path).resolve()

    # Validate the target before walking it.
    if not root.exists():
        print(f"Error: Path does not exist: {root}")
        sys.exit(1)
    if not root.is_dir():
        print(f"Error: Path is not a directory: {root}")
        sys.exit(1)

    print(f"Scanning repository: {root}")
    print_results(scan_directory(root, opts.min_files), opts.show_existing)


if __name__ == '__main__':
    main()
|
||||
247
skills/claude-context-manager/scripts/validate_claude_md.py
Executable file
247
skills/claude-context-manager/scripts/validate_claude_md.py
Executable file
@@ -0,0 +1,247 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Claude.md Validator
|
||||
|
||||
Validates existing claude.md files for completeness, accuracy, and quality.
|
||||
Checks for TODO markers, outdated information, and missing key sections.
|
||||
|
||||
Usage:
|
||||
python validate_claude_md.py <path> [--strict] [--auto-fix]
|
||||
|
||||
Examples:
|
||||
python validate_claude_md.py /path/to/repo
|
||||
python validate_claude_md.py /path/to/src/claude.md
|
||||
python validate_claude_md.py /path/to/repo --strict
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Tuple
|
||||
import re
|
||||
|
||||
# A file must contain at least one of these top-level sections.
REQUIRED_SECTIONS = [
    'Overview',
    'Purpose'  # Alternative to Overview
]

# Sections a thorough claude.md is expected to have (advisory only).
RECOMMENDED_SECTIONS = [
    'Directory Structure',
    'Key Files',
    'Important Patterns',
    'Dependencies',
    'Usage'
]
|
||||
|
||||
|
||||
def find_claude_md_files(root_path: Path) -> List[Path]:
    """Collect every claude.md file under *root_path* (unsorted)."""
    excluded = {'node_modules', '__pycache__', 'venv', 'env', 'dist', 'build'}
    matches: List[Path] = []

    for dirpath, dirnames, filenames in os.walk(root_path):
        # Prune hidden and generated directories before descending.
        dirnames[:] = [
            d for d in dirnames
            if not d.startswith('.') and d not in excluded
        ]

        if 'claude.md' in filenames:
            matches.append(Path(dirpath) / 'claude.md')

    return matches
|
||||
|
||||
|
||||
def validate_claude_md(file_path: Path, strict: bool = False) -> Dict:
    """Validate one claude.md file for completeness and quality.

    Args:
        file_path: Path to the claude.md file.
        strict: When True, TODO markers and template placeholders are
            reported as issues instead of warnings, and missing
            recommended sections are flagged.

    Returns:
        Dict with 'valid' (True iff no issues), 'issues', 'warnings' and
        'stats' (line/word/TODO/section counts).
    """
    issues: List[str] = []
    warnings: List[str] = []

    try:
        # Explicit encoding so validation does not depend on the locale
        # (the default encoding differs across platforms).
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        return {
            'valid': False,
            'issues': [f"Could not read file: {e}"],
            'warnings': [],
            'stats': {}
        }

    lines = content.split('\n')

    # Reject effectively-empty files.
    if len(content.strip()) < 50:
        issues.append("File is too short (less than 50 characters)")

    # Unfinished-work markers: errors only in strict mode.
    todo_count = len(re.findall(r'TODO|FIXME|XXX', content, re.IGNORECASE))
    if todo_count > 0:
        if strict:
            issues.append(f"Found {todo_count} TODO/FIXME markers")
        else:
            warnings.append(f"Found {todo_count} TODO/FIXME markers")

    # An Overview or Purpose heading is mandatory.
    has_overview = any(re.search(r'^##?\s+(Overview|Purpose)', line, re.IGNORECASE) for line in lines)
    if not has_overview:
        issues.append("Missing required section: Overview or Purpose")

    # Recommended sections are advisory; missing ones are only reported
    # (as warnings) under --strict.
    found_sections = []
    for section in RECOMMENDED_SECTIONS:
        if any(re.search(rf'^##?\s+{section}', line, re.IGNORECASE) for line in lines):
            found_sections.append(section)

    missing_recommended = set(RECOMMENDED_SECTIONS) - set(found_sections)
    if missing_recommended and strict:
        warnings.append(f"Missing recommended sections: {', '.join(missing_recommended)}")

    # Placeholder text left over from a template.  Parentheses make the
    # intended precedence explicit (previously relied on `and` binding
    # tighter than `or`).
    if '<!-- TODO' in content or ('Description' in content and 'TODO' in content):
        if strict:
            issues.append("Contains placeholder TODO comments that need completion")
        else:
            warnings.append("Contains placeholder TODO comments")

    # Flag sections whose body is nearly empty (fewer than two non-blank,
    # non-comment lines after the heading).
    sections = re.split(r'^##?\s+', content, flags=re.MULTILINE)[1:]
    for section in sections:
        lines_in_section = [l.strip() for l in section.split('\n')[1:] if l.strip() and not l.strip().startswith('<!--')]
        if len(lines_in_section) < 2:
            section_name = section.split('\n')[0]
            warnings.append(f"Section '{section_name}' has minimal content")

    # Relative markdown links should point at existing files.
    broken_link_pattern = r'\[([^\]]+)\]\(([^\)]+)\)'
    for link_text, link_url in re.findall(broken_link_pattern, content):
        if link_url.startswith('./') or link_url.startswith('../'):
            target_path = file_path.parent / link_url
            if not target_path.exists():
                warnings.append(f"Potentially broken relative link: {link_url}")

    stats = {
        'line_count': len(lines),
        'word_count': len(content.split()),
        'todo_count': todo_count,
        'sections_found': len(found_sections)
    }

    return {
        'valid': len(issues) == 0,
        'issues': issues,
        'warnings': warnings,
        'stats': stats
    }
|
||||
|
||||
|
||||
def print_validation_results(results: Dict[Path, Dict], strict: bool):
    """Print validation results grouped by severity to stdout."""
    bar = "=" * 70
    print("\n" + bar)
    print("CLAUDE.MD VALIDATION RESULTS")
    print(bar)

    total = len(results)
    ok = sum(1 for outcome in results.values() if outcome['valid'])
    warned = sum(1 for outcome in results.values() if outcome['warnings'])

    print(f"\n📊 Summary:")
    print(f" Total files checked: {total}")
    print(f" Valid files: {ok}")
    print(f" Files with issues: {total - ok}")
    print(f" Files with warnings: {warned}")

    # Invalid files: issues first, then any warnings they also carry.
    broken = {p: r for p, r in results.items() if not r['valid']}
    if broken:
        print(f"\n❌ Files with issues:")
        print("-" * 70)
        for path, outcome in broken.items():
            print(f"\n📄 {path}")
            for issue in outcome['issues']:
                print(f" ❌ {issue}")
            for warning in outcome['warnings']:
                print(f" ⚠️ {warning}")

    # Valid files that still carry warnings.
    warn_only = {p: r for p, r in results.items() if r['valid'] and r['warnings']}
    if warn_only:
        print(f"\n⚠️ Files with warnings:")
        print("-" * 70)
        for path, outcome in warn_only.items():
            print(f"\n📄 {path}")
            for warning in outcome['warnings']:
                print(f" ⚠️ {warning}")

    # Fully clean files.
    clean = {p: r for p, r in results.items() if r['valid'] and not r['warnings']}
    if clean:
        print(f"\n✅ Fully valid files:")
        print("-" * 70)
        for path in clean:
            print(f" 📄 {path}")

    print("\n" + bar)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: validate one claude.md file or a whole tree.

    Exits 1 when any file has validation issues, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='Validate claude.md files'
    )
    parser.add_argument(
        'path',
        type=str,
        help='Path to directory or specific claude.md file'
    )
    parser.add_argument(
        '--strict',
        action='store_true',
        help='Enable strict validation (TODOs become errors)'
    )
    opts = parser.parse_args()

    target = Path(opts.path).resolve()
    if not target.exists():
        print(f"Error: Path does not exist: {target}")
        sys.exit(1)

    # Resolve the set of files to check.
    if target.is_file() and target.name == 'claude.md':
        candidates = [target]
    elif target.is_dir():
        candidates = find_claude_md_files(target)
        if not candidates:
            print(f"No claude.md files found in {target}")
            sys.exit(0)
    else:
        print(f"Error: Path must be a directory or a claude.md file")
        sys.exit(1)

    print(f"Validating {len(candidates)} claude.md file(s)...")

    outcomes = {}
    for candidate in candidates:
        outcomes[candidate] = validate_claude_md(candidate, strict=opts.strict)

    print_validation_results(outcomes, opts.strict)

    # Non-zero exit when any file failed validation.
    if any(not outcome['valid'] for outcome in outcomes.values()):
        sys.exit(1)


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user