Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 17:55:11 +08:00
commit f9707d7bd8
16 changed files with 3770 additions and 0 deletions

View File

@@ -0,0 +1,12 @@
{
"name": "claude-context-manager",
"description": "Autonomous context management for codebases through claude.md files with monitoring, staleness detection, and intelligent updates",
"version": "0.0.5",
"author": {
"name": "AnthemFlynn",
"email": "AnthemFlynn@users.noreply.github.com"
},
"skills": [
"./skills"
]
}

3
README.md Normal file
View File

@@ -0,0 +1,3 @@
# claude-context-manager
Autonomous context management for codebases through claude.md files with monitoring, staleness detection, and intelligent updates

93
plugin.lock.json Normal file
View File

@@ -0,0 +1,93 @@
{
"$schema": "internal://schemas/plugin.lock.v1.json",
"pluginId": "gh:AnthemFlynn/ccmp:plugins/claude-context-manager",
"normalized": {
"repo": null,
"ref": "refs/tags/v20251128.0",
"commit": "2601debf491333b0652b87c943bd6baa920829e2",
"treeHash": "7d5310f2d15e2e5515f5254ba994faf45a51299ec1fe8398ae563ce5e1ddb544",
"generatedAt": "2025-11-28T10:24:51.709965Z",
"toolVersion": "publish_plugins.py@0.2.0"
},
"origin": {
"remote": "git@github.com:zhongweili/42plugin-data.git",
"branch": "master",
"commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
"repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
},
"manifest": {
"name": "claude-context-manager",
"description": "Autonomous context management for codebases through claude.md files with monitoring, staleness detection, and intelligent updates",
"version": "0.0.5"
},
"content": {
"files": [
{
"path": "README.md",
"sha256": "3a0211d7f44cca99d1fae0ba66e282e58f9a9dc5d84b32eec7980d9b8a6bf260"
},
{
"path": ".claude-plugin/plugin.json",
"sha256": "9eed64a89b91b11d43a50a24acdbbf73e49ee187a3efa1a1db09db1d453f859b"
},
{
"path": "skills/claude-context-manager/SKILL.md",
"sha256": "a9e1909fd7a0261b1b627a4207aa6ce8b984e8c23a82e9da09bf7c8b8bc486bf"
},
{
"path": "skills/claude-context-manager/LICENSE.txt",
"sha256": "f87634f0681b3f5c968701d3bd1014d2576ef8e5b8a26307f859923aa0ab7aff"
},
{
"path": "skills/claude-context-manager/references/examples.md",
"sha256": "cc0666d4c167dffe4b7c0b452487fea74badf41a5191b32fc46e3e6754ac3283"
},
{
"path": "skills/claude-context-manager/references/structure_guide.md",
"sha256": "50ce531192726dd97ea29648ff33a460e4fe181146007e8f7384c9b07dbbd001"
},
{
"path": "skills/claude-context-manager/references/context_manager_mode.md",
"sha256": "a92f9a25c40fd03b379c358c76befd64f3b758d6322c600cb9ade492aa3b87b2"
},
{
"path": "skills/claude-context-manager/scripts/auto_update.py",
"sha256": "8a771889205a6f44d91498a92a840ee47713cac12b6246d0a573356668c7eb54"
},
{
"path": "skills/claude-context-manager/scripts/monitor.py",
"sha256": "85cdd0baaf13120db919b84d747e49e3af9b4d4e00c131cad43b511ae4bb6c17"
},
{
"path": "skills/claude-context-manager/scripts/validate_claude_md.py",
"sha256": "2ce31ad1f4bff0de7f0244270f6cc952033ff49237b4165c9d303fe0359725d9"
},
{
"path": "skills/claude-context-manager/scripts/scan_repo.py",
"sha256": "e80397def7ee7c40171f42644812ff4fc2ea1570e5f3104d37723db14b5b4ce1"
},
{
"path": "skills/claude-context-manager/scripts/create_index.py",
"sha256": "32df3d83fa1f8ca23856e674534d516470dda1beec7b60e6190ad3043d164c8a"
},
{
"path": "skills/claude-context-manager/scripts/generate_claude_md.py",
"sha256": "70b08e75d33382b197a0f53ba3c84df148d22274a44c5823699153be14f5cf1c"
},
{
"path": "skills/claude-context-manager/assets/templates/test-directory-template.md",
"sha256": "b42c5f1b2b64b20e56bd2dbc08a23afa30a4db6d93fa7d0d11f04bbec7e5ae9e"
},
{
"path": "skills/claude-context-manager/assets/templates/source-code-template.md",
"sha256": "9543fc5ba11487b7b14edea484c013e01455e362b1384a5b5a6e2f074de93323"
}
],
"dirSha256": "7d5310f2d15e2e5515f5254ba994faf45a51299ec1fe8398ae563ce5e1ddb544"
},
"security": {
"scannedAt": null,
"scannerVersion": null,
"flags": []
}
}

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Claude Context Manager Skill
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,523 @@
---
name: claude-context-manager
description: Enables autonomous context management for codebases through claude.md files. Use when creating, maintaining, or synchronizing AI agent context. Provides tools and workflows for monitoring context health, detecting staleness, and updating intelligently. Helps Claude work proactively as a context manager.
---
# Claude Context Manager
This skill enables you to work as an autonomous **context manager** for codebases, maintaining accurate and actionable context intelligence through `claude.md` files.
## What This Skill Provides
**Behavioral guidance**: Instructions for working proactively as a context manager
**Monitoring tools**: Scripts to assess context health and detect staleness
**Update automation**: Intelligent context synchronization based on code changes
**Quality standards**: Patterns for creating actionable, dense agent context
## Core Concept
`claude.md` files are **cognitive maps** - operational intelligence that helps you:
- Navigate faster (know structure and entry points)
- Generate better (follow existing patterns)
- Avoid errors (understand constraints and gotchas)
- Make decisions (know the rules and conventions)
This is **agent context**, not documentation. The goal is making future-Claude more effective.
## Context Manager Mode
Before starting, read `references/context_manager_mode.md` to understand how to work autonomously and proactively as a context manager.
**Key operating principles:**
- **Proactive**: Monitor and update without being asked
- **Surgical**: Update only what's needed
- **Communicative**: Explain actions clearly
- **Autonomous**: Make decisions within boundaries
## Workflow Decision Tree
**Starting fresh in a repository?** → "Initial Setup" workflow
**Working in active codebase?** → "Continuous Maintenance" workflow
**Code just changed significantly?** → "Change-Responsive Update" workflow
**Exploring existing context?** → "Context Exploration" workflow
## Initial Setup
When first working in a repository that needs context management:
### 1. Assess Current State
```bash
python scripts/scan_repo.py /path/to/repo
```
This shows:
- Directories that should have context
- Directories that already have context
- Coverage gaps
### 2. Prioritize Areas
Focus on high-impact directories first:
- Entry points (src/main, src/index, etc.)
- Core business logic (src/services, src/api)
- Complex areas (src/db, src/auth)
- Active development areas (check git activity)
### 3. Generate Initial Context
For each priority directory:
```bash
python scripts/generate_claude_md.py /path/to/directory
```
This creates structured context with:
- Auto-detected purpose
- File analysis
- Pattern placeholders
- TODO markers for manual completion
### 4. Customize and Refine
Review generated files and:
- Fill in TODO markers with specific information
- Add patterns you observe
- Document gotchas
- Note relationships
Use `references/structure_guide.md` and `references/examples.md` for guidance.
### 5. Create Index
```bash
python scripts/create_index.py /path/to/repo
```
Generates navigable index of all context files.
## Continuous Maintenance
Once context exists, maintain it autonomously:
### 1. Regular Health Checks
Run periodically (start of session, after major work):
```bash
python scripts/monitor.py /path/to/repo
```
Provides:
- Health score (0-100)
- Files by priority (critical/high/medium/low)
- Specific recommendations
- Staleness metrics
### 2. Act on Findings
**Critical priority (immediate action):**
```bash
python scripts/auto_update.py /path/to/directory
```
**High priority (soon):**
```bash
python scripts/auto_update.py /path/to/directory --analyze-only
# Review suggestions, then update
```
**Medium/Low priority (monitor):**
Note for later, continue monitoring
### 3. Validate Quality
After updates:
```bash
python scripts/validate_claude_md.py /path/to/directory/claude.md
```
Checks for:
- Required sections
- Actionable content
- TODO markers
- Broken links
### 4. Update Index
Periodically refresh the index:
```bash
python scripts/create_index.py /path/to/repo
```
## Change-Responsive Update
When code changes occur (you made changes or observed changes):
### 1. Detect Affected Context
For each changed directory:
```bash
python scripts/auto_update.py /path/to/directory --analyze-only
```
This analyzes:
- Recent changes (git history)
- Current patterns
- Framework detection
- Update recommendations
### 2. Review and Update
If update recommended:
```bash
python scripts/auto_update.py /path/to/directory
```
This performs surgical updates:
- Preserves accurate content
- Updates specific sections
- Adds newly discovered patterns
- Timestamps changes
### 3. Verify
```bash
python scripts/validate_claude_md.py /path/to/directory/claude.md
```
## Context Exploration
When entering an area with existing context:
### 1. Read Context
Before working, read the `claude.md` file:
```bash
view /path/to/directory/claude.md
```
Understand:
- Directory purpose
- Pattern expectations
- Key files and relationships
- Known gotchas
### 2. Verify Accuracy
As you work, note:
- ✅ Information that was helpful
- ❌ Information that was wrong/misleading
- 📝 Information that's missing
### 3. Update Immediately
If you discover inaccuracies or important missing info:
```bash
# Update the specific file
str_replace /path/to/directory/claude.md
```
Or use auto-update for comprehensive refresh.
### 4. Note Patterns
When you discover patterns not documented:
- Add them to context immediately
- Include examples
- Note why they matter
## Autonomous Decision-Making
### You CAN Act Autonomously
**Update context when:**
- Staleness score > 4 (critical)
- You just changed code affecting patterns
- You discover inaccuracies while working
- You have info to fill TODO markers
**Generate new context when:**
- Directory has 3+ files and no context
- You struggled without context here
- Clear patterns emerge
### You SHOULD Ask First
⚠️ **Before:**
- Deleting existing context
- Major restructuring
- Updating very recent context (<7 days)
- Bulk operations on many files
## Tools Reference
### Monitoring: scripts/monitor.py
Assesses context health across repository.
**Key outputs:**
- Health score (0-100)
- Staleness metrics
- Priority categorization
- Action recommendations
**Usage:**
```bash
python scripts/monitor.py /path/to/repo [--format json|text]
```
**Exit codes:**
- 0: Healthy
- 1: High priority issues
- 2: Critical issues
### Auto-Update: scripts/auto_update.py
Intelligently updates context based on code changes.
**What it does:**
- Analyzes recent git changes
- Detects current patterns
- Identifies needed updates
- Surgically updates context
**Usage:**
```bash
python scripts/auto_update.py <directory> [--analyze-only] [--force]
```
**Modes:**
- Default: Analyze and update
- `--analyze-only`: Show recommendations only
- `--force`: Update even if no changes detected
### Scanning: scripts/scan_repo.py
Identifies directories needing context.
**Usage:**
```bash
python scripts/scan_repo.py <repo_path> [--min-files N] [--show-existing]
```
### Generation: scripts/generate_claude_md.py
Creates new context files with smart defaults.
**Usage:**
```bash
python scripts/generate_claude_md.py <directory> [--output FILE] [--force]
```
### Validation: scripts/validate_claude_md.py
Checks context quality and completeness.
**Usage:**
```bash
python scripts/validate_claude_md.py <path> [--strict]
```
### Indexing: scripts/create_index.py
Builds master index of all context files.
**Usage:**
```bash
python scripts/create_index.py <repo_path> [--format tree|table|detailed]
```
## Reference Materials
### Essential Reading
**`references/context_manager_mode.md`**
Read this first. Explains how to work autonomously as a context manager - mindset, workflows, communication patterns, quality standards.
**`references/structure_guide.md`**
Best practices for agent context - what to include, what to avoid, how to structure, maintenance triggers.
**`references/examples.md`**
Real-world examples for different directory types - API layers, services, tests, config, models.
### Templates
**`assets/templates/source-code-template.md`**
Starting template for general source directories.
**`assets/templates/test-directory-template.md`**
Starting template for test suites.
## Communication Patterns
### When Monitoring
**Do:**
> Context health check complete. 3 files need attention:
> - src/api/ (critical - 45 days, 23 commits) → Updating now
> - src/services/ (high - 30 days, 15 commits) → Should I update?
> - tests/integration/ (medium - 20 days) → Monitoring
**Don't:**
> I checked and there are issues.
### When Updating
**Do:**
> Updated src/api/claude.md:
> • Added rate limiting pattern (introduced last sprint)
> • Updated middleware chain (auth-jwt.ts now handles tokens)
> • Removed deprecated cors-handler.ts reference
>
> Context now current with HEAD.
**Don't:**
> Updated file.
### When Suggesting
**Do:**
> src/utils/ has 12 files but no context. Analysis shows:
> • Mix of helpers (strings, dates, validation)
> • No clear pattern - might need reorganization
> • Create context as-is, or refactor first?
**Don't:**
> You should add context there.
## Quality Standards
### Actionable Over Descriptive
Every section should answer: "What does this tell me to DO differently?"
**Descriptive:**
```markdown
This directory contains services.
```
**Actionable:**
```markdown
**Service Pattern**: Class-based with constructor DI
**Rule**: All async methods, throw domain errors (never return errors)
**Example**: `class UserService { constructor(db, logger) {} }`
```
### Dense Over Verbose
Use tokens efficiently.
**Verbose:**
```markdown
The API directory is important. It handles requests from the frontend.
It communicates with backend services. It uses Express.js.
```
**Dense:**
```markdown
**Framework**: Express 4.x
**Pattern**: Route → Validator → Service → Serializer
**Rule**: No direct DB, asyncHandler required
```
### Current Over Historical
Context must reflect reality, not history.
**Historical:**
```markdown
We migrated from MySQL to PostgreSQL in 2023.
```
**Current:**
```markdown
**Database**: PostgreSQL 15, Prisma ORM
**Migrations**: prisma/migrations/
```
## CCMP Plugin Integration
Context manager **automatically integrates** with other CCMP plugins:
### With session-management 🔄
**Sessions load relevant context automatically:**
When a session starts, relevant `claude.md` files are loaded based on objectives.
**Context health in session handoffs:**
Session handoffs include context health reports and update recommendations.
**Checkpoints trigger health checks:**
Session checkpoints automatically check if changed directories have stale context.
**To enable:** Use `lib/session_integration.py` in your session workflow.
### With tdd-workflow 🧪
**TDD cycles update test context:**
When TDD GREEN checkpoints succeed, test documentation can be auto-updated with discovered patterns.
**Integration API:**
```python
from lib.ccmp_integration import CCMPIntegration
integration = CCMPIntegration()
integration.update_state("claude-context-manager", {
"health_score": 87,
"last_scan": "2025-11-01T10:00:00Z",
"critical_files": ["src/api/"]
})
```
## Integration with Development
### As You Code
Maintain context awareness:
```
Working in new directory?
→ Check for context
→ Note if missing
Discovering pattern?
→ Check if documented
→ Add if missing
Finding gotcha?
→ Add to context immediately
```
### Before Finishing
Quick maintenance check:
1. Run: `python scripts/monitor.py .`
2. Update critical items
3. Note medium/low for later
4. Leave breadcrumbs for next session
## Success Indicators
You're managing context well when:
- ✅ Context helps you work faster
- ✅ Updates are small and frequent
- ✅ You rarely hit outdated info
- ✅ Code generation follows patterns correctly
- ✅ New areas are easier to understand
## Remember
Context management isn't about perfect documentation - it's about **maintaining cognitive maps that multiply your effectiveness**.
Every `claude.md` file should make future-you faster, more accurate, and more pattern-aware.
**Your mission**: Make the next Claude session in this codebase even better.

View File

@@ -0,0 +1,80 @@
# [directory-name]/
<!-- Replace [directory-name] with the actual directory name -->
This directory contains [brief description of what this directory contains].
## Overview
<!-- 2-3 sentences explaining the purpose and scope -->
## Directory Structure
<!-- If the directory has subdirectories, show them here -->
```
[directory-name]/
├── subdirectory1/ # Description
├── subdirectory2/ # Description
└── file.ext # Description
```
## Key Files
<!-- Highlight the most important files and their roles -->
- **file1.ext**: Description of what this file does
- **file2.ext**: Description of what this file does
## Important Patterns
<!-- Document conventions, patterns, or standards used -->
### Pattern Name
Description and example of the pattern.
## Dependencies
### Internal Dependencies
<!-- What this code depends on from elsewhere in the codebase -->
- `src/path/to/dependency` - Description
### External Dependencies
<!-- Third-party libraries or services -->
- Package Name - Purpose
### Used By
<!-- What other parts of the codebase depend on this -->
- `src/path/to/dependent` - How it uses this code
## Usage
<!-- How to work with this code, with examples -->
Example usage:
```typescript
// Code example
```
## Testing
<!-- How to test this code -->
```bash
npm run test:this-area
```
## Notes
<!-- Any gotchas, known issues, or additional context -->
- Important note 1
- Important note 2
---
*Last Updated: YYYY-MM-DD*

View File

@@ -0,0 +1,105 @@
# tests/[test-type]/
<!-- Replace [test-type] with: unit, integration, e2e, etc. -->
This directory contains [test type] tests for [what is being tested].
## Overview
<!-- Explain what these tests cover and their purpose -->
## Test Structure
```
[test-type]/
├── subdirectory1/ # Tests for X
├── subdirectory2/ # Tests for Y
└── helpers/ # Test utilities and fixtures
```
## Running Tests
```bash
# All tests in this directory
npm run test:[test-type]
# Specific test file
npm run test:[test-type] -- path/to/test.test.ts
# Watch mode
npm run test:[test-type]:watch
```
## Test Environment
<!-- Describe the test environment setup -->
These tests use:
- **Database**: [Description]
- **External Services**: [How mocked/handled]
- **Test Data**: [Where fixtures are located]
## Patterns
### Test Structure
<!-- Show the typical test structure -->
```typescript
describe('Feature Name', () => {
beforeEach(() => {
// Setup
});
afterEach(() => {
// Cleanup
});
it('should do something', () => {
// Arrange
// Act
// Assert
});
});
```
### Fixtures and Factories
<!-- How to use test data -->
```typescript
import { fixtures } from './fixtures';
const user = fixtures.user();
const admin = fixtures.user({ role: 'admin' });
```
## Common Patterns
<!-- Document common testing patterns used -->
- Pattern 1: Description
- Pattern 2: Description
## Troubleshooting
<!-- Common issues and solutions -->
### Issue 1
Problem description and solution.
### Issue 2
Problem description and solution.
## Notes
<!-- Additional context -->
- Important note about test conventions
- Known limitations
---
*Last Updated: YYYY-MM-DD*

View File

@@ -0,0 +1,355 @@
# Context Manager Mode
This guide helps you operate in **Context Manager Mode** - a specialized mode where your primary role is maintaining accurate, actionable context intelligence about a codebase.
## Your Role as Context Manager
When working in context manager mode, you are:
- **Proactive**: Anticipate context needs before being asked
- **Vigilant**: Monitor for staleness and inaccuracies
- **Surgical**: Update only what's needed, preserve what's accurate
- **Communicative**: Explain actions and recommendations clearly
- **Autonomous**: Make decisions within defined boundaries
## Operating Mindset
### Think Like an Intelligence Officer
Your job is to maintain **operational intelligence** that helps the primary development work succeed. You're not writing documentation - you're maintaining cognitive maps.
**Key questions to ask:**
- Will this information help me (Claude) work faster here?
- Is this actionable or just descriptive?
- Is this current or will it mislead?
- Is this dense enough to justify the tokens?
### Proactive Behaviors
1. **When you first enter a codebase**: Run monitor.py to assess health
2. **When code changes**: Check if affected context needs updating
3. **When patterns emerge**: Document them immediately
4. **When you struggle**: Note it as a signal context is inadequate
5. **Before finishing**: Verify context is current
## Autonomous Workflows
### Workflow 1: Monitoring Loop
When asked to "monitor" or "maintain" context:
```bash
# 1. Initial health check
python scripts/monitor.py /path/to/repo
# 2. Based on results, prioritize actions:
# - Critical/High: Update immediately
# - Medium: Schedule for review
# - Low: Continue monitoring
# 3. For items needing update:
python scripts/auto_update.py /path/to/directory --analyze-only
# 4. Review suggestions, then update:
python scripts/auto_update.py /path/to/directory
# 5. Verify update:
python scripts/validate_claude_md.py /path/to/directory/claude.md
```
### Workflow 2: Change-Responsive Update
When code changes in a directory with context:
```python
# Decision tree:
if significant_changes_detected():
if context_exists():
run_auto_update()
else:
run_generate_context()
verify_accuracy()
report_actions()
```
### Workflow 3: Proactive Discovery
When exploring new areas:
```python
# As you navigate:
if directory_seems_important() and not has_context():
note_for_context_creation()
# Periodically:
run_scan_repo()
identify_missing_context()
prioritize_by_importance()
```
## Decision Authority
### You CAN decide autonomously:
**Update context when:**
- Staleness score is critical (score > 4)
- You just made code changes affecting patterns
- You discover inaccuracies while working
- TODO markers remain and you have info to fill them
**Generate new context when:**
- Directory has 3+ significant files and no context
- You struggled to understand the directory
- Patterns are clear and worth documenting
**Mark for review when:**
- Staleness is high but you're unsure what changed
- Context exists but seems incorrect
- Significant refactor occurred
### You SHOULD ask first:
⚠️ **Before:**
- Deleting existing context
- Major restructuring of context
- Updating context that's < 7 days old
- Making bulk updates to many files
## Communication Patterns
### When Monitoring
**Good:**
> I checked context health across the repo. Found 3 files needing attention:
> - src/api/ (critical - 45 days old, 23 commits)
> - src/services/ (high - 30 days old, 15 commits)
> - tests/integration/ (medium - 20 days old, 8 commits)
>
> I'll start with src/api/. Should I proceed with all three?
**Bad:**
> I ran a script and there are some issues.
### When Updating
**Good:**
> Updated src/api/claude.md:
> - Added new rate limiting pattern (introduced in last sprint)
> - Updated middleware chain (auth-jwt.ts now handles tokens)
> - Removed reference to deprecated cors-handler.ts
>
> Context is now current with HEAD.
**Bad:**
> Updated the file.
### When Suggesting
**Good:**
> I noticed src/utils/ has grown to 12 files but has no context. Based on my analysis:
> - Mix of string helpers, date formatters, validation utils
> - No clear pattern - might benefit from reorganization
> - Should I create context as-is, or would you like to refactor first?
**Bad:**
> You should add context to src/utils/.
## Context Quality Standards
### Actionable Content
Every section should answer: "What does this tell Claude to DO differently?"
❌ **Not actionable:**
```markdown
## Overview
This directory contains services.
```
✅ **Actionable:**
```markdown
## Service Pattern
**Structure**: Class-based with constructor DI
**Rules**:
- All async methods (no sync operations)
- Throw domain-specific errors (never return error objects)
- Transaction-aware (accept optional `trx` parameter)
**Example**:
```typescript
class UserService {
constructor(private db: DB, private logger: Logger) {}
async getUser(id: string, trx?: Transaction): Promise<User> { ... }
}
```
```
### Dense Information
Use token budget efficiently - every sentence should add value.
❌ **Not dense:**
```markdown
## Overview
This is the API directory. It contains all the API-related code. The API
is an important part of our application. It handles requests from the
frontend and communicates with the backend services.
```
✅ **Dense:**
```markdown
## API Layer
**Framework**: Express 4.x
**Pattern**: Route → Validator → Service → Serializer
**Rules**: No direct DB access, asyncHandler wrapper required
**Entry**: index.ts registers all routes
```
### Current Information
Context must reflect current reality, not history.
**Historical:**
```markdown
We used to use MySQL but migrated to PostgreSQL in 2023.
```
**Current:**
```markdown
**Database**: PostgreSQL 15
**ORM**: Prisma
**Migrations**: prisma/migrations/
```
## Handling Uncertainty
### When Unsure About Changes
```python
if unsure_about_impact():
run_analyze_only()
present_findings()
request_confirmation()
else:
update_autonomously()
report_action()
```
### When Context Conflicts with Code
```python
if code_contradicts_context():
verify_code_is_source_of_truth()
update_context_to_match()
note_the_discrepancy()
```
### When Patterns are Unclear
```python
if pattern_unclear():
note_uncertainty_in_context()
provide_examples_observed()
mark_for_human_review()
```
## Continuous Improvement
### Learn from Usage
When you find yourself repeatedly:
- Looking for information that's not in context → Add it
- Confused by outdated context → Trigger more frequent updates
- Generating similar code → Document the pattern
### Measure Effectiveness
Track (mentally):
- How often you reference context files
- How often context helps vs. misleads
- How much time saved by good context
### Iterate
After completing work:
1. Quick context health check
2. Update what you learned
3. Note what would help next time
## Integration with Development Work
### Context Awareness During Development
While coding, maintain awareness:
```python
# As you work:
if entering_new_directory():
check_for_context()
note_if_missing()
if discovering_pattern():
check_context_documents_it()
update_if_missing()
if finding_gotcha():
immediately_add_to_context()
```
### Context Handoff
Before finishing a session:
1. Quick scan: `python scripts/monitor.py .`
2. Update critical items
3. Note remaining medium/low items
4. Leave breadcrumbs for next session
## Example Session
```
[User asks to add feature to API]
1. Check context: Read src/api/claude.md
2. Work on feature: Follow patterns in context
3. Notice new pattern: Middleware chaining changed
4. Update context: Add new middleware pattern
5. Verify: Run validation
6. Report: "Feature added, context updated with new middleware pattern"
[Result: Feature works correctly AND context stays current]
```
## Anti-Patterns to Avoid
❌ **Passive monitoring**: Waiting to be asked
✅ **Active monitoring**: Regularly checking health
❌ **Bulk updates**: Updating everything at once
✅ **Targeted updates**: Update what matters most
❌ **Over-documentation**: Writing essays
✅ **Dense intelligence**: Every word counts
❌ **Ignoring staleness**: "It's probably fine"
✅ **Vigilant maintenance**: Trust the metrics
❌ **Silent operation**: Just doing things
✅ **Communicative operation**: Explaining actions
## Success Metrics
You're doing well when:
- Context helps you work faster
- Updates are small and frequent (not big and rare)
- You rarely encounter outdated information
- New contributors can onboard quickly
- Code generation follows patterns correctly
## Remember
Context management is not about perfect documentation - it's about **maintaining cognitive maps that multiply your effectiveness**. Every context file should make you faster, more accurate, and more pattern-aware.
Your goal: Make future-Claude work better in this codebase.

View File

@@ -0,0 +1,694 @@
# Claude.md Examples
This file provides concrete examples of well-structured `claude.md` files for different types of directories.
## Example 1: Source Code Directory
### src/services/
```markdown
# src/services/
This directory contains the business logic layer that sits between API routes
and the database. Services handle complex operations, business rules, and
orchestrate multiple database operations.
## Purpose
Services provide:
- Business logic implementation
- Transaction management
- Data validation and transformation
- Integration with external APIs
- Caching logic
## Directory Structure
```
services/
├── user-service.ts # User management and authentication
├── order-service.ts # Order processing and fulfillment
├── payment-service.ts # Payment processing with Stripe
├── email-service.ts # Email sending via SendGrid
└── cache-service.ts # Redis caching utilities
```
```
## Key Patterns
### Service Class Structure
All services follow this pattern:
```typescript
export class UserService {
constructor(
private db: Database,
private cache: CacheService,
private logger: Logger
) {}
async getUser(id: string): Promise<User> {
// 1. Check cache
// 2. Query database if cache miss
// 3. Transform data
// 4. Update cache
// 5. Return result
}
}
```
### Error Handling
Services throw domain-specific errors:
- `NotFoundError` - Resource doesn't exist
- `ValidationError` - Business rule violation
- `ConflictError` - Duplicate or conflicting resource
- `ExternalServiceError` - Third-party API failure
### Transaction Management
Multi-step operations use database transactions:
```typescript
async transferFunds(from: string, to: string, amount: number) {
return this.db.transaction(async (trx) => {
await this.debit(from, amount, trx);
await this.credit(to, amount, trx);
});
}
```
## Dependencies
### Internal
- `src/database/` - Database client and models
- `src/lib/logger` - Logging utilities
- `src/lib/config` - Configuration management
### External
- Stripe SDK - Payment processing
- SendGrid - Email delivery
- Redis - Caching layer
## Testing
Service tests are in `tests/services/`:
```bash
npm run test:services
```
Each service has:
- Unit tests with mocked dependencies
- Integration tests with test database
## Notes
- Services never directly access HTTP request/response objects
- All external API calls should have timeout and retry logic
- Cache invalidation happens within the service that modifies data
```
---
## Example 2: Test Directory
### tests/integration/
```markdown
# tests/integration/
Integration tests that verify multiple components working together, including
database operations, API endpoints, and external service integrations.
## Overview
These tests use a real test database and mock external services. They run
slower than unit tests but provide higher confidence in system behavior.
## Structure
```
integration/
├── api/ # Full API endpoint tests
├── services/ # Service layer with real database
├── workflows/ # End-to-end user workflows
└── fixtures/ # Shared test data and utilities
```
```
## Running Tests
```bash
# All integration tests (takes ~5 minutes)
npm run test:integration
# Specific test file
npm run test:integration -- api/user-endpoints.test.ts
# Watch mode for development
npm run test:integration:watch
```
## Test Environment
Integration tests use:
- **Database**: PostgreSQL test database (auto-created/destroyed)
- **Cache**: In-memory Redis
- **External APIs**: Mocked using `nock`
- **Time**: Frozen at `2025-01-01T00:00:00Z` using `timekeeper`
## Patterns
### Test Structure
```typescript
describe('User Registration Flow', () => {
beforeAll(async () => {
await setupTestDatabase();
});
afterAll(async () => {
await teardownTestDatabase();
});
beforeEach(async () => {
await clearTestData();
});
it('should create user and send welcome email', async () => {
// Arrange
const userData = fixtures.newUser();
// Act
const response = await api.post('/users', userData);
// Assert
expect(response.status).toBe(201);
expect(emailMock).toHaveBeenCalledWith(
expect.objectContaining({ to: userData.email })
);
});
});
```
### Fixtures
Use shared fixtures for consistency:
```typescript
import { fixtures } from './fixtures';
const user = fixtures.user(); // Random valid user
const admin = fixtures.user({ role: 'admin' }); // Admin user
```
## Database Management
### Test Database Setup
Test database is created automatically but can be manually reset:
```bash
npm run test:db:reset
```
### Migrations
Integration tests run against the latest schema. If migrations change:
1. Stop running tests
2. Run `npm run test:db:reset`
3. Restart tests
## Common Issues
- **Timeout errors**: Increase timeout for slow operations
```typescript
it('slow operation', async () => {
// ...
}, 10000); // 10 second timeout
```
- **Port conflicts**: Tests use port 3001. Ensure nothing else uses it.
- **Database locks**: Tests should clean up connections. If stuck:
```bash
npm run test:db:kill-connections
```
## Notes
- Integration tests should NOT make real external API calls
- Each test should be independent (no shared state)
- Use transactions for faster test data cleanup when possible
```
---
## Example 3: Configuration Directory
### config/
```markdown
# config/
Configuration management for all environments (development, staging, production).
Uses environment variables with typed validation and sensible defaults.
## Overview
Configuration is loaded from:
1. Environment variables (highest priority)
2. `.env` file (development only)
3. Default values (fallback)
## Files
- **index.ts**: Main config loader and validation
- **schema.ts**: Zod schemas for type-safe config
- **defaults.ts**: Default values for all settings
- **database.ts**: Database connection configuration
- **redis.ts**: Redis connection configuration
- **stripe.ts**: Stripe API configuration
## Usage
```typescript
import { config } from '@/config';
// Type-safe access
const dbUrl = config.database.url;
const stripeKey = config.stripe.apiKey;
// Environment check
if (config.isProduction) {
// Production-specific logic
}
```
## Environment Variables
### Required (No Defaults)
These MUST be set in production:
```bash
DATABASE_URL=postgresql://user:pass@host:5432/db
REDIS_URL=redis://host:6379
STRIPE_API_KEY=sk_live_xxxxx
JWT_SECRET=your-secret-key
```
### Optional (With Defaults)
```bash
# Server
PORT=3000
HOST=0.0.0.0
NODE_ENV=development
# Logging
LOG_LEVEL=info
LOG_FORMAT=json
# Features
ENABLE_RATE_LIMITING=true
ENABLE_ANALYTICS=false
```
### Development Setup
Copy example file:
```bash
cp .env.example .env
```
Then edit `.env` with your local values.
## Validation
Config is validated on startup using Zod schemas. Invalid config causes
immediate application failure with clear error messages:
```
❌ Configuration Error:
- DATABASE_URL is required
- STRIPE_API_KEY must start with 'sk_'
- PORT must be a number between 1024 and 65535
```
## Adding New Config
1. Add schema to `schema.ts`:
```typescript
export const newFeatureSchema = z.object({
enabled: z.boolean().default(false),
apiKey: z.string().min(1),
});
```
2. Add to main config schema in `index.ts`
3. Update `.env.example` with documentation
4. Add to TypeScript types (auto-generated from schema)
## Security
- **Never commit .env files** (in .gitignore)
- **Never log sensitive config values**
- **Use separate keys per environment**
- **Rotate secrets regularly**
## Notes
- Config is loaded once at startup and cached
- Changes require application restart
- Use feature flags for runtime configuration changes
```
---
## Example 4: API Routes Directory
### src/api/routes/
```markdown
# src/api/routes/
HTTP route handlers that process requests, validate input, call services,
and format responses. All routes follow RESTful conventions.
## Structure
Routes are organized by resource:
```
routes/
├── users.ts # User CRUD operations
├── posts.ts # Post management
├── comments.ts # Comment operations
├── auth.ts # Authentication endpoints
└── health.ts # Health check endpoints
```
```
## Route Pattern
Every route file exports a router:
```typescript
import { Router } from 'express';
import { authenticate, validate } from '../middleware';
import { userService } from '@/services';
import { createUserSchema, updateUserSchema } from '@/validators';
const router = Router();
// GET /users/:id
router.get('/:id',
authenticate,
asyncHandler(async (req, res) => {
const user = await userService.getUser(req.params.id);
res.json(serialize.user(user));
})
);
// POST /users
router.post('/',
validate(createUserSchema),
asyncHandler(async (req, res) => {
const user = await userService.createUser(req.body);
res.status(201).json(serialize.user(user));
})
);
export default router;
```
## Conventions
### HTTP Methods
- `GET` - Retrieve resources (idempotent)
- `POST` - Create new resources
- `PUT` - Replace entire resource
- `PATCH` - Partial update
- `DELETE` - Remove resource
### Status Codes
- `200` - Success with body
- `201` - Resource created
- `204` - Success without body
- `400` - Bad request (validation error)
- `401` - Unauthorized (not logged in)
- `403` - Forbidden (logged in but no permission)
- `404` - Not found
- `409` - Conflict (duplicate resource)
- `500` - Server error
### URL Structure
- Plural nouns: `/users`, `/posts`
- Resource IDs: `/users/123`
- Nested resources: `/posts/123/comments`
- Actions as verbs: `/users/123/activate`
## Middleware Order
1. **Global middleware** (in app.ts)
- Logging
- CORS
- Body parsing
2. **Route-specific middleware**
- Authentication
- Validation
- Rate limiting
3. **Handler**
- Business logic call
- Response formatting
## Response Format
All JSON responses follow this structure:
```typescript
// Success
{
"data": { /* resource or array */ },
"meta": { /* pagination, etc */ }
}
// Error
{
"error": {
"code": "VALIDATION_ERROR",
"message": "Invalid input",
"details": [/* field errors */]
}
}
```
## Adding New Routes
1. Create route file in this directory
2. Define handlers with validation
3. Register in `src/api/index.ts`:
```typescript
app.use('/api/resource', resourceRoutes);
```
4. Add tests in `tests/api/`
5. Update OpenAPI spec in `docs/openapi.yaml`
## Testing Routes
```bash
# All route tests
npm run test:routes
# Specific route file
npm run test:routes -- users.test.ts
```
## Dependencies
- **Express Router** - Routing
- **express-validator** - Input validation
- **Services** - Business logic (in `src/services/`)
- **Serializers** - Response formatting (in `src/serializers/`)
```
---
## Example 5: Database/Models Directory
### src/models/
```markdown
# src/models/
TypeScript interfaces and Prisma schema definitions that define the data
models used throughout the application.
## Overview
Models are the single source of truth for data structure. They are used by:
- Database operations (Prisma)
- API validation
- Service layer type checking
- Response serialization
## Structure
```
models/
├── user.ts # User model and types
├── post.ts # Post model and types
├── comment.ts # Comment model and types
└── index.ts # Re-exports all models
```
```
The actual database schema is in `prisma/schema.prisma`.
## Model Pattern
```typescript
// Database model (from Prisma)
export type User = {
id: string;
email: string;
passwordHash: string;
createdAt: Date;
updatedAt: Date;
};
// API representation (subset, no sensitive fields)
export type PublicUser = Omit<User, 'passwordHash'>;
// Creation input
export type CreateUserInput = {
email: string;
password: string;
};
// Update input (all optional)
export type UpdateUserInput = Partial<{
email: string;
password: string;
}>;
```
## Database Schema
The source of truth is `prisma/schema.prisma`:
```prisma
model User {
id String @id @default(uuid())
email String @unique
passwordHash String
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
posts Post[]
}
```
Generate TypeScript types after schema changes:
```bash
npm run prisma:generate
```
## Relationships
- **User** → has many → **Post**
- **Post** → has many → **Comment**
- **Comment** → belongs to → **User**
Query with relations:
```typescript
const user = await prisma.user.findUnique({
where: { id },
include: { posts: true }
});
```
## Migrations
After modifying `schema.prisma`:
```bash
# Create migration
npm run prisma:migrate:dev
# Apply to production
npm run prisma:migrate:deploy
```
## Type Generation
After database changes, regenerate Prisma types:
```bash
npm run prisma:generate
```
This updates types in `node_modules/.prisma/client/` used throughout the app.
## Best Practices
- Keep models focused (single responsibility)
- Use explicit types for inputs/outputs
- Never expose `passwordHash` or other sensitive fields
- Use unions for status enums:
```typescript
export type PostStatus = 'draft' | 'published' | 'archived';
```
## Notes
- Models are pure data structures (no methods)
- Business logic belongs in services, not models
- Use Prisma for all database operations (no raw SQL unless necessary)
```
---
## Template: Generic Directory
For directories that don't fit other categories:
```markdown
# [directory-name]/
[One sentence describing what this directory contains]
## Overview
[2-3 sentences explaining the purpose and scope of this directory]
## Structure
[If complex, show directory tree with brief descriptions]
## Key Files
[Highlight 3-5 most important files and their roles]
## Important Patterns
[Document any conventions, patterns, or standards used here]
## Dependencies
[What this depends on and what depends on this]
## Usage
[How to work with code in this directory, with examples if helpful]
## Notes
[Any gotchas, known issues, or additional context]
```
Use this template as a starting point and customize based on the specific directory's needs.

View File

@@ -0,0 +1,316 @@
# Claude.md Structure Guide
This guide explains what makes an effective `claude.md` file and how to structure documentation for maximum utility when working with Claude.
## Purpose of claude.md Files
A `claude.md` file serves as context documentation that helps Claude (and humans) understand:
1. **What this directory contains** - Purpose and scope
2. **How code is organized** - Structure and patterns
3. **Key relationships** - Dependencies and connections
4. **Important context** - Gotchas, conventions, and decisions
## Core Principles
### 1. Be Specific and Concrete
**Avoid vague descriptions:**
```markdown
## Overview
This directory contains utilities.
```
**Be specific:**
```markdown
## Overview
This directory contains database utility functions for connection pooling,
query building, and transaction management. These utilities are used across
all services to ensure consistent database interactions.
```
### 2. Prioritize Actionable Information
Focus on information that helps someone understand how to work with the code:
- **Entry points** - Where to start reading/editing
- **Key patterns** - How things are typically done
- **Dependencies** - What relies on what
- **Gotchas** - Non-obvious behavior or limitations
### 3. Keep it Current
A stale `claude.md` is worse than none at all. Include:
- Maintenance notes ("Last updated: 2025-10-21")
- Who maintains this area
- How to update the documentation
## Recommended Structure
### Essential Sections
#### 1. Header and Overview (Required)
Start with a clear title and 1-3 sentence overview:
```markdown
# src/api/
This directory implements the REST API layer, handling HTTP requests,
validation, and response formatting. All endpoints follow OpenAPI 3.0
specification defined in `openapi.yaml`.
```
#### 2. Directory Structure (If Complex)
Show the layout with brief descriptions:
```markdown
## Directory Structure
```
api/
├── routes/ # Route handlers grouped by resource
├── middleware/ # Express middleware (auth, validation, logging)
├── validators/ # Request/response validation schemas
└── serializers/ # Response formatting utilities
```
```
#### 3. Key Files (If Applicable)
Highlight important files and their roles:
```markdown
## Key Files
- **index.ts**: API server initialization and middleware setup
- **routes.ts**: Central route registration
- **error-handler.ts**: Global error handling middleware
- **openapi.yaml**: API specification (source of truth for endpoints)
```
#### 4. Important Patterns (Critical)
Document conventions and patterns used in this directory:
```markdown
## Important Patterns
### Route Handler Structure
All route handlers follow this pattern:
1. Validate request (using Zod schemas in `validators/`)
2. Call service layer (never direct database access)
3. Serialize response (using serializers)
4. Handle errors (throw specific error types)
### Error Handling
- Use `ApiError` class for all API errors
- Status codes: 400 (validation), 401 (auth), 404 (not found), 500 (server)
- Errors are caught by global error handler middleware
### Naming Conventions
- Routes: kebab-case (`/user-profiles`)
- Files: kebab-case (`user-profile-routes.ts`)
- Handlers: camelCase (`getUserProfile`)
```
#### 5. Dependencies (Important)
Explain relationships with other parts of the codebase:
```markdown
## Dependencies
### Imports From
- `src/services/` - Business logic layer
- `src/models/` - Data models and types
- `src/lib/` - Shared utilities (logger, config)
### Used By
- All services make HTTP requests to these endpoints
- Frontend application at `/frontend`
### External Dependencies
- Express.js for routing
- Zod for validation
- OpenAPI Tools for spec validation
```
#### 6. Usage/Getting Started (For Complex Areas)
Help someone get started quickly:
```markdown
## Usage
### Adding a New Endpoint
1. Define the route in `openapi.yaml`
2. Create handler in `routes/{resource}-routes.ts`
3. Add validation schema in `validators/{resource}-validator.ts`
4. Add serializer in `serializers/{resource}-serializer.ts`
5. Register route in `routes.ts`
Example:
[Include minimal working example]
### Testing
Run API tests:
```bash
npm run test:api
```
Test single endpoint:
```bash
npm run test:api -- --grep "GET /users"
```
```
### Optional Sections
#### Architecture Decisions
Document important "why" decisions:
```markdown
## Architecture Decisions
### Why Express over Fastify?
- Team familiarity
- Better TypeScript support at time of decision
- Larger middleware ecosystem
### Why Separate Validators?
- Reusable across routes and tests
- Type inference for request/response objects
- Easier to maintain OpenAPI spec sync
```
#### Gotchas and Known Issues
Save others from pain:
```markdown
## Gotchas
- **Async Handler Wrapping**: All async route handlers must be wrapped with
`asyncHandler()` or errors won't be caught properly
- **Query Parameter Parsing**: Express doesn't parse nested query params by
default. Use `qs` library for complex queries.
- **Rate Limiting**: Applied at middleware level, not per-route. See
`middleware/rate-limiter.ts` for configuration.
```
## What NOT to Include
Avoid these common pitfalls:
**Don't duplicate what's obvious from code:**
```markdown
## Files
- user.ts - Contains user-related code
- product.ts - Contains product-related code
```
**Don't write tutorials for basics:**
```markdown
## What is an API?
An API is an Application Programming Interface...
```
**Don't include things that change frequently:**
```markdown
## Team
Current maintainers:
- Alice (alice@company.com) - Lead
- Bob (bob@company.com) - Backend
[This will be stale immediately]
```
**Instead, link to maintained sources:**
```markdown
## Maintenance
See [CODEOWNERS](../../CODEOWNERS) for current maintainers.
```
## Hierarchy and Inheritance
### Root claude.md
The root `claude.md` provides high-level project context:
```markdown
# Project Name
## Overview
[High-level description of the entire project]
## Architecture
[System architecture overview]
## Directory Guide
- `/src` - Application source code ([claude.md](src/claude.md))
- `/tests` - Test suite ([claude.md](tests/claude.md))
- `/docs` - Documentation ([claude.md](docs/claude.md))
## Getting Started
[Setup and development workflow]
```
### Child claude.md Files
Child files inherit context from parent, so avoid repetition:
```markdown
# src/api/routes/
Specific information about route handlers only. See parent
[src/api/claude.md](../claude.md) for overall API structure.
```
## Maintenance
### Keep It Fresh
Add maintenance metadata:
```markdown
---
Last Updated: 2025-10-21
Maintainer: API Team (@api-team)
---
```
### Update Triggers
Update `claude.md` when:
- Major architectural changes occur
- New patterns are introduced
- Important files are added/removed/renamed
- Gotchas are discovered
- Team conventions change
### Review Regularly
- Quarterly reviews of all `claude.md` files
- Update during major refactors
- Validate during onboarding (new team members test docs)
## Templates by Directory Type
See [examples.md](examples.md) for complete templates for:
- Source code directories
- Test directories
- API/Service layers
- Configuration directories
- Documentation directories
- Library/utility directories

View File

@@ -0,0 +1,355 @@
#!/usr/bin/env python3
"""
Auto-Update Context - Intelligent Context Synchronization
Analyzes code changes and autonomously updates context files.
Designed to be run by Claude with minimal supervision.
Usage:
python auto_update.py <directory_path> [--analyze-only] [--verbose]
"""
import os
import sys
import argparse
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Set
import subprocess
import re
# Add lib to path for integration imports
repo_root = Path(__file__).resolve().parents[6] # Go up to repo root
sys.path.insert(0, str(repo_root / "lib"))
try:
from ccmp_integration import CCMPIntegration, is_session_active
INTEGRATION_AVAILABLE = True
except ImportError:
INTEGRATION_AVAILABLE = False
def get_recent_changes(dir_path: Path, since_days: int = 30) -> Dict:
    """Summarize files added/modified/deleted under *dir_path* in recent git history.

    Runs ``git diff --name-status HEAD~N HEAD`` where ``N = since_days * 4``,
    a rough "about four commits per day" heuristic — it approximates a time
    window, it does not measure one.

    Args:
        dir_path: Directory inside a git repository to inspect.
        since_days: Approximate number of days of history to cover.

    Returns:
        Dict with keys ``files_changed`` (flat list of paths), ``summary``
        (counts per status), and ``details`` (path lists per status). On any
        git failure an empty result with the same keys is returned, so callers
        never need to special-case errors.
    """
    empty = {
        'files_changed': [],
        'summary': {},
        'details': {'added': [], 'modified': [], 'deleted': []},
    }
    try:
        result = subprocess.run(
            ['git', 'diff', '--name-status', f'HEAD~{since_days*4}', 'HEAD', '--', str(dir_path)],
            cwd=dir_path,
            capture_output=True,
            text=True
        )
    except (OSError, ValueError, subprocess.SubprocessError):
        # git not installed, unusable cwd, etc. Previously a bare `except:`
        # here also swallowed KeyboardInterrupt/SystemExit; catch only real
        # subprocess failures and report "no changes known".
        return empty
    if result.returncode != 0:
        # Not a git repo, or not enough history for the HEAD~N reference.
        return empty
    added: List[str] = []
    modified: List[str] = []
    deleted: List[str] = []
    for change in result.stdout.strip().split('\n'):
        if not change:
            continue
        parts = change.split('\t', 1)
        if len(parts) != 2:
            continue
        status, filepath = parts
        # Status letters per git: A=added, M=modified, D=deleted.
        # Renames/copies (R/C) are intentionally ignored, as before.
        if status.startswith('A'):
            added.append(filepath)
        elif status.startswith('M'):
            modified.append(filepath)
        elif status.startswith('D'):
            deleted.append(filepath)
    return {
        'files_changed': added + modified + deleted,
        'summary': {
            'added': len(added),
            'modified': len(modified),
            'deleted': len(deleted)
        },
        'details': {
            'added': added,
            'modified': modified,
            'deleted': deleted
        }
    }
def analyze_code_patterns(dir_path: Path) -> Dict:
    """Survey the files directly inside *dir_path* for structural patterns.

    Counts file extensions, samples import statements from Python/JS/TS
    sources, and flags well-known frameworks via naive substring detection.
    Only the top level of the directory is scanned (no recursion); hidden
    files are skipped.

    Returns:
        Dict with 'file_types' (ext -> count), 'common_imports' (list of
        sampled module names), 'naming_patterns' (reserved, currently empty),
        and 'frameworks_detected' (list of framework names).
    """
    patterns = {
        'file_types': {},
        'common_imports': set(),
        'naming_patterns': [],
        'frameworks_detected': set()
    }
    for item in dir_path.iterdir():
        if not item.is_file() or item.name.startswith('.'):
            continue
        ext = item.suffix
        patterns['file_types'][ext] = patterns['file_types'].get(ext, 0) + 1
        if ext not in ('.py', '.js', '.ts', '.jsx', '.tsx'):
            continue
        try:
            content = item.read_text()
        except (OSError, UnicodeDecodeError):
            # Unreadable or non-UTF-8 file: skip it, keep scanning the rest.
            # (Previously a bare `except:` here also masked interrupts.)
            continue
        lowered = content.lower()
        if ext == '.py':
            # Sample the first few top-level import targets.
            imports = re.findall(r'^\s*(?:from|import)\s+([a-zA-Z_][a-zA-Z0-9_]*)', content, re.MULTILINE)
            patterns['common_imports'].update(imports[:5])  # Top 5
            # Substring check is intentionally loose: any mention counts.
            if 'fastapi' in lowered:
                patterns['frameworks_detected'].add('FastAPI')
            if 'flask' in lowered:
                patterns['frameworks_detected'].add('Flask')
        else:
            # JavaScript/TypeScript: catch both ES imports and require().
            imports = re.findall(r'(?:from|require\()\s*[\'"]([^\'\"]+)', content)
            patterns['common_imports'].update(imports[:5])
            if 'react' in lowered:
                patterns['frameworks_detected'].add('React')
            if 'express' in lowered:
                patterns['frameworks_detected'].add('Express')
            if 'vue' in lowered:
                patterns['frameworks_detected'].add('Vue')
    # Sets are unordered and not JSON-friendly; hand back plain lists.
    patterns['common_imports'] = list(patterns['common_imports'])
    patterns['frameworks_detected'] = list(patterns['frameworks_detected'])
    return patterns
def read_existing_context(context_file: Path) -> str:
    """Return the current contents of *context_file*, or '' if it does not exist."""
    return context_file.read_text() if context_file.exists() else ""
def needs_update(existing_context: str, current_patterns: Dict, recent_changes: Dict) -> Dict:
    """Decide whether the context file should be refreshed, and which sections.

    Heuristics: heavy recent churn, frameworks in the code that the doc never
    mentions, leftover TODO markers, or a near-empty doc all trigger an update.

    Returns:
        Dict with 'should_update' (bool), 'reasons' (list of str), and
        'sections_to_update' (list of section names, may repeat).
    """
    reasons: List[str] = []
    sections: List[str] = []

    # Heavy churn: more than five files touched recently.
    summary = recent_changes['summary']
    total_changes = sum(summary.get(key, 0) for key in ('added', 'modified', 'deleted'))
    if total_changes > 5:
        reasons.append(f'{total_changes} files changed')
        sections.extend(['File Types', 'Key Files'])

    # Frameworks present in the code but absent from the doc text.
    for framework in current_patterns.get('frameworks_detected', []):
        if framework not in existing_context:
            reasons.append(f'New framework detected: {framework}')
            sections.append('Important Patterns')

    # Leftover TODO markers (covers '<!-- TODO' comments as well, since
    # that string contains 'TODO').
    if 'TODO' in existing_context:
        reasons.append('Context has TODO markers')
        sections.append('All incomplete sections')

    # Non-empty but very short context is probably a stub.
    if existing_context and len(existing_context) < 200:
        reasons.append('Context is minimal')
        sections.append('Overview')

    # Every trigger above records a reason, so reasons being non-empty is
    # exactly the "should update" condition.
    return {
        'should_update': bool(reasons),
        'reasons': reasons,
        'sections_to_update': sections,
    }
def generate_updated_sections(existing_context: str, current_patterns: Dict, recent_changes: Dict) -> Dict:
    """Build suggested markdown bodies for context sections.

    Produces up to three entries — 'File Types', 'Frameworks', and
    'Recent Changes' — each mapped to ready-to-insert markdown text.
    *existing_context* is accepted for interface symmetry but is not
    currently consulted.
    """
    suggestions: Dict[str, str] = {}

    # One bullet per file extension, sorted for stable output. The bracketed
    # placeholder is left for a human (or Claude) to fill in.
    if current_patterns['file_types']:
        rows = [
            f"- **{ext}** ({count} files): [Describe purpose of these files]"
            for ext, count in sorted(current_patterns['file_types'].items())
        ]
        suggestions['File Types'] = "\n".join(rows)

    # Simple bullet list of detected frameworks.
    if current_patterns['frameworks_detected']:
        rows = ["**Frameworks in use:**"]
        rows.extend(f"- {fw}" for fw in current_patterns['frameworks_detected'])
        suggestions['Frameworks'] = "\n".join(rows)

    # Only statuses with a non-zero count are mentioned.
    if recent_changes['summary']:
        counts = recent_changes['summary']
        rows = ["**Recent activity:**"]
        if counts.get('added'):
            rows.append(f"- {counts['added']} files added")
        if counts.get('modified'):
            rows.append(f"- {counts['modified']} files modified")
        if counts.get('deleted'):
            rows.append(f"- {counts['deleted']} files deleted")
        suggestions['Recent Changes'] = "\n".join(rows)

    return suggestions
def format_update_report(dir_path: Path, update_analysis: Dict, suggestions: Dict, analyze_only: bool) -> str:
    """Render a human/Claude-readable report of the update analysis.

    Args:
        dir_path: Directory that was analyzed.
        update_analysis: Output of needs_update() — flags, reasons, sections.
        suggestions: Output of generate_updated_sections(); only shown when
            an update is recommended.
        analyze_only: True when running in read-only mode (affects header only).

    Returns:
        A multi-line report string framed by '=' rules.
    """
    lines = []
    lines.append("=" * 70)
    lines.append("CONTEXT UPDATE ANALYSIS")
    lines.append("=" * 70)
    lines.append(f"\nDirectory: {dir_path}")
    lines.append(f"Timestamp: {datetime.now().isoformat()}")
    lines.append(f"\nMode: {'ANALYZE ONLY' if analyze_only else 'UPDATE READY'}")
    if update_analysis['should_update']:
        lines.append("\n✅ UPDATE RECOMMENDED")
        lines.append("\nReasons:")
        for reason in update_analysis['reasons']:
            lines.append(f"  • {reason}")
        lines.append("\nSections to update:")
        for section in update_analysis['sections_to_update']:
            lines.append(f"  • {section}")
        # Suggested section bodies are only relevant when updating.
        if suggestions:
            lines.append("\n" + "=" * 70)
            lines.append("SUGGESTED UPDATES")
            lines.append("=" * 70)
            for section_name, content in suggestions.items():
                lines.append(f"\n## {section_name}\n")
                lines.append(content)
    else:
        lines.append("\n✓ Context appears current")
        lines.append("No immediate updates needed")
    lines.append("\n" + "=" * 70)
    return "\n".join(lines)
def update_context_file(context_file: Path, suggestions: Dict, existing_context: str) -> bool:
    """Append suggested sections to the context file, preserving existing text.

    Existing content is kept verbatim, a dated separator is appended, and then
    every suggested section whose name does not already appear anywhere in the
    current text is added as a new '## <name>' section.

    Returns:
        True if the file was written successfully, False otherwise.
    """
    stamp = datetime.now().strftime("%Y-%m-%d")
    parts = [existing_context, "\n\n---\n*Updated: {}*\n".format(stamp)]
    for section_name, content in suggestions.items():
        # Naive de-duplication: a plain substring match anywhere in the
        # existing text counts as "already documented".
        if section_name in existing_context:
            continue
        parts.append(f"\n## {section_name}\n\n{content}\n")
    try:
        context_file.write_text("".join(parts))
        return True
    except Exception as e:
        print(f"Error writing context file: {e}")
        return False
def main():
    """CLI entry point: analyze a directory and optionally update its claude.md.

    Exits 1 on invalid directory or failed write; otherwise exits 0.
    """
    parser = argparse.ArgumentParser(
        description='Autonomously update context based on code changes'
    )
    parser.add_argument('directory', type=str, help='Directory to analyze')
    parser.add_argument(
        '--analyze-only',
        action='store_true',
        help='Only analyze, do not update'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='Verbose output'
    )
    parser.add_argument(
        '--force',
        action='store_true',
        help='Force update even if no changes detected'
    )
    args = parser.parse_args()
    dir_path = Path(args.directory).resolve()
    if not dir_path.exists() or not dir_path.is_dir():
        print(f"Error: Invalid directory: {dir_path}")
        sys.exit(1)
    context_file = dir_path / 'claude.md'
    # Analyze current state
    print("Analyzing directory..." if args.verbose else "", end='')
    recent_changes = get_recent_changes(dir_path)
    current_patterns = analyze_code_patterns(dir_path)
    existing_context = read_existing_context(context_file)
    print(" Done." if args.verbose else "")
    # Determine if update needed
    update_analysis = needs_update(existing_context, current_patterns, recent_changes)
    if args.force:
        update_analysis['should_update'] = True
        update_analysis['reasons'].append('Forced update')
    # Generate suggestions
    suggestions = generate_updated_sections(existing_context, current_patterns, recent_changes)
    # Output report
    report = format_update_report(dir_path, update_analysis, suggestions, args.analyze_only)
    print(report)
    # Perform update if not analyze-only
    if update_analysis['should_update'] and not args.analyze_only:
        print("\nUpdating context file...")
        if update_context_file(context_file, suggestions, existing_context):
            print(f"✅ Updated: {context_file}")
            # BIDIRECTIONAL SYNC: Notify session if active
            # NOTE(review): dir_path.relative_to(repo_root) raises ValueError
            # when the target directory is outside the repo; the broad except
            # below absorbs that, so the notification is silently skipped.
            if INTEGRATION_AVAILABLE and is_session_active():
                try:
                    integration = CCMPIntegration()
                    session_state = integration.get_state("session-management")
                    if session_state:
                        print(f"\n📝 Active session detected - context update logged")
                        print(f"   Session: {session_state.get('branch', 'unknown')}")
                        print(f"   Updated: {dir_path.relative_to(repo_root)}/claude.md")
                        # Update context manager state
                        integration.update_state("claude-context-manager", {
                            "last_update": datetime.now().isoformat(),
                            "last_updated_path": str(dir_path.relative_to(repo_root))
                        })
                except Exception as e:
                    # Don't fail the whole update if logging fails
                    if args.verbose:
                        print(f"   (Session logging failed: {e})")
        else:
            print(f"❌ Failed to update: {context_file}")
            sys.exit(1)
    # NOTE(review): both branches of this conditional exit with 0, so the
    # exit code never reflects whether an update happened — confirm whether
    # a distinct code (e.g. 2 for "update performed") was intended.
    sys.exit(0 if update_analysis['should_update'] else 0)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,226 @@
#!/usr/bin/env python3
"""
Claude.md Index Creator
Creates or updates a master index of all claude.md files in a repository.
Useful for understanding the documentation structure at a glance.
Usage:
python create_index.py <repo_path> [--output FILE] [--format FORMAT]
Examples:
python create_index.py /path/to/repo
python create_index.py /path/to/repo --output CLAUDE_INDEX.md
python create_index.py /path/to/repo --format tree
"""
import os
import sys
import argparse
from pathlib import Path
from typing import List, Dict
import re
def find_claude_md_files(root_path: Path) -> List[Path]:
    """Locate every claude.md under *root_path*, returned in sorted order.

    Hidden directories and common dependency/build folders are pruned from
    the walk so they are never descended into.
    """
    skip = {'node_modules', '__pycache__', 'venv', 'env', 'dist', 'build'}
    found = []
    for dirpath, dirnames, filenames in os.walk(root_path):
        # Prune in place so os.walk does not descend into ignored trees.
        dirnames[:] = [
            name for name in dirnames
            if not name.startswith('.') and name not in skip
        ]
        if 'claude.md' in filenames:
            found.append(Path(dirpath) / 'claude.md')
    return sorted(found)
def extract_title_and_overview(file_path: Path) -> Dict[str, str]:
    """Extract the H1 title and first overview line from a claude.md file.

    Args:
        file_path: Path to the claude.md file to read.

    Returns:
        Dict with 'title' (first ``# `` heading, or 'Untitled') and
        'overview' (first non-empty, non-comment, non-heading line after an
        ``Overview``/``Purpose`` heading, or a placeholder). On any read
        error both fields describe the failure instead of raising.
    """
    try:
        # Explicit UTF-8: claude.md files routinely contain non-ASCII
        # characters (emoji, arrows), and the platform-default codec
        # (e.g. cp1252 on Windows) would raise on them.
        content = file_path.read_text(encoding='utf-8')
        lines = content.split('\n')
        # Title: first H1 header.
        title = None
        for line in lines:
            if line.startswith('# '):
                title = line[2:].strip()
                break
        # Overview: first substantive line after an Overview/Purpose heading.
        overview = None
        in_overview = False
        for line in lines:
            if re.match(r'^##?\s+(Overview|Purpose)', line, re.IGNORECASE):
                in_overview = True
                continue
            if in_overview:
                stripped = line.strip()
                # Skip blanks and HTML comments; any '#'-prefixed line is a
                # heading, not prose.
                if stripped and not stripped.startswith('<!--') and not stripped.startswith('#'):
                    overview = stripped
                    break
                # A new section before any prose means there is no overview text.
                if stripped.startswith('##'):
                    break
        return {
            'title': title or 'Untitled',
            'overview': overview or 'No overview available'
        }
    except Exception as e:
        return {
            'title': 'Error reading file',
            'overview': str(e)
        }
def create_tree_format(root_path: Path, files: List[Path]) -> str:
    """Create a tree-style index.

    Each claude.md becomes a folder entry indented by its directory depth,
    with the document's title on the following line.

    Args:
        root_path: Repository root; used to relativize file paths.
        files: Absolute paths to claude.md files (as from find_claude_md_files).

    Returns:
        Markdown text for the whole index.
    """
    lines = ["# Claude.md Index", "", "Repository documentation structure:", ""]
    # Group files by directory depth
    for file_path in files:
        rel_path = file_path.relative_to(root_path)
        dir_path = rel_path.parent
        # Calculate depth
        depth = len(dir_path.parts)
        indent = "  " * depth
        # Get metadata
        metadata = extract_title_and_overview(file_path)
        # Format entry
        dir_display = str(dir_path) if str(dir_path) != '.' else '(root)'
        lines.append(f"{indent}📁 **{dir_display}** ([claude.md]({rel_path}))")
        lines.append(f"{indent}   {metadata['title']}")
        lines.append("")
    return "\n".join(lines)
def create_table_format(root_path: Path, files: List[Path]) -> str:
    """Create a markdown-table index: one row per claude.md file."""
    header = [
        "# Claude.md Index",
        "",
        "| Directory | Title | Overview |",
        "|-----------|-------|----------|"
    ]
    rows = []
    for file_path in files:
        rel_path = file_path.relative_to(root_path)
        dir_path = rel_path.parent
        dir_display = '(root)' if str(dir_path) == '.' else str(dir_path)
        metadata = extract_title_and_overview(file_path)
        # Keep cells readable: cap the overview at 80 characters.
        overview = metadata['overview']
        if len(overview) > 80:
            overview = overview[:77] + "..."
        # Literal pipes would break the table layout, so escape them.
        title = metadata['title'].replace('|', '\\|')
        overview = overview.replace('|', '\\|')
        rows.append(f"| [{dir_display}]({rel_path}) | {title} | {overview} |")
    return "\n".join(header + rows)
def create_detailed_format(root_path: Path, files: List[Path]) -> str:
    """Create a detailed numbered index: one section per claude.md file."""
    lines = ["# Claude.md Index", "", "Complete documentation map for this repository.", ""]
    for i, file_path in enumerate(files, 1):
        rel_path = file_path.relative_to(root_path)
        dir_display = '(root)' if str(rel_path.parent) == '.' else str(rel_path.parent)
        metadata = extract_title_and_overview(file_path)
        # Each entry: heading, file link, title, overview, then a divider.
        lines.extend([
            f"## {i}. {dir_display}",
            "",
            f"**File:** [{rel_path}]({rel_path})",
            "",
            f"**Title:** {metadata['title']}",
            "",
            f"**Overview:** {metadata['overview']}",
            "",
            "---",
            "",
        ])
    return "\n".join(lines)
def main():
    """CLI entry point: scan a repository and write a claude.md index file."""
    parser = argparse.ArgumentParser(
        description='Create an index of all claude.md files in a repository'
    )
    parser.add_argument(
        'repo_path',
        type=str,
        help='Path to repository root'
    )
    parser.add_argument(
        '--output',
        type=str,
        default='CLAUDE_INDEX.md',
        help='Output filename (default: CLAUDE_INDEX.md)'
    )
    parser.add_argument(
        '--format',
        type=str,
        choices=['tree', 'table', 'detailed'],
        default='tree',
        help='Index format (default: tree)'
    )
    args = parser.parse_args()

    repo_path = Path(args.repo_path).resolve()
    # Diagnostics go to stderr so stdout stays clean for scripting.
    if not repo_path.exists():
        print(f"Error: Path does not exist: {repo_path}", file=sys.stderr)
        sys.exit(1)
    if not repo_path.is_dir():
        print(f"Error: Path is not a directory: {repo_path}", file=sys.stderr)
        sys.exit(1)

    print(f"Scanning repository: {repo_path}")
    files = find_claude_md_files(repo_path)
    if not files:
        print("No claude.md files found in repository.")
        sys.exit(0)
    print(f"Found {len(files)} claude.md file(s)")

    # Dispatch on the requested format; argparse guarantees a valid choice.
    formatters = {
        'tree': create_tree_format,
        'table': create_table_format,
        'detailed': create_detailed_format,
    }
    content = formatters[args.format](repo_path, files)

    output_path = repo_path / args.output
    # Explicit encoding keeps the output stable across platforms.
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(content)
    print(f"✅ Created index: {output_path}")


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,265 @@
#!/usr/bin/env python3
"""
Claude.md Generator
Analyzes a directory and generates an appropriate claude.md file with context
about the directory's purpose, structure, and key files.
Usage:
python generate_claude_md.py <directory_path> [--output FILE] [--analyze-depth N]
Examples:
python generate_claude_md.py /path/to/src
python generate_claude_md.py /path/to/tests --output claude.md
python generate_claude_md.py /path/to/api --analyze-depth 1
"""
import os
import sys
import argparse
from pathlib import Path
from collections import defaultdict
from typing import List, Dict, Set
import subprocess
# Directory names that should never be descended into or counted.
IGNORE_DIRS = {
    '.git', '.github', 'node_modules', '__pycache__', '.pytest_cache',
    'venv', 'env', '.venv', 'dist', 'build', '.egg-info', 'coverage'
}


def analyze_directory(dir_path: Path, depth: int = 0, max_depth: int = 1) -> Dict:
    """Summarize the immediate contents of *dir_path*.

    Returns a dict with files grouped by extension, child directory names,
    a total file count, and any recognized "key" files.
    NOTE(review): `depth` and `max_depth` are accepted but the scan reads
    only direct children — confirm whether recursion was intended.
    """
    summary = {
        'path': dir_path,
        'files_by_type': defaultdict(list),
        'subdirs': [],
        'total_files': 0,
        'key_files': []
    }

    # Well-known entry points / manifests worth calling out individually.
    notable = {
        'README.md', 'README.rst', 'README.txt',
        'main.py', 'app.py', 'index.py', '__init__.py',
        'index.js', 'index.ts', 'main.js', 'main.ts',
        'package.json', 'setup.py', 'pyproject.toml',
        'Cargo.toml', 'pom.xml', 'build.gradle',
        'Makefile', 'CMakeLists.txt'
    }

    try:
        entries = list(dir_path.iterdir())
    except PermissionError:
        # Unreadable directory: report it as empty rather than failing.
        return summary

    for entry in entries:
        # Skip hidden entries except a short allow-list of dotfiles.
        if entry.name.startswith('.') and entry.name not in {'.gitignore', '.env.example'}:
            continue
        if entry.is_file():
            summary['total_files'] += 1
            summary['files_by_type'][entry.suffix or 'no_extension'].append(entry.name)
            if entry.name in notable:
                summary['key_files'].append(entry.name)
        elif entry.is_dir() and entry.name not in IGNORE_DIRS:
            summary['subdirs'].append(entry.name)

    return summary
def infer_directory_purpose(dir_name: str, analysis: Dict) -> str:
    """Guess a human-readable purpose for a directory.

    The directory name is matched against well-known naming conventions
    first; failing that, the purpose is inferred from the files the
    directory contains. Falls back to the generic 'implementation'.
    """
    dir_name_lower = dir_name.lower()
    # Conventional directory names, checked as substrings (so 'unit_tests'
    # still matches 'tests'). First match wins, so order matters.
    purposes = {
        'src': 'source code',
        'lib': 'library code',
        'app': 'application code',
        'api': 'API implementation',
        'tests': 'test suite',
        'test': 'test suite',
        'docs': 'documentation',
        'documentation': 'documentation',
        'scripts': 'utility scripts',
        'utils': 'utility functions',
        'helpers': 'helper functions',
        'models': 'data models',
        'views': 'view templates',
        'controllers': 'controllers',
        'routes': 'route definitions',
        'components': 'reusable components',
        'services': 'service layer',
        'middleware': 'middleware functions',
        'config': 'configuration files',
        'public': 'public assets',
        'static': 'static assets',
        'assets': 'static assets',
        'migrations': 'database migrations',
        'fixtures': 'test fixtures',
        'examples': 'example code',
    }
    for pattern, purpose in purposes.items():
        if pattern in dir_name_lower:
            return purpose

    # Fall back to file-based inference. Inspect the actual filenames
    # instead of substring-matching str(defaultdict) as before — the old
    # check depended on the dict's repr formatting and was fragile.
    all_files = [name for names in analysis['files_by_type'].values() for name in names]
    if any('.test.' in name or name.startswith('test_') for name in all_files):
        return 'test suite'
    file_types = set(analysis['files_by_type'].keys())
    if any('.md' in ext or '.rst' in ext for ext in file_types):
        return 'documentation'
    return 'implementation'
def generate_claude_md(dir_path: Path, analyze_depth: int = 1) -> str:
    """Build the text of a claude.md template for *dir_path*.

    The template is pre-populated with whatever can be inferred from the
    directory (purpose, structure, key files, file types); everything else
    is left as TODO placeholders for a human to fill in.
    """
    analysis = analyze_directory(dir_path, max_depth=analyze_depth)
    dir_name = dir_path.name if dir_path.name else 'root'
    purpose = infer_directory_purpose(dir_name, analysis)

    out = [
        f"# {dir_name}/",
        "",
        f"This directory contains the {purpose}.",
        "",
        "## Overview",
        "",
        "<!-- TODO: Add detailed description of what this directory contains and its role in the project -->",
        "",
    ]

    subdirs = analysis['subdirs']
    if subdirs:
        out += ["## Directory Structure", "", "```", f"{dir_name}/"]
        # Show at most ten children to keep the tree readable.
        for child in sorted(subdirs)[:10]:
            out.append(f"├── {child}/")
        if len(subdirs) > 10:
            out.append(f"└── ... ({len(subdirs) - 10} more)")
        out += ["```", ""]

    if analysis['key_files']:
        out += ["## Key Files", ""]
        for name in sorted(analysis['key_files']):
            out.append(f"- **{name}**: <!-- TODO: Describe purpose -->")
        out.append("")

    if analysis['files_by_type']:
        out += ["## File Types", ""]
        for ext, names in sorted(analysis['files_by_type'].items()):
            if ext != 'no_extension':
                out.append(f"- **{ext}** ({len(names)} files): <!-- TODO: Describe purpose -->")
        out.append("")

    # Fixed trailing sections are always emitted as placeholders.
    out += [
        "## Important Patterns",
        "",
        "<!-- TODO: Document key patterns, conventions, or architectural decisions -->",
        "",
        "- Pattern 1: Description",
        "- Pattern 2: Description",
        "",
        "## Dependencies",
        "",
        "<!-- TODO: List key dependencies or relationships with other parts of the codebase -->",
        "",
        "## Usage",
        "",
        "<!-- TODO: Explain how to use or interact with code in this directory -->",
        "",
        "## Notes",
        "",
        "<!-- TODO: Add any additional context, gotchas, or important information -->",
        "",
    ]
    return "\n".join(out)
def main():
    """CLI entry point: generate a claude.md template for a directory."""
    parser = argparse.ArgumentParser(
        description='Generate claude.md file for a directory'
    )
    parser.add_argument(
        'directory',
        type=str,
        help='Path to directory'
    )
    parser.add_argument(
        '--output',
        type=str,
        default='claude.md',
        help='Output filename (default: claude.md)'
    )
    parser.add_argument(
        '--analyze-depth',
        type=int,
        default=1,
        help='How deep to analyze subdirectories (default: 1)'
    )
    parser.add_argument(
        '--force',
        action='store_true',
        help='Overwrite existing claude.md file'
    )
    args = parser.parse_args()

    dir_path = Path(args.directory).resolve()
    # Diagnostics go to stderr so stdout stays clean for scripting.
    if not dir_path.exists():
        print(f"Error: Directory does not exist: {dir_path}", file=sys.stderr)
        sys.exit(1)
    if not dir_path.is_dir():
        print(f"Error: Path is not a directory: {dir_path}", file=sys.stderr)
        sys.exit(1)

    output_path = dir_path / args.output
    if output_path.exists() and not args.force:
        print(f"Error: {output_path} already exists. Use --force to overwrite.",
              file=sys.stderr)
        sys.exit(1)

    print(f"Analyzing directory: {dir_path}")
    content = generate_claude_md(dir_path, args.analyze_depth)
    # Explicit encoding keeps the template stable across platforms.
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(content)

    print(f"✅ Generated {output_path}")
    print(f"\nNext steps:")
    print(f"1. Review the generated file and fill in TODO sections")
    print(f"2. Add specific details about the directory's purpose")
    print(f"3. Document key patterns and conventions")


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,306 @@
#!/usr/bin/env python3
"""
Context Monitor - Autonomous Context Health Checker
This script is designed to be run by Claude autonomously to monitor
context health and identify what needs attention.
Outputs structured data that Claude can interpret and act on.
Usage:
python monitor.py <repo_path> [--format json|text]
"""
import os
import sys
import json
import argparse
from pathlib import Path
from datetime import datetime, timedelta
from typing import List, Dict, Optional
import subprocess
def get_git_last_modified(file_path: Path) -> Optional[datetime]:
    """Return the last git commit time touching *file_path*, or None.

    None is returned when the file is untracked, the directory is not a
    git repository, git is not installed, or the date cannot be parsed.
    """
    try:
        result = subprocess.run(
            ['git', 'log', '-1', '--format=%ai', str(file_path)],
            cwd=file_path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0 and result.stdout.strip():
            # %ai yields e.g. '2024-01-31 12:00:00 +0100'; drop the offset
            # so fromisoformat() parses it (result is naive local time).
            return datetime.fromisoformat(result.stdout.strip().rsplit(' ', 1)[0])
    except (OSError, ValueError):
        # Was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
        # OSError: git missing/unrunnable; ValueError: unparsable date.
        pass
    return None
def get_directory_last_modified(dir_path: Path) -> Optional[datetime]:
    """Return the last git commit time touching anything under *dir_path*.

    Returns None when the path is outside a git repository, has no
    history, git is unavailable, or the date cannot be parsed.
    """
    try:
        result = subprocess.run(
            ['git', 'log', '-1', '--format=%ai', '--', str(dir_path)],
            cwd=dir_path if dir_path.is_dir() else dir_path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0 and result.stdout.strip():
            # %ai includes a UTC offset; strip it for fromisoformat().
            return datetime.fromisoformat(result.stdout.strip().rsplit(' ', 1)[0])
    except (OSError, ValueError):
        # Was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
        # OSError: git missing/unrunnable; ValueError: unparsable date.
        pass
    return None
def count_commits_since(path: Path, since_date: datetime) -> int:
    """Count git commits touching *path* since *since_date* (0 on any failure)."""
    try:
        result = subprocess.run(
            ['git', 'rev-list', '--count', f'--since={since_date.isoformat()}', 'HEAD', '--', str(path)],
            cwd=path if path.is_dir() else path.parent,
            capture_output=True,
            text=True
        )
        if result.returncode == 0:
            return int(result.stdout.strip())
    except (OSError, ValueError):
        # Was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
        # OSError: git missing/unrunnable; ValueError: non-numeric output.
        pass
    return 0
def calculate_staleness_score(context_age_days: int, commits_since_update: int) -> Dict:
    """Combine age and commit churn into a staleness score with a priority.

    Each input is normalized to a 0-3 sub-score (age saturates at 90 days,
    churn at 30 commits); their sum maps onto priority/action bands.
    """
    age_score = min(context_age_days / 30, 3)         # 3 == 90+ days old
    commit_score = min(commits_since_update / 10, 3)  # 3 == 30+ commits
    total = age_score + commit_score

    # Thresholds map the combined 0-6 score onto action bands.
    if total >= 4:
        priority, action = 'critical', 'UPDATE_NOW'
    elif total >= 2.5:
        priority, action = 'high', 'UPDATE_SOON'
    elif total >= 1.5:
        priority, action = 'medium', 'REVIEW'
    else:
        priority, action = 'low', 'MONITOR'

    return {
        'score': round(total, 2),
        'priority': priority,
        'action': action,
        'age_score': round(age_score, 2),
        'commit_score': round(commit_score, 2)
    }
def find_claude_md_files(root_path: Path) -> List[Path]:
    """Return every claude.md under *root_path*, sorted by path.

    Hidden directories and common build/dependency directories are pruned
    from the walk.
    """
    skip = {'node_modules', '__pycache__', 'venv', 'env', 'dist', 'build'}
    found = []
    for dirpath, dirnames, filenames in os.walk(root_path):
        # Prune in place so os.walk never descends into skipped trees.
        dirnames[:] = [d for d in dirnames if not d.startswith('.') and d not in skip]
        if 'claude.md' in filenames:
            found.append(Path(dirpath) / 'claude.md')
    return sorted(found)
def analyze_context_file(file_path: Path, root_path: Path) -> Dict:
    """Build a staleness report for one claude.md file.

    Compares the context file's last update (git history, falling back to
    the filesystem mtime) against commit activity in its directory.
    """
    now = datetime.now()

    # When git has no history for the file, fall back to filesystem mtime.
    last_updated = get_git_last_modified(file_path)
    if not last_updated:
        last_updated = datetime.fromtimestamp(file_path.stat().st_mtime)

    directory = file_path.parent
    dir_modified = get_directory_last_modified(directory)

    age_days = (now - last_updated).days
    churn = count_commits_since(directory, last_updated)
    staleness = calculate_staleness_score(age_days, churn)

    rel_path = file_path.relative_to(root_path)
    return {
        'path': str(rel_path),
        'directory': str(rel_path.parent),
        'context_age_days': age_days,
        'context_last_updated': last_updated.isoformat(),
        'directory_last_modified': dir_modified.isoformat() if dir_modified else None,
        'commits_since_update': churn,
        'staleness': staleness,
        'needs_attention': staleness['action'] in ('UPDATE_NOW', 'UPDATE_SOON')
    }
def monitor_repository(repo_path: Path) -> Dict:
    """Analyze every claude.md in a repository and summarize context health."""
    files = find_claude_md_files(repo_path)
    if not files:
        return {
            'status': 'no_context_files',
            'message': 'No claude.md files found in repository',
            'files': []
        }

    analyses = [analyze_context_file(f, repo_path) for f in files]

    # Bucket each analysis by its computed priority.
    buckets = {'critical': [], 'high': [], 'medium': [], 'low': []}
    for entry in analyses:
        buckets[entry['staleness']['priority']].append(entry)

    # Health: 100 is perfect; each point of average staleness costs 20.
    avg_staleness = sum(a['staleness']['score'] for a in analyses) / len(analyses)
    health_score = max(0, 100 - (avg_staleness * 20))

    return {
        'status': 'analyzed',
        'timestamp': datetime.now().isoformat(),
        'repository': str(repo_path),
        'summary': {
            'total_files': len(analyses),
            'critical': len(buckets['critical']),
            'high': len(buckets['high']),
            'medium': len(buckets['medium']),
            'low': len(buckets['low']),
            'health_score': round(health_score, 1)
        },
        'files': buckets,
        'recommendations': generate_recommendations(
            buckets['critical'], buckets['high'], buckets['medium']
        )
    }
def generate_recommendations(critical: List, high: List, medium: List) -> List[str]:
    """Turn the priority buckets into human-readable action items for Claude."""
    recs = []
    if critical:
        sample = ', '.join(entry['directory'] for entry in critical[:3])
        recs.append(
            f"IMMEDIATE ACTION: {len(critical)} context file(s) are critically stale. "
            f"Update: {sample}"
        )
    if high:
        sample = ', '.join(entry['directory'] for entry in high[:3])
        recs.append(
            f"HIGH PRIORITY: {len(high)} context file(s) need updating soon. "
            f"Review: {sample}"
        )
    if medium:
        recs.append(
            f"MEDIUM PRIORITY: {len(medium)} context file(s) should be reviewed. "
            f"Consider updating when convenient."
        )
    # Reassure when nothing urgent was found.
    if not (critical or high):
        recs.append("All context files are reasonably current. Continue monitoring.")
    return recs
def format_text_output(data: Dict) -> str:
    """Render monitor results as a human-readable report for Claude."""
    rule = "=" * 70
    out = [rule, "CONTEXT HEALTH MONITOR", rule]

    if data['status'] == 'no_context_files':
        out.append(f"\n{data['message']}")
        return "\n".join(out)

    summary = data['summary']
    out.append(f"\nRepository: {data['repository']}")
    out.append(f"Timestamp: {data['timestamp']}")
    out.append(f"\n📊 Health Score: {summary['health_score']}/100")
    out.append(f"\n📁 Context Files: {summary['total_files']}")
    # Only show the priority counters that are non-zero.
    for key, label in (('critical', '🔴 Critical'), ('high', '🟠 High'),
                       ('medium', '🟡 Medium'), ('low', '🟢 Low')):
        if summary[key]:
            out.append(f"   {label}: {summary[key]}")

    out += ["\n" + rule, "RECOMMENDATIONS", rule]
    for idx, rec in enumerate(data['recommendations'], 1):
        out.append(f"\n{idx}. {rec}")

    # Expand detail only for files that actually need work.
    needs_attention = data['files']['critical'] + data['files']['high']
    if needs_attention:
        out += ["\n" + rule, "DETAILS - FILES NEEDING ATTENTION", rule]
        for item in needs_attention:
            out.append(f"\n📁 {item['directory']}")
            out.append(f"   Path: {item['path']}")
            out.append(f"   Age: {item['context_age_days']} days")
            out.append(f"   Commits since update: {item['commits_since_update']}")
            out.append(f"   Priority: {item['staleness']['priority'].upper()}")
            out.append(f"   Action: {item['staleness']['action']}")

    out.append("\n" + rule)
    return "\n".join(out)
def main():
    """CLI entry point: report context health and exit with a status code.

    Exit codes: 0 = healthy, 1 = high-priority staleness, 2 = critical.
    """
    parser = argparse.ArgumentParser(
        description='Monitor context health and identify stale files'
    )
    parser.add_argument('repo_path', type=str, help='Repository path')
    parser.add_argument(
        '--format',
        choices=['json', 'text'],
        default='text',
        help='Output format (default: text)'
    )
    args = parser.parse_args()

    repo_path = Path(args.repo_path).resolve()
    if not repo_path.exists():
        # Diagnostics go to stderr so stdout stays machine-readable (json mode).
        print(f"Error: Repository path does not exist: {repo_path}", file=sys.stderr)
        sys.exit(1)

    # Analyze repository
    results = monitor_repository(repo_path)

    # Output results
    if args.format == 'json':
        print(json.dumps(results, indent=2))
    else:
        print(format_text_output(results))

    # Exit code communicates health to callers (e.g. CI or Claude itself).
    if results['status'] == 'analyzed':
        if results['summary']['critical'] > 0:
            sys.exit(2)  # Critical issues
        elif results['summary']['high'] > 0:
            sys.exit(1)  # High priority issues
    sys.exit(0)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,169 @@
#!/usr/bin/env python3
"""
Repository Scanner for claude.md Management
Analyzes repository structure and suggests where claude.md files should exist.
Helps identify directories that need documentation.
Usage:
python scan_repo.py <repo_path> [--min-files N] [--show-existing]
Examples:
python scan_repo.py /path/to/repo
python scan_repo.py /path/to/repo --min-files 3
python scan_repo.py /path/to/repo --show-existing
"""
import os
import sys
import argparse
from pathlib import Path
from typing import List, Dict, Set
# Directories to ignore while walking the tree.
IGNORE_DIRS = {
    '.git', '.github', 'node_modules', '__pycache__', '.pytest_cache',
    'venv', 'env', '.venv', 'dist', 'build', '.egg-info', 'coverage',
    '.tox', '.mypy_cache', '.ruff_cache', 'target', 'bin', 'obj'
}

# File extensions to consider when calculating "significance"
SIGNIFICANT_EXTENSIONS = {
    '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go', '.rs', '.cpp',
    '.c', '.h', '.hpp', '.cs', '.rb', '.php', '.swift', '.kt', '.scala',
    '.sh', '.bash', '.md', '.yaml', '.yml', '.json', '.toml', '.xml'
}


def scan_directory(root_path: Path, min_files: int = 2) -> Dict:
    """
    Scan directory tree and identify directories that should have claude.md files.

    Args:
        root_path: Root directory to scan
        min_files: Minimum number of significant files to warrant a claude.md

    Returns:
        Dict with 'needs_claude_md' and 'has_claude_md' directory lists
        plus a 'stats' counter block.
    """
    results = {
        'needs_claude_md': [],
        'has_claude_md': [],
        'stats': {
            'total_dirs': 0,
            'dirs_scanned': 0,
            'significant_dirs': 0
        }
    }
    for dirpath, dirnames, filenames in os.walk(root_path):
        # Filter out ignored directories (in place, so os.walk skips them).
        dirnames[:] = [d for d in dirnames if d not in IGNORE_DIRS]
        results['stats']['total_dirs'] += 1
        current_path = Path(dirpath)
        has_claude_md = 'claude.md' in filenames
        # Count files whose extension marks them as "significant";
        # claude.md itself never counts toward the threshold.
        significant_files = [
            f for f in filenames
            if Path(f).suffix in SIGNIFICANT_EXTENSIONS and f != 'claude.md'
        ]
        is_significant = len(significant_files) >= min_files
        if is_significant:
            results['stats']['significant_dirs'] += 1
        results['stats']['dirs_scanned'] += 1
        rel_path = current_path.relative_to(root_path)
        dir_info = {
            'path': str(rel_path) if str(rel_path) != '.' else '(root)',
            'file_count': len(significant_files),
            'file_types': sorted(set(Path(f).suffix for f in significant_files))
        }
        if has_claude_md:
            results['has_claude_md'].append(dir_info)
        elif is_significant:
            # BUG FIX: previously every directory — even trivial ones below
            # the min_files threshold — was reported as needing a claude.md,
            # which made --min-files ineffective for the report. Only
            # significant directories are flagged now.
            results['needs_claude_md'].append(dir_info)
    return results
def print_results(results: Dict, show_existing: bool = False):
    """Pretty-print scan results; optionally include already-covered dirs."""
    stats = results['stats']
    banner = "=" * 70

    def _show(dir_info: Dict) -> None:
        # One stanza per directory: path, file count, extension list.
        print(f"\n📁 {dir_info['path']}")
        print(f"   Files: {dir_info['file_count']}")
        print(f"   Types: {', '.join(dir_info['file_types'])}")

    print("\n" + banner)
    print("REPOSITORY SCAN RESULTS")
    print(banner)
    print(f"\n📊 Statistics:")
    print(f"   Total directories: {stats['total_dirs']}")
    print(f"   Significant directories: {stats['significant_dirs']}")
    print(f"   Directories with claude.md: {len(results['has_claude_md'])}")
    print(f"   Directories needing claude.md: {len(results['needs_claude_md'])}")

    if results['needs_claude_md']:
        print(f"\n❌ Directories that should have claude.md:")
        print("-" * 70)
        for dir_info in results['needs_claude_md']:
            _show(dir_info)

    if show_existing and results['has_claude_md']:
        print(f"\n✅ Directories with existing claude.md:")
        print("-" * 70)
        for dir_info in results['has_claude_md']:
            _show(dir_info)

    print("\n" + banner)
def main():
    """CLI entry point: scan a repository and report claude.md coverage."""
    parser = argparse.ArgumentParser(
        description='Scan repository for claude.md file coverage'
    )
    parser.add_argument(
        'repo_path',
        type=str,
        help='Path to repository root'
    )
    parser.add_argument(
        '--min-files',
        type=int,
        default=2,
        help='Minimum significant files to warrant a claude.md (default: 2)'
    )
    parser.add_argument(
        '--show-existing',
        action='store_true',
        help='Show directories that already have claude.md files'
    )
    args = parser.parse_args()

    repo_path = Path(args.repo_path).resolve()
    # Diagnostics go to stderr so stdout stays clean for scripting.
    if not repo_path.exists():
        print(f"Error: Path does not exist: {repo_path}", file=sys.stderr)
        sys.exit(1)
    if not repo_path.is_dir():
        print(f"Error: Path is not a directory: {repo_path}", file=sys.stderr)
        sys.exit(1)

    print(f"Scanning repository: {repo_path}")
    results = scan_directory(repo_path, args.min_files)
    print_results(results, args.show_existing)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,247 @@
#!/usr/bin/env python3
"""
Claude.md Validator
Validates existing claude.md files for completeness, accuracy, and quality.
Checks for TODO markers, outdated information, and missing key sections.
Usage:
python validate_claude_md.py <path> [--strict] [--auto-fix]
Examples:
python validate_claude_md.py /path/to/repo
python validate_claude_md.py /path/to/src/claude.md
python validate_claude_md.py /path/to/repo --strict
"""
import os
import sys
import argparse
from pathlib import Path
from typing import List, Dict, Tuple
import re
# A claude.md must contain at least one of these top-level sections.
# NOTE(review): validate_claude_md() currently hard-codes the
# 'Overview|Purpose' check in its regex rather than reading this list —
# keep the two in sync.
REQUIRED_SECTIONS = [
    'Overview',
    'Purpose'  # Alternative to Overview
]

# Sections that are encouraged; their absence only produces warnings.
RECOMMENDED_SECTIONS = [
    'Directory Structure',
    'Key Files',
    'Important Patterns',
    'Dependencies',
    'Usage'
]


def validate_claude_md(file_path: Path, strict: bool = False) -> Dict:
    """Validate one claude.md file.

    Checks length, TODO markers, required/recommended sections, placeholder
    comments, thin sections, and relative links. In strict mode, TODOs and
    placeholders become errors instead of warnings.

    Returns a dict with 'valid' (bool), 'issues', 'warnings' and 'stats'.
    """
    issues = []
    warnings = []
    try:
        # Explicit encoding: generated templates contain non-ASCII (emoji).
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        return {
            'valid': False,
            'issues': [f"Could not read file: {e}"],
            'warnings': [],
            'stats': {}
        }
    lines = content.split('\n')

    # Check for empty or very short files
    if len(content.strip()) < 50:
        issues.append("File is too short (less than 50 characters)")

    # Check for TODO markers
    todo_count = len(re.findall(r'TODO|FIXME|XXX', content, re.IGNORECASE))
    if todo_count > 0:
        if strict:
            issues.append(f"Found {todo_count} TODO/FIXME markers")
        else:
            warnings.append(f"Found {todo_count} TODO/FIXME markers")

    # Check for required sections (see REQUIRED_SECTIONS above)
    has_overview = any(re.search(r'^##?\s+(Overview|Purpose)', line, re.IGNORECASE) for line in lines)
    if not has_overview:
        issues.append("Missing required section: Overview or Purpose")

    # Check for recommended sections
    found_sections = []
    for section in RECOMMENDED_SECTIONS:
        if any(re.search(rf'^##?\s+{section}', line, re.IGNORECASE) for line in lines):
            found_sections.append(section)
    missing_recommended = set(RECOMMENDED_SECTIONS) - set(found_sections)
    if missing_recommended and strict:
        warnings.append(f"Missing recommended sections: {', '.join(missing_recommended)}")

    # Check for placeholder text. Parentheses make the operator precedence
    # explicit (semantics unchanged): a literal '<!-- TODO' comment, OR the
    # generated template's 'Description' stub combined with a TODO marker.
    if '<!-- TODO' in content or ('Description' in content and 'TODO' in content):
        if strict:
            issues.append("Contains placeholder TODO comments that need completion")
        else:
            warnings.append("Contains placeholder TODO comments")

    # Check for minimal content in sections (split on '#'/'##' headers)
    sections = re.split(r'^##?\s+', content, flags=re.MULTILINE)[1:]
    for section in sections:
        lines_in_section = [l.strip() for l in section.split('\n')[1:] if l.strip() and not l.strip().startswith('<!--')]
        if len(lines_in_section) < 2:
            section_name = section.split('\n')[0]
            warnings.append(f"Section '{section_name}' has minimal content")

    # Check for broken relative links (existence only; anchors not checked)
    broken_link_pattern = r'\[([^\]]+)\]\(([^\)]+)\)'
    links = re.findall(broken_link_pattern, content)
    for link_text, link_url in links:
        if link_url.startswith('./') or link_url.startswith('../'):
            target_path = file_path.parent / link_url
            if not target_path.exists():
                warnings.append(f"Potentially broken relative link: {link_url}")

    stats = {
        'line_count': len(lines),
        'word_count': len(content.split()),
        'todo_count': todo_count,
        'sections_found': len(found_sections)
    }
    return {
        'valid': len(issues) == 0,
        'issues': issues,
        'warnings': warnings,
        'stats': stats
    }
def print_validation_results(results: Dict[Path, Dict], strict: bool):
    """Pretty-print validation results grouped by severity.

    `strict` is accepted for signature compatibility with main(); the
    grouping below is driven entirely by the per-file results.
    """
    banner = "=" * 70
    print("\n" + banner)
    print("CLAUDE.MD VALIDATION RESULTS")
    print(banner)

    total_files = len(results)
    valid_files = sum(1 for r in results.values() if r['valid'])
    files_with_warnings = sum(1 for r in results.values() if r['warnings'])

    print(f"\n📊 Summary:")
    print(f"   Total files checked: {total_files}")
    print(f"   Valid files: {valid_files}")
    print(f"   Files with issues: {total_files - valid_files}")
    print(f"   Files with warnings: {files_with_warnings}")

    # Failed files first, with their issues and any warnings.
    failed = {p: r for p, r in results.items() if not r['valid']}
    if failed:
        print(f"\n❌ Files with issues:")
        print("-" * 70)
        for file_path, result in failed.items():
            print(f"\n📄 {file_path}")
            for issue in result['issues']:
                print(f"   • {issue}")
            for warning in result['warnings']:
                print(f"   ⚠️ {warning}")

    # Then files that passed but still carry warnings.
    warned = {p: r for p, r in results.items() if r['valid'] and r['warnings']}
    if warned:
        print(f"\n⚠️ Files with warnings:")
        print("-" * 70)
        for file_path, result in warned.items():
            print(f"\n📄 {file_path}")
            for warning in result['warnings']:
                print(f"   ⚠️ {warning}")

    # Finally, the fully clean files.
    clean = {p: r for p, r in results.items() if r['valid'] and not r['warnings']}
    if clean:
        print(f"\n✅ Fully valid files:")
        print("-" * 70)
        for file_path in clean.keys():
            print(f"   📄 {file_path}")

    print("\n" + banner)
def main():
    """CLI entry point: validate claude.md files; exits non-zero on issues."""
    parser = argparse.ArgumentParser(
        description='Validate claude.md files'
    )
    parser.add_argument(
        'path',
        type=str,
        help='Path to directory or specific claude.md file'
    )
    parser.add_argument(
        '--strict',
        action='store_true',
        help='Enable strict validation (TODOs become errors)'
    )
    args = parser.parse_args()

    path = Path(args.path).resolve()
    # Diagnostics go to stderr so stdout stays clean for scripting.
    if not path.exists():
        print(f"Error: Path does not exist: {path}", file=sys.stderr)
        sys.exit(1)

    # Resolve the set of files to validate: a single claude.md or a tree.
    if path.is_file() and path.name == 'claude.md':
        files_to_validate = [path]
    elif path.is_dir():
        files_to_validate = find_claude_md_files(path)
        if not files_to_validate:
            print(f"No claude.md files found in {path}")
            sys.exit(0)
    else:
        print(f"Error: Path must be a directory or a claude.md file", file=sys.stderr)
        sys.exit(1)

    print(f"Validating {len(files_to_validate)} claude.md file(s)...")
    # Validate each file
    results = {}
    for file_path in files_to_validate:
        results[file_path] = validate_claude_md(file_path, strict=args.strict)

    print_validation_results(results, args.strict)

    # Non-zero exit when any file failed validation (CI-friendly).
    if any(not r['valid'] for r in results.values()):
        sys.exit(1)


if __name__ == '__main__':
    main()