commit b0bc2cf2e383b612bea89a43b56706907bfeabdf Author: Zhongwei Li Date: Sun Nov 30 08:48:32 2025 +0800 Initial commit diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..82a8f63 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,12 @@ +{ + "name": "claude-powerpack", + "description": "Essential productivity tools for Claude Code: expert consultation docs, code extraction, and more", + "version": "1.1.0", + "author": { + "name": "Propstreet", + "url": "https://github.com/propstreet" + }, + "skills": [ + "./skills" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..404739f --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# claude-powerpack + +Essential productivity tools for Claude Code: expert consultation docs, code extraction, and more diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..ec7f944 --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,73 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:propstreet/claude-powerpack:", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "ba3eff42a584087e83986cc5dab4084c7b918a27", + "treeHash": "48a82cf928344d7688cf9e43b6325904a97c4131c91e6381fe3e6680e58a902e", + "generatedAt": "2025-11-28T10:27:41.581588Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "claude-powerpack", + "description": "Essential productivity tools for Claude Code: expert consultation docs, code extraction, and more", + "version": "1.1.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "b28575106037ab7b5c2ae4455d4b06c55fffe14415c75cdbec8d54627671e7c0" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "fde1715bdfc982ab8b7bde16ac702cde8643790a9c45f8c352855c517a6c1e03" + }, + { + "path": "skills/ask-expert/EXAMPLES.md", + "sha256": "5139ae6c8e5717fdf46fb40fd0caa14665b50fb292dd2db427dfa5200892069b" + }, + { + "path": "skills/ask-expert/README.md", + "sha256": "bb70b3de1f8541e8d2333ff94a6911d7e0cd27814ec11db4139f8f705667afc9" + }, + { + "path": "skills/ask-expert/SKILL.md", + "sha256": "af2b1a4fc3d26133b8f4adcadc7b649a2e472d1de58a75b5ebc19e3b9f809157" + }, + { + "path": "skills/ask-expert/scripts/extract-code-example.json", + "sha256": "e1bb82a2c933e500cb2e291a7344bbdb2da640e7c537d693041debea26cfd1da" + }, + { + "path": "skills/ask-expert/scripts/extract-code.js", + "sha256": "0d8f5adacf2f44d7cf03b14e3de231de5b525fe467a3782c15fe9cb788b42b99" + }, + { + "path": "skills/update-pr/EXAMPLES.md", + "sha256": "0eaa732d2e0194f4f478f4029a222a5cf16ef2c9008188a8c55aa250caf9205c" + }, + { + "path": "skills/update-pr/README.md", + "sha256": "090f691ff2261ac2bfbdc669b940dd8385f08f89b2d6e79fca2db113722a8b4a" + }, + { + "path": "skills/update-pr/SKILL.md", + "sha256": "03a45e5c94fe086d0dbfd133d696499e76b63beb55bd8cdaee4d4db378c650ab" + } + ], + "dirSha256": "48a82cf928344d7688cf9e43b6325904a97c4131c91e6381fe3e6680e58a902e" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/ask-expert/EXAMPLES.md b/skills/ask-expert/EXAMPLES.md new file mode 100644 index 0000000..527bbb3 --- /dev/null +++ b/skills/ask-expert/EXAMPLES.md @@ -0,0 +1,391 
@@
+# Expert Consultation Examples
+
+Complete usage examples for the ask-expert skill.
+
+## Complete Workflow Examples
+
+### Example 1: Bug Investigation
+
+**Scenario**: JWT refresh token causing unexpected logouts
+
+````bash
+# 1. Create consultation document
+cat > auth-bug-consultation.md << 'EOF'
+# Expert Consultation: JWT Refresh Token Bug
+
+## 1. Problem
+Users are getting logged out unexpectedly after 15 minutes despite having valid refresh tokens.
+
+## 2. Our Solution
+Modified the token refresh logic in AuthService to use a sliding window approach instead of fixed expiration.
+
+## 3. Concerns
+- This couples authentication to session management
+- Might introduce race conditions with concurrent requests
+- Token refresh happens in middleware which feels wrong
+
+## 4. Alternatives
+- Separate auth service with dedicated refresh endpoint
+- Use Redis for session management
+- Switch to stateless JWTs
+
+## 5. Architecture Overview
+```
+┌─────────┐     ┌──────────────┐     ┌───────────┐
+│ Client  │────▶│  Middleware  │────▶│   Auth    │
+│         │◀────│  (Refresh)   │◀────│  Service  │
+└─────────┘     └──────────────┘     └───────────┘
+                       │
+                       ▼
+                ┌──────────┐
+                │  Token   │
+                │ Manager  │
+                └──────────┘
+```
+
+---
+# Complete Architecture Context
+EOF
+
+# 2. Extract code with size tracking
+node scripts/extract-code.js \
+  --track-size --output=auth-bug-consultation.md \
+  --section="What Changed" \
+  src/auth/AuthService.cs:diff \
+  --section="Auth Flow (COMPLETE)" \
+  src/auth/AuthController.cs \
+  src/auth/TokenManager.cs \
+  --section="Middleware" \
+  src/middleware/AuthMiddleware.cs \
+  --section="Tests" \
+  tests/auth/AuthFlowShould.cs:1-150
+
+# 3. Add expert questions
+cat >> auth-bug-consultation.md << 'EOF'
+
+---
+# Expert Guidance Request
+
+## Questions
+1. Does our sliding window approach introduce security risks?
+2. Better patterns for handling token refresh in middleware?
+3. How to test race conditions effectively?
+4. Should authentication and session management be separate concerns?
+
+## Success Criteria
+- Backward compatible with mobile clients
+- No data loss during token refresh
+- Clear security model
+- Testable solution
+
+**Please answer in English**
+EOF
+
+# 4. Verify size
+wc -c auth-bug-consultation.md
+````
+
+### Example 2: API Redesign
+
+**Scenario**: Need expert review of new REST API design
+
+```bash
+# Use config file for complex extraction
+cat > api-redesign-plan.json << 'EOF'
+{
+  "output": "api-redesign-consultation.md",
+  "trackSize": true,
+  "sections": [
+    {
+      "header": "Current API Design",
+      "files": [
+        "src/controllers/ApiController.cs",
+        "src/models/ApiRequest.cs",
+        "src/models/ApiResponse.cs"
+      ]
+    },
+    {
+      "header": "Service Layer",
+      "files": [
+        "src/services/ApiService.cs:1-200",
+        "src/services/ApiService.Validation.cs"
+      ]
+    },
+    {
+      "header": "Test Coverage",
+      "files": [
+        "tests/ApiControllerShould.cs:100-300"
+      ]
+    }
+  ]
+}
+EOF
+
+node scripts/extract-code.js \
+  --config=api-redesign-plan.json
+```
+
+### Example 3: Architecture Review
+
+**Scenario**: TypeScript strict mode migration
+
+```bash
+# 1. Write problem context
+cat > typescript-strict-consultation.md << 'EOF'
+# Expert Consultation: TypeScript Strict Mode Migration
+
+## 1. Problem
+Legacy codebase has `strict: false` in tsconfig.json. Need to enable strict mode without breaking production.
+
+## 2. Our Solution
+Incremental migration by file, starting with new code and migrating old files gradually.
+
+## 3.
Concerns +- 500+ files to migrate +- Some patterns don't work well with strict mode (dynamic property access) +- Team unfamiliar with strict mode patterns + +## 4. Alternatives +- Big bang migration with dedicated sprint +- Stay on non-strict mode indefinitely +- Use strict mode only for new files + +## 5. Architecture Overview +[Diagram showing file dependency graph] + +--- +# Complete Architecture Context +EOF + +# 2. Batch extract multiple files efficiently +node scripts/extract-code.js \ + --track-size --output=typescript-strict-consultation.md \ + --section="Type Definitions" \ + src/types/payloads.ts src/types/responses.ts \ + --section="Core Files (COMPLETE)" \ + src/handlers/base-handler.ts \ + src/handlers/intents.ts \ + --section="Config" \ + tsconfig.json \ + --section="Example Migrated Files" \ + src/services/user.ts src/services/auth.ts + +# 3. Add questions +cat >> typescript-strict-consultation.md << 'EOF' + +--- +# Expert Guidance Request + +## Questions +1. Recommended migration order for 500+ files? +2. Common patterns that break in strict mode and their fixes? +3. Tooling to automate parts of the migration? +4. Testing strategy during migration? + +## Success Criteria +- Zero runtime regressions +- Team can maintain strict mode going forward +- Migration completable in 2-3 sprints + +**Please answer in English** +EOF +``` + +## Extract-Code Script Usage + +### Basic Patterns + +**Extract full files:** +```bash +node scripts/extract-code.js \ + src/Service.cs tests/ServiceTests.cs +``` + +**Extract line ranges:** +```bash +node scripts/extract-code.js \ + src/Service.cs:100-200 tests/ServiceTests.cs:50-75 +``` + +**Multiple ranges from one file:** +```bash +node scripts/extract-code.js \ + src/Service.cs:1-30,86-213,500-600 +``` + +**Mix full files and ranges:** +```bash +node scripts/extract-code.js \ + src/Models/User.cs src/Service.cs:100-150 +``` + +### Git Diff Patterns + +**Diff vs master (default):** +```bash +node scripts/extract-code.js \ + src/Service.cs:diff +``` + +**Explicit diff range:** +```bash +node scripts/extract-code.js \ + src/Service.cs:diff=master..feature-branch \ + src/Helper.cs:diff=HEAD~3..HEAD +``` + +**Recent changes:** +```bash +node scripts/extract-code.js \ + src/Service.cs:diff=HEAD~5 +``` + +**Combine diffs with regular files:** +```bash +node scripts/extract-code.js \ + src/Service.cs:diff \ + src/Tests.cs:100-200 \ + src/Models.cs +``` + +### Size Tracking Patterns + +**Basic size tracking:** +```bash +node scripts/extract-code.js \ + --track-size --output=consultation.md \ + src/Service.cs +``` + +**With sections:** +```bash +node scripts/extract-code.js \ + --track-size --output=doc.md \ + --section="Core Interfaces" \ + Interface.cs BaseClass.cs \ + --section="Domain Models" \ + Contact.cs Company.cs \ + --section="Tests" \ + Tests.cs:100-200 +``` + +**Incremental building (appends each time):** +```bash +node scripts/extract-code.js \ + --track-size -o doc.md File1.cs + +node scripts/extract-code.js \ + --track-size -o doc.md File2.cs # Appends + +node scripts/extract-code.js \ + --track-size -o doc.md File3.cs # Appends again +``` + +### Config File Patterns + +**Simple config:** +```json +{ + "output": "consultation.md", + "trackSize": true, + "sections": [ + { + "header": "Core Implementation", + "files": ["src/Service.cs", "src/Model.cs"] + } + ] +} +``` + +**Complex config with diffs:** +```json +{ + "output": "feature-consultation.md", + "trackSize": true, + "sections": [ + { + "header": "What We Changed", + "files": [ + 
"src/Service.cs:diff", + "src/Helper.cs:diff=master..feature-branch" + ] + }, + { + "header": "Frontend Component (COMPLETE)", + "files": ["src/components/MyComponent.vue"] + }, + { + "header": "Component Tests (COMPLETE)", + "files": ["tests/components/MyComponent.test.ts"] + }, + { + "header": "Core Interfaces", + "files": ["src/interfaces/IMyService.cs"] + }, + { + "header": "Domain Models", + "files": [ + "src/models/MyModel.cs", + "src/models/RelatedModel.cs" + ] + }, + { + "header": "Service Implementation (Relevant Methods)", + "files": ["src/services/MyService.cs:100-500"] + } + ] +} +``` + +**Run config:** +```bash +node scripts/extract-code.js \ + --config=extraction-plan.json +``` + +## Size Tracking Output + +The script shows real-time progress: + +``` +📄 consultation.md: 4.9 KB (existing) +[1/8] NetworkIndex.vue → +25.5 KB (30.4 KB / 125 KB, 24.3%) +[2/8] NetworkIndex.test.ts → +14.0 KB (44.4 KB / 125 KB, 35.5%) +[3/8] NetworkController.cs → +12.3 KB (56.7 KB / 125 KB, 45.4%) +[4/8] INetworkService.cs → +3.2 KB (59.9 KB / 125 KB, 47.9%) +[5/8] Contact.cs → +8.1 KB (68.0 KB / 125 KB, 54.4%) +[6/8] Company.cs → +7.4 KB (75.4 KB / 125 KB, 60.3%) +[7/8] NetworkService.cs → +9.2 KB (84.6 KB / 125 KB, 67.7%) +[8/8] Tests.cs → +2.7 KB (87.3 KB / 125 KB, 69.8%) +✅ Saved: 8 files to consultation.md (87.3 KB / 125 KB) +``` + +**Warnings at thresholds:** +``` +⚠️ Approaching 100 KB (at 100 KB) +⚠️ Very close to limit! (at 115 KB) +❌ Exceeded 125 KB limit (stops processing) +``` + +## Traditional Redirection + +You can also use traditional shell redirection: + +```bash +node scripts/extract-code.js \ + src/Service.cs > expert-consultation.md + +node scripts/extract-code.js \ + src/Tests.cs >> expert-consultation.md # Append +``` + +**Note**: Without `--track-size`, you won't see progress or warnings. + +## Tips for Efficiency + +1. **Batch files together** - One call is better than many +2. **Use config files** for complex extractions you'll repeat +3. **Use full files** when possible - better context for expert +4. **Use diffs** to show "what changed" concisely +5. **Track size** to avoid hitting 125 KB limit +6. **Verify early** - run `wc -c` to check size before adding more diff --git a/skills/ask-expert/README.md b/skills/ask-expert/README.md new file mode 100644 index 0000000..3d277a7 --- /dev/null +++ b/skills/ask-expert/README.md @@ -0,0 +1,66 @@ +# Ask Expert Skill + +> Creates expert consultation documents with automated code extraction, git diffs, and size tracking + +## Overview + +This skill helps Claude create comprehensive technical consultation documents for external expert review. It automatically activates when you ask Claude to prepare code for expert analysis. + +**For complete plugin documentation, see the [main README](../../README.md).** + +## Quick Reference + +**Example prompts:** +``` +"Create an expert consultation document for our authentication refactor" +"Prepare code for expert review about our API design" +"I need to ask an expert about our database schema" +``` + +**What it does:** +- Guides you through structuring consultation documents +- Extracts code with size tracking (125KB limit) +- Organizes content with markdown sections +- Supports full files, line ranges, and git diffs + +**Allowed tools:** Bash, Read, Write, Edit + +## Script Usage + +The skill uses a bundled extraction script. 
For manual usage: + +**Basic extraction:** +```bash +node scripts/extract-code.js \ + --track-size --output=doc.md \ + src/file1.ts src/file2.ts +``` + +**With sections:** +```bash +node scripts/extract-code.js \ + --track-size --output=doc.md \ + --section="What Changed" src/Service.cs:diff \ + --section="Implementation" src/Service.cs +``` + +**Git diffs:** +```bash +node scripts/extract-code.js \ + src/Service.cs:diff=master..feature-branch +``` + +## Documentation + +- **[SKILL.md](SKILL.md)** - Skill definition for Claude +- **[EXAMPLES.md](EXAMPLES.md)** - Detailed usage examples with complete workflows +- **[Script Reference](scripts/extract-code.js)** - Run with `--help` for all options + +## Requirements + +- Node.js 18+ +- Git (for diff functionality) + +## License + +MIT - See [LICENSE](../../LICENSE) diff --git a/skills/ask-expert/SKILL.md b/skills/ask-expert/SKILL.md new file mode 100644 index 0000000..8911b28 --- /dev/null +++ b/skills/ask-expert/SKILL.md @@ -0,0 +1,208 @@ +--- +name: ask-expert +description: Creates expert consultation documents with code extraction, git diffs, and size tracking (125KB limit). Use when user wants to prepare comprehensive technical documentation for external review, gather code context for architecture consultations, or create detailed technical analysis documents with full source context. Requires Node.js 18+. +allowed-tools: [Bash, Read, Write, Edit] +--- + +# Expert Consultation Document Creator + +Create comprehensive technical consultation documents by extracting code, diffs, and architectural context within LLM token limits (125KB). + +## Document Structure + +Follow this proven structure: + +### Part 1: Problem Context (~15-25 KB) +1. **Problem** - Issue, errors, test failures +2. **Our Solution** - What was implemented and why +3. **Concerns** - Code smells, coupling, architectural questions +4. **Alternatives** - Other approaches, trade-offs + +### Part 2: Complete Architecture (~60-90 KB) +5. **Architecture Overview** - ASCII diagram, data flow, patterns +6. **Components** - Frontend, tests, controllers +7. **Services** - Implementation and interfaces +8. **Models** - Domain entities with relationships + +### Part 3: Expert Request (~5-10 KB) +9. **Questions** - Specific technical questions +10. **Success Criteria** - Requirements and priorities + +## Workflow + +### Step 1: Write Problem Context + +Create descriptive filename like `{topic}-consultation.md`: + +```bash +cat > feature-consultation.md << 'EOF' +# Expert Consultation: [Feature Name] + +## 1. Problem +[Describe the issue] + +## 2. Our Solution +[What was implemented] + +## 3. Concerns +[Technical concerns] + +## 4. Alternatives +[Other approaches considered] + +## 5. Architecture Overview +[ASCII diagram] + +--- +# Complete Architecture Context +EOF +``` + +### Step 2: Extract Code + +Use the bundled extraction script with size tracking. + +**💡 The script accepts multiple files in one call** - batch files for efficiency: + +```bash +node scripts/extract-code.js \ + --track-size --output=doc.md \ + --section="Core Files" \ + file1.ts file2.ts file3.ts \ + --section="Tests" \ + test1.ts test2.ts +``` + +**File format options:** +- Full file: `src/Service.cs` +- Line ranges: `src/Service.cs:100-200` or `src/Service.cs:1-30,100-150` +- Git diff: `src/Service.cs:diff` or `src/Service.cs:diff=master..HEAD` + +**Prefer FULL files over chunks** for better expert analysis. Use chunks only for very large files. 
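+
+Two quick checks can save a re-run (a sketch; file names are illustrative). First, `wc -c` gives a rough lower bound on how much of the 125 KB budget a set of files will consume — the final document runs slightly larger once fences and `# File:` headers are added:
+
+```bash
+wc -c src/Service.cs src/Model.cs tests/ServiceShould.cs
+```
+
+Second, each extracted file lands in the document as a fenced block under a `# File:` header (the shape produced by the script's output formatters), so you can spot-check what was included by grepping for that header:
+
+```bash
+grep '^# File:' doc.md
+```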
+ +### Step 3: Add Expert Request + +```bash +cat >> consultation.md << 'EOF' + +--- +# Expert Guidance Request + +## Questions +1. [Specific question about architecture] +2. [Question about trade-offs] +3. [Question about refactoring approach] + +## Success Criteria +- [Required constraints] +- [Priorities] + +**Please answer in English** +EOF +``` + +### Step 4: Verify Size + +```bash +wc -c consultation.md # Should be 100-125 KB +``` + +DO NOT read the full file back (exceeds context). + +## Code Extraction Examples + +See [EXAMPLES.md](EXAMPLES.md) for detailed usage patterns. + +**Basic extraction:** +```bash +node scripts/extract-code.js \ + --track-size --output=doc.md \ + src/Component.vue tests/Component.test.ts +``` + +**With sections:** +```bash +node scripts/extract-code.js \ + --track-size --output=doc.md \ + --section="What Changed" \ + src/Service.cs:diff \ + --section="Implementation" \ + src/Service.cs src/Model.cs +``` + +**Using config file:** +```bash +node scripts/extract-code.js \ + --config=extraction-plan.json +``` + +## Config File Format + +Create reusable extraction plans: + +```json +{ + "output": "consultation.md", + "trackSize": true, + "sections": [ + { + "header": "What Changed", + "files": ["src/Service.cs:diff"] + }, + { + "header": "Core Implementation", + "files": ["src/Service.cs", "src/Model.cs"] + } + ] +} +``` + +See `scripts/extract-code-example.json` for complete example. + +## Critical Rules + +- ✅ Use `--track-size` to stay within 125 KB +- ✅ Batch multiple files in single command +- ✅ Use absolute path to script from any directory +- ✅ Include FULL files when possible +- ✅ Add architecture diagrams +- ✅ Include working AND failing tests +- ❌ Don't read completed file back +- ❌ Don't send only bug fix without context + +## Troubleshooting + +**Script not found:** +```bash +# Verify script exists +ls scripts/extract-code.js + +# Show help +node scripts/extract-code.js --help +``` + +**Git diff errors:** +```bash +git status # Verify git repo +git rev-parse master # Verify branch exists +``` + +**Exceeding 125 KB:** +- Use line ranges instead of full files for large services +- Remove boilerplate and simple DTOs +- Focus on core interfaces and modified code +- Split into multiple consultations + +## Code Inclusion Priority + +**Must include:** +- Core interfaces/abstractions +- Modified/bug-fix code +- Domain models +- Key service methods +- Test examples + +**Skip if tight on space:** +- Boilerplate +- Simple DTOs +- Repetitive test setups diff --git a/skills/ask-expert/scripts/extract-code-example.json b/skills/ask-expert/scripts/extract-code-example.json new file mode 100644 index 0000000..3de2132 --- /dev/null +++ b/skills/ask-expert/scripts/extract-code-example.json @@ -0,0 +1,48 @@ +{ + "output": "expert-consultation.md", + "trackSize": true, + "sections": [ + { + "header": "What We Changed", + "files": [ + "src/services/UserService.ts:diff", + "src/utils/ApiHelper.ts:diff=master..HEAD" + ] + }, + { + "header": "Frontend Components", + "files": [ + "src/components/UserProfile.vue", + "src/components/UserSettings.vue:1-100" + ] + }, + { + "header": "API Layer", + "files": [ + "src/api/users.ts", + "src/api/auth.ts", + "src/types/User.ts" + ] + }, + { + "header": "Backend Services", + "files": [ + "backend/services/UserService.cs:1-150", + "backend/services/AuthService.cs:200-400" + ] + }, + { + "header": "Database Models", + "files": [ + "backend/models/User.cs", + "backend/models/Session.cs:50-120" + ] + }, + { + "header": "Test Examples", + 
"files": [ + "tests/UserService.test.ts:100-200,500-600" + ] + } + ] +} diff --git a/skills/ask-expert/scripts/extract-code.js b/skills/ask-expert/scripts/extract-code.js new file mode 100755 index 0000000..d4172f3 --- /dev/null +++ b/skills/ask-expert/scripts/extract-code.js @@ -0,0 +1,985 @@ +#!/usr/bin/env node +/** + * Code Extractor for Expert Consultations + * + * Extracts file contents, line ranges, or git diffs with automatic size tracking + * to stay within the 125 KB limit for expert consultation documents. + * + * @author Propstreet + * @license MIT + * @requires Node.js 18+ + */ + +import fs from "fs"; +import path from "path"; +import { fileURLToPath } from "url"; +import { parseArgs } from "util"; +import { execSync } from "child_process"; + +// ============================================================================ +// Constants +// ============================================================================ + +/** Maximum size for expert consultation documents (125 KB) */ +const MAX_SIZE_BYTES = 125 * 1024; + +/** Warning threshold at 100 KB */ +const WARNING_THRESHOLD_1 = 100 * 1024; + +/** Warning threshold at 115 KB (very close to limit) */ +const WARNING_THRESHOLD_2 = 115 * 1024; + +/** Regex pattern for parsing file arguments with ranges/diffs */ +const FILE_ARG_PATTERN = /^(.+?):([\d,:-]+|diff(?:=.+)?)$/; + +// ============================================================================ +// Utility Functions +// ============================================================================ + +/** + * Format bytes as human-readable size in KB + * @param {number} bytes - Size in bytes + * @returns {string} Formatted size (e.g., "25.5 KB") + */ +function formatSize(bytes) { + return (bytes / 1024).toFixed(1) + " KB"; +} + +/** + * Detect programming language from file extension + * @param {string} filePath - Path to file + * @returns {string} Language identifier for syntax highlighting + */ +function detectLanguage(filePath) { + const ext = path.extname(filePath).toLowerCase(); + const langMap = { + ".cs": "csharp", + ".js": "javascript", + ".ts": "typescript", + ".vue": "vue", + ".json": "json", + ".md": "markdown", + ".sql": "sql", + ".html": "html", + ".css": "css", + ".scss": "scss", + ".xml": "xml", + ".yaml": "yaml", + ".yml": "yaml", + ".sh": "bash", + ".py": "python", + ".jsx": "jsx", + ".tsx": "tsx", + }; + return langMap[ext] || "text"; +} + +// ============================================================================ +// File Argument Parsing +// ============================================================================ + +/** + * Parse file argument into path and range/diff specification + * Supports formats: + * - "path/to/file.cs" (full file) + * - "path/to/file.cs:100-200" (line range) + * - "path/to/file.cs:1-30,100-150" (multiple ranges) + * - "path/to/file.cs:diff" (git diff vs master) + * - "path/to/file.cs:diff=master..HEAD" (git diff with range) + * + * @param {string} fileArg - File argument from command line + * @returns {{filePath: string, rangeStr: string|null}} Parsed components + */ +function parseFileArgument(fileArg) { + const rangeMatch = fileArg.match(FILE_ARG_PATTERN); + + if (rangeMatch) { + return { + filePath: rangeMatch[1], + rangeStr: rangeMatch[2], + }; + } + + return { + filePath: fileArg, + rangeStr: null, + }; +} + +// ============================================================================ +// Git Operations +// ============================================================================ + +/** + * Check if current 
directory is in a git repository
+ * @throws {Error} If not in a git repository
+ */
+function validateGitRepository() {
+  try {
+    execSync("git rev-parse --git-dir", { stdio: "pipe" });
+  } catch {
+    throw new Error("Not in a git repository");
+  }
+}
+
+/**
+ * Split git diff range into individual refs for validation
+ * @param {string} diffRange - Git range (e.g., "master", "master..HEAD", "HEAD~3")
+ * @returns {string[]} Array of git refs to validate
+ */
+function splitGitRefs(diffRange) {
+  return diffRange.includes("..")
+    ? diffRange.split("..").filter((r) => r)
+    : [diffRange];
+}
+
+/**
+ * Validate that git references exist
+ * @param {string[]} refs - Array of git refs to validate
+ * @throws {Error} If any ref is invalid
+ */
+function validateGitRefs(refs) {
+  for (const ref of refs) {
+    try {
+      // Quote the ref so shell metacharacters in user input cannot break the command
+      execSync(`git rev-parse --verify "${ref}"`, { stdio: "pipe" });
+    } catch {
+      throw new Error(`Invalid git reference: ${ref}`);
+    }
+  }
+}
+
+/**
+ * Parse diff specification from range string
+ * @param {string|null} specStr - Diff specification (e.g., "diff", "diff=master..HEAD")
+ * @returns {{type: string, range: string}|null} Parsed diff spec or null
+ */
+function parseDiffSpec(specStr) {
+  if (!specStr) {
+    return null;
+  }
+
+  if (specStr === "diff") {
+    return { type: "diff", range: "master" };
+  }
+
+  const match = specStr.match(/^diff=(.+)$/);
+  if (match) {
+    return { type: "diff", range: match[1] };
+  }
+
+  return null;
+}
+
+/**
+ * Read git diff content for a file
+ * @param {string} filePath - Absolute path to the file
+ * @param {string} diffRange - Git range (e.g., "master", "master..HEAD", "HEAD~3")
+ * @returns {string} Unified diff output
+ * @throws {Error} If git operations fail
+ */
+function readDiffContent(filePath, diffRange) {
+  try {
+    validateGitRepository();
+
+    // Validate all refs in the range
+    const refs = splitGitRefs(diffRange);
+    validateGitRefs(refs);
+
+    // Get relative path from git root for git diff
+    const gitRoot = execSync("git rev-parse --show-toplevel", {
+      encoding: "utf8",
+    }).trim();
+    const relativePath = path.relative(gitRoot, filePath);
+
+    // Execute git diff (the command form is identical for single refs and ranges)
+    const diffCommand = `git diff ${diffRange} -- "${relativePath}"`;
+
+    return execSync(diffCommand, {
+      encoding: "utf8",
+      cwd: gitRoot,
+    });
+  } catch (error) {
+    // Re-throw with context
+    if (
+      error.message.includes("Not in a git repository") ||
+      error.message.includes("Invalid git reference")
+    ) {
+      throw error;
+    }
+    throw new Error(`Git diff failed: ${error.message}`);
+  }
+}
+
+// ============================================================================
+// Line Range Operations
+// ============================================================================
+
+/**
+ * Parse line range string into structured format
+ * Supports: "10-20", "10:20", "10-20,50-60,100-150"
+ *
+ * @param {string|null} rangeStr - Line range string
+ * @returns {{from: number, to: number}[]|null} Array of range objects or null
+ * @throws {Error} If range format is invalid
+ */
+function parseLineRanges(rangeStr) {
+  if (!rangeStr) {
+    return null;
+  }
+
+  const ranges = rangeStr.split(",").map((r) => r.trim());
+  const parsed = [];
+
+  for (const range of ranges) {
+    const match = range.match(/^(\d+)[-:](\d+)$/);
+    if (!match) {
+      throw new Error(
+        `Invalid line range format: "${range}".
Use format "10-20" or "10:20"` + ); + } + + const from = parseInt(match[1], 10); + const to = parseInt(match[2], 10); + + if (from < 1) { + throw new Error(`Line numbers must be >= 1, got ${from}`); + } + + if (to < from) { + throw new Error(`End line (${to}) must be >= start line (${from})`); + } + + parsed.push({ from, to }); + } + + return parsed; +} + +/** + * Read file content and optionally extract line ranges + * @param {string} filePath - Absolute path to file + * @param {{from: number, to: number}[]|null} lineRanges - Line ranges to extract + * @returns {string} File content (full or extracted ranges) + * @throws {Error} If line ranges exceed file length + */ +function readFileContent(filePath, lineRanges) { + const content = fs.readFileSync(filePath, "utf8"); + + if (!lineRanges || lineRanges.length === 0) { + return content; + } + + const lines = content.split("\n"); + const totalLines = lines.length; + const extractedSegments = []; + + for (const range of lineRanges) { + if (range.from > totalLines) { + throw new Error( + `Start line ${range.from} exceeds file length (${totalLines} lines)` + ); + } + + const endLine = Math.min(range.to, totalLines); + const segment = lines.slice(range.from - 1, endLine); + extractedSegments.push(segment.join("\n")); + } + + return extractedSegments.join("\n\n"); +} + +// ============================================================================ +// Output Formatting +// ============================================================================ + +/** + * Format file content as markdown code block + * @param {string} filePath - Path to file + * @param {string} language - Language for syntax highlighting + * @param {string} content - File content + * @param {{from: number, to: number}[]|null} lineRanges - Line ranges (for display) + * @returns {string} Formatted markdown code block + */ +function formatCodeBlock(filePath, language, content, lineRanges) { + let lineRangeStr = ""; + if (lineRanges && lineRanges.length > 0) { + const rangeStrings = lineRanges.map((r) => `${r.from}-${r.to}`); + lineRangeStr = ` (lines ${rangeStrings.join(", ")})`; + } + + return `# File: ${filePath}${lineRangeStr} +\`\`\`${language} +${content} +\`\`\``; +} + +/** + * Format git diff output as markdown code block + * @param {string} filePath - Path to file + * @param {string} diffContent - Git diff output + * @param {string} diffRange - Git range for display + * @returns {string} Formatted markdown diff block + */ +function formatDiffBlock(filePath, diffContent, diffRange) { + return `# File: ${filePath} (diff=${diffRange}) +\`\`\`diff +${diffContent} +\`\`\``; +} + +// ============================================================================ +// File Validation +// ============================================================================ + +/** + * Validate file argument without processing it + * Checks file existence, git refs, and line ranges + * + * @param {string} fileArg - File argument from command line + * @returns {{valid: boolean, fileArg?: string, error?: string}} Validation result + */ +function validateFile(fileArg) { + const { filePath: parsedPath, rangeStr } = parseFileArgument(fileArg); + + // Resolve to absolute path + const filePath = path.isAbsolute(parsedPath) + ? 
parsedPath + : path.resolve(process.cwd(), parsedPath); + + // Check if file exists + if (!fs.existsSync(filePath)) { + const cwd = process.cwd(); + const suggestion = getSuggestion(filePath); + return { + valid: false, + fileArg, + error: `File not found: ${filePath}\n Current directory: ${cwd}${suggestion}`, + }; + } + + // Validate range specification if present + if (rangeStr) { + const diffSpec = parseDiffSpec(rangeStr); + + if (diffSpec) { + // Validate git diff specification + try { + validateGitRepository(); + const refs = splitGitRefs(diffSpec.range); + validateGitRefs(refs); + } catch (error) { + return { + valid: false, + fileArg, + error: error.message, + }; + } + } else { + // Validate line ranges + try { + const lineRanges = parseLineRanges(rangeStr); + const content = fs.readFileSync(filePath, "utf8"); + const totalLines = content.split("\n").length; + + for (const range of lineRanges) { + if (range.from > totalLines || range.to > totalLines) { + return { + valid: false, + fileArg, + error: `Line range ${range.from}-${range.to} exceeds file length (${totalLines} lines) in ${filePath}`, + }; + } + } + } catch (error) { + return { + valid: false, + fileArg, + error: `Invalid line range format in "${fileArg}": ${error.message}`, + }; + } + } + } + + return { valid: true }; +} + +/** + * Process a single file argument and return formatted content + * @param {string} fileArg - File argument (path with optional range/diff spec) + * @returns {string} Formatted markdown output + * @throws {Error} If file processing fails + */ +function processFile(fileArg) { + const { filePath: parsedPath, rangeStr } = parseFileArgument(fileArg); + + // Resolve to absolute path + const filePath = path.isAbsolute(parsedPath) + ? parsedPath + : path.resolve(process.cwd(), parsedPath); + + // Check if file exists + if (!fs.existsSync(filePath)) { + const cwd = process.cwd(); + const suggestion = getSuggestion(filePath); + throw new Error( + `File not found: ${filePath}\n Current directory: ${cwd}${suggestion}` + ); + } + + // Check for diff specification + const diffSpec = rangeStr ? parseDiffSpec(rangeStr) : null; + + if (diffSpec) { + // Handle git diff mode + const diffContent = readDiffContent(filePath, diffSpec.range); + + // Handle empty diff + if (!diffContent || diffContent.trim() === "") { + return formatDiffBlock( + filePath, + `(No changes between ${diffSpec.range})`, + diffSpec.range + ); + } + + return formatDiffBlock(filePath, diffContent, diffSpec.range); + } + + // Handle line range or full file mode + const lineRanges = rangeStr ? 
parseLineRanges(rangeStr) : null;
+  const language = detectLanguage(filePath);
+  const content = readFileContent(filePath, lineRanges);
+
+  return formatCodeBlock(filePath, language, content, lineRanges);
+}
+
+// ============================================================================
+// Config File Operations
+// ============================================================================
+
+/**
+ * Read and validate JSON config file
+ * @param {string} configPath - Path to config file
+ * @returns {object} Validated config object
+ * @throws {Error} If config is invalid
+ */
+function readConfigFile(configPath) {
+  if (!fs.existsSync(configPath)) {
+    throw new Error(`Config file not found: ${configPath}`);
+  }
+
+  let config;
+  try {
+    const content = fs.readFileSync(configPath, "utf8");
+    config = JSON.parse(content);
+  } catch (error) {
+    throw new Error(`Failed to parse config file: ${error.message}`);
+  }
+
+  // Validate schema
+  if (!config.sections || !Array.isArray(config.sections)) {
+    throw new Error(
+      'Config must have "sections" array. See extract-code-example.json for format.'
+    );
+  }
+
+  if (config.sections.length === 0) {
+    throw new Error("Config must have at least one section");
+  }
+
+  for (const [index, section] of config.sections.entries()) {
+    if (!section.files || !Array.isArray(section.files)) {
+      throw new Error(
+        `Section ${index + 1} must have "files" array. Header: ${section.header || "(no header)"}`
+      );
+    }
+
+    if (section.files.length === 0) {
+      throw new Error(
+        `Section ${index + 1} must have at least one file. Header: ${section.header || "(no header)"}`
+      );
+    }
+  }
+
+  return config;
+}
+
+// ============================================================================
+// Helper Functions
+// ============================================================================
+
+/**
+ * Provide helpful error suggestions when file is not found
+ * @param {string} filePath - Resolved absolute path that was not found
+ * @returns {string} Suggestion text (empty if none applicable)
+ */
+function getSuggestion(filePath) {
+  const dir = path.dirname(filePath);
+  const filename = path.basename(filePath);
+
+  // If the parent directory is missing, the path was likely resolved from the
+  // wrong working directory (callers always pass resolved absolute paths here)
+  if (!fs.existsSync(dir)) {
+    return `\n   💡 Tip: Verify you're in the correct directory:\n      pwd        # Check current directory\n      ls ${dir}  # Check parent directory exists`;
+  }
+
+  // Check if file exists with different casing
+  try {
+    const files = fs.readdirSync(dir);
+    const match = files.find(
+      (f) => f.toLowerCase() === filename.toLowerCase()
+    );
+    if (match && match !== filename) {
+      return `\n   💡 Tip: File exists with different casing: ${match}`;
+    }
+  } catch {
+    // Ignore directory read errors
+  }
+
+  return "";
+}
+
+// ============================================================================
+// CLI Interface
+// ============================================================================
+
+/**
+ * Display help message
+ */
+function showHelp() {
+  console.log(`
+📄 Code Extractor for Expert Consultations
+
+Usage:
+  extract-code [options] <file> [file2] [file3] ...
+  extract-code --help
+
+Arguments:
+  file                  File path with optional line range(s) or diff spec
+                        Formats:
+                          /path/to/file.cs                      (extract full file)
+                          /path/to/file.cs:10-50                (extract lines 10-50)
+                          /path/to/file.cs:10:50                (extract lines 10-50, alternative)
+                          /path/to/file.cs:10-50,100-150        (multiple ranges, comma-separated)
+                          /path/to/file.cs:1-30,86-213,500-600  (multiple ranges)
+                          /path/to/file.cs:diff                 (git diff vs master)
+                          /path/to/file.cs:diff=master..HEAD    (git diff with explicit range)
+                          /path/to/file.cs:diff=HEAD~3          (git diff vs 3 commits ago)
+                          relative/path/file.cs                 (resolved to absolute path)
+                          relative/path/file.cs:5-15            (with line range)
+
+Options:
+  --help, -h            Show this help message
+  --output, -o <file>   Write output to file (appends to existing file)
+  --track-size          Show size tracking and progress (requires --output)
+  --section <header>    Add markdown section header before next file
+                        Can be used multiple times for different files
+  --config <path>       Use JSON config file for batch extraction
+                        See extract-code-example.json for format
+
+Output:
+  Prints markdown-formatted code blocks with file paths and line ranges.
+  Output can be redirected to a file or piped to other commands.
+
+Examples:
+  # Extract full files
+  extract-code src/Service.cs tests/ServiceTests.cs
+
+  # Extract specific line ranges
+  extract-code src/Service.cs:100-200 tests/ServiceTests.cs:50-75
+
+  # Extract multiple ranges from a single file
+  extract-code src/Service.cs:1-30,86-213
+
+  # Mix full files and ranges
+  extract-code src/Models/User.cs src/Service.cs:100-150
+
+  # Show git diff vs master (default)
+  extract-code src/Service.cs:diff
+
+  # Show git diff with explicit range
+  extract-code src/Service.cs:diff=master..feature-branch
+  extract-code src/Service.cs:diff=HEAD~3..HEAD
+
+  # Show what changed in recent commits
+  extract-code src/Service.cs:diff=HEAD~5
+
+  # Combine diffs with regular files
+  extract-code src/Service.cs:diff src/Tests.cs:100-200
+
+  # Save to file with size tracking (appends to existing file)
+  extract-code --track-size --output=consultation.md src/Service.cs
+
+  # Add section headers
+  extract-code --section="Core Interfaces" Interface.cs \\
+    --section="Domain Models" Contact.cs Company.cs \\
+    --output=doc.md
+
+  # Combined: size tracking + sections
+  extract-code --track-size --output=doc.md \\
+    --section="Core" Interface.cs \\
+    --section="Tests" Tests.cs:100-200
+
+  # Include diffs in consultation documents
+  extract-code --track-size --output=consultation.md \\
+    --section="What Changed" \\
+    src/NetworkService.cs:diff \\
+    src/PineconeHelper.cs:diff=master..feature-branch
+
+  # Build document incrementally (appends each time)
+  extract-code --track-size -o doc.md File1.cs
+  extract-code --track-size -o doc.md File2.cs   # Appends to existing
+  extract-code --track-size -o doc.md File3.cs   # Appends again
+
+  # Traditional output redirection still works
+  extract-code src/Service.cs > expert-consultation.md
+  extract-code src/Tests.cs >> expert-consultation.md   # Append
+
+  # Use config file for complex extractions
+  extract-code --config=extraction-plan.json
+  extract-code --config=extraction-plan.json --track-size   # Override trackSize
+
+Notes:
+  • Automatically detects language from file extension
+  • Line numbers are 1-indexed (first line is line 1)
+  • Line ranges are inclusive (10-20 includes both lines 10 and 20)
+  • Multiple ranges are separated by blank lines in output
+  • Supports 20+ file types (cs, js, ts, vue, py, etc.)
+ • Error messages go to stderr, formatted output to stdout + • --output mode always appends (matches >> behavior) + • Size tracking shows warnings at 100KB, 115KB, errors at 125KB + • Section headers apply to the immediately following file only + • Diff mode requires git repository and valid refs + • Diff output uses unified diff format (standard git diff) +`); +} + +/** + * Main entry point + */ +function main() { + const options = { + help: { + type: "boolean", + short: "h", + }, + "track-size": { + type: "boolean", + }, + output: { + type: "string", + short: "o", + }, + section: { + type: "string", + multiple: true, + }, + config: { + type: "string", + }, + }; + + let args; + try { + const parsed = parseArgs({ options, allowPositionals: true }); + args = parsed.values; + args.positionals = parsed.positionals; + } catch (error) { + console.error(`❌ ${error.message}`); + showHelp(); + process.exit(1); + } + + if (args.help) { + showHelp(); + process.exit(0); + } + + // Handle config file mode + if (args.config) { + try { + const config = readConfigFile(args.config); + processConfigFile(config, args); + return; + } catch (error) { + console.error(`❌ Error processing config file: ${error.message}`); + process.exit(1); + } + } + + if (!args.positionals || args.positionals.length === 0) { + console.error("❌ No files specified"); + showHelp(); + process.exit(1); + } + + // Filter out empty arguments + const fileArgs = args.positionals.filter((arg) => arg && arg.trim() !== ""); + + // VALIDATE ALL FILES FIRST - before writing anything + const validationErrors = []; + for (const fileArg of fileArgs) { + const validation = validateFile(fileArg); + if (!validation.valid) { + validationErrors.push({ + fileArg: validation.fileArg, + error: validation.error, + }); + } + } + + // If any validation errors, report them all and exit without modifying output + if (validationErrors.length > 0) { + console.error( + `❌ Validation failed for ${validationErrors.length} file(s):\n` + ); + for (const { fileArg, error } of validationErrors) { + console.error(` • "${fileArg}":`); + console.error(` ${error.replace(/\n/g, "\n ")}`); + console.error(""); + } + console.error("⚠️ No files were written to avoid partial output."); + process.exit(1); + } + + const results = []; + let hasErrors = false; + let totalBytes = 0; + let sectionIndex = 0; + + // Read existing file size if output file specified + if (args.output && fs.existsSync(args.output)) { + const stats = fs.statSync(args.output); + totalBytes = stats.size; + if (args["track-size"]) { + console.error(`📄 ${args.output}: ${formatSize(totalBytes)} (existing)`); + } + } + + // Process each file + for (const [index, fileArg] of fileArgs.entries()) { + try { + // Add section header if specified + let output = ""; + if (args.section && sectionIndex < args.section.length) { + const sectionHeader = args.section[sectionIndex]; + if (sectionHeader) { + output = `### ${sectionHeader}\n\n`; + } + sectionIndex++; + } + + const result = processFile(fileArg); + output += result; + results.push(output); + + // Calculate size + const contentSize = Buffer.byteLength(output + "\n\n", "utf8"); + totalBytes += contentSize; + + // Write to file or collect for stdout + if (args.output) { + fs.appendFileSync(args.output, output + "\n\n", "utf8"); + } + + // Show progress if tracking size + if (args["track-size"]) { + const percent = ((totalBytes / MAX_SIZE_BYTES) * 100).toFixed(1); + const filename = path.basename(fileArg.split(":")[0]); + console.error( + `[${index + 
1}/${fileArgs.length}] ${filename} → +${formatSize(contentSize)} (${formatSize(totalBytes)} / 125 KB, ${percent}%)` + ); + + // Check thresholds + if (totalBytes >= MAX_SIZE_BYTES) { + console.error( + `❌ Error: Exceeded 125 KB limit (${formatSize(totalBytes)})` + ); + console.error( + ` Stop processing to stay within expert consultation limits` + ); + process.exit(1); + } else if (totalBytes >= WARNING_THRESHOLD_2) { + console.error(`⚠️ Very close to 125 KB limit!`); + } else if (totalBytes >= WARNING_THRESHOLD_1) { + console.error(`⚠️ Approaching 100 KB`); + } + } + } catch (error) { + console.error(`❌ Error processing "${fileArg}": ${error.message}`); + hasErrors = true; + } + } + + if (results.length === 0) { + console.error("\n❌ No files were successfully processed"); + process.exit(1); + } + + // Output results to stdout if no output file specified + if (!args.output) { + console.log(results.join("\n\n")); + } else if (args["track-size"]) { + const status = hasErrors ? "⚠️ Completed with errors" : "✅ Saved"; + const fileCount = `${results.length} ${results.length === 1 ? "file" : "files"}`; + console.error( + `${status}: ${fileCount} to ${args.output} (${formatSize(totalBytes)} / 125 KB)` + ); + } + + process.exit(hasErrors ? 1 : 0); +} + +/** + * Process files from config file + * @param {object} config - Validated config object + * @param {object} args - Parsed command-line arguments + */ +function processConfigFile(config, args) { + let totalBytes = 0; + let totalFilesProcessed = 0; + let hasErrors = false; + + // Use output from config or args + const outputFile = args.output || config.output; + const trackSize = args["track-size"] || config.trackSize || false; + + if (!outputFile) { + console.error( + "❌ Config mode requires output file. 
Specify in config file or use --output flag" + ); + process.exit(1); + } + + // Read existing file size if output file exists + if (fs.existsSync(outputFile)) { + const stats = fs.statSync(outputFile); + totalBytes = stats.size; + if (trackSize) { + console.error(`📄 ${outputFile}: ${formatSize(totalBytes)} (existing)`); + } + } + + // VALIDATE ALL FILES FIRST - before writing anything + const validationErrors = []; + for (const section of config.sections) { + for (const fileArg of section.files) { + const validation = validateFile(fileArg); + if (!validation.valid) { + validationErrors.push({ + fileArg: validation.fileArg, + error: validation.error, + section: section.header || "(no header)", + }); + } + } + } + + // If any validation errors, report them all and exit without modifying output + if (validationErrors.length > 0) { + console.error( + `❌ Validation failed for ${validationErrors.length} file(s):\n` + ); + for (const { fileArg, error, section } of validationErrors) { + console.error(` • "${fileArg}" in section "${section}":`); + console.error(` ${error.replace(/\n/g, "\n ")}`); + console.error(""); + } + console.error("⚠️ No files were written to avoid partial output."); + process.exit(1); + } + + // Process each section + for (const [sectionIndex, section] of config.sections.entries()) { + if (trackSize) { + console.error( + `[Section ${sectionIndex + 1}/${config.sections.length}] ${section.header || "(no header)"}` + ); + } + + // Add section header if specified + if (section.header) { + const headerContent = `### ${section.header}\n\n`; + fs.appendFileSync(outputFile, headerContent, "utf8"); + totalBytes += Buffer.byteLength(headerContent, "utf8"); + } + + // Process each file in section + for (const [fileIndex, fileArg] of section.files.entries()) { + try { + const result = processFile(fileArg); + const content = result + "\n\n"; + const contentSize = Buffer.byteLength(content, "utf8"); + totalBytes += contentSize; + + // Write to file + fs.appendFileSync(outputFile, content, "utf8"); + totalFilesProcessed++; + + // Show progress if tracking size + if (trackSize) { + const percent = ((totalBytes / MAX_SIZE_BYTES) * 100).toFixed(1); + const filename = path.basename(fileArg.split(":")[0]); + console.error( + ` [${fileIndex + 1}/${section.files.length}] ${filename} → +${formatSize(contentSize)} (${formatSize(totalBytes)} / 125 KB, ${percent}%)` + ); + + // Check thresholds + if (totalBytes >= MAX_SIZE_BYTES) { + console.error( + `❌ Error: Exceeded 125 KB limit (${formatSize(totalBytes)})` + ); + console.error( + ` Stop processing to stay within expert consultation limits` + ); + process.exit(1); + } else if (totalBytes >= WARNING_THRESHOLD_2) { + console.error(`⚠️ Very close to 125 KB limit!`); + } else if (totalBytes >= WARNING_THRESHOLD_1) { + console.error(`⚠️ Approaching 100 KB`); + } + } + } catch (error) { + console.error( + `❌ Error processing "${fileArg}" in section "${section.header || "(no header)"}": ${error.message}` + ); + hasErrors = true; + } + } + } + + if (trackSize) { + const status = hasErrors ? "⚠️ Completed with errors" : "✅ Saved"; + const fileCount = `${totalFilesProcessed} ${totalFilesProcessed === 1 ? "file" : "files"}`; + const sectionCount = `${config.sections.length} ${config.sections.length === 1 ? "section" : "sections"}`; + console.error( + `${status}: ${fileCount}, ${sectionCount} to ${outputFile} (${formatSize(totalBytes)} / 125 KB)` + ); + } + + process.exit(hasErrors ? 
1 : 0); +} + +// ============================================================================ +// Entry Point +// ============================================================================ + +// Handle unhandled errors +process.on("unhandledRejection", (err) => { + console.error("❌ Error:", err.message); + process.exit(1); +}); + +// Run the script only if executed directly (not imported) +const scriptPath = path.normalize(process.argv[1]); +const modulePath = path.normalize(fileURLToPath(import.meta.url)); +if (modulePath === scriptPath) { + try { + main(); + } catch (err) { + console.error("❌ Error:", err.message); + process.exit(1); + } +} diff --git a/skills/update-pr/EXAMPLES.md b/skills/update-pr/EXAMPLES.md new file mode 100644 index 0000000..c940418 --- /dev/null +++ b/skills/update-pr/EXAMPLES.md @@ -0,0 +1,271 @@ +# PR Description Examples + +Examples of comprehensive vs incomplete PR descriptions. + +## Good Example: Complete Coverage + +This example shows a PR that properly documents multiple categories of changes: + +```markdown +## Summary + +Refactors UserService to eliminate 600 lines of duplication by extracting common +validation logic. Additionally includes critical bug fixes for session handling, +EF Core query optimization, and test infrastructure improvements. + +## User Impact + +**User Management:** +- More consistent validation behavior across all user operations +- Better error messages when validation fails + +**Reliability:** +- Bug fix: Sessions no longer expire prematurely during long operations +- Bug fix: Race condition in concurrent user updates resolved + +**Performance:** +- 40% faster user lookup queries through optimized includes + +## Technical Notes + +### 1. UserService Refactoring + +Extracted common validation logic into `UserValidationService`: + +- **Before**: 3 nearly-identical validation methods across `Create`, `Update`, `Import` +- **After**: Single `ValidateUser()` method with operation-specific extensions +- **Files**: `UserService.cs`, `UserValidationService.cs` (new) + +### 2. Bug Fixes + +**Session Expiration (Critical)** +- **Location**: `SessionManager.cs:125` +- **Problem**: Timeout calculated from session start, not last activity +- **Impact**: Users logged out during long form submissions +- **Fix**: Track last activity timestamp, reset on each request + +**Concurrent Update Race Condition** +- **Location**: `UserRepository.cs:89` +- **Problem**: No optimistic concurrency on user updates +- **Impact**: Last write wins, potentially losing data +- **Fix**: Added `RowVersion` column with EF Core concurrency token + +### 3. Query Optimization + +- **Location**: `UserRepository.cs:45` +- **Problem**: N+1 query pattern when loading users with roles +- **Fix**: Added explicit `.Include(u => u.Roles)` with split query +- **Measured**: 40% reduction in query time for user list endpoint + +### 4. Test Infrastructure + +- New `UserTestFixture` base class for consistent test setup +- Extracted common assertions to `UserAssertions` helper +- Added integration test for concurrent update scenario + +### 5. Configuration + +- Added `SessionTimeoutMinutes` to `appsettings.json` (default: 30) +- New `ConcurrencyRetryCount` setting for optimistic concurrency retries + +## Testing + +``` +Total: 127 tests +Passed: 127 +Failed: 0 +New tests: 8 +``` + +- Unit tests for validation extraction +- Integration test for session timeout behavior +- Concurrency test for race condition fix + +## Implementation Approach + +1. 
**feat: extract user validation service** - Core refactoring work +2. **fix: session timeout calculation** - Critical bug fix +3. **fix: add optimistic concurrency to user updates** - Race condition fix +4. **perf: optimize user query includes** - Query performance +5. **test: add user service test fixtures** - Test infrastructure +6. **chore: add session configuration** - Configuration changes + +--- + +Generated with [Claude Code](https://claude.com/claude-code) +``` + +### Why This Is Good + +- Documents ALL commits, not just the main feature +- Bug fixes are prominently highlighted with impact +- Performance improvement is measured and documented +- Test coverage is quantified +- Configuration changes are noted +- Each section has specific file references + +--- + +## Bad Example: Incomplete Coverage + +This example shows what to avoid: + +```markdown +## Summary + +Refactors UserService to eliminate duplication. + +## Technical Notes + +- New base class +- Moved duplicate code +- Tests passing + +## Testing + +All tests pass. +``` + +### Why This Is Bad + +- **Missing bug fixes**: The session and concurrency fixes aren't mentioned +- **No impact context**: Doesn't explain WHY the refactoring matters +- **Vague descriptions**: "Moved duplicate code" tells reviewers nothing +- **Missing commits**: Only describes 1 of 6 commits +- **No configuration changes**: Settings changes completely omitted +- **No performance details**: Query optimization not mentioned +- **No file references**: Reviewers don't know where to look + +--- + +## Example: Feature with Multiple Side Effects + +When a feature PR includes necessary side effects: + +### Good + +```markdown +## Summary + +Adds export-to-CSV functionality for reports. Also fixes date formatting +inconsistency discovered during development and adds missing null checks +in the report generator. + +## User Impact + +**New Feature:** +- Users can now export any report to CSV format +- Supports all report types (daily, weekly, monthly) + +**Bug Fixes:** +- Dates now display consistently in user's timezone across all reports +- Reports no longer fail when optional fields are null + +## Technical Notes + +### 1. CSV Export Feature +[Details of main feature] + +### 2. Date Formatting Fix +- **Location**: `DateFormatter.cs:45` +- **Problem**: Some dates used UTC, others used local time +- **Fix**: Standardized on user's configured timezone + +### 3. Null Safety +- **Location**: `ReportGenerator.cs:112, 156, 203` +- **Problem**: Null reference exceptions on optional fields +- **Fix**: Added null-conditional operators and default values +``` + +### Bad + +```markdown +## Summary + +Adds CSV export for reports. + +## Changes + +- Added ExportService +- Updated ReportController +- Fixed some bugs +``` + +The bad version: +- Hides important bug fixes under "Fixed some bugs" +- Doesn't explain the impact of the bugs that were fixed +- Reviewers might miss that this PR changes date behavior + +--- + +## Example: Infrastructure-Heavy PR + +When the main work is test/infrastructure improvements: + +### Good + +```markdown +## Summary + +Overhauls test infrastructure to support parallel execution, reducing CI +time from 12 minutes to 4 minutes. Includes migration of 45 test classes +to new fixture pattern. + +## User Impact + +**Developer Experience:** +- CI feedback 3x faster +- Local test runs significantly quicker +- Tests now properly isolated (no more flaky failures) + +## Technical Notes + +### 1. 
Parallel Test Execution +- **Problem**: Tests ran sequentially due to shared database state +- **Solution**: New `IsolatedDatabaseFixture` creates per-test databases +- **Result**: Full parallelization across all CPU cores + +### 2. Test Migration +- Migrated 45 test classes to new fixture pattern +- Removed hardcoded IDs that caused parallel conflicts +- Added scoped assertions that filter to test-created data + +### 3. Performance Results +| Metric | Before | After | +|--------|--------|-------| +| CI Duration | 12:34 | 4:12 | +| Local (8 cores) | 8:45 | 1:23 | +| Flaky test rate | 12% | 0% | + +## Testing + +All 312 tests pass in parallel mode. +``` + +### Bad + +```markdown +## Summary + +Updated tests. + +## Changes + +- Changed test base class +- Updated 45 files +``` + +--- + +## Checklist for Complete Coverage + +Before finalizing your PR description, verify: + +- [ ] Every commit message is reflected in the description +- [ ] Bug fixes are prominently documented (not hidden) +- [ ] Performance improvements include measurements +- [ ] Configuration changes are listed +- [ ] Test coverage changes are quantified +- [ ] File paths are included for key changes +- [ ] User impact is clearly separated from technical details +- [ ] "Why" is explained, not just "what" diff --git a/skills/update-pr/README.md b/skills/update-pr/README.md new file mode 100644 index 0000000..44a2a01 --- /dev/null +++ b/skills/update-pr/README.md @@ -0,0 +1,60 @@ +# Update PR Skill + +> Creates comprehensive PR descriptions by systematically reviewing all changes + +## Overview + +This skill helps Claude create thorough PR descriptions that document every meaningful change in a branch - not just the headline feature, but also bug fixes, test improvements, configuration changes, and documentation updates. + +**For complete plugin documentation, see the [main README](../../README.md).** + +## Quick Reference + +**Example prompts:** +``` +"Update the PR description" +"Prepare this PR for review" +"Document the changes in this branch" +"Write a comprehensive PR summary" +``` + +**What it does:** +- Systematically inventories ALL changed files and commits +- Categorizes changes (features, fixes, tests, docs, config) +- Creates structured PR descriptions with user impact section +- Saves to `/tmp/pr-summary.md` and updates PR via `gh pr edit` + +**Allowed tools:** Bash, Read, Write, Edit, Glob, Grep + +## How It Works + +The skill runs a 5-phase workflow: + +1. **Inventory** - Detects base branch (from PR or default), runs git diff/log commands +2. **Categorization** - Groups files by type (core, fixes, tests, docs, config) +3. **Analysis** - Reviews each commit to understand what/why/impact +4. **Documentation** - Creates structured PR description +5. **Update** - Saves to temp file, optionally updates PR + +See [SKILL.md](SKILL.md) for the complete workflow details. + +## Tips for Best Results + +1. **Let it run all phases** - Don't interrupt the systematic review +2. **Review before updating** - Claude will show you the summary first +3. **Provide context** - Mention important context Claude can't see in the code +4. 
+
+## Requirements
+
+- **gh CLI** - GitHub's official CLI, authenticated
+- **Git** - For diff and log commands
+
+## Documentation
+
+- **[SKILL.md](SKILL.md)** - Complete workflow for Claude
+- **[EXAMPLES.md](EXAMPLES.md)** - Good vs bad PR description examples
+
+## License
+
+MIT - See [LICENSE](../../LICENSE)
diff --git a/skills/update-pr/SKILL.md b/skills/update-pr/SKILL.md
new file mode 100644
index 0000000..4eca102
--- /dev/null
+++ b/skills/update-pr/SKILL.md
@@ -0,0 +1,256 @@
+---
+name: update-pr
+description: Creates comprehensive PR descriptions by systematically reviewing ALL changes - features, bug fixes, tests, docs, and infrastructure. Use when user wants to update PR description, prepare PR for review, or document branch changes. Requires gh CLI.
+allowed-tools: [Bash, Read, Write, Edit, Glob, Grep]
+# Note: Glob/Grep are useful for finding files by pattern (e.g., *Test*.cs)
+# and searching code content when categorizing changes.
+---
+
+# Comprehensive PR Description Creator
+
+Create thorough PR descriptions that document EVERY meaningful change, not just the headline feature.
+
+## Critical Rule: Complete Coverage
+
+**NEVER assume you know what's in the PR based on branch name or first glance.**
+
+PRs often contain:
+- Main feature work
+- Bug fixes discovered during development
+- Performance optimizations
+- Test infrastructure improvements
+- Documentation updates
+- Dependency changes
+- Configuration adjustments
+
+You MUST systematically review ALL changes and include them in the summary.
+
+## Phase 1: Complete Change Inventory
+
+First, determine the base branch for comparison:
+
+```bash
+# Get base branch from current PR (if one exists), otherwise fall back to default branch
+BASE_BRANCH=$( \
+  gh pr view --json baseRefName -q '.baseRefName' 2>/dev/null || \
+  git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || \
+  echo 'refs/remotes/origin/main' \
+)
+BASE_BRANCH="${BASE_BRANCH#refs/remotes/origin/}"
+```
+
+Then gather context (using `$BASE_BRANCH` from above). These commands are independent and can be run as separate tool calls:
+
+**PR and working tree status:**
+```bash
+gh pr status
+```
+
+```bash
+git status --short
+```
+
+**Changed files:**
+```bash
+git diff origin/$BASE_BRANCH...HEAD --stat
+git diff origin/$BASE_BRANCH...HEAD --name-status
+```
+
+**Commit history:**
+```bash
+git log origin/$BASE_BRANCH..HEAD --oneline --no-merges
+```
+
+**Note**: Using the PR's actual base branch ensures accurate diffs for release backports or PRs targeting non-default branches.
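+
+To get a head start on the categorization in Phase 2 below, you can roughly bucket the `--name-status` output first. A minimal sketch - the patterns here are illustrative assumptions, so adjust them to the repo's layout:
+
+```bash
+# Rough first-pass bucketing of changed files; refine by hand afterwards.
+git diff "origin/$BASE_BRANCH...HEAD" --name-status | awk '
+  /[Tt]est/                { print "test:   " $0; next }
+  /docs\/|\.md$/           { print "docs:   " $0; next }
+  /\.(json|ya?ml|config)$/ { print "config: " $0; next }
+                           { print "core:   " $0 }
+'
+```
+
+This is only a first pass - Phase 2 still requires reading the key diffs.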
+
+## Phase 2: Systematic File Analysis
+
+Using the `--name-status` output, create a categorized inventory of EVERY changed file:
+
+### 2.1 Core Application Changes
+- Check files matching your framework patterns (e.g., `*Service*`, `*Controller*`, `*Component*`)
+- Look for: new methods, refactoring, bug fixes, performance improvements
+- Read key diffs to understand WHAT changed and WHY
+
+### 2.2 Bug Fixes & Corrections
+- Scan commit messages for: "fix", "bug", "correct", "resolve"
+- Read diffs for files mentioned in fix commits
+- Document: what was broken, what was fixed, impact
+
+### 2.3 Infrastructure & Framework
+- Check: interceptors, middleware, base classes, test fixtures
+- Look for: new patterns, performance improvements, testing infrastructure
+- These are often overlooked but important
+
+### 2.4 Configuration Changes
+- Check: config files, constants, environment settings, package files
+- Look for: new constants, dependency changes, configuration adjustments
+- Even small changes here can be significant
+
+### 2.5 Test Files
+- Count new test files vs modified test files
+- Check for: new test patterns, test infrastructure, coverage improvements
+- Document test coverage added
+
+### 2.6 Documentation
+- Check: README files, docs folders, inline documentation
+- Look for: new documentation, removed obsolete content, updated instructions
+
+### 2.7 Build & Tooling
+- Check: package.json, build configs, CI/CD files, scripts
+- Look for: dependency updates, new tooling, build process changes
+
+### 2.8 UI/Frontend Changes
+- Check: components, styles, state management
+- Look for: new components, UI fixes, styling changes
+
+## Phase 3: Commit-by-Commit Review
+
+For EACH commit in `git log`:
+
+1. Read the commit message - it tells you WHAT category the change falls into
+2. Identify which files were changed in that commit (see the sketch below)
+3. Read diffs for key files to understand the WHY
+4. Note whether the commit is a feature, bugfix, test, docs, refactor, perf, or chore change
+
+**Common prefixes:**
+- `fix:` = bug fix (HIGH PRIORITY - always include)
+- `feat:` = feature (main work)
+- `test:` = test infrastructure
+- `docs:` = documentation
+- `perf:` = performance optimization
+- `refactor:` = code organization
+- `chore:` = maintenance tasks
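+
+A minimal sketch of this per-commit walk, assuming `$BASE_BRANCH` from Phase 1 is still set:
+
+```bash
+# One pass: every commit's subject plus the files it touched, so no
+# commit gets skipped during categorization.
+git log "origin/$BASE_BRANCH..HEAD" --no-merges --stat --format='%h %s'
+```
+
+The per-commit `--stat` block shows exactly which files each commit touched; read the full diff (`git show <sha>`) for anything that looks like a fix or behavior change.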
+
+## Phase 4: Verification Checklist
+
+Before writing the summary, confirm you've checked:
+
+- [ ] ALL commits reviewed and categorized
+- [ ] Core application changes documented
+- [ ] Bug fixes identified and explained
+- [ ] Infrastructure/framework changes noted
+- [ ] Configuration changes included
+- [ ] Test coverage quantified
+- [ ] Documentation updates listed
+- [ ] Build/tooling changes noted
+
+**If you cannot check ALL boxes, you are not done gathering data.**
+
+## Phase 5: Write Comprehensive Summary
+
+Structure your summary to cover ALL categories of changes.
+
+### Template Structure
+
+```markdown
+## Summary
+
+[One sentence covering the MAIN change, plus a brief mention of other significant improvements]
+
+## User Impact
+
+**[Main Feature Category]:**
+- [Specific user-facing improvements]
+
+**[Secondary Categories if applicable - e.g., Reliability, Performance]:**
+- [Bug fixes with impact]
+- [Performance improvements]
+
+## Technical Notes
+
+### 1. [Main Feature Name]
+[Detailed explanation of the main feature with file references]
+
+### 2. [Bug Fixes / Corrections]
+[Each bug fix with location, what was wrong, impact, fix]
+
+### 3. [Infrastructure / Performance]
+[Test improvements, framework changes, optimizations]
+
+### 4. [Configuration & Dependencies]
+[Constants, config changes, dependency updates]
+
+### 5. [Documentation]
+[README updates, new docs, removed obsolete content]
+
+## Testing
+
+[Comprehensive test results with specific numbers]
+
+## Implementation Approach
+
+[List ALL commits with a brief explanation of each]
+
+1. **[commit message]** - [what it did]
+2. **[commit message]** - [what it did]
+...
+
+## Next Steps
+
+[Only if applicable]
+
+---
+
+Generated with [Claude Code](https://claude.com/claude-code)
+```
+
+## Quality Checklist
+
+Before finalizing, verify:
+
+- **Completeness**: Every commit is represented in the summary
+- **Accuracy**: All bug fixes are documented with impact
+- **Context**: WHY changes were made, not just WHAT changed
+- **Organization**: Changes grouped logically (features, bugs, infrastructure, etc.)
+- **Specificity**: File paths for critical changes
+- **Impact**: User-facing vs internal changes clearly separated
+- **Testing**: Actual test results reported, not assumptions
+
+## Output Instructions
+
+1. **Save to a temporary file**: Write the summary to `/tmp/pr-summary.md` (avoids cluttering the repo)
+2. **Self-review**: Read your summary and verify all commits and file categories are covered
+3. **User approval**: Show the summary and ask if they want to update the PR
+4. **Update PR** (only if the user approves):
+   ```bash
+   gh pr edit --body-file /tmp/pr-summary.md
+   ```
+
+## Common Mistakes to Avoid
+
+- **Focusing only on the main feature** - PRs often contain multiple types of changes
+- **Skipping "small" changes** - Constants, config, and doc changes matter
+- **Ignoring bug fixes** - These are often HIGH PRIORITY to document
+- **Missing test infrastructure** - Test improvements affect development velocity
+- **Incomplete commit review** - Every commit tells part of the story
+- **Vague descriptions** - "Updated files" tells reviewers nothing
+
+## Troubleshooting
+
+**No PR exists yet:**
+```bash
+# Create the PR first
+gh pr create --title "Title" --body "WIP"
+# Then run the update process
+```
+
+**Verify the base branch:**
+```bash
+# Check which base branch the PR is targeting
+gh pr view --json baseRefName -q '.baseRefName'
+
+# Or detect the default branch if no PR exists (with a guaranteed fallback)
+BASE=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || echo 'refs/remotes/origin/main')
+echo "${BASE#refs/remotes/origin/}"
+```
+
+**gh CLI not authenticated:**
+```bash
+gh auth status
+gh auth login
+```
+
+---
+
+**Remember**: The PR may contain a week's worth of work across multiple areas. Your job is to tell the complete story, not just the headline feature.