commit 36a6fff8d82a2ded84cce091354edcf7b1a5c71f Author: Zhongwei Li Date: Sun Nov 30 08:58:05 2025 +0800 Initial commit diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..8fcbca4 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,13 @@ +{ + "name": "dev-flow", + "description": "Skills to optimize and enhance the development process - work immediately after installation", + "version": "0.0.0-2025.11.28", + "author": { + "name": "slamb2k", + "email": "slamb2k@users.noreply.github.com" + }, + "skills": [ + "./skills/cyberarian", + "./skills/start-right" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..b22a957 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# dev-flow + +Skills to optimize and enhance the development process - work immediately after installation diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..7eafe8a --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,108 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:slamb2k/mad-skills:dev-flow", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "f7fb5282b651474c6d9d53bfb690fbd7b9ff62ef", + "treeHash": "5c8afb1fcfc084037a00a97f4b5b1e7dad87679a20aea48d40ab5e57470c67c0", + "generatedAt": "2025-11-28T10:28:25.120163Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "dev-flow", + "description": "Skills to optimize and enhance the development process - work immediately after installation" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "993b8f62ab6a539ef69e7f6ec11750f4f3024c2d2c84ea0bf2e30a1866533ce5" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "be927dff7224f572ae548993f9274e8a63b51da898b42a8ab29d199c85e3fab1" + }, + { + "path": "skills/cyberarian/SKILL.md", + "sha256": "6e1758f5d0b095a9e09018de89a31e7acf0c3799b04a16008f20a8a2dc9cbf79" + }, + { + "path": "skills/cyberarian/references/metadata-schema.md", + "sha256": "65288414209546b8c6575d92b8864ad6b8bc51f9e393e9502059b4e4ecfa7e93" + }, + { + "path": "skills/cyberarian/references/archiving-criteria.md", + "sha256": "296eb3b76972d917363d05486cf014cf179e7ec91572749edb12194d95c02e9d" + }, + { + "path": "skills/cyberarian/agents/doc-librarian-subagent.md", + "sha256": "8532792e65cf9f8396c92912972cfda21fc718f17ac4f34f03be1a9452d7249b" + }, + { + "path": "skills/cyberarian/scripts/archive_docs.py", + "sha256": "22ba5ada9830b84349f4cb9748e5f90fa309884325cfce165b68a352e92fbf4d" + }, + { + "path": "skills/cyberarian/scripts/index_docs.py", + "sha256": "a8f0109777352c2f25608d7bdc5c52e71f13378fab57b5f09c8d975931803fa8" + }, + { + "path": "skills/cyberarian/scripts/init_docs_structure.py", + "sha256": "096ff7ed29691975404dfcb3dfcbde76a2c90fe5d0c73e2209d9c66fd5a8b482" + }, + { + "path": "skills/cyberarian/scripts/validate_doc_metadata.py", + "sha256": "46544383dba305edd3b2ab27a6b2fd733d7422aeccecb4a62a2ed725b7f914f1" + }, + { + "path": "skills/cyberarian/assets/doc_template.md", + "sha256": "19a321a0eda590a817d045c989ff5debe20c9d22dd9d9791594b8c7598df3ece" + }, + { + "path": "skills/start-right/SKILL.md", + "sha256": "cbd61836d22137bc03f3ffd78e0961eed8e3ed30c8a501b9788d283ae1404275" + }, + { + "path": 
"skills/start-right/references/release-strategies.md", + "sha256": "151fdaadf6d4c473f64385e5e98aebbd95ce9f4949fa7d3bb7c77e55d2e63c00" + }, + { + "path": "skills/start-right/references/project-types.md", + "sha256": "56c164aa205f3c2cbf64a672ccdc918650bfa8f570bc0f4726b59b7c5292b63f" + }, + { + "path": "skills/start-right/scripts/setup_tooling.py", + "sha256": "df9b6524a6a1ce37cead0ddf5e447e2d9995bc79b40945189bc37a95a25bec80" + }, + { + "path": "skills/start-right/scripts/setup_branch_protection.py", + "sha256": "093497db35e93df18f26f4f48239b046180c9574e3606093c6fe9ff3d4cda13c" + }, + { + "path": "skills/start-right/scripts/generate_workflows.py", + "sha256": "4c33af6c2b25d6b289526c936ac29d3b93cb98bb4a1f6c7b87aee80a44a5e70a" + }, + { + "path": "skills/start-right/scripts/init_git_repo.py", + "sha256": "290ccc3d15ad284b83f4015bcd8ba91bb8618f55064ebbf6c924162fcfa210cd" + }, + { + "path": "skills/start-right/scripts/setup_git_hooks.py", + "sha256": "204f5a9d0714ad6df33b5f203e9a6ba3d6380af0a0084ddb45230e801fc2cda6" + } + ], + "dirSha256": "5c8afb1fcfc084037a00a97f4b5b1e7dad87679a20aea48d40ab5e57470c67c0" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/cyberarian/SKILL.md b/skills/cyberarian/SKILL.md new file mode 100644 index 0000000..271cbdf --- /dev/null +++ b/skills/cyberarian/SKILL.md @@ -0,0 +1,333 @@ +--- +name: cyberarian +description: The digital librarian for Claude Code projects. Enforces structured document lifecycle management - organizing, indexing, and archiving project documentation automatically. Use when creating, organizing, or managing project documentation. Ensures documents are created in the proper `docs/` directory structure with required metadata, handles temporary documents in system temp directories, maintains an auto-generated index, and performs automatic archiving of old/complete documents. Use for any task involving document creation, organization, or maintenance. +--- + +# Cyberarian - Document Lifecycle Management + +This skill enforces a structured approach to documentation in Claude Code projects, ensuring consistency, discoverability, and automatic maintenance. + +## Core Principles + +1. **Structured Organization**: All persistent documentation goes in `docs/` with semantic categorization +2. **No Temporary Docs in docs/**: Ephemeral/scratch documents belong in `/tmp` or system temp, never in `docs/` +3. **Metadata-Driven**: YAML frontmatter enables automation and lifecycle management +4. **Automatic Maintenance**: Indexing and archiving happen automatically, not manually +5. **Context Efficiency**: Bulk operations delegate to subagents to preserve main context + +## Context-Efficient Operations + +### The Problem + +Document management operations can produce verbose output that pollutes the main agent's context: +- Validation scripts listing many errors across files +- Index generation scanning dozens of documents +- Archive operations listing all files being moved +- Search results returning many matches + +### The Solution: Subagent Delegation + +**Delegate to Task subagent** for operations that return verbose output. The subagent absorbs the verbose output in its isolated context and returns a concise summary (<50 tokens). 
+ +### Delegation Rules + +**Execute directly** (simple, low-output): +- Creating a single document from template +- Reading a specific document's metadata +- Checking if `docs/` directory exists + +**Delegate to Task subagent** (complex, verbose): +- Running validation across all documents +- Regenerating the index +- Archiving operations (especially dry-run) +- Searching documents by tag/status/category +- Summarizing INDEX.md contents +- Any operation touching multiple files + +### Delegation Pattern + +When verbose output is expected: + +``` +1. Recognize the operation will be verbose +2. Delegate to Task subagent with explicit instructions +3. Subagent executes scripts, absorbs output +4. Subagent parses and returns summary <50 tokens +5. Main agent receives only essential summary +``` + +**Task subagent prompt format:** +``` +Execute document operation and return concise summary: +- Run: [command] +- Parse: Extract [specific data needed] +- Return: [emoji] [state] | [metric] | [next action] +- Limit: <50 tokens + +Use agents/doc-librarian-subagent.md patterns for response formatting. +``` + +### Response Formats + +**Success:** `āœ“ [result] | [metric] | Next: [action]` +**List:** `šŸ“‹ [N] items: [item1], [item2], ... (+[remainder] more)` +**Error:** `āŒ [operation] failed | Reason: [brief] | Fix: [action]` +**Warning:** `āš ļø [concern] | Impact: [brief] | Consider: [action]` + +## Directory Structure + +``` +docs/ +ā”œā”€ā”€ README.md # Human-written guide to the structure +ā”œā”€ā”€ INDEX.md # Auto-generated index of all documents +ā”œā”€ā”€ ai_docs/ # Reference materials for Claude Code (SDKs, APIs, repo context) +ā”œā”€ā”€ specs/ # Feature and migration specifications +ā”œā”€ā”€ analysis/ # Investigation outputs (bugs, optimization, cleanup) +ā”œā”€ā”€ plans/ # Implementation plans +ā”œā”€ā”€ templates/ # Reusable templates +└── archive/ # Historical and completed documents + ā”œā”€ā”€ specs/ + ā”œā”€ā”€ analysis/ + └── plans/ +``` + +## Workflows + +### First-Time Setup + +When a project doesn't have a `docs/` directory: + +1. **Initialize the structure**: + ```bash + python scripts/init_docs_structure.py + ``` + This creates all directories, README.md, and initial INDEX.md + +2. **Inform the user** about the structure and conventions + +### Creating a New Document + +When asked to create documentation (specs, analysis, plans, etc.): + +1. **Determine the category**: + - **ai_docs**: SDKs, API references, repo architecture, coding conventions + - **specs**: Feature specifications, migration plans, technical designs + - **analysis**: Bug investigations, performance analysis, code audits + - **plans**: Implementation plans, rollout strategies, task breakdowns + - **templates**: Reusable document templates + +2. **Use the template**: + ```bash + cp assets/doc_template.md docs//.md + ``` + +3. **Fill in metadata**: + - Set `title`, `category`, `status`, `created`, `last_updated` + - Add relevant `tags` + - Start with `status: draft` + +4. **Write the content** following the document structure + +5. **Update the index**: + ```bash + python scripts/index_docs.py + ``` + +**File naming convention**: Use lowercase with hyphens, descriptive names: +- āœ… `oauth2-migration-spec.md` +- āœ… `auth-performance-analysis.md` +- āŒ `spec1.md` +- āŒ `MyDocument.md` + +### Working with Existing Documents + +When modifying existing documentation: + +1. **Update metadata**: + - Set `last_updated` to current date + - Update `status` if lifecycle changes (draft → active → complete) + +2. 
**Regenerate index** if significant changes: + ```bash + python scripts/index_docs.py + ``` + +### Creating Temporary/Scratch Documents + +When creating ephemeral documents (scratchpads, temporary notes, single-use docs): + +**NEVER create in docs/** - Use system temp instead: + +```bash +# Create in /tmp for Linux/macOS +/tmp/scratch-notes.md +/tmp/debug-output.txt + +# Let the system clean up temporary files +``` + +**Why**: The `docs/` directory is for persistent, managed documentation. Temporary files clutter the structure and interfere with indexing and archiving. + +### Regular Maintenance + +**When to run**: +- After creating/modifying documents: Update index +- Weekly/monthly: Run archiving to clean up completed work +- Before commits: Validate metadata + +**Maintenance workflow** (delegate to Task subagent for context efficiency): + +1. **Validate metadata** → Delegate to subagent: + ``` + Task: Run python scripts/validate_doc_metadata.py + Return: āœ“ [N] valid | [N] issues: [list top 3] | Next: [action] + ``` + +2. **Archive old documents** → Delegate to subagent: + ``` + Task: Run python scripts/archive_docs.py --dry-run + Return: šŸ“¦ [N] ready for archive: [list top 3] | Next: Run archive + + Task: Run python scripts/archive_docs.py + Return: āœ“ Archived [N] docs | Categories: [list] | Index updated + ``` + +3. **Update index** → Delegate to subagent: + ``` + Task: Run python scripts/index_docs.py + Return: āœ“ Index updated | [N] documents | Categories: [summary] + ``` + +**Why delegate?** These operations can scan dozens of files and produce verbose output. Subagent isolation keeps the main context clean for reasoning. + +### Archiving Documents + +Archiving happens automatically based on category-specific rules. See `references/archiving-criteria.md` for full details. + +**Quick reference**: +- `specs/`: Auto-archive when `status: complete` AND >90 days +- `analysis/`: Auto-archive when `status: complete` AND >60 days +- `plans/`: Auto-archive when `status: complete` AND >30 days +- `ai_docs/`: Manual archiving only +- `templates/`: Never auto-archive + +**To prevent auto-archiving**, set in frontmatter: +```yaml +archivable_after: 2025-12-31 +``` + +## Metadata Requirements + +Every document must have YAML frontmatter. See `references/metadata-schema.md` for complete schema. 
+ +**Minimal required frontmatter**: +```yaml +--- +title: Document Title +category: specs +status: draft +created: 2024-11-16 +last_updated: 2024-11-16 +tags: [] +--- +``` + +**Lifecycle statuses**: +- `draft` → Document being created +- `active` → Current and relevant +- `complete` → Work done, kept for reference +- `archived` → Moved to archive + +## Reference Files + +Load these when needed for detailed guidance: + +- **references/metadata-schema.md**: Complete YAML frontmatter specification +- **references/archiving-criteria.md**: Detailed archiving rules and philosophy +- **agents/doc-librarian-subagent.md**: Subagent template for context-efficient operations + +## Scripts Reference + +All scripts accept optional path argument (defaults to current directory): + +- `scripts/init_docs_structure.py [path]` - Initialize docs structure +- `scripts/index_docs.py [path]` - Regenerate INDEX.md +- `scripts/archive_docs.py [path] [--dry-run]` - Archive old documents +- `scripts/validate_doc_metadata.py [path]` - Validate all metadata + +## Common Patterns + +### Creating a Specification +```bash +# Copy template +cp assets/doc_template.md docs/specs/new-feature-spec.md + +# Edit with proper metadata +# category: specs +# status: draft +# tags: [feature-name, relevant-tags] + +# Update index +python scripts/index_docs.py +``` + +### Completing Work +```bash +# Update document metadata +# status: draft → active → complete +# last_updated: + +# After a while, archiving script will auto-archive +python scripts/archive_docs.py +``` + +### Finding Documents + +**Delegate searches to subagent** for context efficiency: + +``` +Task: Summarize docs/INDEX.md +Return: šŸ“Š [N] total docs | Categories: [breakdown] | Recent: [latest doc] + +Task: Search docs for tag "performance" +Run: grep -r "tags:.*performance" docs/ --include="*.md" | head -10 +Return: šŸ“‹ [N] docs match: [path1], [path2], ... | Next: Read [most relevant] + +Task: Find all draft documents +Run: grep -r "status: draft" docs/ --include="*.md" +Return: šŸ“‹ [N] drafts: [list top 5] | Next: [action] +``` + +**Direct execution** (only for quick checks): +```bash +# Check if docs/ exists +ls docs/ 2>/dev/null +``` + +## Best Practices + +1. **Always use metadata**: Don't skip the frontmatter, it enables automation +2. **Keep status current**: Update as work progresses (draft → active → complete) +3. **Use descriptive names**: File names should be clear and searchable +4. **Update dates**: Set `last_updated` when making significant changes +5. **Run maintenance regularly**: Index and archive periodically +6. **Temp goes in /tmp**: Never create temporary/scratch docs in docs/ +7. **Validate before committing**: Run `validate_doc_metadata.py` to catch issues +8. 
**Delegate bulk operations**: Use Task subagents for validation, indexing, archiving, and search to preserve main context + +## Error Handling + +**Document has no frontmatter**: +- Add frontmatter using `assets/doc_template.md` as reference +- Run `validate_doc_metadata.py` to confirm + +**Document in wrong category**: +- Move file to correct category directory +- Update `category` field in frontmatter to match +- Regenerate index + +**Archived document still needed**: +- Move from `archive//` back to `/` +- Update `status` from `archived` to `active` +- Remove `archived_date` and `archive_reason` fields +- Regenerate index diff --git a/skills/cyberarian/agents/doc-librarian-subagent.md b/skills/cyberarian/agents/doc-librarian-subagent.md new file mode 100644 index 0000000..c3ff095 --- /dev/null +++ b/skills/cyberarian/agents/doc-librarian-subagent.md @@ -0,0 +1,311 @@ +# doc-librarian Subagent Template + +**Use this template when delegating document operations via Task tool** + +--- + +You are **doc-librarian**, a specialized subagent for context-efficient document lifecycle management operations. + +## Your Mission + +Execute document management operations (scanning, indexing, validation, archiving, searching) while maintaining extreme context efficiency. You absorb verbose script output in your isolated context and return only essential summaries to the main orchestration agent. + +## Core Principles + +### 1. Context Efficiency is Paramount +- Your context window is disposable; the main agent's is precious +- All verbose output stays in YOUR context +- Return summaries under 50 tokens +- Think: "What decision does the main agent need to make?" + +### 2. Structured Processing +- Parse script output before summarizing +- Extract only decision-relevant information +- Suppress verbose tracebacks with `2>/dev/null` + +### 3. 
Actionable Intelligence +- Don't just report status; recommend next actions +- Format: `[emoji] [current state] | [key metric] | [next action]` +- Example: `āœ“ 12 docs indexed | 3 need metadata fixes | Run validation` + +## Operation Patterns + +### Document Scanning/Indexing + +**Regenerate index:** +```bash +python scripts/index_docs.py 2>/dev/null +``` + +**Return format:** +``` +āœ“ Index updated | [N] documents | Categories: [list top 3] +``` + +**If errors:** +``` +āŒ Index failed | Missing docs/ directory | Run: python scripts/init_docs_structure.py +``` + +### Validation Operations + +**Validate all documents:** +```bash +python scripts/validate_doc_metadata.py 2>/dev/null +``` + +**Return format (success):** +``` +āœ“ All [N] documents valid | Ready to commit +``` + +**Return format (errors):** +``` +āŒ [N] documents have issues: + • [path1]: Missing [field] + • [path2]: Invalid [field] + (+[remainder] more) +Next: Fix metadata in listed files +``` + +### Archiving Operations + +**Check what would be archived (dry run):** +```bash +python scripts/archive_docs.py --dry-run 2>/dev/null +``` + +**Return format:** +``` +šŸ“¦ [N] documents ready for archive: + • specs/[doc1] (complete, 95 days old) + • analysis/[doc2] (complete, 70 days old) +Next: Run `python scripts/archive_docs.py` to archive +``` + +**Execute archiving:** +```bash +python scripts/archive_docs.py 2>/dev/null +``` + +**Return format:** +``` +āœ“ Archived [N] documents | Moved to archive/[categories] | Index updated +``` + +### Document Search + +**Search by tag:** +```bash +grep -r "tags:.*[search-term]" docs/ --include="*.md" 2>/dev/null | head -10 +``` + +**Return format:** +``` +šŸ“‹ [N] documents match "[term]": + • [path1]: [title] + • [path2]: [title] + (+[remainder] more) +``` + +**Search by status:** +```bash +grep -r "status: [status]" docs/ --include="*.md" 2>/dev/null | head -10 +``` + +**Return format:** +``` +šŸ“‹ [N] [status] documents: + • [path1]: [title] + • [path2]: [title] +Next: [action based on status] +``` + +### Index Summary + +**Read and summarize INDEX.md:** +```bash +head -50 docs/INDEX.md 2>/dev/null +``` + +**Return format:** +``` +šŸ“Š Documentation Summary: + Total: [N] documents + Categories: [category1] ([n1]), [category2] ([n2]), ... + Recent: [most recent doc title] +``` + +### Structure Initialization + +**Initialize docs structure:** +```bash +python scripts/init_docs_structure.py 2>/dev/null +``` + +**Return format:** +``` +āœ“ docs/ structure created | Categories: ai_docs, specs, analysis, plans, templates | Next: Add first document +``` + +## Response Templates + +### Success Operations +``` +āœ“ [operation completed] | [key result] | Next: [action] +``` + +### Status Checks +``` +šŸ“Š [metric]: [value] | [metric]: [value] | [recommendation] +``` + +### Lists (max 5 items) +``` +šŸ“‹ [N] items: + • [item 1] - [detail] + • [item 2] - [detail] + • [item 3] - [detail] + (+[remainder] more) +``` + +### Errors +``` +āŒ [operation] failed | Reason: [brief explanation] | Fix: [action] +``` + +### Warnings +``` +āš ļø [concern] | Impact: [brief] | Consider: [action] +``` + +## Decision-Making Framework + +When processing script output, ask yourself: + +1. **What decision is the main agent trying to make?** + - Creating doc? → Return category guidance + template location + - Maintenance? → Return what needs attention + priority + - Searching? → Return matching docs + relevance + +2. 
**What's the minimum information needed?** + - Counts: totals and breakdowns only + - Lists: top 5 items + count of remainder + - Errors: specific files and fixes, not full tracebacks + +3. **What action should follow?** + - Always recommend the logical next step + - Make it concrete: "Fix metadata in specs/auth-spec.md" not "fix issues" + +## Error Handling + +**When scripts fail:** +```bash +python scripts/validate_doc_metadata.py 2>&1 +EXIT_CODE=$? + +if [ $EXIT_CODE -ne 0 ]; then + # Return actionable error + echo "āŒ Validation failed | Check: docs/ exists | Fix: python scripts/init_docs_structure.py" +fi +``` + +**When no documents found:** +``` +ā„¹ļø No documents in [category] | Reason: empty directory | Next: Create first doc with template +``` + +## Critical Rules + +### ALWAYS: +1. āœ“ Run scripts with proper path context +2. āœ“ Suppress stderr for clean parsing: `2>/dev/null` +3. āœ“ Parse before returning (no raw script output) +4. āœ“ Keep responses under 50 tokens +5. āœ“ Include next action recommendation +6. āœ“ Use emoji prefixes for visual parsing (āœ“ āŒ šŸ“‹ āš ļø ā„¹ļø šŸ“Š šŸ“¦) + +### NEVER: +1. āŒ Return full file contents to main agent +2. āŒ Return raw INDEX.md (summarize it) +3. āŒ Return full validation output (summarize errors) +4. āŒ Return more than 5 list items (summarize remainder) +5. āŒ Make the main agent parse verbose output +6. āŒ Forget the next action recommendation + +## Examples + +### Good Response +``` +User: "Check documentation health" +You execute: python scripts/validate_doc_metadata.py 2>/dev/null +You return: "āœ“ 15 docs | 12 valid | 3 need fixes: specs/auth.md, analysis/perf.md, plans/q4.md | Next: Fix missing 'status' field" +Tokens: 32 +Main agent: Knows exactly what to fix +``` + +### Bad Response +``` +User: "Check documentation health" +You execute: python scripts/validate_doc_metadata.py +You return: [Full validation output with all file paths, all errors, verbose formatting] +Tokens: 500+ +Main agent: Context polluted, overwhelmed with details +``` + +### Good Search Response +``` +User: "Find authentication docs" +You execute: grep -r "tags:.*auth" docs/ | head -5 +You return: "šŸ“‹ 4 docs match 'auth': specs/oauth-migration.md, analysis/auth-audit.md, plans/auth-refactor.md, ai_docs/auth-sdk.md | Next: Read specs/oauth-migration.md for current spec" +Tokens: 38 +Main agent: Has what they need to proceed +``` + +### Bad Search Response +``` +User: "Find authentication docs" +You execute: grep -r "auth" docs/ +You return: [200 lines of grep output with every mention of 'auth'] +Tokens: 1,200 +Main agent: Can't find the actual documents in the noise +``` + +## Philosophy + +You are a **filter**, not a **conduit**. + +- **Conduit:** Passes data through unchanged → context pollution +- **Filter:** Extracts essence, provides intelligence → context efficiency + +Your value is in **compression without information loss**. The main agent should never need the verbose output you processed; your summary should contain every decision-relevant fact. 
+ +## Integration with Main Workflows + +When the main agent uses you as part of larger workflows: + +```markdown +# Example: Documentation maintenance workflow + +Main Agent: "Let's do documentation maintenance" +Main Agent → You: "Check validation status" +You: "āœ“ 20 docs | 18 valid | 2 issues | Next: Fix specs/api.md (missing status)" + +Main Agent: "Fix the issues" [edits files] +Main Agent → You: "Re-validate" +You: "āœ“ All 20 documents valid | Ready to archive check" + +Main Agent → You: "Check what should be archived" +You: "šŸ“¦ 3 docs ready: analysis/q2-review.md, specs/old-feature.md, plans/done-task.md | Next: Run archive" + +Main Agent → You: "Archive them" +You: "āœ“ Archived 3 docs to archive/ | Index updated | Maintenance complete" +``` + +Your responses enable the main agent to orchestrate smoothly without getting bogged down in script output. + +--- + +**Remember:** You are doc-librarian. Your job is to keep the main orchestration agent's context clean while providing precise, actionable intelligence about documentation operations. Every response should answer: "What's the state?" and "What should we do next?" + +Operate with extreme precision. The main agent's effectiveness depends on your context discipline. diff --git a/skills/cyberarian/assets/doc_template.md b/skills/cyberarian/assets/doc_template.md new file mode 100644 index 0000000..e6870a8 --- /dev/null +++ b/skills/cyberarian/assets/doc_template.md @@ -0,0 +1,30 @@ +--- +title: Your Document Title Here +category: specs # One of: ai_docs, specs, analysis, plans, templates +status: draft # One of: draft, active, complete, archived +created: YYYY-MM-DD +last_updated: YYYY-MM-DD +tags: [] # Add relevant tags: [tag1, tag2, tag3] +--- + +# Your Document Title Here + +## Overview + +Brief description of what this document covers. + +## [Section 1] + +Content goes here... + +## [Section 2] + +More content... + +## References + +- Related docs, links, etc. + +--- + +_Template usage: Copy this file and fill in the frontmatter and sections._ diff --git a/skills/cyberarian/references/archiving-criteria.md b/skills/cyberarian/references/archiving-criteria.md new file mode 100644 index 0000000..9d3ff2f --- /dev/null +++ b/skills/cyberarian/references/archiving-criteria.md @@ -0,0 +1,184 @@ +# Document Archiving Criteria + +Documents are automatically archived based on their category, status, and age. This ensures the active workspace remains focused on current, relevant documentation. + +## Archiving Philosophy + +**Goals:** +- Keep active directories focused on current work +- Preserve historical context in archive +- Automate routine maintenance while allowing manual control where needed +- Make archiving decisions deterministic and transparent + +**Non-goals:** +- Deleting documents (everything is preserved) +- Aggressive archiving that loses important context +- One-size-fits-all rules (categories have different lifecycles) + +## Category-Specific Rules + +### specs/ - Specifications +**Auto-archive**: Yes +**Criteria**: Status is `complete` AND >90 days since last_updated + +**Rationale**: Specs are valuable reference material even after implementation. 90 days allows for iteration, rollout, and bug fixes before archiving. + +**Manual override**: Set `archivable_after` date in frontmatter to defer archiving. 
+ +**Example scenarios:** +- āœ… Archive: Feature spec marked `complete` 100 days ago +- āŒ Skip: Active spec being refined +- āŒ Skip: Complete spec only 30 days old (still in rollout phase) + +### analysis/ - Investigation Outputs +**Auto-archive**: Yes +**Criteria**: Status is `complete` AND >60 days since last_updated + +**Rationale**: Analysis documents are point-in-time investigations. Once the work is done and changes are implemented, they have less ongoing value. 60 days allows for follow-up work. + +**Manual override**: Set `archivable_after` to keep important analyses active longer. + +**Example scenarios:** +- āœ… Archive: Bug investigation completed 70 days ago +- āœ… Archive: Performance analysis from 2 months ago +- āŒ Skip: Ongoing investigation (status: `active` or `draft`) + +### plans/ - Implementation Plans +**Auto-archive**: Yes +**Criteria**: Status is `complete` AND >30 days since last_updated + +**Rationale**: Plans become stale quickly. Once implementation is done, plans are primarily historical. 30 days accounts for plan execution and retrospective. + +**Manual override**: Set `archivable_after` for long-running initiatives. + +**Example scenarios:** +- āœ… Archive: Migration plan completed 45 days ago +- āœ… Archive: Sprint plan from last month (status: `complete`) +- āŒ Skip: Ongoing multi-phase plan (status: `active`) +- āŒ Skip: Just-completed plan (20 days old) + +### ai_docs/ - Reference Materials +**Auto-archive**: No +**Manual archiving only** + +**Rationale**: Reference materials (SDKs, API docs, repo context) are meant to be persistent. These inform Claude Code's understanding and should only be archived manually when truly obsolete. + +**When to manually archive:** +- SDK documentation for deprecated versions +- API references for sunset APIs +- Repository context for archived projects + +**Example scenarios:** +- āŒ Auto-archive: Never, regardless of age or status +- āœ… Manual: Move OAuth 1.0 docs when OAuth 2.0 is fully adopted +- āœ… Manual: Archive legacy API docs after migration complete + +### templates/ - Reusable Templates +**Auto-archive**: No +**Templates never auto-archive** + +**Rationale**: Templates are meant to be reused indefinitely. They don't have a lifecycle in the same way as other documents. + +**When to manually archive:** +- Deprecated templates that should no longer be used +- Templates replaced by improved versions + +**Best practice**: Instead of archiving, update templates in place or clearly mark as deprecated in the template itself. + +## Archive Structure + +Archived documents are moved to `archive/` while preserving their category: + +``` +archive/ +ā”œā”€ā”€ specs/ +│ └── oauth2-migration-spec.md +ā”œā”€ā”€ analysis/ +│ └── auth-perf-analysis.md +└── plans/ + └── q3-migration-plan.md +``` + +This structure: +- Maintains categorical organization +- Allows easy browsing of archived content +- Prevents mixing of categories in archive + +## Manual Archiving + +To manually archive a document: + +1. Move it to `archive//` +2. Update metadata: + ```yaml + status: archived + archived_date: YYYY-MM-DD + archive_reason: "Manual archiving: " + ``` +3. 
Run `scripts/index_docs.py` to update the index + +## Preventing Auto-Archiving + +To prevent a document from being auto-archived: + +**Option 1**: Keep status as `active` or `draft` +**Option 2**: Set explicit `archivable_after` date in frontmatter: + +```yaml +archivable_after: 2025-12-31 # Don't archive until after this date +``` + +This is useful for: +- Long-running projects +- Reference specs that should remain active +- Documents with ongoing relevance despite completion + +## Running the Archiving Script + +```bash +# Dry run to see what would be archived +python scripts/archive_docs.py --dry-run + +# Actually archive documents +python scripts/archive_docs.py + +# Archive and update index +python scripts/archive_docs.py && python scripts/index_docs.py +``` + +**Best practice**: Run archiving periodically (weekly or monthly) as part of documentation maintenance. + +## Retrieval from Archive + +Archived documents are not deleted and can be retrieved by: + +1. **Browsing**: Navigate to `archive//` +2. **Search**: Use grep or file search tools +3. **Index**: Check `INDEX.md` which includes archived documents +4. **Unarchiving**: Move document back to its category and update status + +To unarchive a document: +```bash +# Move file back +mv archive/specs/old-spec.md specs/ + +# Update metadata +# Change status from 'archived' to 'active' or appropriate status +# Remove archived_date and archive_reason fields +``` + +## Monitoring + +The archiving script provides a summary: +``` +Archive Summary: + Documents scanned: 45 + Documents archived: 3 + Documents skipped: 42 + Errors: 0 +``` + +Keep an eye on: +- **Unexpected archives**: Documents archived sooner than expected +- **Errors**: Failed archiving operations +- **Zero archives**: May indicate metadata issues (e.g., status never set to `complete`) diff --git a/skills/cyberarian/references/metadata-schema.md b/skills/cyberarian/references/metadata-schema.md new file mode 100644 index 0000000..3785f1b --- /dev/null +++ b/skills/cyberarian/references/metadata-schema.md @@ -0,0 +1,125 @@ +# Document Metadata Schema + +All documents in the docs/ directory must include YAML frontmatter with the following structure. 
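Before listing the fields, note that the maintenance scripts consume this frontmatter programmatically; a minimal sketch of how they read it (mirroring the `extract_frontmatter` helper in `scripts/index_docs.py` and the other scripts) is:

```python
import re
import yaml
from pathlib import Path

def extract_frontmatter(file_path: Path) -> dict:
    """Return a document's YAML frontmatter as a dict ({} if missing or invalid)."""
    content = file_path.read_text()
    # Frontmatter is the block between the opening and closing '---' delimiters
    match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL)
    if not match:
        return {}
    metadata = yaml.safe_load(match.group(1))
    return metadata if isinstance(metadata, dict) else {}
```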
+ +## Required Fields + +### title +- **Type**: String +- **Description**: Human-readable document title +- **Example**: `"OAuth2 Migration Specification"` + +### category +- **Type**: String (enum) +- **Description**: Document category, must match the directory it's in +- **Valid values**: + - `ai_docs` - Reference materials for Claude Code + - `specs` - Feature and migration specifications + - `analysis` - Investigation outputs + - `plans` - Implementation plans + - `templates` - Reusable templates + - `archive` - Historical documents (auto-set on archiving) +- **Example**: `specs` + +### status +- **Type**: String (enum) +- **Description**: Current lifecycle status of the document +- **Valid values**: + - `draft` - Document is being created + - `active` - Document is current and relevant + - `complete` - Work is done, kept for reference + - `archived` - Document has been archived +- **Example**: `active` +- **Lifecycle**: draft → active → complete → archived + +### created +- **Type**: Date (YYYY-MM-DD) +- **Description**: Date the document was created +- **Example**: `2024-11-16` + +### last_updated +- **Type**: Date (YYYY-MM-DD) +- **Description**: Date the document was last modified +- **Example**: `2024-11-16` +- **Note**: Should be updated whenever significant changes are made + +## Optional Fields + +### tags +- **Type**: List of strings +- **Description**: Keywords for categorization and search +- **Example**: `[auth, oauth2, security, migration]` +- **Best practice**: Use consistent tags across related documents + +### archivable_after +- **Type**: Date (YYYY-MM-DD) +- **Description**: Explicit date after which the document can be auto-archived +- **Example**: `2025-02-16` +- **Note**: Overrides category-based archiving rules when set + +### archived_date +- **Type**: Date (YYYY-MM-DD) +- **Description**: Date the document was archived (auto-set by archiving script) +- **Example**: `2024-12-01` + +### archive_reason +- **Type**: String +- **Description**: Reason for archiving (auto-set by archiving script) +- **Example**: `"90 days old (threshold: 90)"` + +### author +- **Type**: String +- **Description**: Document author or owner +- **Example**: `"Simon Lamb"` + +### related_docs +- **Type**: List of strings (file paths) +- **Description**: Links to related documents +- **Example**: `["specs/auth-system/oauth2-spec.md", "plans/oauth2-rollout.md"]` + +## Complete Example + +```yaml +--- +title: OAuth2 Migration Specification +category: specs +status: active +created: 2024-11-16 +last_updated: 2024-11-16 +tags: [auth, oauth2, security, migration] +author: Simon Lamb +related_docs: + - analysis/auth-system-audit.md + - plans/oauth2-implementation-plan.md +--- +``` + +## Validation + +Documents are validated using `scripts/validate_doc_metadata.py`. Run this before committing to ensure all metadata is correct. + +## Metadata Updates + +### When Creating a New Document +1. Copy from `assets/doc_template.md` +2. Fill in all required fields +3. Set status to `draft` +4. Set created and last_updated to current date + +### When Updating a Document +1. Update `last_updated` to current date +2. Update `status` if lifecycle stage changes +3. Add relevant `tags` if needed + +### When Completing Work +1. Set `status` to `complete` +2. Update `last_updated` to current date +3. Optionally set `archivable_after` if auto-archiving should be deferred + +## Best Practices + +1. **Consistent Tags**: Use a common vocabulary of tags across documents +2. 
**Accurate Status**: Keep status up to date as work progresses +3. **Related Docs**: Link to related documents for context and discoverability +4. **Regular Updates**: Update `last_updated` whenever making significant changes +5. **Descriptive Titles**: Use clear, specific titles that describe the content diff --git a/skills/cyberarian/scripts/archive_docs.py b/skills/cyberarian/scripts/archive_docs.py new file mode 100755 index 0000000..97adda6 --- /dev/null +++ b/skills/cyberarian/scripts/archive_docs.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python3 +""" +Automatically archive documents based on status, age, and category-specific rules. +Documents are moved to archive/ and their metadata is updated. +""" + +import os +import sys +import re +import shutil +from pathlib import Path +from datetime import datetime, timedelta +import yaml + + +# Archiving rules by category (days since last_updated) +ARCHIVING_RULES = { + 'specs': { + 'complete_after_days': 90, + 'auto_archive': True, + 'require_complete_status': True + }, + 'analysis': { + 'complete_after_days': 60, + 'auto_archive': True, + 'require_complete_status': True + }, + 'plans': { + 'complete_after_days': 30, + 'auto_archive': True, + 'require_complete_status': True + }, + 'ai_docs': { + 'auto_archive': False, # Manual archiving only for reference docs + }, + 'templates': { + 'auto_archive': False, # Never auto-archive templates + } +} + + +def extract_frontmatter(file_path: Path) -> tuple[dict, str]: + """Extract YAML frontmatter and remaining content from a markdown file.""" + try: + content = file_path.read_text() + + # Match YAML frontmatter between --- delimiters + match = re.match(r'^---\s*\n(.*?)\n---\s*\n(.*)', content, re.DOTALL) + if not match: + return {}, content + + frontmatter_text = match.group(1) + body = match.group(2) + metadata = yaml.safe_load(frontmatter_text) + + return (metadata if isinstance(metadata, dict) else {}), body + + except Exception as e: + print(f"āš ļø Warning: Could not parse {file_path}: {e}") + return {}, "" + + +def update_frontmatter(file_path: Path, metadata: dict) -> None: + """Update the YAML frontmatter in a markdown file.""" + _, body = extract_frontmatter(file_path) + + frontmatter = yaml.dump(metadata, default_flow_style=False, sort_keys=False) + new_content = f"---\n{frontmatter}---\n{body}" + + file_path.write_text(new_content) + + +def should_archive(metadata: dict, category: str, file_modified: datetime) -> tuple[bool, str]: + """ + Determine if a document should be archived based on rules. + Returns (should_archive, reason). 
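    Thresholds come from ARCHIVING_RULES for the document's category; age is measured from the 'last_updated' frontmatter date.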
+ """ + # Skip if already archived + if metadata.get('status') == 'archived': + return False, "already archived" + + # Get category rules + rules = ARCHIVING_RULES.get(category, {}) + + # Skip if auto-archiving is disabled for this category + if not rules.get('auto_archive', False): + return False, f"{category} does not auto-archive" + + # Check if status is 'complete' (required for most categories) + if rules.get('require_complete_status', False): + if metadata.get('status') != 'complete': + return False, "status is not 'complete'" + + # Check age-based archiving + complete_after_days = rules.get('complete_after_days') + if complete_after_days: + last_updated = metadata.get('last_updated') + if not last_updated: + return False, "no last_updated date in metadata" + + try: + if isinstance(last_updated, str): + updated_date = datetime.strptime(last_updated, '%Y-%m-%d').date() + else: + # YAML parser returns date objects, convert to date for comparison + updated_date = last_updated if hasattr(last_updated, 'year') else datetime.strptime(str(last_updated), '%Y-%m-%d').date() + + days_old = (datetime.now().date() - updated_date).days + + if days_old >= complete_after_days: + return True, f"{days_old} days old (threshold: {complete_after_days})" + except ValueError: + return False, "invalid last_updated date format" + + return False, "no archiving criteria met" + + +def archive_document(file_path: Path, docs_path: Path, reason: str, dry_run: bool = False) -> bool: + """ + Archive a document by moving it to archive/ and updating its metadata. + Returns True if successful. + """ + try: + # Read metadata + metadata, body = extract_frontmatter(file_path) + + # Determine archive path (preserve subdirectory structure) + relative_path = file_path.relative_to(docs_path) + category = relative_path.parts[0] + + # Create archive subdirectory for the category + archive_path = docs_path / 'archive' / category + archive_path.mkdir(parents=True, exist_ok=True) + + # Build destination path + archive_file = archive_path / file_path.name + + # Handle name conflicts + if archive_file.exists(): + base = archive_file.stem + suffix = archive_file.suffix + counter = 1 + while archive_file.exists(): + archive_file = archive_path / f"{base}_{counter}{suffix}" + counter += 1 + + if dry_run: + print(f" [DRY RUN] Would archive: {relative_path} → archive/{category}/{archive_file.name}") + print(f" Reason: {reason}") + return True + + # Update metadata + metadata['status'] = 'archived' + metadata['archived_date'] = datetime.now().strftime('%Y-%m-%d') + metadata['archive_reason'] = reason + + # Write updated file to archive + frontmatter = yaml.dump(metadata, default_flow_style=False, sort_keys=False) + new_content = f"---\n{frontmatter}---\n{body}" + archive_file.write_text(new_content) + + # Remove original + file_path.unlink() + + print(f" āœ… Archived: {relative_path} → archive/{category}/{archive_file.name}") + print(f" Reason: {reason}") + + return True + + except Exception as e: + print(f" āŒ Error archiving {file_path}: {e}") + return False + + +def scan_and_archive(docs_path: Path, dry_run: bool = False) -> dict: + """ + Scan all documents and archive those that meet criteria. + Returns statistics about the archiving operation. 
+ """ + stats = { + 'scanned': 0, + 'archived': 0, + 'skipped': 0, + 'errors': 0 + } + + skip_files = {'README.md', 'INDEX.md', '.gitkeep'} + skip_dirs = {'archive'} + + for category_dir in docs_path.iterdir(): + if not category_dir.is_dir() or category_dir.name in skip_dirs or category_dir.name.startswith('.'): + continue + + category_name = category_dir.name + + # Find all markdown files + for md_file in category_dir.rglob('*.md'): + if md_file.name in skip_files: + continue + + stats['scanned'] += 1 + + # Extract metadata + metadata, _ = extract_frontmatter(md_file) + file_stats = md_file.stat() + file_modified = datetime.fromtimestamp(file_stats.st_mtime) + + # Check if should archive + should_arch, reason = should_archive(metadata, category_name, file_modified) + + if should_arch: + success = archive_document(md_file, docs_path, reason, dry_run) + if success: + stats['archived'] += 1 + else: + stats['errors'] += 1 + else: + stats['skipped'] += 1 + + return stats + + +def main(): + """Main entry point.""" + dry_run = '--dry-run' in sys.argv + + # Get base path + args = [arg for arg in sys.argv[1:] if not arg.startswith('--')] + if args: + base_path = Path(args[0]).resolve() + else: + base_path = Path.cwd() + + docs_path = base_path / 'docs' + + if not docs_path.exists(): + print(f"āŒ Error: docs/ directory not found at {docs_path}") + sys.exit(1) + + print(f"Scanning documents in: {docs_path}") + if dry_run: + print("šŸ” DRY RUN MODE - No files will be modified") + print() + + # Scan and archive + stats = scan_and_archive(docs_path, dry_run) + + print() + print("=" * 60) + print("Archive Summary:") + print(f" Documents scanned: {stats['scanned']}") + print(f" Documents archived: {stats['archived']}") + print(f" Documents skipped: {stats['skipped']}") + print(f" Errors: {stats['errors']}") + print() + + if not dry_run and stats['archived'] > 0: + print("šŸ’” Tip: Run 'python scripts/index_docs.py' to update the documentation index") + + +if __name__ == '__main__': + main() diff --git a/skills/cyberarian/scripts/index_docs.py b/skills/cyberarian/scripts/index_docs.py new file mode 100755 index 0000000..7258955 --- /dev/null +++ b/skills/cyberarian/scripts/index_docs.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python3 +""" +Generate and update the INDEX.md file by scanning all documents in docs/. +Reads YAML frontmatter to extract metadata and organize the index. 
+""" + +import os +import sys +import re +from pathlib import Path +from datetime import datetime +from collections import defaultdict +import yaml + + +def extract_frontmatter(file_path: Path) -> dict: + """Extract YAML frontmatter from a markdown file.""" + try: + content = file_path.read_text() + + # Match YAML frontmatter between --- delimiters + match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL) + if not match: + return {} + + frontmatter_text = match.group(1) + metadata = yaml.safe_load(frontmatter_text) + + return metadata if isinstance(metadata, dict) else {} + + except Exception as e: + print(f"āš ļø Warning: Could not parse frontmatter in {file_path}: {e}") + return {} + + +def get_file_stats(file_path: Path) -> dict: + """Get file statistics.""" + stats = file_path.stat() + return { + 'size': stats.st_size, + 'modified': datetime.fromtimestamp(stats.st_mtime) + } + + +def scan_documents(docs_path: Path) -> dict: + """Scan all markdown documents in docs/ and extract metadata.""" + categories = defaultdict(list) + + # Skip these files/directories + skip_files = {'README.md', 'INDEX.md', '.gitkeep'} + skip_dirs = {'archive'} # We'll handle archive separately + + for category_dir in docs_path.iterdir(): + if not category_dir.is_dir() or category_dir.name.startswith('.'): + continue + + category_name = category_dir.name + + # Find all markdown files + for md_file in category_dir.rglob('*.md'): + if md_file.name in skip_files: + continue + + # Extract metadata + metadata = extract_frontmatter(md_file) + stats = get_file_stats(md_file) + + # Build document entry + relative_path = md_file.relative_to(docs_path) + doc_entry = { + 'path': str(relative_path), + 'title': metadata.get('title', md_file.stem), + 'status': metadata.get('status', 'unknown'), + 'created': metadata.get('created', 'unknown'), + 'last_updated': metadata.get('last_updated', stats['modified'].strftime('%Y-%m-%d')), + 'tags': metadata.get('tags', []), + 'category': category_name, + 'file_modified': stats['modified'] + } + + categories[category_name].append(doc_entry) + + return categories + + +def generate_index(categories: dict) -> str: + """Generate the INDEX.md content.""" + total_docs = sum(len(docs) for docs in categories.values()) + + index_lines = [ + "# Documentation Index", + "", + f"Auto-generated index of all documents. Last updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", + "", + "Run `python scripts/index_docs.py` to regenerate this index.", + "", + "---", + "", + "## Summary", + "", + f"Total documents: {total_docs}", + "" + ] + + # Add category breakdown + if categories: + index_lines.append("By category:") + for category in sorted(categories.keys()): + count = len(categories[category]) + index_lines.append(f"- **{category}**: {count} document{'s' if count != 1 else ''}") + index_lines.append("") + + index_lines.append("---") + index_lines.append("") + + # Add documents by category + if not categories: + index_lines.append("_No documents found. 
Add documents to the category directories and regenerate the index._") + else: + for category in sorted(categories.keys()): + docs = categories[category] + docs.sort(key=lambda d: d['last_updated'], reverse=True) + + index_lines.append(f"## {category.replace('_', ' ').title()}") + index_lines.append("") + + for doc in docs: + # Format: [Title](path) - status | updated: date | tags + title_link = f"[{doc['title']}]({doc['path']})" + status_badge = f"**{doc['status']}**" + updated = f"updated: {doc['last_updated']}" + tags = f"tags: [{', '.join(doc['tags'])}]" if doc['tags'] else "" + + parts = [title_link, status_badge, updated] + if tags: + parts.append(tags) + + index_lines.append(f"- {' | '.join(parts)}") + + index_lines.append("") + + return '\n'.join(index_lines) + + +def main(): + """Main entry point.""" + if len(sys.argv) > 1: + base_path = Path(sys.argv[1]).resolve() + else: + base_path = Path.cwd() + + docs_path = base_path / 'docs' + + if not docs_path.exists(): + print(f"āŒ Error: docs/ directory not found at {docs_path}") + print("Run 'python scripts/init_docs_structure.py' first to initialize the structure.") + sys.exit(1) + + print(f"Scanning documents in: {docs_path}") + + # Scan all documents + categories = scan_documents(docs_path) + + # Generate index content + index_content = generate_index(categories) + + # Write INDEX.md + index_path = docs_path / 'INDEX.md' + index_path.write_text(index_content) + + total_docs = sum(len(docs) for docs in categories.values()) + print(f"āœ… Generated index with {total_docs} documents") + print(f"āœ… Updated: {index_path}") + + +if __name__ == '__main__': + main() diff --git a/skills/cyberarian/scripts/init_docs_structure.py b/skills/cyberarian/scripts/init_docs_structure.py new file mode 100755 index 0000000..66da748 --- /dev/null +++ b/skills/cyberarian/scripts/init_docs_structure.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +""" +Initialize the docs/ directory structure for document lifecycle management. +Creates all required directories and initial README.md. +""" + +import os +import sys +from pathlib import Path +from datetime import datetime + + +DIRECTORY_STRUCTURE = { + 'ai_docs': 'Reference materials for Claude Code: SDKs, API docs, repo context', + 'specs': 'Feature and migration specifications', + 'analysis': 'Investigation outputs: bug hunting, optimization, cleanup', + 'plans': 'Implementation plans from specs, analysis, or ad-hoc tasks', + 'templates': 'Reusable document templates', + 'archive': 'Historical and completed documents' +} + + +README_TEMPLATE = """# Documentation Structure + +This directory contains project documentation organized by purpose and lifecycle stage. + +## Directory Structure + +{directory_descriptions} + +## Document Lifecycle + +Documents follow a lifecycle managed through YAML frontmatter: + +1. **Draft** → Document is being created +2. **Active** → Document is current and relevant +3. **Complete** → Work is done, kept for reference +4. **Archived** → Moved to archive/ when no longer relevant + +## Metadata Requirements + +All documents should include YAML frontmatter: + +```yaml +--- +title: Document Title +category: specs|analysis|plans|ai_docs|templates +status: draft|active|complete|archived +created: YYYY-MM-DD +last_updated: YYYY-MM-DD +tags: [tag1, tag2] +--- +``` + +See INDEX.md for a complete list of all documents. + +## Temporary Documents + +Ephemeral/scratch documents should be created in `/tmp` or system temp directories, +NOT in this docs/ directory. 
The docs/ directory is for persistent documentation only. + +--- +Last updated: {timestamp} +""" + + +def create_directory_structure(base_path: Path) -> None: + """Create the docs directory structure.""" + docs_path = base_path / 'docs' + + # Create main docs directory + docs_path.mkdir(exist_ok=True) + print(f"āœ… Created: {docs_path}") + + # Create category directories + for directory, description in DIRECTORY_STRUCTURE.items(): + dir_path = docs_path / directory + dir_path.mkdir(exist_ok=True) + print(f"āœ… Created: {dir_path}") + + # Create .gitkeep for empty directories + gitkeep = dir_path / '.gitkeep' + if not any(dir_path.iterdir()): + gitkeep.touch() + + +def create_readme(base_path: Path) -> None: + """Create the README.md file.""" + docs_path = base_path / 'docs' + readme_path = docs_path / 'README.md' + + # Format directory descriptions + descriptions = [] + for directory, description in DIRECTORY_STRUCTURE.items(): + descriptions.append(f"- **{directory}/** - {description}") + + readme_content = README_TEMPLATE.format( + directory_descriptions='\n'.join(descriptions), + timestamp=datetime.now().strftime('%Y-%m-%d') + ) + + readme_path.write_text(readme_content) + print(f"āœ… Created: {readme_path}") + + +def create_index(base_path: Path) -> None: + """Create initial INDEX.md file.""" + docs_path = base_path / 'docs' + index_path = docs_path / 'INDEX.md' + + index_content = f"""# Documentation Index + +Auto-generated index of all documents. Last updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} + +Run `python scripts/index_docs.py` to regenerate this index. + +--- + +## Summary + +Total documents: 0 + +--- + +_No documents found. Add documents to the category directories and regenerate the index._ +""" + + index_path.write_text(index_content) + print(f"āœ… Created: {index_path}") + + +def main(): + """Main entry point.""" + if len(sys.argv) > 1: + base_path = Path(sys.argv[1]).resolve() + else: + base_path = Path.cwd() + + print(f"Initializing docs structure at: {base_path}") + print() + + create_directory_structure(base_path) + create_readme(base_path) + create_index(base_path) + + print() + print("šŸŽ‰ Documentation structure initialized successfully!") + print() + print("Next steps:") + print("1. Add documents to the category directories") + print("2. Run 'python scripts/index_docs.py' to update the index") + print("3. Run 'python scripts/archive_docs.py' periodically to maintain the archive") + + +if __name__ == '__main__': + main() diff --git a/skills/cyberarian/scripts/validate_doc_metadata.py b/skills/cyberarian/scripts/validate_doc_metadata.py new file mode 100755 index 0000000..2c1defc --- /dev/null +++ b/skills/cyberarian/scripts/validate_doc_metadata.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +Validate that all documents have proper YAML frontmatter metadata. +Reports documents with missing or invalid metadata. 
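Exits with a non-zero status when any document is invalid, so it can be used as a commit or CI gate.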
+""" + +import sys +import re +from pathlib import Path +from datetime import datetime +import yaml + + +REQUIRED_FIELDS = ['title', 'category', 'status', 'created', 'last_updated'] +VALID_STATUSES = ['draft', 'active', 'complete', 'archived'] +VALID_CATEGORIES = ['ai_docs', 'specs', 'analysis', 'plans', 'templates', 'archive'] + + +def extract_frontmatter(file_path: Path) -> dict: + """Extract YAML frontmatter from a markdown file.""" + try: + content = file_path.read_text() + + # Match YAML frontmatter between --- delimiters + match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL) + if not match: + return None # No frontmatter found + + frontmatter_text = match.group(1) + metadata = yaml.safe_load(frontmatter_text) + + return metadata if isinstance(metadata, dict) else None + + except Exception as e: + return {'_error': str(e)} + + +def validate_date(date_str: str) -> bool: + """Validate date format (YYYY-MM-DD).""" + try: + datetime.strptime(str(date_str), '%Y-%m-%d') + return True + except (ValueError, TypeError): + return False + + +def validate_metadata(metadata: dict, category_from_path: str) -> list[str]: + """ + Validate metadata against requirements. + Returns list of validation errors (empty if valid). + """ + errors = [] + + if metadata is None: + return ["No YAML frontmatter found"] + + if '_error' in metadata: + return [f"Failed to parse frontmatter: {metadata['_error']}"] + + # Check required fields + for field in REQUIRED_FIELDS: + if field not in metadata: + errors.append(f"Missing required field: {field}") + + # Validate status + if 'status' in metadata: + if metadata['status'] not in VALID_STATUSES: + errors.append(f"Invalid status '{metadata['status']}'. Must be one of: {', '.join(VALID_STATUSES)}") + + # Validate category + if 'category' in metadata: + if metadata['category'] not in VALID_CATEGORIES: + errors.append(f"Invalid category '{metadata['category']}'. Must be one of: {', '.join(VALID_CATEGORIES)}") + elif metadata['category'] != category_from_path: + errors.append(f"Category mismatch: metadata says '{metadata['category']}' but file is in '{category_from_path}/'") + + # Validate dates + for date_field in ['created', 'last_updated']: + if date_field in metadata: + if not validate_date(metadata[date_field]): + errors.append(f"Invalid {date_field} date format. Must be YYYY-MM-DD") + + # Validate tags (optional but must be list if present) + if 'tags' in metadata: + if not isinstance(metadata['tags'], list): + errors.append("Tags must be a list") + + return errors + + +def scan_and_validate(docs_path: Path) -> dict: + """ + Scan all documents and validate their metadata. + Returns validation results. 
+ """ + results = { + 'valid': [], + 'invalid': [], + 'no_frontmatter': [], + 'total': 0 + } + + skip_files = {'README.md', 'INDEX.md', '.gitkeep'} + + for category_dir in docs_path.iterdir(): + if not category_dir.is_dir() or category_dir.name.startswith('.'): + continue + + category_name = category_dir.name + + # Find all markdown files + for md_file in category_dir.rglob('*.md'): + if md_file.name in skip_files: + continue + + results['total'] += 1 + relative_path = md_file.relative_to(docs_path) + + # Extract and validate metadata + metadata = extract_frontmatter(md_file) + errors = validate_metadata(metadata, category_name) + + if not errors: + results['valid'].append(str(relative_path)) + else: + results['invalid'].append({ + 'path': str(relative_path), + 'errors': errors + }) + + return results + + +def main(): + """Main entry point.""" + if len(sys.argv) > 1: + base_path = Path(sys.argv[1]).resolve() + else: + base_path = Path.cwd() + + docs_path = base_path / 'docs' + + if not docs_path.exists(): + print(f"āŒ Error: docs/ directory not found at {docs_path}") + sys.exit(1) + + print(f"Validating documents in: {docs_path}") + print() + + # Scan and validate + results = scan_and_validate(docs_path) + + # Display results + print("=" * 60) + print("Validation Results:") + print(f" Total documents: {results['total']}") + print(f" āœ… Valid: {len(results['valid'])}") + print(f" āŒ Invalid: {len(results['invalid'])}") + print() + + if results['invalid']: + print("Invalid Documents:") + print() + for item in results['invalid']: + print(f" šŸ“„ {item['path']}") + for error in item['errors']: + print(f" • {error}") + print() + + if results['valid'] and not results['invalid']: + print("šŸŽ‰ All documents have valid metadata!") + + # Exit with error code if any invalid documents + sys.exit(1 if results['invalid'] else 0) + + +if __name__ == '__main__': + main() diff --git a/skills/start-right/SKILL.md b/skills/start-right/SKILL.md new file mode 100644 index 0000000..dd35469 --- /dev/null +++ b/skills/start-right/SKILL.md @@ -0,0 +1,376 @@ +--- +name: start-right +description: Comprehensive repository initialization and scaffolding for new projects. Use when setting up a new repository from scratch with git, GitHub, CI/CD workflows, branch protection, validation checks (format, lint, type-check, tests, builds), git hooks (husky/lefthook), GitHub Actions for PR and main branch validation, automated versioning and tagging, and project-specific release workflows. Ideal for solo developers who want production-ready repository setup including (1) Git initialization with main branch, (2) GitHub repository creation and configuration, (3) Branch protection rules, (4) PR workflow with squash merging and auto-delete branches, (5) Comprehensive validation checks, (6) Git hooks for pre-commit and pre-push validation, (7) GitHub Actions CI/CD pipelines, (8) Automated releases with GitHub Releases integration. +--- + +# Start Right + +Complete repository initialization and scaffolding for new projects with production-ready CI/CD workflows, validation checks, and automated releases. + +## Overview + +This skill provides end-to-end repository setup for solo developers, automating everything from git initialization to production deployment workflows. It handles project type detection, appropriate tooling configuration, GitHub repository creation, branch protection, validation checks, git hooks, and comprehensive GitHub Actions workflows. 
+ +## Workflow Decision Tree + +When a user requests repo initialization/scaffolding: + +1. **Determine project type** → Detect or ask user for project type (node/typescript/react/rust/python/go/docker/skill) +2. **Gather preferences** → Ask about: + - Repository visibility (public/private) + - Organization (if applicable) + - Validation checks to enable (format, lint, type-check, test, build, integration-test) + - Git hooks preference (husky for Node.js projects, lefthook for others) + - Release strategy (npm, github-pages, vercel, docker, binary, pypi, crates.io, skill) +3. **Execute scaffolding** → Run setup scripts in order +4. **Verify and report** → Confirm all components are configured correctly + +## Scaffolding Process + +### Step 1: Prerequisites Check + +Before starting, verify: +- Git is installed +- GitHub CLI (gh) is installed and authenticated +- Current directory is the intended project location +- Directory is empty or contains minimal files + +```bash +# Check prerequisites +git --version +gh --version +gh auth status +``` + +If prerequisites are missing, provide installation instructions: +- **Git**: `brew install git` (macOS) or system package manager +- **GitHub CLI**: `brew install gh` or from https://cli.github.com +- **Authentication**: `gh auth login` + +### Step 2: Git Initialization + +Initialize git repository with main as default branch: + +```bash +python3 scripts/init_git_repo.py [--private] [--org ] [--type ] +``` + +**Options**: +- `--private`: Create private repository (default: public) +- `--org `: Create under organization +- `--type `: Project type for appropriate .gitignore (node, python, rust, go) + +This script: +- Initializes git with `main` branch +- Creates appropriate `.gitignore` file +- Creates GitHub repository +- Sets up remote connection + +### Step 3: Tooling Configuration + +Set up linting, formatting, and type checking based on project type: + +```bash +python3 scripts/setup_tooling.py [project-type] +``` + +If project type is not specified, it will be auto-detected. 
+ +This creates configuration files like: +- **Node/TypeScript**: `.eslintrc.json`, `.prettierrc.json`, `tsconfig.json` +- **Python**: `.flake8`, `.black.toml`, `mypy.ini` +- **Rust**: `rustfmt.toml` + +After running, **install the necessary dependencies**: +- **Node**: `npm install --save-dev eslint prettier typescript @typescript-eslint/parser @typescript-eslint/eslint-plugin` +- **Python**: `pip install black flake8 mypy pytest` +- **Rust**: Tools are built-in to cargo + +### Step 4: Git Hooks Setup + +Configure pre-commit and pre-push hooks: + +```bash +python3 scripts/setup_git_hooks.py [--husky|--lefthook] --checks format,lint,type-check,test,build +``` + +**Hook tool selection**: +- `--husky`: For Node.js projects (recommended, requires npm) +- `--lefthook`: Universal, works with any project type + +**Checks configuration**: +- **Pre-commit**: format, lint, type-check (fast checks) +- **Pre-push**: test, build (slower checks) +- Customize with `--checks` flag + +This script: +- Installs the git hooks tool +- Creates hook configuration files +- Sets up hooks to run appropriate validation checks +- Updates package.json scripts (if Node.js project) + +### Step 5: GitHub Actions Workflows + +Generate comprehensive CI/CD workflows: + +```bash +python3 scripts/generate_workflows.py --checks format,lint,type-check,test,build --release +``` + +**Release types** (choose based on project): +- `npm`: npm package publication +- `github-pages`: Static site to GitHub Pages +- `docker`: Container image to GitHub Container Registry +- `binary`: Compiled binaries for multiple platforms +- `skill`: Claude Code skill (no deployment) +- `pypi`: Python package to PyPI +- `vercel`: Skip (Vercel handles deployment automatically) + +This creates: +1. **pr-validation.yml**: Runs validation checks on PRs to main +2. **main-ci-cd.yml**: Runs on merge to main, includes: + - All validation checks + - Automatic version bumping + - Git tagging + - Calls release workflow +3. **release.yml**: Reusable workflow for deployment (if release type specified) + +**Workflow features**: +- PR validation runs subset of fast checks +- Main branch validation runs ALL checks including integration tests +- Automatic semantic versioning +- GitHub Releases with auto-generated notes +- Project-specific optimizations (caching, parallel jobs) + +### Step 6: Branch Protection Rules + +Configure branch protection to enforce PR workflow: + +```bash +python3 scripts/setup_branch_protection.py build,test +``` + +Pass comma-separated list of required status checks (matching GitHub Actions job names). + +This script configures: +- **Direct pushes to main**: Blocked +- **Pull requests**: Required +- **Status checks**: Must pass before merge (if configured) +- **Squash merging**: Enabled (enforced) +- **Merge commits**: Disabled +- **Rebase merging**: Disabled +- **Auto-delete branches**: Enabled after merge + +### Step 7: Initial Commit and Verification + +Create initial commit and push to main: + +```bash +git add . +git commit -m "chore: initial repository scaffolding" +git push -u origin main +``` + +Verify setup: +- Check GitHub repository exists and is configured correctly +- Verify branch protection rules are active +- Confirm workflows are present in `.github/workflows/` +- Test git hooks by making a small change + +## Interactive Setup Flow + +For best user experience, guide users through the process interactively: + +### Phase 1: Information Gathering + +Ask the user: + +1. **Repository name**: What should the repository be named? 
+2. **Visibility**: Public or private repository? +3. **Organization**: Create under an organization? (optional) +4. **Project type**: What type of project is this? + - Node.js / JavaScript + - TypeScript + - React / Next.js + - Python + - Rust + - Go + - Docker container + - Claude Code skill + - Other / Generic +5. **Validation checks**: Which checks should run? + - Format checking (recommended: pre-commit) + - Linting (recommended: pre-commit) + - Type checking (recommended: pre-commit for TypeScript/Python) + - Unit tests (recommended: pre-push) + - Build verification (recommended: PR and main) + - Integration tests (recommended: main branch only) +6. **Git hooks**: Set up pre-commit and pre-push hooks? + - Yes (recommended) + - No +7. **Release strategy**: How will this project be released? + - Provide options based on project type (see [Release Strategies](#release-strategies)) + +### Phase 2: Execution + +Execute the scaffolding scripts in order, showing progress: + +``` +šŸš€ Initializing repository... +āœ… Git initialized with main branch +āœ… Created .gitignore +āœ… Created GitHub repository: username/repo-name (public) +āœ… Configured remote origin + +šŸ”§ Setting up tooling... +āœ… Created .eslintrc.json +āœ… Created .prettierrc.json +āœ… Created tsconfig.json + +šŸŖ Configuring git hooks... +āœ… Installed husky +āœ… Created pre-commit hook (format, lint) +āœ… Created pre-push hook (test, build) + +āš™ļø Generating GitHub Actions workflows... +āœ… Created pr-validation.yml +āœ… Created main-ci-cd.yml +āœ… Created release.yml (npm) + +šŸ”’ Configuring branch protection... +āœ… Enabled branch protection for main +āœ… Configured squash merge only +āœ… Enabled auto-delete branches + +šŸ“ Creating initial commit... +āœ… Initial commit created and pushed +``` + +### Phase 3: Post-Setup Instructions + +Provide next steps to the user: + +``` +āœ… Repository scaffolding complete! + +Your repository is ready at: https://github.com/username/repo-name + +Next steps: +1. Install dependencies: + npm install + +2. Install dev dependencies for tooling: + npm install --save-dev eslint prettier typescript + +3. Test the setup: + - Make a change in a file + - Commit (hooks should run) + - Push to trigger branch protection (push will fail - create PR instead) + +4. Create your first feature: + git checkout -b feature/initial-implementation + # Make changes + git add . + git commit -m "feat: add initial implementation" + git push -u origin feature/initial-implementation + +5. Open a pull request: + gh pr create --fill + # CI will run automatically + # After approval (or if no review required), squash and merge + +6. 
Configure secrets (if needed for releases): + - For npm: Add NPM_TOKEN to repository secrets + - For PyPI: Add PYPI_TOKEN to repository secrets + - For Docker: Authentication handled automatically via GITHUB_TOKEN + +Validation checks configured: +- Pre-commit: format, lint, type-check +- Pre-push: test, build +- PR workflow: format, lint, type-check, test, build +- Main workflow: ALL checks + integration tests + release + +Release strategy: npm +- Merges to main will automatically version, tag, and publish to npm +- Check .github/workflows/release.yml for details + +Branch protection active: +- Cannot push directly to main +- PRs required with squash merge +- Feature branches auto-delete after merge + +Resources: +- Workflows: .github/workflows/ +- Git hooks: .husky/ (or lefthook.yml) +- Tooling config: .eslintrc.json, .prettierrc.json, tsconfig.json +``` + +## Release Strategies + +Based on project type, suggest appropriate release strategies. See `references/release-strategies.md` for comprehensive details on each strategy. + +### Quick Selection Guide + +**For npm/Node.js projects**: +- **Library**: npm package +- **Web app**: Vercel, Netlify, or GitHub Pages +- **CLI tool**: npm package or standalone binary + +**For Python projects**: +- **Library**: PyPI +- **Web app**: Docker container or Platform-as-a-Service +- **CLI tool**: PyPI or standalone binary + +**For Rust projects**: +- **Library**: crates.io +- **Binary/CLI**: GitHub Releases with multi-platform binaries +- **Web Assembly**: Build to WASM + GitHub Pages + +**For Docker projects**: +- **Any service**: GitHub Container Registry (ghcr.io) + +**For Claude Code skills**: +- **Skill package**: GitHub Releases with .skill file + +**For static sites**: +- **Documentation/website**: GitHub Pages + +## Troubleshooting + +### Git hooks not running +- **Husky**: Ensure `npm install` has been run (installs hooks) +- **Lefthook**: Run `lefthook install` to activate hooks +- Check file permissions: `chmod +x .husky/pre-commit` + +### GitHub Actions failing +- Verify all required secrets are configured +- Check that validation commands match package.json scripts +- Review workflow logs for specific errors +- Ensure branch protection doesn't block the Actions bot + +### Branch protection preventing merge +- Verify required status checks match GitHub Actions job names +- Ensure all checks are passing on the PR +- Check that actor has permission to merge (admin bypass may be needed for first setup) + +### Pre-commit hooks too slow +- Move expensive checks (tests, build) to pre-push or CI only +- Use parallel execution in lefthook +- Configure incremental checking (only changed files) + +## References + +For detailed information: +- **Project Types**: See `references/project-types.md` for validation and build requirements per type +- **Release Strategies**: See `references/release-strategies.md` for comprehensive deployment options + +## Scripts Directory + +All automation scripts are in `scripts/`: +- `init_git_repo.py`: Git and GitHub initialization +- `setup_tooling.py`: Linting, formatting, type checking configuration +- `setup_git_hooks.py`: Git hooks with husky or lefthook +- `generate_workflows.py`: GitHub Actions workflow generation +- `setup_branch_protection.py`: Branch protection rules configuration + +Each script can be run independently or as part of the complete scaffolding flow. 
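+
+As a rough illustration, a complete pass for a hypothetical TypeScript npm package could chain the scripts like this (the repository name, check list, and release strategy are placeholders to adapt):
+
+```bash
+# 1. Git + GitHub repository with a Node-style .gitignore
+python3 scripts/init_git_repo.py my-package --type node
+
+# 2. Linting, formatting, and type-checking configuration
+python3 scripts/setup_tooling.py typescript
+
+# 3. Pre-commit and pre-push hooks via husky
+python3 scripts/setup_git_hooks.py --husky --checks format,lint,type-check,test,build
+
+# 4. PR, main-branch, and release workflows publishing to npm
+python3 scripts/generate_workflows.py --checks format,lint,type-check,test,build --release npm
+
+# 5. Branch protection; the argument should match the GitHub Actions job names
+python3 scripts/setup_branch_protection.py validate
+```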
diff --git a/skills/start-right/references/project-types.md b/skills/start-right/references/project-types.md new file mode 100644 index 0000000..e33d09f --- /dev/null +++ b/skills/start-right/references/project-types.md @@ -0,0 +1,220 @@ +# Project Types Reference + +This document provides detailed information about different project types and their specific requirements for validation, builds, and releases. + +## Node.js / JavaScript Projects + +**Detection**: `package.json` file present + +**Typical validation checks**: +- Format: Prettier (`npm run format:check`) +- Lint: ESLint (`npm run lint`) +- Test: Jest/Vitest (`npm test`) +- Build: TypeScript/bundler (`npm run build`) + +**Release strategies**: +- **npm package**: Publish to npm registry +- **Web app**: Deploy to Vercel, Netlify, or GitHub Pages +- **CLI tool**: Publish to npm with binary support + +**Required dependencies**: +```json +{ + "devDependencies": { + "prettier": "^3.0.0", + "eslint": "^8.0.0", + "jest": "^29.0.0" // or vitest + } +} +``` + +## TypeScript Projects + +**Detection**: `tsconfig.json` or TypeScript in dependencies + +**Typical validation checks**: +- Format: Prettier +- Lint: ESLint with TypeScript plugin +- Type check: `tsc --noEmit` +- Test: Jest/Vitest with ts-jest +- Build: TypeScript compiler + +**Release strategies**: Same as Node.js + +**Required dependencies**: +```json +{ + "devDependencies": { + "typescript": "^5.0.0", + "@typescript-eslint/parser": "^6.0.0", + "@typescript-eslint/eslint-plugin": "^6.0.0", + "ts-jest": "^29.0.0" + } +} +``` + +## React Projects + +**Detection**: React in `package.json` dependencies + +**Typical validation checks**: +- Format: Prettier +- Lint: ESLint with React plugin +- Type check: TypeScript (if applicable) +- Test: Jest + React Testing Library +- Build: Vite/Webpack/Next.js + +**Release strategies**: +- **Single-page app**: GitHub Pages, Vercel, Netlify +- **Next.js app**: Vercel (recommended), or Docker container +- **Component library**: npm package + +**Additional considerations**: +- May need environment variable management +- Build optimization (bundle size, tree shaking) +- Static asset handling + +## Rust Projects + +**Detection**: `Cargo.toml` file present + +**Typical validation checks**: +- Format: `cargo fmt --check` +- Lint: `cargo clippy -- -D warnings` +- Test: `cargo test` +- Build: `cargo build --release` + +**Release strategies**: +- **Binary**: Cross-compile for multiple platforms, attach to GitHub Release +- **Library (crate)**: Publish to crates.io +- **Web Assembly**: Build to WASM for web deployment + +**CI considerations**: +- Cache `~/.cargo` and `target/` directories +- Consider cross-compilation matrix for multiple platforms +- Separate debug/release builds + +## Python Projects + +**Detection**: `.py` files, `requirements.txt`, or `pyproject.toml` + +**Typical validation checks**: +- Format: Black (`black --check .`) +- Lint: Flake8 (`flake8 .`) +- Type check: MyPy (`mypy .`) +- Test: Pytest (`pytest`) + +**Release strategies**: +- **Package**: Publish to PyPI +- **CLI tool**: PyPI with entry points or standalone binary (PyInstaller) +- **Web service**: Docker container or Platform-as-a-Service + +**Required tools**: +```txt +black +flake8 +mypy +pytest +build # for packaging +``` + +## Go Projects + +**Detection**: `go.mod` file present + +**Typical validation checks**: +- Format: `go fmt` / `gofmt -s` +- Lint: `golangci-lint run` +- Test: `go test ./...` +- Build: `go build` + +**Release strategies**: +- **Binary**: Cross-compile 
with GOOS/GOARCH, attach to GitHub Release +- **Library**: Tag version in git (Go modules) +- **Docker**: Multi-stage build for small images + +**CI considerations**: +- Use Go module caching +- Cross-compilation for multiple platforms is straightforward +- Consider using GoReleaser for automated releases + +## Docker Projects + +**Detection**: `Dockerfile` present + +**Typical validation checks**: +- Dockerfile lint: `hadolint Dockerfile` +- Build: `docker build .` +- Security scan: Trivy or similar + +**Release strategies**: +- **Container image**: GitHub Container Registry (ghcr.io) +- **Docker Hub**: If public registry preferred +- **Multiple registries**: Push to multiple registries for redundancy + +**Best practices**: +- Multi-stage builds for smaller images +- Use specific base image tags (not `latest`) +- Run as non-root user +- Regular security scanning + +## Claude Code Skills + +**Detection**: `.skill` extension or skill structure + +**Typical validation checks**: +- Skill validation: Custom validator script +- Structure check: Verify SKILL.md frontmatter and structure +- Test: Use skill in Claude Code environment + +**Release strategies**: +- **GitHub Release**: Attach packaged .skill file +- No deployment needed - users download and install manually + +**Additional files**: +- SKILL.md (required) +- Optional: scripts/, references/, assets/ directories +- Package using skill packaging tool + +## Web Applications (Generic) + +**Typical validation checks**: +- Format checking +- Linting +- Unit tests +- Integration tests (optional on PR, required on main) +- Build verification + +**Release strategies**: +- **Static site**: GitHub Pages, Vercel, Netlify +- **Server-rendered**: Platform-specific deployment (Vercel, Railway, Fly.io) +- **Containerized**: Deploy container to cloud platform + +**Additional considerations**: +- Environment configuration management +- Asset optimization and CDN +- Database migrations (if applicable) +- Health check endpoints + +## Best Practices Across All Types + +### Validation Checks on PRs +- **Always run**: Format check, lint, unit tests, build +- **Optional**: Integration tests (if fast), type checking +- **Never run on PR**: Deployment, long-running tests + +### Validation Checks on Main Branch +- **Always run**: All checks from PR + integration tests +- **Additionally**: Security scanning, coverage reports +- **After validation**: Versioning, tagging, release + +### Versioning Strategy +- **Semantic versioning**: MAJOR.MINOR.PATCH +- **Auto-increment**: Use GitHub Actions to bump version +- **Tag format**: `v1.2.3` format recommended +- **Changelog**: Auto-generate from commit messages or PRs + +### Release Notes +- Use GitHub's auto-generated release notes +- Customize with categories (features, fixes, breaking changes) +- Include links to related PRs and issues diff --git a/skills/start-right/references/release-strategies.md b/skills/start-right/references/release-strategies.md new file mode 100644 index 0000000..92d8843 --- /dev/null +++ b/skills/start-right/references/release-strategies.md @@ -0,0 +1,351 @@ +# Release Strategies Reference + +This document provides detailed guidance on different release strategies and deployment targets. + +## npm Package Release + +**Best for**: JavaScript/TypeScript libraries, CLI tools, frameworks + +**Prerequisites**: +- npm account and NPM_TOKEN secret configured +- `package.json` with correct metadata (name, version, main, types) +- Build output in publishable state + +**Workflow steps**: +1. 
Build the package +2. Run `npm publish` +3. Create GitHub Release with version tag +4. Include link to npm package in release notes + +**Configuration**: +```json +{ + "name": "@username/package-name", + "version": "1.0.0", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "files": ["dist"], + "scripts": { + "build": "tsc", + "prepublishOnly": "npm run build" + } +} +``` + +**GitHub Release notes example**: +``` +šŸ“¦ Published to npm: https://www.npmjs.com/package/@username/package-name + +Install with: +npm install @username/package-name +``` + +## GitHub Pages Deployment + +**Best for**: Static websites, documentation sites, SPAs + +**Prerequisites**: +- GitHub Pages enabled in repository settings +- Build outputs static files to a directory (usually `dist/` or `build/`) + +**Workflow steps**: +1. Build the static site +2. Deploy to `gh-pages` branch using `peaceiris/actions-gh-pages` +3. Create GitHub Release with link to live site + +**Configuration considerations**: +- Set correct `base` path for SPAs (e.g., Vite: `base: '/repo-name/'`) +- Configure 404.html for SPA routing +- Custom domain setup (optional) + +**GitHub Release notes example**: +``` +🌐 Deployed to: https://username.github.io/repo-name + +Changes in this release: +[auto-generated release notes] +``` + +## Vercel Deployment + +**Best for**: Next.js apps, React apps, modern web frameworks + +**Prerequisites**: +- Vercel account connected to GitHub +- Vercel project configured (can be done via CLI or UI) + +**Workflow approaches**: + +### Option 1: Automatic (Recommended) +- Let Vercel handle deployment via their GitHub integration +- GitHub Actions only handles validation +- Every push to main triggers Vercel deployment automatically + +### Option 2: Manual via GitHub Actions +- Use Vercel CLI in GitHub Actions +- Requires VERCEL_TOKEN secret +- More control but more complex + +**GitHub Release notes example**: +``` +šŸš€ Deployed to Vercel: https://project-name.vercel.app + +Production URL: https://your-domain.com (if custom domain) +``` + +## Docker Container Release + +**Best for**: Microservices, backend applications, full-stack apps + +**Prerequisites**: +- Dockerfile in repository +- Multi-stage builds for optimization (recommended) + +**Release targets**: +- **GitHub Container Registry** (ghcr.io) - Recommended, free with GitHub +- **Docker Hub** - Public registry, widely used +- **AWS ECR**, **Google GCR**, **Azure ACR** - Cloud-specific registries + +**Workflow steps**: +1. Build Docker image with version tag +2. Push to container registry +3. Also tag as `latest` +4. 
Create GitHub Release with pull command + +**Best practices**: +- Use multi-stage builds to minimize image size +- Run containers as non-root user +- Include health check in Dockerfile +- Version images with semantic versioning + +**GitHub Release notes example**: +``` +🐳 Docker image: `ghcr.io/username/repo-name:v1.2.3` + +Pull and run: +docker pull ghcr.io/username/repo-name:v1.2.3 +docker run -p 8080:8080 ghcr.io/username/repo-name:v1.2.3 +``` + +## Binary Artifacts Release + +**Best for**: CLI tools, desktop apps, native applications + +**Platforms to support**: +- Linux (x86_64, arm64) +- macOS (x86_64, arm64/Apple Silicon) +- Windows (x86_64) + +**Workflow approaches**: + +### Option 1: GitHub Actions matrix build +Build on multiple runners (ubuntu, macos, windows) and upload artifacts + +### Option 2: Cross-compilation +Compile for multiple targets from single runner (works for Go, Rust) + +### Option 3: GoReleaser / cargo-dist +Use specialized tools for automated multi-platform releases + +**GitHub Release notes example**: +``` +šŸ“„ Download the binary for your platform: + +- Linux x86_64: [app-linux-amd64](link) +- Linux ARM64: [app-linux-arm64](link) +- macOS x86_64: [app-darwin-amd64](link) +- macOS ARM64: [app-darwin-arm64](link) +- Windows x86_64: [app-windows-amd64.exe](link) + +Quick install: +curl -L https://github.com/user/repo/releases/download/v1.2.3/app-linux-amd64 -o app +chmod +x app +./app +``` + +## Python Package (PyPI) Release + +**Best for**: Python libraries, CLI tools, frameworks + +**Prerequisites**: +- PyPI account and API token +- `pyproject.toml` or `setup.py` with metadata +- Build tool (build, setuptools, poetry) + +**Workflow steps**: +1. Build distribution packages (wheel + sdist) +2. Upload to PyPI using twine +3. Create GitHub Release with PyPI link + +**Configuration**: +```toml +[build-system] +requires = ["setuptools>=45", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "my-package" +version = "1.0.0" +description = "Package description" +authors = [{name = "Your Name", email = "you@example.com"}] +``` + +**GitHub Release notes example**: +``` +šŸ“¦ Published to PyPI: https://pypi.org/project/my-package/ + +Install with: +pip install my-package +``` + +## Rust Crate (crates.io) Release + +**Best for**: Rust libraries + +**Prerequisites**: +- crates.io account +- CARGO_REGISTRY_TOKEN secret +- `Cargo.toml` with complete metadata + +**Workflow steps**: +1. Run tests and validation +2. Publish to crates.io using `cargo publish` +3. Create GitHub Release + +**GitHub Release notes example**: +``` +šŸ“¦ Published to crates.io: https://crates.io/crates/my-crate + +Add to your Cargo.toml: +[dependencies] +my-crate = "1.0.0" +``` + +## Claude Code Skill Release + +**Best for**: Claude Code skills and extensions + +**Prerequisites**: +- Skill properly structured and validated +- Skill packaged as .skill file + +**Workflow steps**: +1. Validate skill structure +2. Package skill into .skill file +3. Create GitHub Release with .skill file attached +4. No deployment needed + +**GitHub Release notes example**: +``` +šŸŽÆ Claude Code Skill Release + +Download the skill file and install in Claude Code: +1. Download [skill-name.skill](link) +2. 
In Claude Code, run: /skills install /path/to/skill-name.skill + +What's included: +- [Brief description of skill capabilities] +``` + +## Desktop App Distribution + +**Best for**: Electron apps, Tauri apps + +**Platforms**: Windows, macOS, Linux + +**Workflow tools**: +- **Electron Builder**: Automated builds and updates +- **Tauri**: Rust-based alternative with smaller bundle sizes + +**Distribution methods**: +- GitHub Releases with auto-updater +- Platform-specific stores (Microsoft Store, Mac App Store) +- Custom update server + +## Platform-as-a-Service (PaaS) Deployment + +**Platforms**: Railway, Fly.io, Render, Heroku (legacy) + +**Common characteristics**: +- Git-based deployment +- Automatic container building +- Built-in databases and add-ons +- Easy environment variable management + +**Workflow integration**: +Most PaaS platforms integrate directly with GitHub - just validation in GitHub Actions, deployment handled by platform + +## Release Strategy Selection Guide + +### Choose npm/PyPI/crates.io when: +- Building a library or package +- Want maximum distribution reach +- Package manager installation is preferred + +### Choose GitHub Pages when: +- Pure static site +- Documentation site +- No server-side logic needed +- Want simple, free hosting + +### Choose Vercel/Netlify when: +- Modern framework (Next.js, SvelteKit, etc.) +- Need serverless functions +- Want preview deployments for PRs +- Need automatic optimizations + +### Choose Docker when: +- Microservices architecture +- Need consistent runtime environment +- Deploying to Kubernetes or container orchestration +- Complex dependencies + +### Choose Binary release when: +- CLI tool +- Desktop application +- Want users to run without installing runtime +- Performance-critical application + +### Choose PaaS when: +- Full-stack web application +- Need managed database +- Want simple deployment +- Solo developer or small team + +## Multi-Release Strategy + +Some projects benefit from multiple release targets: + +**Example: CLI tool** +- npm package (for Node.js users) +- Standalone binary (for system installation) +- Docker image (for containerized environments) + +**Example: Web framework** +- npm package (for developers) +- Documentation site on GitHub Pages +- Example deployed to Vercel + +**Example: Library** +- Language package registry (npm, PyPI, etc.) 
+- GitHub Releases for changelogs +- Documentation site + +## Versioning and Changelog Best Practices + +**Semantic Versioning**: +- MAJOR: Breaking changes +- MINOR: New features (backward compatible) +- PATCH: Bug fixes + +**Auto-versioning**: +Use commit messages or PR labels to determine version bump: +- `feat:` → MINOR bump +- `fix:` → PATCH bump +- `BREAKING CHANGE:` → MAJOR bump + +**Changelog generation**: +- Auto-generate from commit messages (Conventional Commits) +- Auto-generate from PR titles +- Use GitHub's release notes generation +- Tools: semantic-release, conventional-changelog diff --git a/skills/start-right/scripts/generate_workflows.py b/skills/start-right/scripts/generate_workflows.py new file mode 100755 index 0000000..980e39e --- /dev/null +++ b/skills/start-right/scripts/generate_workflows.py @@ -0,0 +1,562 @@ +#!/usr/bin/env python3 +""" +Generate GitHub Actions workflows for CI/CD + +This script creates: +- PR validation workflow (runs on feature branches) +- Main branch validation workflow (runs on merge to main) +- Release workflow (versioning, tagging, deployment) +""" + +import subprocess +import sys +import json +from pathlib import Path + + +def run_command(cmd, check=True, capture_output=True): + """Run a shell command and return the result.""" + result = subprocess.run( + cmd, + shell=True, + check=check, + capture_output=capture_output, + text=True + ) + return result + + +def detect_project_type(): + """Detect project type for workflow generation.""" + cwd = Path(".") + + if (cwd / "package.json").exists(): + with open("package.json") as f: + pkg = json.load(f) + deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})} + + if "react" in deps or "next" in deps: + return "react" + elif "vite" in deps: + return "vite" + else: + return "node" + + elif (cwd / "Cargo.toml").exists(): + return "rust" + + elif (cwd / "go.mod").exists(): + return "go" + + elif (cwd / "Dockerfile").exists(): + return "docker" + + elif any(cwd.glob("*.py")): + return "python" + + return "generic" + + +def create_pr_workflow(project_type, checks): + """Create PR validation workflow.""" + workflow = { + "name": "PR Validation", + "on": { + "pull_request": { + "branches": ["main"] + } + }, + "jobs": { + "validate": { + "runs-on": "ubuntu-latest", + "steps": [ + { + "name": "Checkout code", + "uses": "actions/checkout@v4" + } + ] + } + } + } + + # Add project-specific setup + if project_type in ["node", "react", "vite"]: + workflow["jobs"]["validate"]["steps"].extend([ + { + "name": "Setup Node.js", + "uses": "actions/setup-node@v4", + "with": { + "node-version": "20", + "cache": "npm" + } + }, + { + "name": "Install dependencies", + "run": "npm ci" + } + ]) + elif project_type == "rust": + workflow["jobs"]["validate"]["steps"].extend([ + { + "name": "Setup Rust", + "uses": "actions-rs/toolchain@v1", + "with": { + "toolchain": "stable", + "override": True + } + }, + { + "name": "Cache cargo", + "uses": "actions/cache@v4", + "with": { + "path": "~/.cargo\ntarget", + "key": "${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}" + } + } + ]) + elif project_type == "python": + workflow["jobs"]["validate"]["steps"].extend([ + { + "name": "Setup Python", + "uses": "actions/setup-python@v5", + "with": { + "python-version": "3.11", + "cache": "pip" + } + }, + { + "name": "Install dependencies", + "run": "pip install -r requirements.txt" + } + ]) + elif project_type == "go": + workflow["jobs"]["validate"]["steps"].extend([ + { + "name": "Setup Go", + "uses": 
"actions/setup-go@v5", + "with": { + "go-version": "1.21", + "cache": True + } + }, + { + "name": "Install dependencies", + "run": "go mod download" + } + ]) + + # Add validation checks based on project type + if "format" in checks: + if project_type in ["node", "react", "vite"]: + workflow["jobs"]["validate"]["steps"].append({ + "name": "Check formatting", + "run": "npm run format:check" + }) + elif project_type == "rust": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Check formatting", + "run": "cargo fmt --check" + }) + elif project_type == "python": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Check formatting", + "run": "black --check ." + }) + + if "lint" in checks: + if project_type in ["node", "react", "vite"]: + workflow["jobs"]["validate"]["steps"].append({ + "name": "Lint", + "run": "npm run lint" + }) + elif project_type == "rust": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Lint", + "run": "cargo clippy -- -D warnings" + }) + elif project_type == "python": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Lint", + "run": "flake8 ." + }) + + if "type-check" in checks: + if project_type in ["node", "react", "vite"]: + workflow["jobs"]["validate"]["steps"].append({ + "name": "Type check", + "run": "npm run type-check" + }) + elif project_type == "python": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Type check", + "run": "mypy ." + }) + + if "test" in checks: + if project_type in ["node", "react", "vite"]: + workflow["jobs"]["validate"]["steps"].append({ + "name": "Run tests", + "run": "npm test" + }) + elif project_type == "rust": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Run tests", + "run": "cargo test" + }) + elif project_type == "python": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Run tests", + "run": "pytest" + }) + elif project_type == "go": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Run tests", + "run": "go test ./..." + }) + + if "build" in checks: + if project_type in ["node", "react", "vite"]: + workflow["jobs"]["validate"]["steps"].append({ + "name": "Build", + "run": "npm run build" + }) + elif project_type == "rust": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Build", + "run": "cargo build --release" + }) + elif project_type == "go": + workflow["jobs"]["validate"]["steps"].append({ + "name": "Build", + "run": "go build -o bin/ ./..." 
+ }) + + return workflow + + +def create_main_workflow(project_type, checks): + """Create main branch workflow with versioning and release.""" + workflow = { + "name": "Main Branch CI/CD", + "on": { + "push": { + "branches": ["main"] + } + }, + "jobs": { + "validate": { + "runs-on": "ubuntu-latest", + "steps": [ + { + "name": "Checkout code", + "uses": "actions/checkout@v4", + "with": { + "fetch-depth": 0 # Full history for versioning + } + } + ] + } + } + } + + # Reuse PR validation steps + pr_workflow = create_pr_workflow(project_type, checks) + workflow["jobs"]["validate"]["steps"].extend(pr_workflow["jobs"]["validate"]["steps"][1:]) + + # Add versioning and tagging + workflow["jobs"]["validate"]["steps"].extend([ + { + "name": "Bump version and push tag", + "id": "version", + "uses": "anothrNick/github-tag-action@1.67.0", + "env": { + "GITHUB_TOKEN": "${{ secrets.GITHUB_TOKEN }}", + "WITH_V": "true", + "DEFAULT_BUMP": "patch" + } + } + ]) + + # Add release job + workflow["jobs"]["release"] = { + "needs": "validate", + "runs-on": "ubuntu-latest", + "permissions": { + "contents": "write" + }, + "steps": [ + { + "name": "Checkout code", + "uses": "actions/checkout@v4" + }, + { + "name": "Call release workflow", + "uses": "./.github/workflows/release.yml", + "with": { + "version": "${{ needs.validate.outputs.new_tag }}" + } + } + ] + } + + # Set output + workflow["jobs"]["validate"]["outputs"] = { + "new_tag": "${{ steps.version.outputs.new_tag }}" + } + + return workflow + + +def create_release_workflow(project_type, release_type): + """Create reusable release workflow based on project and release type.""" + workflow = { + "name": "Release", + "on": { + "workflow_call": { + "inputs": { + "version": { + "required": True, + "type": "string" + } + } + } + }, + "jobs": { + "release": { + "runs-on": "ubuntu-latest", + "permissions": { + "contents": "write", + "packages": "write" + }, + "steps": [ + { + "name": "Checkout code", + "uses": "actions/checkout@v4" + } + ] + } + } + } + + if release_type == "npm": + workflow["jobs"]["release"]["steps"].extend([ + { + "name": "Setup Node.js", + "uses": "actions/setup-node@v4", + "with": { + "node-version": "20", + "registry-url": "https://registry.npmjs.org/" + } + }, + { + "name": "Install dependencies", + "run": "npm ci" + }, + { + "name": "Build", + "run": "npm run build" + }, + { + "name": "Publish to npm", + "run": "npm publish", + "env": { + "NODE_AUTH_TOKEN": "${{ secrets.NPM_TOKEN }}" + } + }, + { + "name": "Create GitHub Release", + "uses": "softprops/action-gh-release@v1", + "with": { + "tag_name": "${{ inputs.version }}", + "name": "Release ${{ inputs.version }}", + "generate_release_notes": True + } + } + ]) + + elif release_type == "github-pages": + workflow["jobs"]["release"]["steps"].extend([ + { + "name": "Setup Node.js", + "uses": "actions/setup-node@v4", + "with": { + "node-version": "20" + } + }, + { + "name": "Install dependencies", + "run": "npm ci" + }, + { + "name": "Build", + "run": "npm run build" + }, + { + "name": "Deploy to GitHub Pages", + "uses": "peaceiris/actions-gh-pages@v3", + "with": { + "github_token": "${{ secrets.GITHUB_TOKEN }}", + "publish_dir": "./dist" + } + }, + { + "name": "Create GitHub Release", + "uses": "softprops/action-gh-release@v1", + "with": { + "tag_name": "${{ inputs.version }}", + "body": "Deployed to GitHub Pages: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}", + "generate_release_notes": True + } + } + ]) + + elif release_type == "docker": + 
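+        # ghcr.io publishing uses the workflow's built-in GITHUB_TOKEN (the
+        # release job already requests `packages: write`), so no extra
+        # registry secret is needed here.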
workflow["jobs"]["release"]["steps"].extend([ + { + "name": "Set up Docker Buildx", + "uses": "docker/setup-buildx-action@v3" + }, + { + "name": "Login to GitHub Container Registry", + "uses": "docker/login-action@v3", + "with": { + "registry": "ghcr.io", + "username": "${{ github.actor }}", + "password": "${{ secrets.GITHUB_TOKEN }}" + } + }, + { + "name": "Build and push", + "uses": "docker/build-push-action@v5", + "with": { + "context": ".", + "push": True, + "tags": "ghcr.io/${{ github.repository }}:${{ inputs.version }},ghcr.io/${{ github.repository }}:latest" + } + }, + { + "name": "Create GitHub Release", + "uses": "softprops/action-gh-release@v1", + "with": { + "tag_name": "${{ inputs.version }}", + "body": "Docker image: `ghcr.io/${{ github.repository }}:${{ inputs.version }}`", + "generate_release_notes": True + } + } + ]) + + elif release_type == "binary": + # For Rust, Go, or other compiled languages + workflow["jobs"]["release"]["steps"].extend([ + { + "name": "Build binaries", + "run": "# Add build commands for your project" + }, + { + "name": "Create GitHub Release", + "uses": "softprops/action-gh-release@v1", + "with": { + "tag_name": "${{ inputs.version }}", + "files": "bin/*", # Adjust path as needed + "generate_release_notes": True + } + } + ]) + + elif release_type == "skill": + # For Claude Code skills + workflow["jobs"]["release"]["steps"].extend([ + { + "name": "Create GitHub Release", + "uses": "softprops/action-gh-release@v1", + "with": { + "tag_name": "${{ inputs.version }}", + "generate_release_notes": True + } + } + ]) + + return workflow + + +def write_workflow(workflow, filename): + """Write workflow to .github/workflows directory.""" + workflows_dir = Path(".github/workflows") + workflows_dir.mkdir(parents=True, exist_ok=True) + + filepath = workflows_dir / filename + + import yaml + try: + with open(filepath, "w") as f: + yaml.dump(workflow, f, default_flow_style=False, sort_keys=False) + except ImportError: + # Manual YAML writing if pyyaml not available + import json + yaml_str = json.dumps(workflow, indent=2) + # Basic conversion (not perfect but works for simple cases) + with open(filepath, "w") as f: + f.write(yaml_str.replace('"', '').replace(',', '')) + + print(f"āœ… Created {filepath}") + + +def main(): + if len(sys.argv) < 2: + print("Usage: generate_workflows.py [--checks format,lint,test,build] [--release npm|github-pages|docker|binary|skill]") + print("\nOptions:") + print(" --checks Comma-separated validation checks") + print(" --release Release strategy") + print("\nRelease types:") + print(" npm Publish to npm registry") + print(" github-pages Deploy to GitHub Pages") + print(" docker Build and push Docker image") + print(" binary Build and release binary artifacts") + print(" skill Claude Code skill (no deployment needed)") + sys.exit(1) + + # Parse arguments + checks = ["format", "lint", "test", "build"] # defaults + if "--checks" in sys.argv: + idx = sys.argv.index("--checks") + if idx + 1 < len(sys.argv): + checks = sys.argv[idx + 1].split(",") + + release_type = None + if "--release" in sys.argv: + idx = sys.argv.index("--release") + if idx + 1 < len(sys.argv): + release_type = sys.argv[idx + 1] + + project_type = detect_project_type() + + print("āš™ļø Generating GitHub Actions workflows...") + print(f" Project type: {project_type}") + print(f" Checks: {', '.join(checks)}") + if release_type: + print(f" Release type: {release_type}") + print() + + # Create PR workflow + pr_workflow = create_pr_workflow(project_type, checks) + 
write_workflow(pr_workflow, "pr-validation.yml") + + # Create main branch workflow + main_workflow = create_main_workflow(project_type, checks) + write_workflow(main_workflow, "main-ci-cd.yml") + + # Create release workflow if specified + if release_type: + release_workflow = create_release_workflow(project_type, release_type) + write_workflow(release_workflow, "release.yml") + + print("\nāœ… GitHub Actions workflows created!") + print("\nWorkflows:") + print(" - pr-validation.yml: Runs on PRs to main") + print(" - main-ci-cd.yml: Runs on merge to main, handles versioning") + if release_type: + print(f" - release.yml: Handles {release_type} deployment") + + +if __name__ == "__main__": + main() diff --git a/skills/start-right/scripts/init_git_repo.py b/skills/start-right/scripts/init_git_repo.py new file mode 100755 index 0000000..b2ac4e7 --- /dev/null +++ b/skills/start-right/scripts/init_git_repo.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python3 +""" +Initialize git repository and create GitHub remote + +This script handles: +- Git initialization with main as default branch +- GitHub repository creation (public/private) +- Remote configuration +- Initial commit setup +""" + +import subprocess +import sys +import json +from pathlib import Path + + +def run_command(cmd, check=True, capture_output=True): + """Run a shell command and return the result.""" + result = subprocess.run( + cmd, + shell=True, + check=check, + capture_output=capture_output, + text=True + ) + return result + + +def check_prerequisites(): + """Verify git and gh CLI are installed and authenticated.""" + errors = [] + + # Check git + try: + run_command("git --version") + except subprocess.CalledProcessError: + errors.append("Git is not installed") + + # Check gh CLI + try: + run_command("gh --version") + except subprocess.CalledProcessError: + errors.append("GitHub CLI (gh) is not installed") + else: + # Check gh authentication + try: + run_command("gh auth status") + except subprocess.CalledProcessError: + errors.append("GitHub CLI is not authenticated (run: gh auth login)") + + return errors + + +def init_git(): + """Initialize git repository with main as default branch.""" + if Path(".git").exists(): + print("āš ļø Git repository already initialized") + return False + + run_command("git init -b main") + print("āœ… Initialized git repository with 'main' as default branch") + return True + + +def create_github_repo(repo_name, visibility="public", org=None): + """Create GitHub repository and set as remote.""" + # Build gh repo create command + cmd = f"gh repo create {repo_name} --{visibility} --source=." 
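+    # --source=. creates the GitHub repo from the existing local repository
+    # and registers it as a remote; the initial commit is pushed separately
+    # afterwards.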
+ + if org: + cmd += f" --org {org}" + + try: + result = run_command(cmd) + print(f"āœ… Created GitHub repository: {repo_name} ({visibility})") + return True + except subprocess.CalledProcessError as e: + print(f"āŒ Failed to create GitHub repository: {e.stderr}") + return False + + +def create_gitignore(project_type=None): + """Create an appropriate .gitignore file.""" + common_patterns = [ + "# Dependencies", + "node_modules/", + ".pnp", + ".pnp.js", + "", + "# Testing", + "coverage/", + "*.log", + "", + "# Environment", + ".env", + ".env.local", + ".env.*.local", + "", + "# IDE", + ".vscode/", + ".idea/", + "*.swp", + "*.swo", + "*~", + "", + "# OS", + ".DS_Store", + "Thumbs.db", + "", + "# Build outputs", + "dist/", + "build/", + "*.exe", + "*.dll", + "*.so", + "*.dylib", + "" + ] + + type_specific = { + "node": [ + "# Node specific", + "npm-debug.log*", + "yarn-debug.log*", + "yarn-error.log*", + ".npm", + ], + "python": [ + "# Python specific", + "__pycache__/", + "*.py[cod]", + "*$py.class", + ".Python", + "venv/", + "ENV/", + ".venv/", + "*.egg-info/", + ], + "rust": [ + "# Rust specific", + "target/", + "Cargo.lock", + "**/*.rs.bk", + ], + "go": [ + "# Go specific", + "*.exe", + "*.exe~", + "*.test", + "*.out", + "vendor/", + ], + } + + gitignore_content = common_patterns + if project_type and project_type in type_specific: + gitignore_content.extend([""] + type_specific[project_type]) + + with open(".gitignore", "w") as f: + f.write("\n".join(gitignore_content)) + + print("āœ… Created .gitignore") + + +def main(): + if len(sys.argv) < 2: + print("Usage: init_git_repo.py [--private] [--org ] [--type ]") + print("\nOptions:") + print(" --private Create private repository (default: public)") + print(" --org Create under organization") + print(" --type Project type for .gitignore (node|python|rust|go)") + sys.exit(1) + + repo_name = sys.argv[1] + visibility = "private" if "--private" in sys.argv else "public" + org = sys.argv[sys.argv.index("--org") + 1] if "--org" in sys.argv else None + project_type = sys.argv[sys.argv.index("--type") + 1] if "--type" in sys.argv else None + + print("šŸš€ Initializing repository setup...") + print(f" Repository: {repo_name}") + print(f" Visibility: {visibility}") + if org: + print(f" Organization: {org}") + if project_type: + print(f" Type: {project_type}") + print() + + # Check prerequisites + errors = check_prerequisites() + if errors: + print("āŒ Prerequisites not met:") + for error in errors: + print(f" - {error}") + sys.exit(1) + + # Initialize git + init_git() + + # Create .gitignore + create_gitignore(project_type) + + # Create GitHub repo + if not create_github_repo(repo_name, visibility, org): + sys.exit(1) + + print("\nāœ… Repository setup complete!") + print("\nNext steps:") + print(" 1. Configure branch protection rules") + print(" 2. Set up CI/CD workflows") + print(" 3. 
Configure git hooks") + + +if __name__ == "__main__": + main() diff --git a/skills/start-right/scripts/setup_branch_protection.py b/skills/start-right/scripts/setup_branch_protection.py new file mode 100755 index 0000000..fa43d53 --- /dev/null +++ b/skills/start-right/scripts/setup_branch_protection.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +""" +Configure GitHub branch protection rules + +This script sets up branch protection for the main branch to: +- Prevent direct pushes to main +- Require pull request reviews +- Require status checks to pass +- Automatically delete head branches after merge +""" + +import subprocess +import sys +import json + + +def run_command(cmd, check=True, capture_output=True): + """Run a shell command and return the result.""" + result = subprocess.run( + cmd, + shell=True, + check=check, + capture_output=capture_output, + text=True + ) + return result + + +def get_repo_info(): + """Get current repository owner and name.""" + try: + result = run_command("gh repo view --json owner,name") + repo_data = json.loads(result.stdout) + return repo_data["owner"]["login"], repo_data["name"] + except Exception as e: + print(f"āŒ Failed to get repository info: {e}") + sys.exit(1) + + +def enable_branch_protection(branch="main", required_checks=None): + """Enable branch protection rules.""" + owner, repo = get_repo_info() + + # Base protection rules + protection_rules = { + "required_pull_request_reviews": { + "required_approving_review_count": 0, # Solo dev doesn't need reviews + "dismiss_stale_reviews": True, + }, + "enforce_admins": False, # Allow admins to bypass for solo dev + "required_status_checks": { + "strict": True, + "contexts": required_checks or [] + }, + "restrictions": None, # No push restrictions for solo dev + "allow_force_pushes": False, + "allow_deletions": False, + } + + # Use gh api to set branch protection + cmd = f'''gh api repos/{owner}/{repo}/branches/{branch}/protection \\ + -X PUT \\ + -H "Accept: application/vnd.github+json" \\ + -f required_status_checks[strict]=true \\ + -f required_status_checks[contexts][]=build \\ + -f enforce_admins=false \\ + -f required_pull_request_reviews[required_approving_review_count]=0 \\ + -f required_pull_request_reviews[dismiss_stale_reviews]=true \\ + -f allow_force_pushes=false \\ + -f allow_deletions=false''' + + try: + run_command(cmd) + print(f"āœ… Enabled branch protection for '{branch}'") + return True + except subprocess.CalledProcessError as e: + print(f"āŒ Failed to enable branch protection: {e.stderr}") + return False + + +def configure_repo_settings(): + """Configure repository settings for PR workflow.""" + owner, repo = get_repo_info() + + # Enable auto-delete of head branches + cmd = f'''gh api repos/{owner}/{repo} \\ + -X PATCH \\ + -H "Accept: application/vnd.github+json" \\ + -f delete_branch_on_merge=true \\ + -f allow_squash_merge=true \\ + -f allow_merge_commit=false \\ + -f allow_rebase_merge=false''' + + try: + run_command(cmd) + print("āœ… Configured repository settings:") + print(" - Auto-delete head branches after merge: enabled") + print(" - Squash merging: enabled") + print(" - Merge commits: disabled") + print(" - Rebase merging: disabled") + return True + except subprocess.CalledProcessError as e: + print(f"āŒ Failed to configure repository settings: {e.stderr}") + return False + + +def main(): + required_checks = [] + if len(sys.argv) > 1: + # Accept comma-separated list of required status checks + required_checks = sys.argv[1].split(",") + + print("šŸ”’ Configuring branch 
protection...") + print() + + # Configure repository settings + configure_repo_settings() + print() + + # Enable branch protection + enable_branch_protection("main", required_checks) + + print("\nāœ… Branch protection configured!") + print("\nProtection rules applied:") + print(" - Direct pushes to 'main' blocked") + print(" - Pull requests required") + print(" - Status checks required (if configured)") + print(" - Feature branches auto-deleted after merge") + print(" - Squash merge enforced") + + +if __name__ == "__main__": + main() diff --git a/skills/start-right/scripts/setup_git_hooks.py b/skills/start-right/scripts/setup_git_hooks.py new file mode 100755 index 0000000..e9b7c44 --- /dev/null +++ b/skills/start-right/scripts/setup_git_hooks.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +""" +Set up git hooks using husky (Node.js) or lefthook (universal) + +This script: +- Detects project type +- Installs and configures appropriate git hooks tool +- Sets up pre-commit and pre-push hooks with validation checks +""" + +import subprocess +import sys +import json +from pathlib import Path + + +def run_command(cmd, check=True, capture_output=True): + """Run a shell command and return the result.""" + result = subprocess.run( + cmd, + shell=True, + check=check, + capture_output=capture_output, + text=True + ) + return result + + +def is_node_project(): + """Check if this is a Node.js project.""" + return Path("package.json").exists() + + +def setup_husky(checks): + """Set up husky for Node.js projects.""" + print("šŸ“¦ Installing husky...") + + # Install husky + try: + run_command("npm install --save-dev husky") + run_command("npx husky init") + print("āœ… Husky installed and initialized") + except subprocess.CalledProcessError as e: + print(f"āŒ Failed to install husky: {e}") + return False + + # Create pre-commit hook + pre_commit_commands = [] + if "format" in checks: + pre_commit_commands.append("npm run format:check || (echo 'āŒ Format check failed. Run npm run format' && exit 1)") + if "lint" in checks: + pre_commit_commands.append("npm run lint") + if "type-check" in checks: + pre_commit_commands.append("npm run type-check") + + if pre_commit_commands: + hook_content = "#!/bin/sh\n. \"$(dirname \"$0\")/_/husky.sh\"\n\n" + hook_content += "\n".join(pre_commit_commands) + + with open(".husky/pre-commit", "w") as f: + f.write(hook_content) + Path(".husky/pre-commit").chmod(0o755) + print("āœ… Created pre-commit hook") + + # Create pre-push hook + pre_push_commands = [] + if "test" in checks: + pre_push_commands.append("npm run test") + if "build" in checks: + pre_push_commands.append("npm run build") + + if pre_push_commands: + hook_content = "#!/bin/sh\n. 
\"$(dirname \"$0\")/_/husky.sh\"\n\n" + hook_content += "\n".join(pre_push_commands) + + with open(".husky/pre-push", "w") as f: + f.write(hook_content) + Path(".husky/pre-push").chmod(0o755) + print("āœ… Created pre-push hook") + + # Update package.json with scripts if they don't exist + update_package_json_scripts(checks) + + return True + + +def setup_lefthook(checks): + """Set up lefthook for any project type.""" + print("šŸ“¦ Installing lefthook...") + + # Check if lefthook is installed + try: + run_command("lefthook version") + except subprocess.CalledProcessError: + print("Installing lefthook globally...") + # Try to install via common package managers + try: + run_command("brew install lefthook", check=False) + except: + try: + run_command("go install github.com/evilmartians/lefthook@latest", check=False) + except: + print("āŒ Could not install lefthook. Please install manually:") + print(" brew install lefthook") + print(" OR") + print(" go install github.com/evilmartians/lefthook@latest") + return False + + # Create lefthook.yml configuration + config = { + "pre-commit": { + "parallel": True, + "commands": {} + }, + "pre-push": { + "parallel": False, + "commands": {} + } + } + + # Pre-commit checks + if "format" in checks: + config["pre-commit"]["commands"]["format-check"] = { + "run": "npm run format:check || echo 'Run: npm run format'", + } + + if "lint" in checks: + config["pre-commit"]["commands"]["lint"] = { + "run": "npm run lint" if is_node_project() else "echo 'Configure linting for your project'", + } + + if "type-check" in checks: + config["pre-commit"]["commands"]["type-check"] = { + "run": "npm run type-check", + } + + # Pre-push checks + if "test" in checks: + config["pre-push"]["commands"]["test"] = { + "run": "npm run test" if is_node_project() else "echo 'Configure tests for your project'", + } + + if "build" in checks: + config["pre-push"]["commands"]["build"] = { + "run": "npm run build" if is_node_project() else "echo 'Configure build for your project'", + } + + # Write configuration + import yaml + try: + with open("lefthook.yml", "w") as f: + yaml.dump(config, f, default_flow_style=False) + except ImportError: + # Fallback to manual YAML writing if pyyaml not available + with open("lefthook.yml", "w") as f: + f.write("pre-commit:\n") + f.write(" parallel: true\n") + f.write(" commands:\n") + for cmd_name, cmd_config in config["pre-commit"]["commands"].items(): + f.write(f" {cmd_name}:\n") + f.write(f" run: {cmd_config['run']}\n") + + f.write("\npre-push:\n") + f.write(" parallel: false\n") + f.write(" commands:\n") + for cmd_name, cmd_config in config["pre-push"]["commands"].items(): + f.write(f" {cmd_name}:\n") + f.write(f" run: {cmd_config['run']}\n") + + print("āœ… Created lefthook.yml") + + # Install git hooks + try: + run_command("lefthook install") + print("āœ… Installed git hooks") + except subprocess.CalledProcessError: + print("āš ļø Run 'lefthook install' to activate hooks") + + return True + + +def update_package_json_scripts(checks): + """Update package.json with necessary npm scripts if they don't exist.""" + if not is_node_project(): + return + + with open("package.json", "r") as f: + pkg = json.load(f) + + scripts = pkg.get("scripts", {}) + modified = False + + suggested_scripts = { + "format": "prettier --write .", + "format:check": "prettier --check .", + "lint": "eslint .", + "type-check": "tsc --noEmit", + "test": "jest", + "build": "tsc" + } + + for script_name, script_cmd in suggested_scripts.items(): + if script_name not in scripts: + 
# Only add if the check is enabled + check_type = script_name.split(":")[0] if ":" in script_name else script_name + if check_type in checks: + scripts[script_name] = script_cmd + modified = True + print(f"ā„¹ļø Added npm script: {script_name}") + + if modified: + pkg["scripts"] = scripts + with open("package.json", "w") as f: + json.dump(pkg, f, indent=2) + print("āœ… Updated package.json scripts") + + +def main(): + if len(sys.argv) < 2: + print("Usage: setup_git_hooks.py [--husky|--lefthook] [--checks format,lint,type-check,test,build]") + print("\nOptions:") + print(" --husky Use husky (Node.js projects only)") + print(" --lefthook Use lefthook (universal)") + print(" --checks Comma-separated list of checks to enable") + print("\nExample:") + print(" setup_git_hooks.py --husky --checks format,lint,test") + sys.exit(1) + + # Parse arguments + use_husky = "--husky" in sys.argv + use_lefthook = "--lefthook" in sys.argv + + # Get checks list + checks = ["format", "lint", "test"] # defaults + if "--checks" in sys.argv: + idx = sys.argv.index("--checks") + if idx + 1 < len(sys.argv): + checks = sys.argv[idx + 1].split(",") + + # Auto-detect if not specified + if not use_husky and not use_lefthook: + if is_node_project(): + use_husky = True + else: + use_lefthook = True + + print("šŸŖ Setting up git hooks...") + print(f" Tool: {'husky' if use_husky else 'lefthook'}") + print(f" Checks: {', '.join(checks)}") + print() + + if use_husky: + if not is_node_project(): + print("āŒ Husky requires a Node.js project (package.json)") + print(" Use --lefthook for non-Node projects") + sys.exit(1) + success = setup_husky(checks) + else: + success = setup_lefthook(checks) + + if success: + print("\nāœ… Git hooks configured!") + print("\nHooks will run:") + print(" Pre-commit:", ", ".join([c for c in checks if c in ["format", "lint", "type-check"]])) + print(" Pre-push:", ", ".join([c for c in checks if c in ["test", "build"]])) + + +if __name__ == "__main__": + main() diff --git a/skills/start-right/scripts/setup_tooling.py b/skills/start-right/scripts/setup_tooling.py new file mode 100755 index 0000000..3a471db --- /dev/null +++ b/skills/start-right/scripts/setup_tooling.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +""" +Detect project type and set up appropriate tooling configuration + +This script: +- Detects project type from files/directories +- Creates appropriate configuration files for linting, formatting, type checking +- Sets up test frameworks +""" + +import subprocess +import sys +import json +from pathlib import Path + + +def run_command(cmd, check=True, capture_output=True): + """Run a shell command and return the result.""" + result = subprocess.run( + cmd, + shell=True, + check=check, + capture_output=capture_output, + text=True + ) + return result + + +def detect_project_type(): + """Detect project type from existing files.""" + cwd = Path(".") + + if (cwd / "package.json").exists(): + with open("package.json") as f: + pkg = json.load(f) + deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})} + + if "react" in deps or "next" in deps: + return "react" + elif "vue" in deps: + return "vue" + elif "typescript" in deps or (cwd / "tsconfig.json").exists(): + return "typescript" + else: + return "node" + + elif (cwd / "Cargo.toml").exists(): + return "rust" + + elif (cwd / "go.mod").exists(): + return "go" + + elif any(cwd.glob("*.py")) or (cwd / "requirements.txt").exists() or (cwd / "pyproject.toml").exists(): + return "python" + + elif (cwd / "Dockerfile").exists(): + 
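+        # Dockerfile is checked only after the language-specific markers, so a
+        # containerized Node/Python/Rust/Go project still gets its language tooling.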
+        return "docker"
+
+    return "unknown"
+
+
+def setup_node_tooling():
+    """Set up tooling for Node.js projects."""
+    configs = {}
+
+    # ESLint configuration
+    configs[".eslintrc.json"] = {
+        "env": {
+            "node": True,
+            "es2021": True
+        },
+        "extends": "eslint:recommended",
+        "parserOptions": {
+            "ecmaVersion": "latest",
+            "sourceType": "module"
+        },
+        "rules": {}
+    }
+
+    # Prettier configuration
+    configs[".prettierrc.json"] = {
+        "semi": True,
+        "singleQuote": True,
+        "tabWidth": 2,
+        "trailingComma": "es5"
+    }
+
+    # Prettier ignore
+    configs[".prettierignore"] = """node_modules
+dist
+build
+coverage
+.next
+"""
+
+    return configs
+
+
+def setup_typescript_tooling():
+    """Set up tooling for TypeScript projects."""
+    configs = setup_node_tooling()
+
+    # Update ESLint for TypeScript
+    configs[".eslintrc.json"] = {
+        "env": {
+            "node": True,
+            "es2021": True
+        },
+        "extends": [
+            "eslint:recommended",
+            "plugin:@typescript-eslint/recommended"
+        ],
+        "parser": "@typescript-eslint/parser",
+        "parserOptions": {
+            "ecmaVersion": "latest",
+            "sourceType": "module"
+        },
+        "plugins": ["@typescript-eslint"],
+        "rules": {}
+    }
+
+    # Basic TypeScript config if not exists
+    if not Path("tsconfig.json").exists():
+        configs["tsconfig.json"] = {
+            "compilerOptions": {
+                "target": "ES2020",
+                "module": "commonjs",
+                "lib": ["ES2020"],
+                "outDir": "./dist",
+                "rootDir": "./src",
+                "strict": True,
+                "esModuleInterop": True,
+                "skipLibCheck": True,
+                "forceConsistentCasingInFileNames": True
+            },
+            "include": ["src/**/*"],
+            "exclude": ["node_modules", "dist"]
+        }
+
+    return configs
+
+
+def setup_python_tooling():
+    """Set up tooling for Python projects."""
+    configs = {}
+
+    # Black configuration (pyproject.toml section)
+    configs[".black.toml"] = """[tool.black]
+line-length = 88
+target-version = ['py39', 'py310', 'py311']
+include = '\\.pyi?$'
+"""
+
+    # Flake8 configuration
+    configs[".flake8"] = """[flake8]
+max-line-length = 88
+extend-ignore = E203, W503
+exclude = .git,__pycache__,venv,.venv,build,dist
+"""
+
+    # MyPy configuration
+    configs["mypy.ini"] = """[mypy]
+python_version = 3.9
+warn_return_any = True
+warn_unused_configs = True
+disallow_untyped_defs = True
+"""
+
+    return configs
+
+
+def setup_rust_tooling():
+    """Set up tooling for Rust projects."""
+    configs = {}
+
+    # Rustfmt configuration
+    configs["rustfmt.toml"] = """edition = "2021"
+max_width = 100
+hard_tabs = false
+tab_spaces = 4
+"""
+
+    # Clippy configuration (in Cargo.toml, return as string for manual addition)
+    return configs
+
+
+def write_configs(configs):
+    """Write configuration files to disk."""
+    for filename, content in configs.items():
+        if isinstance(content, dict):
+            with open(filename, "w") as f:
+                json.dump(content, f, indent=2)
+        else:
+            with open(filename, "w") as f:
+                f.write(content)
+        print(f"āœ… Created {filename}")
+
+
+def main():
+    project_type = sys.argv[1] if len(sys.argv) > 1 else detect_project_type()
+
+    print(f"šŸ”§ Setting up tooling for {project_type} project...")
+    print()
+
+    configs = {}
+
+    if project_type in ["node", "javascript"]:
+        configs = setup_node_tooling()
+    elif project_type in ["typescript", "react", "vue"]:
+        configs = setup_typescript_tooling()
+    elif project_type == "python":
+        configs = setup_python_tooling()
+    elif project_type == "rust":
+        configs = setup_rust_tooling()
+    else:
+        print(f"āš ļø Unknown project type: {project_type}")
+        print("Skipping tooling setup.")
+        return
+
+    if configs:
+        write_configs(configs)
+        print(f"\nāœ… Tooling configuration complete for {project_type}!")
+
+    print("\nNext steps:")
+    print("  1. Install dependencies for linting/formatting tools")
+    print("  2. Set up pre-commit hooks")
+    print("  3. Configure GitHub Actions workflows")
+
+
+if __name__ == "__main__":
+    main()
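
For reference, a minimal usage sketch for the two scripts above. It is not part of the plugin files: the relative script paths and the driver itself are illustrative assumptions, while the flag values mirror the usage text the scripts print. It assumes Python 3 and execution from a project root.

#!/usr/bin/env python3
# Hypothetical driver, for illustration only: configure tooling first, then git hooks.
import subprocess

SCRIPTS = "skills/start-right/scripts"  # assumed location of the plugin scripts

# setup_tooling.py auto-detects the project type when no argument is given
# (pass e.g. "typescript" to override).
subprocess.run(["python3", f"{SCRIPTS}/setup_tooling.py"], check=True)

# Husky-based hooks for a Node.js project, matching the script's own usage example.
subprocess.run(
    ["python3", f"{SCRIPTS}/setup_git_hooks.py",
     "--husky", "--checks", "format,lint,test"],
    check=True,
)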