From 866f3dbf1898d050f626cd5c5277e9d211734f99 Mon Sep 17 00:00:00 2001 From: Zhongwei Li Date: Sun, 30 Nov 2025 09:02:31 +0800 Subject: [PATCH] Initial commit --- .claude-plugin/plugin.json | 12 + README.md | 3 + plugin.lock.json | 165 + skills/create-lifeguard-rules/SKILL.md | 52 + skills/debugger/SKILL.md | 146 + skills/fast-repo-context/EXAMPLES.md | 57 + skills/fast-repo-context/SKILL.md | 84 + skills/fast-repo-context/scripts/sgrep.sh | 31 + skills/git-jj/SKILL.md | 205 + skills/git-jj/references/git_workflows.md | 32 + skills/git-jj/references/jj_bookmarks.md | 81 + skills/git-jj/references/jj_revset.md | 87 + skills/git-jj/references/jj_template.md | 86 + skills/git-jj/references/jj_workflows.md | 177 + skills/git-jj/scripts/repo_check.sh | 30 + skills/kiro/SKILL.md | 272 ++ skills/local-research/SKILL.md | 189 + skills/local-research/scripts/research_ops.py | 236 ++ skills/rust-architect/SKILL.md | 3332 +++++++++++++++++ skills/skill-creator/SKILL.md | 183 + skills/skill-creator/scripts/init_skill.py | 303 ++ skills/tdd-enforce/SKILL.md | 60 + skills/update-package-version/SKILL.md | 69 + skills/zellij-config/SKILL.md | 503 +++ .../assets/config_templates/basic.kdl | 49 + skills/zellij-config/assets/example_asset.txt | 24 + .../zellij-config/references/api_reference.md | 34 + .../references/configuration_options.md | 421 +++ .../references/layout_examples.md | 411 ++ .../references/theme_examples.md | 399 ++ .../zellij-config/scripts/convert_themes.py | 194 + skills/zellij-config/scripts/create_layout.py | 178 + skills/zellij-config/scripts/example.py | 19 + skills/zellij-config/scripts/setup_zellij.py | 217 ++ 34 files changed, 8341 insertions(+) create mode 100644 .claude-plugin/plugin.json create mode 100644 README.md create mode 100644 plugin.lock.json create mode 100644 skills/create-lifeguard-rules/SKILL.md create mode 100644 skills/debugger/SKILL.md create mode 100644 skills/fast-repo-context/EXAMPLES.md create mode 100644 skills/fast-repo-context/SKILL.md create mode 100755 skills/fast-repo-context/scripts/sgrep.sh create mode 100644 skills/git-jj/SKILL.md create mode 100644 skills/git-jj/references/git_workflows.md create mode 100644 skills/git-jj/references/jj_bookmarks.md create mode 100644 skills/git-jj/references/jj_revset.md create mode 100644 skills/git-jj/references/jj_template.md create mode 100644 skills/git-jj/references/jj_workflows.md create mode 100755 skills/git-jj/scripts/repo_check.sh create mode 100644 skills/kiro/SKILL.md create mode 100644 skills/local-research/SKILL.md create mode 100755 skills/local-research/scripts/research_ops.py create mode 100644 skills/rust-architect/SKILL.md create mode 100644 skills/skill-creator/SKILL.md create mode 100644 skills/skill-creator/scripts/init_skill.py create mode 100644 skills/tdd-enforce/SKILL.md create mode 100644 skills/update-package-version/SKILL.md create mode 100644 skills/zellij-config/SKILL.md create mode 100644 skills/zellij-config/assets/config_templates/basic.kdl create mode 100644 skills/zellij-config/assets/example_asset.txt create mode 100644 skills/zellij-config/references/api_reference.md create mode 100644 skills/zellij-config/references/configuration_options.md create mode 100644 skills/zellij-config/references/layout_examples.md create mode 100644 skills/zellij-config/references/theme_examples.md create mode 100644 skills/zellij-config/scripts/convert_themes.py create mode 100644 skills/zellij-config/scripts/create_layout.py create mode 100755 skills/zellij-config/scripts/example.py create mode 
100644 skills/zellij-config/scripts/setup_zellij.py diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..23e6f4b --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,12 @@ +{ + "name": "claude-code-skills", + "description": "skills, plugins", + "version": "1.0.0", + "author": { + "name": "Towry Wang", + "email": "tovvry#gmail.com" + }, + "skills": [ + "./skills" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..a85f881 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# claude-code-skills + +skills, plugins diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..4574271 --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,165 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:towry/dots:conf/claude-local-marketplace", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "f456b01abd9942848347929846ca97dacef194bb", + "treeHash": "9180d0347e016814d48c16e73d8d838a042365aeb3f85dbde1523e137733bf3b", + "generatedAt": "2025-11-28T10:28:43.628742Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "claude-code-skills", + "description": "skills, plugins", + "version": "1.0.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "2164ac16d0d2c005d2fd4ef51020651fa650ffac7c3b273d485a0909bb7f7d17" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "86f63d827747a188430bea39fd5fc6363726fb1916e4173e230e4bfee4d51f43" + }, + { + "path": "skills/update-package-version/SKILL.md", + "sha256": "0e19a89a4f8cb26cb6904e0b86d7b7649061353403b8594692905d29eebeccc3" + }, + { + "path": "skills/tdd-enforce/SKILL.md", + "sha256": "d8ae266efafe9cfa92bea61f3cf4ecb20737a37859caf410714dc2d8da7c8b09" + }, + { + "path": "skills/rust-architect/SKILL.md", + "sha256": "20afc3084d6aeb0d6f9a24e790cd72512d5fbde479c55df02acfc3e1435729fd" + }, + { + "path": "skills/fast-repo-context/EXAMPLES.md", + "sha256": "a451268b1b25400cf6c57901fd3eeb4cca8f03640195afc66638780dea4a8697" + }, + { + "path": "skills/fast-repo-context/SKILL.md", + "sha256": "8cffa7c28803c20ef88102f035afe4ee68431787dace0543233b9980f5cc8bec" + }, + { + "path": "skills/fast-repo-context/scripts/sgrep.sh", + "sha256": "bd9451aed5c7d9a962234638925c7a2f20f44569fa04b6007756cb82607c399e" + }, + { + "path": "skills/skill-creator/SKILL.md", + "sha256": "2651e0086c759b2ba6cd9fb920cfd8220256a88fe5d8ff06d07c60dceac54f3d" + }, + { + "path": "skills/skill-creator/scripts/init_skill.py", + "sha256": "7ab14265db376a9be866c33e87c4bc6d25eb0a02c5ae7e411d1024fb5f806d3e" + }, + { + "path": "skills/git-jj/SKILL.md", + "sha256": "a2305af3eaba23b092660486992364678e245fcb252db638453a597c4784451d" + }, + { + "path": "skills/git-jj/references/jj_workflows.md", + "sha256": "bd652182986caef0887be3bed3e1cfc22f0fb1f2321e60bdbc3051fb10738419" + }, + { + "path": "skills/git-jj/references/jj_template.md", + "sha256": "0876d18bfc10cd484f72ba0966245ecf363ac20b2ad8483001671bff796c0d50" + }, + { + "path": "skills/git-jj/references/jj_revset.md", + "sha256": "a8baba41c0858bd31c6400fa43cec444876f804d4e5de7d6ea8bdaa3f9a2fd53" + }, + { + "path": "skills/git-jj/references/git_workflows.md", + "sha256": 
"73f8b11ecdc58cf1bfc29d69bbec564b6e1138c60be1b4029a413c37782a5170" + }, + { + "path": "skills/git-jj/references/jj_bookmarks.md", + "sha256": "a0c331c19f3b184c82889b035ee200db2abee041abf188a6d8a7d4fce4b30557" + }, + { + "path": "skills/git-jj/scripts/repo_check.sh", + "sha256": "aa0335e09e8b843afd566335e77271023da4dcc3bedb2fba1467a6dea17e6bd2" + }, + { + "path": "skills/kiro/SKILL.md", + "sha256": "6fa1c194619086bacaefb11f553097a0e643a83bb4bb6904ef5203c6917e9ea8" + }, + { + "path": "skills/zellij-config/SKILL.md", + "sha256": "2eb8d68bfb61235a5195eedd713da5db83de439a84162d04011c5308315bfa2a" + }, + { + "path": "skills/zellij-config/references/api_reference.md", + "sha256": "d04d861e0c220ff47859be48a2be9ac7ea4e6ad34e6e595fb3ab9f65a555177c" + }, + { + "path": "skills/zellij-config/references/theme_examples.md", + "sha256": "9c025e5fb6d79feff60803398236fbb1a716e828c4fa13e9c97c5d1370d38bc4" + }, + { + "path": "skills/zellij-config/references/layout_examples.md", + "sha256": "e1e78b1cef5e7482f8e178301216c65d611c95163b9ef3463c0970ad7405138f" + }, + { + "path": "skills/zellij-config/references/configuration_options.md", + "sha256": "8bfbea1805633880fb2564aa984824fd94561130576cb36fd921239b4fcb18bd" + }, + { + "path": "skills/zellij-config/scripts/setup_zellij.py", + "sha256": "0246e2b1591952281cc2889b0243fc6eff0696e9b4a503d259f838e8174a5bcb" + }, + { + "path": "skills/zellij-config/scripts/create_layout.py", + "sha256": "ba0f7796342d421eb0780cacd391d32ee17d47fb4467bb2347eed9ecfcd9c15f" + }, + { + "path": "skills/zellij-config/scripts/example.py", + "sha256": "a8667e1b79aeaccba95fdb7b8ccb304fbffd89a11a8d315c04fd260a19219dc0" + }, + { + "path": "skills/zellij-config/scripts/convert_themes.py", + "sha256": "dfb033db1d0fad22ffe64e4975e1fb3cf9d79ff3608560e5c91f8a817fad5e73" + }, + { + "path": "skills/zellij-config/assets/example_asset.txt", + "sha256": "3410b1e7d80c9431ac98834d98da8214e0a658fef14d32b86a75ddf2bbe6e2d4" + }, + { + "path": "skills/zellij-config/assets/config_templates/basic.kdl", + "sha256": "3939d5b8d9ae0fe1f4f146fc78e8b8ee72dc99ff83821d761766d6372e9dfb03" + }, + { + "path": "skills/debugger/SKILL.md", + "sha256": "e9a941f25cb95d0603d641f1d5b8aff80c66f499a8877a2d95c26c0de797b564" + }, + { + "path": "skills/create-lifeguard-rules/SKILL.md", + "sha256": "97b6e04d200fa571e8d2f5cc8db8c4d055a2c92948bdaf704051018f68603975" + }, + { + "path": "skills/local-research/SKILL.md", + "sha256": "d037b386127bb20683e12411bc640e660b159ee4e79c312c1cdfd95f34e78909" + }, + { + "path": "skills/local-research/scripts/research_ops.py", + "sha256": "be42f8bbbdf05238bb67c474cc6c3ab762950aaddf4ee1ce2114d9923a6f5d01" + } + ], + "dirSha256": "9180d0347e016814d48c16e73d8d838a042365aeb3f85dbde1523e137733bf3b" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/create-lifeguard-rules/SKILL.md b/skills/create-lifeguard-rules/SKILL.md new file mode 100644 index 0000000..ebcfc05 --- /dev/null +++ b/skills/create-lifeguard-rules/SKILL.md @@ -0,0 +1,52 @@ +--- +name: create-lifeguard-rules +description: This skill should be used when creating lifeguard rules for code review +--- + +Lifeguard rules is used to guide code review, to ensure code quality. + +# Lifeguard rule spec + +**filename**: `lifeguard.yaml` + +**content spec**: + +```yaml +# comment +rules: + # section description in comment (if needed) + - name: "LG--001 Rule name ..., like: No magic numbers" + description: "Details about the rule ..." + - name: "LG--002 Another rule..." 
+ description: "Details about another rule ..." + + # section description in comment (if needed) +``` + +`` is a short keyword that represents the rule category, like `vue-style`, `react-perf`, `security`, etc.; keep it as short as possible. + +# Rule content guidelines + +When generating rules, please follow these guidelines: + +- Understand the current codebase; identify common pitfalls, anti-patterns, and areas for improvement. +- Find project conventions (component structure, naming, etc.) and convert them to rules. +- Each rule should have a clear and concise name. +- Each rule should focus on a single aspect of code quality or best practice; instead of one complex rule, create multiple simple rules. +- Each rule name should be consistent with its description; for example, if the description focuses on a specific language feature, the name should reflect that, instead of pairing a generic name with a specific description. +- When creating rules, avoid rules that enforce a single or rare case; for example, a function with a specific name should not be a rule unless that name is widely used in the codebase and has a specific meaning. +- General rules to prevent bugs. +- General rules to avoid complex code, hacks, and work-around code. +- Dependency import path correctness. +- **Rule items order**: General rules come first, specific/project-only rules come later. + + +# Validation + +Use Python to validate the lifeguard.yaml file format and ensure it has no YAML syntax errors. + +``` +uv run --with pyyaml -- python -c "import sys, yaml; yaml.safe_load(sys.stdin)" < lifeguard.yaml && echo "✅ VALID" || echo "❌ NOT VALID" +``` + +Content review after creation: ask the oracle subagent to verify the content of the lifeguard.yaml file, ensuring the rules are reasonable and useful for code review; ask it to review the existing rules, not to rewrite them from scratch. diff --git a/skills/debugger/SKILL.md b/skills/debugger/SKILL.md new file mode 100644 index 0000000..15f43b1 --- /dev/null +++ b/skills/debugger/SKILL.md @@ -0,0 +1,146 @@ +--- +name: debugger +description: Systematically trace bugs backward through the call stack to find the original trigger. Use this skill when the user wants to debug complex issues, has a bug that is hard to spot, or says "not working" while you are debugging an issue. +--- + +# When to use + +- User is frustrated with your attempts +- Bugs are vague and not easy to spot +- User says "debug this", "debug it", or something similar + +# Tools, subagents, skills that might be helpful + +- **Critical**: `fast-repo-context` claude skill for fast codebase search and analysis; highly recommended. Load it with the `Skill` tool; it can search the current project or other projects. +- `kg` knowledge graph search +- Load the `git-jj` claude skill for vcs operations (logs, blame, diff, etc.); use `bash ~/.claude/skills/git-jj/scripts/repo_check.sh` to check whether the repo is jj- or git-managed. + - **Debugging-specific commands:** + - `git blame ` / `jj file annotate ` - find who changed a line and when + - `git log -p ` / `jj log -p ` - see all changes to a specific file + - `git bisect` - binary search for the commit that introduced a bug + - `git diff ..` / `jj diff -r ..` - compare specific revisions + - `jj log -r "file('path')"` - find commits that touched a file +- `outbox` subagent for high-level debugging ideas and strategies; invoke the `Task` tool with `outbox`.
+- `oracle` subagent for advanced reasoning about complex issues, decision making; Invoke the `Task` tool with `oracle`. +- `brightdata` mcp tool: use it to search on the web for similar issues, error messages, stack traces. +- `github` mcp tool: use it to search library related issues in github issues. +- Other command tools that you already know +- As a last resort, run `claude --model openrouter/anthropic/claude-opus-4.5 -p "" --tools "Read,Grep,Glob"` for help from the most advanced SWE LLM. This is expensive, so use it wisely and compose the prompt carefully. + +## When to use git history for debugging + +Use git history when: +- **Regression bugs**: User says "it worked before" or "it broke after update" +- **Unknown code changes**: You find suspicious code but don't understand why it was written that way +- **Recent breakage**: Bug appeared recently and might correlate with recent commits +- **Blame investigation**: Need to find the original author/context of problematic code + +Skip git history when: +- Bug is clearly a logic error in current code +- Issue is configuration or environment related +- User confirms this is new code, not a regression + +# Debugging process + +1. **Understand** the issue/bug +2. **Fetch context with fast-repo-context skill** of the codebases. Use `kg` to search the knowledge graph in case we solved this before. Use `fast-repo-context` skill(recommended) or `rg` bash tool to search the codebases with possible keywords, and read comments or documents. +3. **Review available tools** and subagents (fd, rg, kg, git, etc.) +4. **Start debugging iterations** - Each iteration MUST be explicitly labeled (e.g., "**Iteration 1**", "**Iteration 2**") + - 4.1 Get debugging ideas from `outbox` subagent with context from steps 2 and 3. Include the tools and subagents you have and what they do, so `outbox` can give advice based on your available tools. + - 4.2 **Check git history** (if applicable): Use `git-jj` skill to investigate version history when the bug might be a regression. Run blame on suspicious lines, check recent file changes, or use bisect to find the breaking commit. See "When to use git history" section above. + - 4.3 Follow instructions from `outbox`, trace back to the root cause of the bug/issue + - 4.4 Propose a fix or the root of cause to user, let user review it. + - 4.5 Apply the fix if user agrees. + - 4.5 Ask user to confirm the fix +5. **Iterate** Step 4 until user has confirmed the bug/issue is resolved. Keep key findings from each iteration and feed all findings and attempted methods to `outbox` for the next iteration. + +## Iteration tracking + +- **Always announce iteration number** at the start: "**Iteration N**: Trying X approach..." +- Keep a mental log of what was tried in each iteration +- When consulting `outbox` after iteration 1, always include findings from ALL previous iterations + +## Outbox prompt template + +**Note**: `outbox` is a readonly/thinking agent. It cannot use tools (no Read, Write, Execute). It can only reason and advise. You must provide all relevant context in the prompt. + +### First iteration: +``` +**Bug**: +**Symptoms**: +**Relevant files**: +**Code context**: + +**Available tools** (I can execute these): +- rg, fd (search) +- kg (knowledge graph - check past solutions) +- git-jj (blame, file history, bisect, diff) +- Read, Grep, Glob (file inspection) +- Execute (run commands, add logs) + +**Ask**: Given this bug and these tools, what debugging strategy should I follow? Provide specific actionable steps. 
At the end of your advice, include a "feedback request" like: "If this doesn't work, tell me [specific info] for next iteration." +``` + +### Subsequent iterations: +``` +**Iteration**: N (where N > 1) + +**Bug**: +**Symptoms**: +**Relevant files**: +**Code context**: + +**Available tools** (I can execute these): +- rg, fd (search) +- kg (knowledge graph - check past solutions) +- git-jj (blame, file history, bisect, diff) +- Read, Grep, Glob (file inspection) +- Execute (run commands, add logs) + +**What I tried**: +**Findings**: +**Current hypothesis**: + +**Ask**: Based on findings, what should I try next? At the end of your advice, include a "feedback request" like: "If this doesn't work, tell me [specific info] for next iteration." +``` + +## Notes + +- `kg` should be used for finding key information related to the issues. Do not use it to save unverified debugging assumptions. After user confirms the fix, you can ask whether to save the fix to the knowledge graph. +- You have advanced tools, subagents and skills to debug complex issues. Do not hesitate to use them. + +# Real world example + + +The tree city picker does nothing when I click the input + + + +I will gather the code modules related to this issue... + +> next phase + +Ok, I have located the files. Now let me check what tools and subagents might help me debug this issue, and ask `outbox` subagent for ideas. + +> next phase + +**Iteration 1**: Following outbox advice, I'll check the click handler binding... + +> tried approach, didn't work + +**Iteration 2**: The click handler exists but event propagation might be blocked. Let me trace the event flow... + +> found something suspicious + +**Iteration 3**: Found it! The parent container has `pointer-events: none`. Here is the fix... + +Hi, here is the root cause of this issue/bug: + +> -- omit for demo + +Please confirm it. + + + +great work! + diff --git a/skills/fast-repo-context/EXAMPLES.md b/skills/fast-repo-context/EXAMPLES.md new file mode 100644 index 0000000..7e1aeb9 --- /dev/null +++ b/skills/fast-repo-context/EXAMPLES.md @@ -0,0 +1,57 @@ +# Fast Repo Context - Examples + +## Basic Usage + +### Find authentication flow +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "how user authentication and login works" +``` + +### Find where data is validated +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "input validation before saving to database" +``` + +### Find error handling +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "how API errors are handled and returned to client" +``` + +### Find component rendering logic +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "conditional rendering based on user permissions" +``` + +## Real-World Scenarios + +### Scenario 1: Debug a feature + +**User:** "Find where the cart total is calculated incorrectly" + +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "shopping cart total calculation with discounts and taxes" +``` + +Then read the specific files from results. + +--- + +### Scenario 2: Understand architecture + +**User:** "How does the payment system work?" 
+ +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "payment processing flow from checkout to confirmation" +``` + +--- + +### Scenario 3: Find similar patterns + +**User:** "Find all places that call external APIs" + +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "HTTP requests to external services with error handling" +``` + + diff --git a/skills/fast-repo-context/SKILL.md b/skills/fast-repo-context/SKILL.md new file mode 100644 index 0000000..2befcc2 --- /dev/null +++ b/skills/fast-repo-context/SKILL.md @@ -0,0 +1,84 @@ +--- +name: fast-repo-context +description: "Semantic code search using sgrep. Use when: exploring code, searching code snippets, finding implementations by intent, understanding how features work. Triggers (semantic or similar meaning): [fast context], [search code], [find where], [how does X work], [understand codebase], [research codebase], [find X], [locate X], [code search], [grep code], [where is], [let me search]." +--- + +# Fast Repo Context + +Semantic grep (the `sgrep` script) for code search with natural language queries. Note: it only returns code snippets (the what/where), not explanations (the how), so focus your queries on what/where. + +## Tool + +```bash +bash ~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "" +``` + +**Safety:** The script checks that the current directory is a git or jj repo before running, to prevent accidental indexing of large/wrong directories. + +**Options:** +- `--json` - Structured JSON output (recommended for agents) +- `-n, --limit ` - Max results (default: 10) +- `-c, --context` - Show extra context around matches +- `--glob ` - Restrict to file patterns (repeatable) +- `--filters ` - Filter by metadata (e.g., `lang=rust`) + +## When to Use + +- Exploring unfamiliar codebases +- Finding code by intent/behavior (not just keywords) +- Understanding how features are implemented +- Locating related code across files +- Finding something in another project/repo on disk + +## Workflow + +1. **Use sgrep script** for semantic search +2. **Read specific files** from results for details +3. **(Optional)** Query the `kg` knowledge graph for additional context + +## Examples + +### Find authentication logic +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "user login and session management" +``` + +### Find in another project/repo + +Use `exa --tree -D -L 2 ~/workspace` to list all projects in ~/workspace; see the sketch below.
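A minimal sketch of the listing step, assuming `exa` is installed (the `find` fallback below is an assumption, not part of the skill):

```bash
# List project directories under ~/workspace (two levels deep, directories only)
exa --tree -D -L 2 ~/workspace

# Fallback if exa is not installed (assumption)
find ~/workspace -mindepth 1 -maxdepth 2 -type d
```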
+ +``` +cd another-dir-abs-path && ~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "file upload handling that use api foo/bar" +``` + +### Find error handling patterns +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "how errors are caught and reported to users" +``` + +### Find API endpoints +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "REST endpoints for user profile operations" +``` + +### Find database queries +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "queries that fetch user data with pagination" +``` + +### Find React hooks usage +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json "custom hooks for form validation" +``` + +### With filters +```bash +~/.claude/skills/fast-repo-context/scripts/sgrep.sh --json --glob "*.ts" --limit 5 "error handling middleware" +``` + +## Tips + +- **Be descriptive**: "function that validates email format" > "email validation" +- **Describe intent**: "code that prevents duplicate submissions" > "debounce" +- **Ask questions**: "where is the shopping cart total calculated?" + diff --git a/skills/fast-repo-context/scripts/sgrep.sh b/skills/fast-repo-context/scripts/sgrep.sh new file mode 100755 index 0000000..7de89b9 --- /dev/null +++ b/skills/fast-repo-context/scripts/sgrep.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Wrapper for sgrep search with safety checks +# Prevents accidental indexing of large/wrong directories +# Supports both jj and git repositories + +REPO_ROOT="" + +# Check for jj repository first (jj often operates atop git) +if jj root &>/dev/null; then + REPO_ROOT=$(jj root) +# Check for git repository +elif git rev-parse --git-dir &>/dev/null; then + REPO_ROOT=$(git rev-parse --show-toplevel) +fi + +if [[ -z "$REPO_ROOT" ]]; then + echo "ERROR: Not inside a git or jj repository!" + echo "" + echo "Current directory: $(pwd)" + echo "" + echo "sgrep will index the entire directory which can be slow for large folders." + echo "Please cd into the correct repository before running this command." + exit 1 +fi + +echo "Repo: $REPO_ROOT" +echo "" + +exec sgrep search "$@" diff --git a/skills/git-jj/SKILL.md b/skills/git-jj/SKILL.md new file mode 100644 index 0000000..f087f3a --- /dev/null +++ b/skills/git-jj/SKILL.md @@ -0,0 +1,205 @@ +--- +name: git-jj +description: "Used when working with vcs/git/jj/commit task, triggered by phrase like [git], [git commit], [diff], [push], [check git status], [create git branch], [git worktree], [git sqaush], [review with changes], [review with lifeguard], [jj], [jj commit], [jj changes], [commit changes]" +--- + +# Git/JJ VCS Skill + +**Note**: `` refers to the git skill directory (~/.claude/skills/git-jj/) containing this SKILL.md file. + +## Purpose +Provide specialized workflows for Git and Jujutsu (jj) version control systems with automatic repository detection, command reference lookup, and safe operation practices. 
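As a rough sketch, the detection-and-branch flow described in the steps below can be driven directly from the script's output (the commands shown are illustrative, not prescriptive):

```bash
# Detect the VCS type, then branch to the matching workflow
vcs=$(bash ~/.claude/skills/git-jj/scripts/repo_check.sh)
case "$vcs" in
  jj)      jj --no-pager status ;;   # JJ branch: see jj_workflows.md
  git)     git status ;;             # Git branch: see git_workflows.md
  no-repo) echo "No repository found; run the initialization workflow" ;;
esac
```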
+ +## Trigger Conditions +Activate this skill for VCS tasks involving Git or Jujutsu (jj), such as: +- Checking status, diffs, logs +- Staging, committing, branching/bookmarks +- Pushing, pulling, fetching +- Managing worktrees/workspaces +- Integrating with lifeguard subagent for code reviews +- Repository initialization and configuration + +## Repository Detection & Branching Workflow + +### Step 1: Detect Repository Type +Run the repository detection script using the Bash tool: + +```bash +bash ~/.claude/skills/git-jj/scripts/repo_check.sh +``` + +**Important**: Execute this command from the repository root (user's current working directory). The script checks for `.jj` or `.git` folders in the current directory. + +The script outputs one of three values to stdout: +- `jj` - Jujutsu repository detected (.jj folder exists) +- `git` - Git repository detected +- `no-repo` - No repository found + +**Priority**: If both `.jj` folder and `.git` exist (common when jj operates atop git), the script returns `jj` to respect jj-first preference. + +### Step 2: Branch to Appropriate Workflow + +| Output | Action | +|--------|--------| +| `jj` | Follow **JJ Branch** workflow. Read `~/.claude/skills/git-jj/references/jj_workflows.md` for core commands and working copy model. | +| `git` | Follow **Git Branch** workflow. Read `~/.claude/skills/git-jj/references/git_workflows.md` for command syntax. | +| `no-repo` | Proceed to **Repository Initialization** workflow below. | + +#### JJ Branch: Conditional Reference read + +When following the JJ branch, load additional references based on task complexity: + +**Always read first:** + +- `~/.claude/skills/git-jj/references/jj_workflows.md` - Core commands, working copy model, WIP pattern, filesets + +**Read conditionally when needed:** +- **Bookmark operations** (create, track, push, conflicts): Read `~/.claude/skills/git-jj/references/jj_bookmarks.md` +- **Complex history queries** (ranges, filtering, ancestry): Read `~/.claude/skills/git-jj/references/jj_revset.md` +- **Automation/structured output** (CI scripts, release notes): Read `~/.claude/skills/git-jj/references/jj_template.md` + +**Reference selection rules:** +- User mentions "bookmark", "track remote", "push bookmark" → Load `jj_bookmarks.md` +- User asks "show commits where...", "filter by author", "range between..." → Load `jj_revset.md` +- User requests "JSON output", "custom format", "parse for script" → Load `jj_template.md` +- Multiple file path filtering (globs, exclude patterns) → Already covered in `jj_workflows.md` (Fileset section) + +#### Git Branch: Reference Loading +- Read `/references/git_workflows.md` for all Git operations (covers branches, worktrees, stashing, troubleshooting) + +### Step 3: Repository Initialization (if no-repo) +1. Use `AskUserQuestion` tool to ask: "No repository found. Initialize jj or git?" + - Options: "jj (recommended)", "git" + - header: "VCS choice" +2. Based on user selection: + - **jj**: Run `jj init` or `jj git init` (if git backend desired), then load `references/jj_workflows.md` + - **git**: Run `git init`, then load `references/git_workflows.md` +3. After initialization, proceed with original user intent + +## Common Workflows + +For command syntax, see reference files. This section covers workflow orchestration. + +### 1. Show Status/Diff +- Always gather diff output via `Bash` tool BEFORE invoking other tools +- See `jj_workflows.md` or `git_workflows.md` for commands + +### 2. 
Review Changes with Lifeguard +- Run `scripts/repo_check.sh` first to confirm VCS type. +- Git workflow (small diff <500 lines): you MAY embed the git diff directly. For larger diffs, prefer letting lifeguard fetch them itself. +- JJ workflow (preferred): DO NOT paste full `jj diff` output unless very small (<200 lines). Instead launch the lifeguard subagent with an execution plan listing jj commands it should run to gather its own context. +- Rationale: JJ diffs can be large and lifeguard has Bash(jj:*) capability; letting it execute jj commands avoids prompt bloat and enables multi‑commit exploration. +- Skill loading directive: In every lifeguard prompt include either (a) explicit phrase: `please load git claude skill` (this triggers skill reference loading), OR (b) inline list of reference file paths you want it to consult. Prefer phrase for brevity; attach paths when focusing on specialized areas (revsets, bookmarks, templates). + +Reference file path list + +- ~/.claude/skills/git-jj/references/git_workflows.md +- Read ~/.claude/skills/git-jj/references/jj_workflows.md (when commit changes, logs, review with jj etc) +- ~/.claude/skills/git-jj/references/jj_bookmarks.md +- ~/.claude/skills/git-jj/references/jj_revset.md +- ~/.claude/skills/git-jj/references/jj_template.md + +Use this canonical jj command set in the lifeguard prompt (adjust as needed): + +``` +# Core context collection +jj --no-pager status +jj --no-pager log -n 20 --no-graph +jj --no-pager diff # working copy changes + +# Targeted commit review (replace ) +jj --no-pager show + +# Compare parent vs current working copy commit +jj --no-pager diff -r @-..@ + +# Multi-commit / ancestry exploration examples +jj --no-pager log -r "ancestors(@, 10)" +jj --no-pager log -r "descendants(@, 5)" +``` +Optional revset queries when user asks for filtering: +``` +# Author filter +jj --no-pager log -r "author('name@example.com') & ancestors(@, 20)" +# Files touched +jj --no-pager log -r "file('src/**') & ancestors(@, 30)" +``` +Bookmark/WIP context to include in the lifeguard prompt (if applicable): +- Current bookmark name +- Whether parent description starts with "WIP:" and intended final message + +Prompt template example (JJ): +``` +Please load git claude skill. +Review JJ working copy and recent commits. Run the listed jj commands (modify as needed) to inspect changes; focus on correctness, style, and potential refactors. Repository uses JJ atop git. +Commands to run: +1. jj --no-pager status +2. jj --no-pager diff +3. jj --no-pager log -n 20 --no-graph +4. jj --no-pager diff -r @-..@ +If needed: jj --no-pager show , jj --no-pager log -r "ancestors(@, 10)". +Bookmark: +Parent commit description: +Relevant references (if needed): ~/.claude/skills/git-jj/references/jj_revset.md +``` +Git prompt template (large diff scenario): +``` +Please load git claude skill. +Review pending changes. Fetch diffs yourself; do NOT rely on inline diff copy. Focus on correctness, style, and commit structuring. +Commands to run: +1. git status +2. git diff +3. git log --oneline -n 20 +``` +When focusing on a subset of files, pass a short list of paths (not full diff). Lifeguard will retrieve their diffs directly. + +Summary: +- Git: small diff inline OK; large diff let lifeguard fetch; always include skill loading phrase. +- JJ: pass command plan + context, not full diff; include skill loading phrase or attach needed reference paths. + +### 3. 
Stage Changes +- **JJ**: Auto-tracks all changes (no staging needed) +- **Git**: Standard `git add` workflow + +### 4. Commit Changes +- **CRITICAL**: NEVER commit without explicit user confirmation +- Before committing: Show summary and ask user to confirm +- **JJ**: Always use `-m` flag (bare `jj commit` opens editor, blocks agent). See `~/.claude/skills/git-jj/references/jj_workflows.md` for WIP pattern. +- After JJ commit: verify with `jj --no-pager log -n 4 --no-graph` + +### 5. Push to Remote +- **CRITICAL**: NEVER push without explicit user confirmation +- NEVER use `--force` unless explicitly requested + +### 6. Other Operations +- History, branches/bookmarks, worktrees: See reference files + +## Key JJ vs Git Differences + +- **JJ colocated**: When `.jj` + `.git` coexist, prefer jj commands +- **No staging in JJ**: All changes auto-tracked +- **JJ conflicts**: First-class objects (can commit conflicts) +- See `git_workflows.md` for full mapping table + +## Example: Commit Workflow + +1. Run `scripts/repo_check.sh` → Determine VCS type +2. Show current status/diff +3. **JJ**: Check for WIP commits (see `jj_workflows.md` WIP pattern) +4. **ASK USER** to confirm commit message +5. Execute commit only after confirmation +6. Verify with log output + +## Safety Guidelines + +1. **NEVER commit or push without explicit user confirmation** +2. **NEVER use force push** unless user explicitly requests it +3. **Verify branch/bookmark** before pushing (avoid main/master/staging) +4. **Pre-commit**: Show summary, suggest message, wait for approval +5. **Pre-push**: Show commits ahead of remote, verify target + +## Integration Notes + +- Favor `jj` when `.jj` folder exists +- Use `TodoWrite` for multi-step VCS workflows +- Read reference files only when command syntax is uncertain diff --git a/skills/git-jj/references/git_workflows.md b/skills/git-jj/references/git_workflows.md new file mode 100644 index 0000000..aa40dd0 --- /dev/null +++ b/skills/git-jj/references/git_workflows.md @@ -0,0 +1,32 @@ +# Git Quick Reference + +Standard git commands assumed known. This covers less common operations and JJ mapping. 
+ +## Less Common Commands + +```bash +git add -p # Interactive hunk staging +git restore --staged # Unstage (preferred over reset) +git switch -c # Create+switch (preferred over checkout -b) +git reflog # Local HEAD history (recovery) +git worktree add ../ # Parallel working directory +git stash show -p stash@{0} # Show stash diff +``` + +## Safety Reminders + +- Confirm before: `reset --hard`, `branch -D`, `push --force`, `clean -fd` +- Recovery: `git reflog` + `git branch ` + +## Git to JJ Mapping + +| Git | JJ | +|-----|-----| +| branch | bookmark | +| stash | `jj new` (checkpoint) | +| worktree | workspace | +| checkout/switch | `jj edit` | +| commit --amend | `jj describe` or `jj squash` | +| rebase | `jj rebase` (non-destructive) | +| cherry-pick | `jj duplicate` | +| merge | `jj new ` | \ No newline at end of file diff --git a/skills/git-jj/references/jj_bookmarks.md b/skills/git-jj/references/jj_bookmarks.md new file mode 100644 index 0000000..d779c70 --- /dev/null +++ b/skills/git-jj/references/jj_bookmarks.md @@ -0,0 +1,81 @@ +# JJ Bookmarks Reference + +Source: https://docs.jj-vcs.dev/latest/bookmarks/ + +## Core Concepts + +- **Named pointer** to a revision - similar to Git branch but NO "current branch" concept +- **Auto-moves** when target is rewritten (`jj rebase`, `jj squash`, `jj describe`) +- **Auto-deletes** when commit is abandoned +- Usable anywhere a revision is expected: `jj new main`, `jj rebase -r feature -d main` +- In colocated repos, bookmarks map directly to Git branches + +## Commands + +| Action | Command | +|--------|---------| +| List local | `jj bookmark list` | +| List all (local + remote) | `jj bookmark list --all` | +| Create at @ | `jj bookmark create ` | +| Point to revision | `jj bookmark set -r ` | +| Delete | `jj bookmark delete ` | +| Rename | `jj bookmark rename ` | +| Track remote | `jj bookmark track @` | +| Untrack remote | `jj bookmark untrack @` | +| Push single | `jj git push -b ` | +| Push all | `jj git push --all` | + +Alias: `jj b` for `jj bookmark` (e.g., `jj b c` for create, `jj b d` for delete) + +## Remote & Tracking + +- Address remote: `@` (e.g., `main@origin`) +- Can track same-name bookmarks on multiple remotes +- Auto-tracked on: clone (default remote), first push of local bookmark +- Enable auto-track fetched: `git.auto-local-bookmark = true` + +## Status Indicators + +| Suffix | Meaning | +|--------|---------| +| `*` | Local differs from remote (needs push) | +| `??` | Conflicted (multiple targets) | +| `@` | Remote snapshot reference | + +## Conflict Resolution + +When bookmark becomes conflicted (divergent updates): +```bash +jj log -r 'all:' # Inspect divergent commits +jj new 'all:' # Merge path: create merge commit +jj rebase -r -d # Rebase path +jj bookmark set -r # Finalize +``` + +## Safe Push Pattern + +1. `jj git fetch` - sync remote state +2. `jj status` - check for conflicts (look for `??` markers) +3. 
`jj git push -b ` - push single bookmark + +Push safety checks (force-with-lease equivalent): +- Remote position matches last known +- Local bookmark not conflicted +- Remote bookmark tracked (or use `--allow-new`) + +## Revset with Bookmarks + +```bash +jj new # New commit on top +:: # Ancestors +:: # Descendants +heads(all:) # All heads involving bookmark +``` + +## Git Comparison + +| Git | JJ | +|-----|-----| +| Branch HEAD moves on commit | Bookmark only moves on rewrite | +| Single upstream per branch | Can track multiple remotes | +| Force push risk | Built-in force-with-lease protection | diff --git a/skills/git-jj/references/jj_revset.md b/skills/git-jj/references/jj_revset.md new file mode 100644 index 0000000..ebe8bc4 --- /dev/null +++ b/skills/git-jj/references/jj_revset.md @@ -0,0 +1,87 @@ +# JJ Revset Language Reference + +Source: https://docs.jj-vcs.dev/latest/revsets/ + +Revsets select sets of revisions for inspection, history manipulation, and automation. + +## Atoms + +| Atom | Meaning | +|------|---------| +| `@` | Working-copy commit | +| `` | Specific commit (short/full hex) | +| `` | Latest visible commit for change | +| `` | Bookmark target | +| `@` | Remote bookmark snapshot | +| `root()` | Virtual root | +| `trunk()` | Main branch | + +## Operators + +| Syntax | Meaning | +|--------|---------| +| `X::` | Ancestors of X (inclusive) | +| `::X` | Descendants of X (inclusive) | +| `A..B` | Reachable from B but not A | +| `A \| B` | Union | +| `A & B` | Intersection | +| `A - B` | Difference | + +## Functions + +| Function | Purpose | +|----------|---------| +| `all()` | All commits (visible + hidden) | +| `visible()` | Non-abandoned commits | +| `ancestors(X)` / `descendants(X)` | Ancestry traversal | +| `parents(X)` / `children(X)` | Direct relatives | +| `heads(X)` | Commits with no descendants in X | +| `author("name")` | By author substring | +| `description("regex")` | By description regex | +| `file("path")` | Commits affecting path | +| `present(X)` | Filter to visible form | + +## Common Patterns + +```bash +# Work since branching from main +main..@ + +# Commits affecting file +descendants(main) & file("src/lib.rs") + +# Ahead of remote (push candidates) +feature - feature@origin + +# Behind remote (need to pull) +feature@origin - feature + +# Find WIP commits +description("WIP") & ::@ + +# Filter by author +author("alice") & main..@ + +# Divergent heads (conflicts) +heads() + +# Hidden/abandoned commits +all() - visible() +``` + +## File Filtering + +```bash +file("src/") # Commits under src/ +file("src/") - file("src/test/") # Exclude test dir +descendants(main) & file("*.rs") # Rust files since main +``` + +## Pitfalls + +| Issue | Fix | +|-------|-----| +| `A..B` direction confusion | B's ancestors minus A's ancestors | +| Using `all()` unnecessarily | Use `visible()` for normal queries | +| Hidden commits not showing | Use `all()` or `present()` | +| Shell expansion | Quote paths and regex | diff --git a/skills/git-jj/references/jj_template.md b/skills/git-jj/references/jj_template.md new file mode 100644 index 0000000..5a69af6 --- /dev/null +++ b/skills/git-jj/references/jj_template.md @@ -0,0 +1,86 @@ +# JJ Templating Language + +Source: https://docs.jj-vcs.dev/latest/templates/ + +**Use templates only when** plain `jj log` output is insufficient: machine-readable output, conditional formatting, scripting pipelines. + +## Invocation + +```bash +jj log -r @ --template '...' # Single commit +jj log -r 'trunk()..@' --template '...' 
# Range +jj log --no-pager --template '...' | jq ... # Pipe to tools +``` + +## Commit Fields + +| Field | Description | +|-------|-------------| +| `commit_id` / `.short()` | Full/short commit id | +| `change_id` / `.short()` | Change identity | +| `description` / `subject` | Full message / first line | +| `author.email` / `author.name` | Author metadata | +| `timestamp` | Commit timestamp | +| `bookmarks` / `tags` | Lists | +| `conflict` | Boolean | +| `is_empty` | Boolean | +| `divergent` | Boolean | +| `parents` | List of parent commits | + +## Functions + +| Function | Example | +|----------|---------| +| `format()` | `format("[", change_id.short(), "] ", subject)` | +| `if(cond, a, b)` | `if(conflict, "⚠", "")` | +| `short()` | `commit_id.short()` | +| `json()` | `author.email.json()` | +| `contains()` | `subject.contains("WIP")` | +| `truncate(n)` | `description.truncate(80)` | +| `indent(n)` | `description.indent(4)` | +| `join()` | `bookmarks.join(", ")` | +| `map()` / `filter()` | `bookmarks.map(lower())` | +| `len()` | `parents.len()` | + +Chaining: `bookmarks.filter(!contains("WIP")).map(lower()).join(" ")` + +## Examples + +```bash +# Minimal +change_id.short() " " subject + +# WIP emphasis +format(if(subject.contains("WIP"),"[WIP] ",""), change_id.short(), " ", subject) + +# Conflicts +format(if(conflict,"⚠ ",""), change_id.short(), " ", subject) + +# Bookmarks +format(change_id.short()," ",bookmarks.join(",")," ",subject) + +# JSON output +format( + '{"change":"', change_id.short().json(), '",', + '"author":"', author.email.json(), '",', + '"conflict":', if(conflict, 'true','false'), '}' +) +``` + +## Revset + Template + +```bash +# Divergent heads with conflicts +jj log -r 'heads(all()) & conflicts()' --template 'format(change_id.short()," ",subject)' + +# Commits ahead of remote +jj log -r 'myfeature - myfeature@origin' --template 'format("AHEAD ", change_id.short())' +``` + +## Pitfalls + +| Issue | Fix | +|-------|-----| +| Unescaped quotes in JSON | Use `.json()` on dynamic fields | +| Slow on huge histories | Narrow revset first | +| Complex templates | Build incrementally with `format()` | diff --git a/skills/git-jj/references/jj_workflows.md b/skills/git-jj/references/jj_workflows.md new file mode 100644 index 0000000..f5052a9 --- /dev/null +++ b/skills/git-jj/references/jj_workflows.md @@ -0,0 +1,177 @@ +# Jujutsu (jj) Workflows Guide + +**Version**: jj 0.9+ + +## Core Concepts + +**Key differences from Git:** +- **Auto-snapshotting**: Most commands automatically commit working-copy changes +- **No staging area**: All tracked files are automatically included +- **`@` symbol**: Always represents current working-copy commit +- **Immutable history**: Operations create new commits; old commits remain accessible + +## Quick Reference + +### Status & Inspection +```bash +jj status # Snapshot and show status +jj diff --git # Working copy changes +jj diff --git -r # Changes in specific revision +jj log -n 10 # History (graph by default) +jj show # Commit details +``` + +### Creating & Modifying Commits +```bash +jj describe -m "message" # Update current commit description +jj describe -r -m "msg" # Update specific revision +jj commit -m "message" # Create new commit, move @ forward +jj new -m "message" # Create empty checkpoint commit +jj squash # Fold @ into parent (amend-like) +jj squash -f -t # Move changes between commits +jj abandon # Remove commit from history +jj duplicate # Cherry-pick equivalent +jj rebase -r -d # Rebase revision +jj edit # Edit specific revision 
directly +``` + +### File Operations +```bash +jj restore # Restore from parent +jj restore --from # Restore from specific revision +jj file track # Start tracking +jj file untrack # Stop tracking +``` + +### Bookmarks (Branches) +```bash +jj bookmark list # List all +jj bookmark create # Create at @ +jj bookmark set -r # Point to revision +jj bookmark delete # Delete +jj bookmark track @ # Set up tracking +``` + +### Remotes +```bash +jj git fetch # Fetch all +jj git fetch --remote # Fetch specific +jj git push -b # Push bookmark +jj git push --all # Push all bookmarks +``` + +### Workspaces +```bash +jj workspace add # Create (like git worktree) +jj workspace list # List all +jj workspace forget # Remove +``` + +## WIP Commit Pattern + +```bash +# Start work +jj describe -m "WIP: feature" + +# Continue working (auto-snapshots on jj commands) + +# Finalize (if @ is the WIP commit) +jj commit -m "feat: completed" +# use describe with `-r` if wip commits is in parents. + +# Or squash into parent +jj squash +``` + +**Before committing**, check state: +```bash +jj --no-pager status +jj --no-pager log -r '@-' # Check parent +``` + +## Conflict Resolution + +JJ allows committing conflicts and resolving them later. + +### Recommended: Resolve in New Commit +```bash +jj new # Create child +# ... edit files to resolve ... +jj diff # Review resolutions +jj squash # Merge back into parent +``` + +### Alternative: Direct Edit +```bash +jj edit +# ... resolve conflicts ... +jj describe -m "resolved conflicts" +``` + +### External Tool +```bash +jj resolve # Opens merge tool (2-sided conflicts only) +``` + +## Git Command Mappings + +| Git | jj | +|-----|-----| +| `git status` | `jj status` | +| `git diff` | `jj diff --git` | +| `git add .` | (automatic) | +| `git commit -m` | `jj commit -m` | +| `git commit --amend` | `jj squash` or `jj describe` | +| `git log` | `jj log` | +| `git branch` | `jj bookmark list` | +| `git checkout ` | `jj edit ` | +| `git merge ` | `jj new ` | +| `git rebase` | `jj rebase -r -d ` | +| `git cherry-pick` | `jj duplicate ` | +| `git stash` | `jj new` (checkpoint) | +| `git worktree add` | `jj workspace add` | + +## Revision Syntax + +- `@` - Working copy +- `@-` - Parent of working copy +- `` - Bookmark target +- `::` - Ancestors +- `::` - Descendants +- `..` - Range +- `trunk()` - Main branch +- `file('')` - Revisions modifying file + +## Fileset Language + +### Patterns +| Pattern | Description | +|---------|-------------| +| `"path"` | CWD-relative prefix (recursive) | +| `file:"path"` | Exact file match | +| `glob:"*.rs"` | CWD-relative glob | +| `root:"path"` | Workspace-relative prefix | +| `root-glob:"**/*.rs"` | Workspace-relative glob | + +### Operators +| Op | Meaning | Example | +|----|---------|---------| +| `~x` | NOT | `~Cargo.lock` | +| `x & y` | AND | `src & glob:"*.rs"` | +| `x ~ y` | MINUS | `src ~ glob:"*test*"` | +| `x \| y` | OR | `src \| tests` | + +### Examples +```bash +jj diff '~Cargo.lock' # Exclude file +jj diff 'glob:"**/*.md"' # Only markdown +jj log -r 'trunk()..@ & file("src/core")' # Commits touching path +jj diff --git -f "trunk()" -t "@" # Diff vs trunk +``` + +## Important Notes + +1. **Always use `-m` flag** in non-interactive contexts (scripts, LLM agents) +2. **Check status before critical ops**: `jj --no-pager status` +3. **Conflicts are first-class**: Can be committed, shared, resolved incrementally +4. 
**Operations are recoverable**: `jj op log` shows history diff --git a/skills/git-jj/scripts/repo_check.sh b/skills/git-jj/scripts/repo_check.sh new file mode 100755 index 0000000..f9a5eec --- /dev/null +++ b/skills/git-jj/scripts/repo_check.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# repo_check.sh - Determine whether to use jj or git for this repository +# Outputs to stdout: "jj", "git", or "no-repo" +# Always exits with 0 on success +# +# Detection priority: +# 1. .jj folder exists → "jj" (even if git repo also present) +# 2. Git repository detected → "git" +# 3. Neither found → "no-repo" + +set -e + +# Check for jj repository - priority 1 +# Note: jj often operates atop git, so check .jj first +# Use `jj root` to detect jj workspace from any subdirectory +if jj root > /dev/null 2>&1; then + echo "jj" + exit 0 +fi + +# Check for git repository - priority 2 +if git rev-parse --git-dir > /dev/null 2>&1; then + echo "git" + exit 0 +fi + +# Neither found - user needs to initialize +echo "no-repo" +exit 0 diff --git a/skills/kiro/SKILL.md b/skills/kiro/SKILL.md new file mode 100644 index 0000000..43dca37 --- /dev/null +++ b/skills/kiro/SKILL.md @@ -0,0 +1,272 @@ +--- +name: kiro +description: "This skill should be used when managing structured spec-driven development workflows. Triggered by phrases like [kiro], [check kiro status], [load kiro], [update kiro], [kiro workflow], or when the user mentions kiro spec, create new kiro spec. Use this for managing requirements, design docs, and implementation tasks in kiro" +--- + +# Kiro - Structured Spec-Driven Development Workflow + +## Overview + +Kiro is a workflow management system for structured spec-driven development that uses spec files to track progress through requirements gathering, design, and implementation phases. This skill enables Claude to manage the complete lifecycle of a kiro spec, from initial requirements to final implementation. + +The kiro spec dir defaults to /llm/kiro// but can be customized per spec. + +## Core Workflow Structure + +Every kiro spec contains four key spec files, which need to be updated and tracked throughout the workflow: + +1. **`claude.md`** - Control board with session notes, decisions, and spec status tracking +2. **`requirements.md`** - User stories and acceptance criteria +3. **`design.md`** - Architecture, data models, and implementation details +4. **`tasks.md`** - Implementation tasks with status tracking + +## Workflow Decision Tree + +When the user mentions kiro, follow this decision tree: + +1. **Does the user want to create a new kiro spec?** + - Keywords: "new kiro", "create kiro spec", "start new kiro" + - Action: Run `agpod kiro pr-new --desc "" --template default` + +2. **Does the user want to select/load an existing spec?** + - Keywords: "load kiro", "check kiro", "select kiro", "which kiro" + - Action: List specs and help user select one + +3. **Is the user already working on a spec?** + - Action: Continue with the current workflow phase + +## Phase 1: Loading a Kiro Spec + +### Step 1: List Available Specs + +Run the following command to get all kiro specs: + +```bash +agpod kiro --json pr-list +``` + +**Expected JSON output format:** +```json +[ + { + "name": "spec-name", + "path": "", + "created": "2024-01-15T10:30:00Z" + } +] +``` + +Note: the `path` is relative to the project root, or an absolute path if it starts with `/`.
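If `jq` is available, the JSON above can be filtered directly; this is a sketch (the field names follow the example output, and the keyword `auth` is only an illustration):

```bash
# Show every spec as "name<TAB>path"
agpod kiro --json pr-list | jq -r '.[] | "\(.name)\t\(.path)"'

# Resolve the path of a spec whose name matches a user keyword (case-insensitive)
agpod kiro --json pr-list | jq -r '.[] | select(.name | test("auth"; "i")) | .path'
```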
+ +### Step 2: Parse User Selection + +If user mentions: +- **Spec number/index**: Select the corresponding spec from the list +- **Spec name/keyword**: Match against spec names using fuzzy matching +- **No specific spec**: Ask user to choose from the list + +### Step 3: Read the Control Board + +Once spec is selected, read `/claude.md` to understand: +- Current spec file statuses (empty, draft, ready) +- Previous decisions made +- Outstanding questions and risks +- Recent findings + +**Example:** +```bash +Read /claude.md +``` + +## Phase 2: Requirements Gathering + +**When to enter this phase:** +- `claude.md` shows `requirements.md: empty` or `requirements.md: draft` + +### Workflow + +1. **Read current requirements** + ```bash + Read /requirements.md + ``` + +2. **Gather requirements from user** + - Ask clarifying questions about user stories + - Define acceptance criteria with WHEN/THEN format + - Document each requirement using semantic IDs (e.g., REQ-AUTH-LOGIN, REQ-DATA-EXPORT) + - Use pattern: REQ-{DOMAIN}-{ACTION} for stable, descriptive identifiers + - Add all requirement IDs to the "Requirements Order" section at the top of requirements.md + +3. **Update requirements.md** + - Add user stories in format: "As a [role], I want to [action], so that [benefit]" + - Define acceptance criteria for each requirement + - Follow the existing structure already present in the generated `requirements.md` file + +4. **Update claude.md status** + - Change `requirements.md: draft` when in progress + - Change to `requirements.md: ready` when complete and approved + - Document key decisions in Session Notebook → Decisions + +**Critical Rule:** Never proceed to design phase without user approval of requirements. + +## Phase 3: Design Documentation + +**When to enter this phase:** +- `claude.md` shows `requirements.md: ready` AND `design.md: empty` or `design.md: draft` + +### Workflow + +1. **Read current design and requirements** + ```bash + Read /design.md + Read /requirements.md + ``` + +2. **Create design for each requirement** + + For each requirement ID (e.g., REQ-AUTH-LOGIN), create a corresponding design section with: + + - **Overview**: Purpose, key objectives, non-goals + - **Architecture**: System design diagrams, data flow + - **Components and Interfaces**: Component responsibilities + - **Data Models**: Configuration, data structures, API contracts + - **Implementation Details**: Algorithms, PoC findings (if needed) + - **Performance & Security Considerations** + - **Error Handling**: Error scenarios and strategies + - **Testing Strategy**: Unit and integration tests + - **Migration and Rollout**: Deployment strategy (if needed) + +3. **Present design options** + - Offer multiple approaches when applicable + - Explain trade-offs clearly + - Let user choose the preferred approach + - Ask oracle subagent with `Task` tool to review the design + +4. **Update design.md** + - Follow the existing structure already present in the generated `design.md` file + - Include ASCII diagrams for system architecture + - Keep it focused and scannable + +5. **Update claude.md** + - Change `design.md: draft` when in progress + - Change to `design.md: ready` when complete and approved + - Document design decisions in Session Notebook → Decisions + - Note any identified risks in Session Notebook → Risks + +**Critical Rule:** Never proceed to implementation without user approval of design. 
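Before moving into the next phase, the spec-file statuses can be checked quickly; this is a sketch that assumes the control board keeps status lines in the `file.md: status` form described in this skill (`$KIRO_SPEC_DIR` is only a placeholder for the spec directory):

```bash
# Print the current spec-file status lines from the control board
# $KIRO_SPEC_DIR is a placeholder for the selected kiro spec directory
grep -E '(requirements|design|tasks)\.md: (empty|draft|ready)' "$KIRO_SPEC_DIR/claude.md"
```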
+ +## Phase 4: Task Planning + +**When to enter this phase:** +- `claude.md` shows `design.md: ready` AND `tasks.md: empty` or `tasks.md: draft` + +### Workflow + +1. **Read design and create tasks** + ```bash + Read /design.md + Read /tasks.md + ``` + +2. **Break down each requirement into tasks** + + For each requirement ID (e.g., REQ-AUTH-LOGIN), create corresponding TASK-REQ-{ID} section (e.g., TASK-REQ-AUTH-LOGIN) with: + + - **Phases**: Group related tasks (e.g., Phase 1: Core Implementation, Phase 2: Integration) + - **Task details** for each item: + - Checkbox for completion tracking + - **Status**: ⬜ Not Started / 🟡 In Progress / ✅ Completed / ⚠️ Blocked + - **Prompt**: Detailed implementation instructions + - **Requirements**: Reference to requirement ID (e.g., REQ-AUTH-LOGIN) + - **Design ref**: Section reference in design.md + - **Files**: List of files to modify + +3. **Update tasks.md** + - Follow the existing structure already present in the generated `tasks.md` file + - Keep tasks atomic and independently executable + - Order tasks by dependencies + +4. **Update claude.md** + - Change `tasks.md: draft` when in progress + - Change to `tasks.md: ready` when complete and approved + +**Critical Rule:** Never start implementation without user approval of tasks. + +## Phase 5: Implementation + +**When to enter this phase:** +- `claude.md` shows `tasks.md: ready` +- User explicitly requests code changes + +### Workflow + +1. **Read tasks and select next task** + ```bash + Read /tasks.md + ``` + +2. **Confirm task with user** + - Show which task will be implemented + - Verify user approval before coding + +3. **Implement the task** + - Follow the prompt in the task definition + - Reference the design document + - Verify requirements are met + +4. **Update task status** + - Mark task as 🟡 In Progress when starting + - Update checkbox `[x]` when complete + - Change status to ✅ Completed + - Document any findings in `claude.md` → Session Notebook → Findings + +5. **Update claude.md after each task** + - Add any new decisions to Session Notebook → Decisions + - Note any risks encountered + - Keep spec file status current + +## Critical Rules (Always Enforce) + +1. **Status Tracking**: Keep the "Spec files status" section in `claude.md` current at all times +2. **Never Skip Approval**: Never proceed to next phase without explicit user approval +3. **Always Ask Questions**: When requirements are unclear, ask rather than assume +4. **Present Options**: Offer choices for user to decide rather than making assumptions +5. **Only Edit Code When Requested**: Research and planning phases should not modify code +6. **Document Decisions**: Update Session Notebook → Decisions whenever user makes a choice +7. **Task Checkboxes**: All tasks must have checkboxes for completion tracking +8. 
**Sync Updates**: Update `claude.md` immediately after changes to other spec files + +## Status Values + +Spec files can have three statuses: +- **empty**: File is a template/placeholder, not yet filled +- **draft**: Work in progress, not approved +- **ready**: Complete and approved by user + +## Useful Commands + +### List all kiro specs +```bash +agpod kiro --json pr-list +``` + +### Create new kiro spec +```bash +agpod kiro pr-new --desc "" --template default +``` + +Additional options: +- `--git-branch`: Create and checkout git branch +- `--open`: Open in editor after creation +- `--force`: Force creation even if directory exists + +To list templates, run `agpod kiro --json list-templates` + +## Session Management Best Practices + +1. **Start every kiro session** by reading `claude.md` to understand context +2. **Review previous decisions** before proposing new solutions +3. **Update session notebook** in real-time as conversations progress +4. **Ask questions early** rather than making incorrect assumptions +5. **Keep user informed** of which phase and file you're currently working on +6. **Validate completion** of each phase before moving to the next phase diff --git a/skills/local-research/SKILL.md b/skills/local-research/SKILL.md new file mode 100644 index 0000000..ab1155e --- /dev/null +++ b/skills/local-research/SKILL.md @@ -0,0 +1,189 @@ +--- +name: local-research +description: "This skill should be used when performing codebase research with markdown documentation persistence. Triggered by phrases like [local research], [quick research], [load local research], [init local research], [read local research ...]." +--- + +# Local Research + +## Overview + +Perform comprehensive codebase research with persistent markdown documentation stored in `~/workspace/llm/research/`. This skill integrates multiple research tools, including the fast-repo-context skill, knowledge graph queries, and external resources, to create structured research documentation. + +Use absolute file paths in research documents for easy sharing across different projects/repos. + +### Automation Script + +The skill includes an automation script at `~/.claude/skills/local-research/scripts/research_ops.py` that handles: +- Generating descriptive research names from user queries +- Creating research directories and markdown files with timestamps +- Listing and locating existing research files by keywords +- Providing CLI interface for all research operations + +## When to Use This Skill + +Use this skill when: +- Need to research and analyze codebase structure and patterns +- Want to create persistent research documentation +- Need to load previous research findings +- User says "local research", "quick research", or "load local research" + +## Core Workflow + +### Research Generation Process (when user explicitly requests new research) + +1. **Generate Research Name**: Create a descriptive research name based on the user input as ``; the input may contain typos, so improve it. +2. **Create Research File**: `python3 ~/.claude/skills/local-research/scripts/research_ops.py create ""` +3. **Ask Clarifying Questions**: Ask the user for more details about the research scope +4. **Execute Research Workflow**: Use integrated tools to gather information +5. **Document Findings**: Write results to the research markdown file, using absolute file paths when writing; do not use the `~` path abbreviation. + +### Loading Research Process (when the user mentions loading or updating a doc, or provides doc keywords) + +When the user requests to "load local research" or similar: +1.
**List Research Files**: `python3 ~/.claude/skills/local-research/scripts/research_ops.py list` +2. **Identify Target**: `python3 ~/.claude/skills/local-research/scripts/research_ops.py locate ` +3. **Load Content**: Read and display the summary of relevant research markdown file + +## Research Tools and Methods + +### Primary Research Tools + +1. **Fast Context Skill** (`fast-repo-context`): + - load fast-repo-context skill + - Use for comprehensive codebase understanding + - Leverages repomix-generated XML for efficient searching + +2. **Knowledge Graph** (`kg`): + - Query related keywords and existing research + - Use `mcp__kg__query_graph` with semantic search + - Set `group_id` to organize research by project/topics + +3. **External Resources**: + - **Brightdata**: Use `mcp__brightdata__search_engine` for web research + - **GitHub**: Use `mcp__github__search_code` or `mcp__github__search_repositories` for external code reference + +### Research Execution Order + +1. **Initialize Research Environment**: + ```bash + python3 ~/.claude/skills/local-research/scripts/research_ops.py create "" + ``` + +2. **Fast Context Analysis**: + - Extract code structure, patterns, and key files + - Document findings in research file + +3. **Knowledge Graph Integration**: + - Query `kg` for related information + - Use semantic search with research keywords + - Integrate findings into research documentation + +4. **External Research** (if needed): + - Use Brightdata for web research on related topics + - Use GitHub tools for external examples and best practices + - Add external insights to research file + +## Research Documentation Structure + +Each research markdown file should follow this structure: + +```markdown +# + +- **Created**: +- **Research Query**: + +## Executive Summary + + +## Codebase Analysis + + +## Knowledge Graph Insights + + +## External Research + + +## Key Findings + + +## Recommendations + + +## Files Referenced + + +## Next Steps + +``` + +- Note: file path in the research doc must use absolute path, do not use `~` abbreviation, because this doc will be shared across different project/repos. + +## Loading Research + +When user wants to load existing research: + +1. **Available Research**: List all research files with timestamps +2. **Search Matching**: Match user keywords to research names/content +3. 
**Display Findings**: Present the complete research file content + +### Script Commands + +```bash +# Create new research file +python3 ~/.claude/skills/local-research/scripts/research_ops.py create "" + +# List all research files (sorted by timestamp) +python3 ~/.claude/skills/local-research/scripts/research_ops.py list + +# Locate research file by keywords +python3 ~/.claude/skills/local-research/scripts/research_ops.py locate + +# Read specific research file +cat ~/workspace/llm/research/-.md +``` + +## Integration with Other Skills + +### Fast Context Integration +- Always invoke `fast-repo-context` skill for codebase analysis +- Follow its mandatory checklist: check repomix freshness, search XML, then optionally KG +- Document steps completed in research file + +### Knowledge Graph Integration +- Use consistent `group_id` for related research projects +- Store research summaries in KG for future retrieval +- Query KG before starting new research to avoid duplication + +## Research Naming Conventions + +Generate descriptive research names: +- Convert user input to kebab-case +- Include domain/technology focus +- Example inputs to names: + - "analyze authentication system" → "authentication-system-analysis" + - "react performance issues" → "react-performance-investigation" + - "api design patterns" → "api-design-patterns-research" + +## Error Handling + +- If research directory creation fails, check permissions and path +- If fast-repo-context claude skill is unavailable, fall back to direct code search +- If external resources are unreachable, continue with internal research +- Always document any limitations or issues encountered + +# Example + + + +please load local research on "authentication system analysis" and update the document with any new findings. + + + +```bash +python3 ~/.claude/skills/local-research/scripts/research_ops.py locate authentication system analysis +``` +Good, found the research file at ``. Now loading the content and summarizing the key points for you. + + diff --git a/skills/local-research/scripts/research_ops.py b/skills/local-research/scripts/research_ops.py new file mode 100755 index 0000000..73575c5 --- /dev/null +++ b/skills/local-research/scripts/research_ops.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python3 +""" +Research Helper Script for Local Research Skill + +This script provides utility functions for managing research files, +generating timestamps, and handling research naming conventions. +""" + +import re +import sys +from datetime import datetime +from pathlib import Path + + +def generate_research_name(user_input: str) -> str: + """ + Generate a descriptive research name based on user input. 
+ + Args: + user_input: The original user query/description + + Returns: + A kebab-case research name + """ + # Remove common prefixes and normalize + input_clean = user_input.lower() + prefixes_to_remove = [ + "analyze ", + "investigate ", + "research ", + "look at ", + "examine ", + "study ", + "explore ", + "understand ", + ] + + for prefix in prefixes_to_remove: + if input_clean.startswith(prefix): + input_clean = input_clean[len(prefix) :] + + # Map common patterns to descriptive names + pattern_map = { + "auth": "authentication", + "api": "api", + "db": "database", + "ui": "user-interface", + "ux": "user-experience", + "perf": "performance", + "sec": "security", + "test": "testing", + "config": "configuration", + } + + # Split into words and process each + words = input_clean.split() + processed_terms = [] + + for word in words: + # Remove punctuation and apply pattern mapping + word_clean = re.sub(r"[^a-z0-9-]", "", word) + if word_clean: + # Apply pattern mapping + if word_clean in pattern_map: + word_clean = pattern_map[word_clean] + processed_terms.append(word_clean) + + # Remove duplicates while preserving order + seen = set() + unique_terms = [] + for term in processed_terms: + if term not in seen: + seen.add(term) + unique_terms.append(term) + + # Join with hyphens and limit length + research_name = "-".join(unique_terms) + if len(research_name) > 80: + research_name = "-".join(unique_terms[:5]) + + return research_name or "research" + + +def create_research_file(research_name: str, user_query: str) -> str: + """ + Create a new research file with timestamp and return the path. + + Args: + research_name: The generated research name + user_query: The original user query + + Returns: + Path to the created research file + """ + # Create research directory + research_dir = Path.home() / "workspace" / "llm" / "research" + research_dir.mkdir(parents=True, exist_ok=True) + + # Generate timestamp + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + + # Create file path + filename = f"{research_name}-{timestamp}.md" + filepath = research_dir / filename + + # Create file with initial header + with open(filepath, "w") as f: + f.write(f"# {research_name.replace('-', ' ').title()}\n") + f.write(f"- **Created**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") + f.write(f"- **Research Query**: {user_query}\n\n") + f.write("## Executive Summary\n\n") + f.write("## Codebase Analysis\n\n") + f.write("## Knowledge Graph Insights\n\n") + f.write("## External Research\n\n") + f.write("## Key Findings\n\n") + f.write("## Recommendations\n\n") + f.write("## Files Referenced\n\n") + f.write("## Next Steps\n\n") + + return str(filepath) + + +def list_research_files() -> list: + """ + List all research files sorted by timestamp (newest first). + + Returns: + List of file paths sorted by modification time + """ + research_dir = Path.home() / "workspace" / "llm" / "research" + if not research_dir.exists(): + return [] + + files = [] + for file_path in research_dir.glob("*.md"): + files.append((file_path.stat().st_mtime, file_path)) + + # Sort by modification time (newest first) + files.sort(key=lambda x: x[0], reverse=True) + return [file_path for _, file_path in files] + + +def locate_research_file(keywords: list) -> str: + """ + Find research file matching keywords. 
+ + Args: + keywords: List of keywords to search for + + Returns: + Path to the best matching file or None if not found + """ + files = list_research_files() + if not files: + return None + + if not keywords: + # Return most recent file + return str(files[0]) if files else None + + # Simple keyword matching in filename and content + best_match = None + best_score = 0 + + for file_path in files: + score = 0 + filename = file_path.name.lower() + + # Check filename match + for keyword in keywords: + keyword_lower = keyword.lower() + if keyword_lower in filename: + score += 3 + + # Check content match + try: + with open(file_path, "r") as f: + content = f.read().lower() + for keyword in keywords: + keyword_lower = keyword.lower() + if keyword_lower in content: + score += 1 + except Exception: + pass + + if score > best_score: + best_score = score + best_match = file_path + + return str(best_match) if best_match else None + + +def main(): + """CLI interface for the research helper.""" + if len(sys.argv) < 2: + print("Usage: python research_helper.py [args...]") + print("Commands:") + print(" create - Create new research file") + print(" list - List all research files") + print(" locate - Locate research file by keywords") + sys.exit(1) + + command = sys.argv[1] + + if command == "create": + if len(sys.argv) < 3: + print("Error: create command requires user query") + sys.exit(1) + + user_query = " ".join(sys.argv[2:]) + research_name = generate_research_name(user_query) + filepath = create_research_file(research_name, user_query) + print(filepath) + + elif command == "list": + files = list_research_files() + for file_path in files: + print(f"{file_path}") + + elif command == "locate": + keywords = sys.argv[2:] if len(sys.argv) > 2 else [] + filepath = locate_research_file(keywords) + if filepath: + print(filepath) + else: + print("No matching research file found") + sys.exit(1) + + else: + print(f"Error: Unknown command '{command}'") + sys.exit(1) + + +if __name__ == "__main__": + main() + diff --git a/skills/rust-architect/SKILL.md b/skills/rust-architect/SKILL.md new file mode 100644 index 0000000..8d17603 --- /dev/null +++ b/skills/rust-architect/SKILL.md @@ -0,0 +1,3332 @@ +--- +name: rust-architect +description: Use when designing or architecting Rust applications, creating comprehensive project documentation, planning async/await patterns, defining domain models with ownership strategies, structuring multi-crate workspaces, or preparing handoff documentation for Director/Implementor AI collaboration +--- + +# Rust Project Architect + +You are an expert Rust system architect specializing in creating production-ready systems with comprehensive documentation. You create complete documentation packages that enable Director and Implementor AI agents to successfully build complex systems following best practices from the Rust community, The Rust Programming Language book, and idiomatic Rust patterns. + +## Core Principles + +1. **Ownership & Borrowing** - Leverage Rust's ownership system for memory safety +2. **Zero-Cost Abstractions** - Write high-level code that compiles to fast machine code +3. **Fearless Concurrency** - Use async/await with tokio for safe concurrent programming +4. **Error Handling with Result** - No exceptions, use Result and proper propagation +5. **Type Safety** - Use the type system to prevent bugs at compile time +6. **Cargo Workspaces** - Organize code into multiple crates for modularity +7. 
**Test-Driven Development** - Write tests first, always + +## When to Use This Skill + +Invoke this skill when you need to: + +- Design a new Rust application from scratch +- Create comprehensive architecture documentation +- Plan async/await patterns and concurrent system design +- Define domain models with ownership and borrowing strategies +- Structure multi-crate workspaces for modular organization +- Create Architecture Decision Records (ADRs) +- Prepare handoff documentation for AI agent collaboration +- Set up guardrails for Director/Implementor AI workflows +- Design web services, CLI tools, or backend systems +- Plan background task processing with tokio tasks +- Structure event-driven systems with async streams + +## Your Process + +### Phase 1: Gather Requirements + +Ask the user these essential questions: + +1. **Project Domain**: What is the system for? (e.g., web service, CLI tool, data processing, embedded system) +2. **Tech Stack**: Confirm Rust + tokio + axum/actix + sqlx/diesel? +3. **Project Location**: Where should files be created? (provide absolute path) +4. **Structure Style**: Single crate, binary + library, or multi-crate workspace? +5. **Special Requirements**: + - Async runtime needed? (tokio, async-std) + - Web framework? (axum, actix-web, warp, rocket) + - Database? (PostgreSQL, MySQL, SQLite) + - CLI interface? (clap, structopt) + - Error handling library? (anyhow, thiserror) + - Real-time features? (WebSockets, Server-Sent Events) + - Background processing needs? +6. **Scale Targets**: Expected load, users, requests per second? +7. **AI Collaboration**: Will Director and Implementor AIs be used? + +### Phase 2: Expert Consultation + +Launch parallel Task agents to research: + +1. **Domain Patterns** - Research similar Rust systems and proven architectures +2. **Framework Best Practices** - axum, tokio, sqlx, clap patterns +3. **Book Knowledge** - Extract wisdom from Rust documentation and books +4. **Structure Analysis** - Study workspace organization approaches +5. 
**Superpowers Framework** - If handoff docs needed, research task breakdown format + +Example Task invocations: +``` +Task 1: Research [domain] architecture patterns and data models in Rust +Task 2: Analyze axum/actix framework patterns, middleware, and best practices +Task 3: Study Rust workspace organization for multi-crate projects +Task 4: Research Superpowers framework for implementation plan format +``` + +### Phase 3: Create Directory Structure + +Create this structure at the user-specified location: + +``` +project_root/ +├── README.md +├── CLAUDE.md +├── docs/ +│ ├── HANDOFF.md +│ ├── architecture/ +│ │ ├── 00_SYSTEM_OVERVIEW.md +│ │ ├── 01_DOMAIN_MODEL.md +│ │ ├── 02_DATA_LAYER.md +│ │ ├── 03_CORE_LOGIC.md +│ │ ├── 04_BOUNDARIES.md +│ │ ├── 05_CONCURRENCY.md +│ │ ├── 06_ASYNC_PATTERNS.md +│ │ └── 07_INTEGRATION_PATTERNS.md +│ ├── design/ # Empty - Director AI fills during feature work +│ ├── plans/ # Empty - Director AI creates Superpowers plans +│ ├── api/ # Empty - Director AI documents API contracts +│ ├── decisions/ # ADRs +│ │ ├── ADR-001-framework-choice.md +│ │ ├── ADR-002-error-strategy.md +│ │ ├── ADR-003-ownership-patterns.md +│ │ └── [domain-specific ADRs] +│ └── guardrails/ +│ ├── NEVER_DO.md +│ ├── ALWAYS_DO.md +│ ├── DIRECTOR_ROLE.md +│ ├── IMPLEMENTOR_ROLE.md +│ └── CODE_REVIEW_CHECKLIST.md +``` + +### Phase 4: Foundation Documentation + +#### README.md Structure + +```markdown +# [Project Name] + +[One-line description] + +## Overview +[2-3 paragraphs: what this system does and why] + +## Architecture +This project follows Rust workspace structure: + +project_root/ +├── [app_name]_core/ # Domain logic (pure Rust, no I/O) +├── [app_name]_api/ # REST/GraphQL APIs (axum/actix) +├── [app_name]_db/ # Database layer (sqlx/diesel) +├── [app_name]_worker/ # Background tasks (tokio tasks) +└── [app_name]_cli/ # CLI interface (clap) + +## Tech Stack + +### Core Runtime & Framework +- **Rust** 1.83+ (2021 edition, MSRV 1.75) + - Note: 2024 edition is tentatively planned but not yet released +- **tokio** 1.48+ - Async runtime with multi-threaded scheduler +- **axum** 0.8+ - Web framework built on tower/hyper +- **sqlx** 0.8+ - Compile-time checked async SQL with PostgreSQL +- **PostgreSQL** 16+ - Primary database with JSONB, full-text search + +### Essential Libraries +- **serde** 1.0.228+ - Serialization/deserialization framework +- **anyhow** 1.0.100+ - Flexible error handling for applications +- **thiserror** 2.0+ - Derive macro for custom error types +- **uuid** 1.18+ - UUID generation and parsing +- **chrono** 0.4.42+ - Date and time library +- **rust_decimal** 1.39+ - Decimal numbers for financial calculations +- **argon2** 0.5.3+ - Password hashing (PHC string format) + +## Getting Started +[Setup instructions] + +## Development +[Common tasks, testing, etc.] + +## Documentation +See `docs/` directory for comprehensive architecture documentation. +``` + +#### CLAUDE.md - Critical AI Context + +Must include these sections with concrete examples: + +1. **Project Context** - System purpose and domain +2. **Rust Design Philosophy** - Ownership, borrowing, zero-cost abstractions +3. **Key Architectural Decisions** - With trade-offs +4. **Ownership Patterns** - When to use ownership vs borrowing vs cloning +5. **Code Conventions** - Naming, structure, organization +6. **Money Handling** - Use rust_decimal or integer cents, never f64! +7. **Testing Patterns** - Unit/Integration/Property tests with proptest +8. **AI Agent Roles** - Director vs Implementor boundaries +9. 
**Common Mistakes** - Anti-patterns with corrections + +Example money handling section: +```rust +// ❌ NEVER +struct Account { + balance: f64, // Float precision errors! +} + +// ✅ ALWAYS +use rust_decimal::Decimal; +use std::str::FromStr; + +#[derive(Debug, Clone)] +struct Account { + id: uuid::Uuid, + balance: Decimal, // Or i64 for cents: 10000 = $100.00 +} + +impl Account { + pub fn new(id: uuid::Uuid) -> Self { + Self { + id, + balance: Decimal::ZERO, + } + } + + pub fn deposit(&mut self, amount: Decimal) -> Result<(), String> { + if amount <= Decimal::ZERO { + return Err("Amount must be positive".to_string()); + } + self.balance += amount; + Ok(()) + } +} + +// Why: 0.1 + 0.2 != 0.3 in floating point! +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_float_precision_error() { + // ❌ Float precision errors + let a = 0.1_f64 + 0.2_f64; + assert_ne!(a, 0.3_f64); // This fails with floats! + + // ✅ Decimal is always precise + let a = Decimal::from_str("0.1").unwrap() + + Decimal::from_str("0.2").unwrap(); + assert_eq!(a, Decimal::from_str("0.3").unwrap()); + } +} +``` + +### Phase 5: Guardrails Documentation + +Create 5 critical files: + +#### 1. NEVER_DO.md (15 Prohibitions) + +Template structure: +```markdown +# NEVER DO: Critical Prohibitions + +## 1. Never Use f64/f32 for Money +❌ **NEVER**: `balance: f64` +✅ **ALWAYS**: `balance: Decimal` or `balance: i64` (cents) +**Why**: Float precision errors cause incorrect financial calculations + +## 2. Never Unwrap in Library Code +❌ **NEVER**: `let value = result.unwrap();` +✅ **ALWAYS**: Return `Result` and let caller decide +**Why**: Libraries should not panic, applications decide error handling + +## 3. Never Clone Without Justification +❌ **NEVER**: Arbitrary `.clone()` everywhere +✅ **ALWAYS**: Use references `&T` when possible, document why clone is needed +**Why**: Cloning can be expensive, defeats Rust's zero-cost abstractions + +## 4. Never Ignore Errors with `let _ = ` +❌ **NEVER**: +```rust +let _ = fs::write("config.json", data); // Silent failure! +``` +✅ **ALWAYS**: +```rust +fs::write("config.json", data) + .context("Failed to write config file")?; +``` +**Why**: Silent errors lead to data corruption and debugging nightmares + +## 5. Never Block Async Runtime +❌ **NEVER**: +```rust +async fn process() { + std::thread::sleep(Duration::from_secs(1)); // Blocks executor! +} +``` +✅ **ALWAYS**: +```rust +async fn process() { + tokio::time::sleep(Duration::from_secs(1)).await; +} +``` +**Why**: Blocking the async runtime prevents all other tasks from running + +## 6. Never Use Arc> Without Justification +❌ **NEVER**: Default to `Arc>` for all shared state +✅ **ALWAYS**: Use simpler alternatives first +```rust +// Prefer AtomicT for simple counters +use std::sync::atomic::{AtomicU64, Ordering}; +let counter = AtomicU64::new(0); +counter.fetch_add(1, Ordering::Relaxed); + +// Prefer RwLock for read-heavy workloads +use std::sync::{Arc, RwLock}; +let data = Arc::new(RwLock::new(HashMap::new())); + +// Prefer channels for message passing +use tokio::sync::mpsc; +let (tx, rx) = mpsc::channel(100); +``` +**Why**: Arc> is expensive and often unnecessary + +## 7. Never Use String When &str Suffices +❌ **NEVER**: +```rust +fn validate(input: String) -> bool { // Unnecessary allocation + input.len() > 0 +} +``` +✅ **ALWAYS**: +```rust +fn validate(input: &str) -> bool { // Zero-cost + !input.is_empty() +} +``` +**Why**: Unnecessary allocations hurt performance + +## 8. 
Never Use `unsafe` Without SAFETY Comments +❌ **NEVER**: +```rust +unsafe { + *ptr = value; // No explanation! +} +``` +✅ **ALWAYS**: +```rust +// SAFETY: ptr is valid, aligned, and points to initialized memory. +// This function has exclusive access to the memory region. +unsafe { + *ptr = value; +} +``` +**Why**: Unsafe code requires proof of soundness for reviewers + +## 9. Never Use Stringly-Typed APIs +❌ **NEVER**: +```rust +fn set_status(status: &str) { // Accepts any string! + // What if someone passes "invalid"? +} +``` +✅ **ALWAYS**: +```rust +#[derive(Debug, Clone, Copy)] +pub enum Status { + Active, + Inactive, + Pending, +} + +fn set_status(status: Status) { // Compile-time safety + // Only valid statuses accepted +} +``` +**Why**: Compile-time guarantees prevent runtime errors + +## 10. Never Write Tests That Can't Fail +❌ **NEVER**: +```rust +#[test] +fn test_add() { + let result = 2 + 2; + assert!(result > 0); // Always passes, useless test +} +``` +✅ **ALWAYS**: +```rust +#[test] +fn test_add() { + assert_eq!(add(2, 2), 4); // Specific assertion + assert_eq!(add(-1, 1), 0); // Edge case +} +``` +**Why**: Weak assertions don't catch bugs + +## 11. Never Collect When Iteration Suffices +❌ **NEVER**: +```rust +let doubled: Vec<_> = nums.iter().map(|x| x * 2).collect(); +for n in doubled { + println!("{}", n); +} +``` +✅ **ALWAYS**: +```rust +for n in nums.iter().map(|x| x * 2) { + println!("{}", n); // No intermediate allocation +} +``` +**Why**: Unnecessary allocations waste memory and CPU + +## 12. Never Add Errors Without Context +❌ **NEVER**: +```rust +File::open(path)? // What file? Where? Why? +``` +✅ **ALWAYS**: +```rust +File::open(path) + .with_context(|| format!("Failed to open config file: {}", path.display()))? +``` +**Why**: Error messages should help debugging, not obscure the problem + +## 13. Never Return References to Local Data +❌ **NEVER**: +```rust +fn get_string() -> &str { + let s = String::from("hello"); + &s // ❌ Dangling reference! s dropped at end of function +} +``` +✅ **ALWAYS**: +```rust +fn get_string() -> String { + String::from("hello") // Return owned data +} +// Or use static lifetime +fn get_string() -> &'static str { + "hello" // String literal has 'static lifetime +} +``` +**Why**: References to dropped data cause use-after-free + +## 14. Never Use `transmute` Without `repr(C)` +❌ **NEVER**: +```rust +#[derive(Debug)] +struct Foo { x: u32, y: u64 } + +let bytes: [u8; 12] = unsafe { std::mem::transmute(foo) }; // UB! +``` +✅ **ALWAYS**: +```rust +#[repr(C)] // Guaranteed memory layout +#[derive(Debug)] +struct Foo { x: u32, y: u64 } + +// Or use safe alternatives +let x_bytes = foo.x.to_ne_bytes(); +let y_bytes = foo.y.to_ne_bytes(); +``` +**Why**: Rust's default memory layout is undefined; transmute without repr(C) is UB + +## 15. Never Directly Interpolate User Input in SQL +❌ **NEVER**: +```rust +let query = format!("SELECT * FROM users WHERE id = {}", user_id); // SQL injection! +sqlx::query(&query).fetch_one(&pool).await?; +``` +✅ **ALWAYS**: +```rust +sqlx::query!("SELECT * FROM users WHERE id = $1", user_id) + .fetch_one(&pool) + .await?; +// Or use query builder +sqlx::query("SELECT * FROM users WHERE id = $1") + .bind(user_id) + .fetch_one(&pool) + .await?; +``` +**Why**: SQL injection is a critical security vulnerability +``` + +#### 2. ALWAYS_DO.md (25 Mandatory Practices) + +Categories and complete practices: + +```markdown +# ALWAYS DO: Mandatory Best Practices + +## Memory Safety (6 practices) + +### 1. 
ALWAYS Prefer Borrowing Over Cloning +```rust +// ✅ Good: Borrow when you only need to read +fn count_words(text: &str) -> usize { + text.split_whitespace().count() +} + +// ❌ Bad: Unnecessary allocation +fn count_words(text: String) -> usize { + text.split_whitespace().count() +} +``` + +### 2. ALWAYS Use the Smallest Lifetime Possible +```rust +// ✅ Good: Explicit lifetime for clarity +fn first_word<'a>(s: &'a str) -> &'a str { + s.split_whitespace().next().unwrap_or("") +} + +// ✅ Even better: Let compiler infer when obvious +fn first_word(s: &str) -> &str { + s.split_whitespace().next().unwrap_or("") +} +``` + +### 3. ALWAYS Document Unsafe Code with SAFETY Comments +```rust +// ✅ Required for all unsafe blocks +// SAFETY: We verified that: +// 1. ptr is valid and aligned +// 2. Memory is initialized +// 3. No other references exist +unsafe { + *ptr = value; +} +``` + +### 4. ALWAYS Use Smart Pointers Appropriately +```rust +// ✅ Box: Heap allocation for large data +let large_data = Box::new([0u8; 1000000]); + +// ✅ Rc: Shared ownership, single-threaded +let data = Rc::new(vec![1, 2, 3]); + +// ✅ Arc: Shared ownership, multi-threaded +let data = Arc::new(Mutex::new(vec![1, 2, 3])); +``` + +### 5. ALWAYS Check for Integer Overflow in Production +```rust +// ✅ Use checked arithmetic for critical calculations +let result = a.checked_add(b) + .ok_or(Error::Overflow)?; + +// ✅ Or use saturating for UI coordinates +let position = current.saturating_add(offset); +``` + +### 6. ALWAYS Use Vec::with_capacity When Size is Known +```rust +// ✅ Pre-allocate to avoid reallocations +let mut items = Vec::with_capacity(1000); +for i in 0..1000 { + items.push(i); +} + +// ❌ Multiple reallocations +let mut items = Vec::new(); +for i in 0..1000 { + items.push(i); // Reallocates at 4, 8, 16, 32... +} +``` + +## Testing (7 practices) + +### 7. ALWAYS Write Tests Before Implementation (TDD) +```rust +// ✅ Step 1: Write failing test +#[test] +fn test_add() { + assert_eq!(add(2, 2), 4); +} + +// ✅ Step 2: Minimum implementation +fn add(a: i32, b: i32) -> i32 { + a + b +} + +// ✅ Step 3: Refactor if needed +``` + +### 8. ALWAYS Test Edge Cases +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_divide_normal() { + assert_eq!(divide(10, 2), Some(5)); + } + + #[test] + fn test_divide_by_zero() { + assert_eq!(divide(10, 0), None); // Edge case! + } + + #[test] + fn test_divide_negative() { + assert_eq!(divide(-10, 2), Some(-5)); // Edge case! + } +} +``` + +### 9. ALWAYS Use Property-Based Testing for Complex Logic +```rust +use proptest::prelude::*; + +proptest! { + #[test] + fn test_reversing_twice_gives_original(ref v in prop::collection::vec(any::(), 0..100)) { + let mut v2 = v.clone(); + v2.reverse(); + v2.reverse(); + assert_eq!(v, &v2); + } +} +``` + +### 10. ALWAYS Write Integration Tests for Public APIs +```rust +// tests/integration_test.rs +use mylib::*; + +#[test] +fn test_full_workflow() { + let client = Client::new(); + let result = client.fetch_data().unwrap(); + assert!(result.is_valid()); +} +``` + +### 11. ALWAYS Use #[should_panic] for Expected Panics +```rust +#[test] +#[should_panic(expected = "index out of bounds")] +fn test_invalid_index() { + let v = vec![1, 2, 3]; + let _ = v[10]; // Should panic +} +``` + +### 12. ALWAYS Test Error Paths +```rust +#[test] +fn test_parse_invalid_input() { + let result = parse("invalid"); + assert!(result.is_err()); + assert!(matches!(result, Err(ParseError::InvalidFormat))); +} +``` + +### 13. 
ALWAYS Aim for >80% Test Coverage +```rust +// Use cargo-tarpaulin to measure +// cargo install cargo-tarpaulin +// cargo tarpaulin --out Html +``` + +## Code Quality (7 practices) + +### 14. ALWAYS Run Clippy and Fix Warnings +```bash +# ✅ Run before every commit +cargo clippy -- -D warnings +``` + +### 15. ALWAYS Format Code with rustfmt +```bash +# ✅ Run before every commit +cargo fmt --all +``` + +### 16. ALWAYS Document Public APIs +```rust +/// Calculates the sum of two numbers. +/// +/// # Examples +/// +/// ``` +/// use mylib::add; +/// assert_eq!(add(2, 2), 4); +/// ``` +/// +/// # Panics +/// +/// This function does not panic. +/// +/// # Errors +/// +/// Returns an error if overflow occurs. +pub fn add(a: i32, b: i32) -> Result { + a.checked_add(b).ok_or(Error::Overflow) +} +``` + +### 17. ALWAYS Use Descriptive Variable Names +```rust +// ✅ Clear intent +let user_count = users.len(); +let max_retry_attempts = 3; + +// ❌ Unclear +let n = users.len(); +let x = 3; +``` + +### 18. ALWAYS Keep Functions Small and Focused +```rust +// ✅ Single responsibility +fn validate_email(email: &str) -> bool { + email.contains('@') && email.contains('.') +} + +fn validate_password(password: &str) -> bool { + password.len() >= 8 +} + +// ❌ Doing too much +fn validate_user(email: &str, password: &str) -> bool { + (email.contains('@') && email.contains('.')) + && password.len() >= 8 + && /* 20 more conditions */ +} +``` + +### 19. ALWAYS Use Type Aliases for Complex Types +```rust +// ✅ Readable +type UserId = u64; +type Result = std::result::Result; + +fn get_user(id: UserId) -> Result { + // ... +} + +// ❌ Repetitive and error-prone +fn get_user(id: u64) -> std::result::Result { + // ... +} +``` + +### 20. ALWAYS Implement Debug for Custom Types +```rust +// ✅ Always derive or implement Debug +#[derive(Debug, Clone)] +pub struct User { + id: u64, + name: String, +} +``` + +## Architecture (5 practices) + +### 21. ALWAYS Propagate Errors with ? +```rust +// ✅ Clean error propagation +fn process_file(path: &Path) -> Result { + let content = fs::read_to_string(path)?; + let parsed = parse(&content)?; + let validated = validate(parsed)?; + Ok(validated) +} +``` + +### 22. ALWAYS Use thiserror for Library Errors +```rust +// ✅ Library errors should be typed +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum DataError { + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("Parse error at line {line}: {message}")] + Parse { line: usize, message: String }, + + #[error("Validation failed: {0}")] + Validation(String), +} +``` + +### 23. ALWAYS Use anyhow for Application Errors +```rust +// ✅ Application-level convenience +use anyhow::{Context, Result}; + +fn main() -> Result<()> { + let config = load_config() + .context("Failed to load configuration")?; + + let data = fetch_data(&config) + .context("Failed to fetch data from API")?; + + Ok(()) +} +``` + +### 24. ALWAYS Separate Pure Logic from I/O +```rust +// ✅ Pure function (testable without I/O) +fn calculate_discount(price: Decimal, coupon: &str) -> Decimal { + match coupon { + "SAVE10" => price * Decimal::new(90, 2), + "SAVE20" => price * Decimal::new(80, 2), + _ => price, + } +} + +// ✅ I/O function (uses pure logic) +async fn apply_discount(order_id: Uuid, coupon: &str) -> Result { + let order = fetch_order(order_id).await?; + let discounted = calculate_discount(order.total, coupon); + update_order_total(order_id, discounted).await?; + Ok(order) +} +``` + +### 25. 
ALWAYS Use Builder Pattern for Complex Constructors +```rust +// ✅ Builder pattern for clarity +#[derive(Debug)] +pub struct HttpClient { + timeout: Duration, + retries: u32, + user_agent: String, +} + +impl HttpClient { + pub fn builder() -> HttpClientBuilder { + HttpClientBuilder::default() + } +} + +#[derive(Default)] +pub struct HttpClientBuilder { + timeout: Option, + retries: Option, + user_agent: Option, +} + +impl HttpClientBuilder { + pub fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = Some(timeout); + self + } + + pub fn retries(mut self, retries: u32) -> Self { + self.retries = Some(retries); + self + } + + pub fn build(self) -> HttpClient { + HttpClient { + timeout: self.timeout.unwrap_or(Duration::from_secs(30)), + retries: self.retries.unwrap_or(3), + user_agent: self.user_agent.unwrap_or_else(|| "rust-client".to_string()), + } + } +} + +// Usage +let client = HttpClient::builder() + .timeout(Duration::from_secs(10)) + .retries(5) + .build(); +``` +``` + +#### 3. DIRECTOR_ROLE.md + +Complete template with communication protocols: + +```markdown +# Director AI Role & Responsibilities + +## Core Mission +Architect the system, design features, plan implementation, and ensure quality through design review. + +## What Director CAN Do + +### ✅ Architecture & Design +- Make architectural decisions (frameworks, patterns, structure) +- Create design documents in `docs/design/` +- Write Architecture Decision Records (ADRs) +- Define domain models and entity relationships +- Design API contracts and data schemas + +### ✅ Planning & Documentation +- Create Superpowers implementation plans in `docs/plans/` +- Break features into 2-5 minute atomic tasks +- Define acceptance criteria and test strategies +- Document system architecture in `docs/architecture/` +- Write technical specifications + +### ✅ Quality Assurance +- Review implemented code against design +- Verify adherence to guardrails (NEVER_DO, ALWAYS_DO) +- Validate test coverage and quality +- Approve or request changes to implementations + +## What Director CANNOT Do + +### ❌ Implementation +- Write production code (that's Implementor's job) +- Execute cargo commands (build, test, run) +- Modify existing code directly +- Create git commits + +### ❌ Tactical Decisions +- Choose variable names (Implementor decides) +- Select specific algorithms (unless architecturally significant) +- Optimize performance details (unless architectural) + +## Decision Authority Matrix + +| Decision Type | Director | Implementor | Requires Approval | +|--------------|----------|-------------|-------------------| +| Framework choice | ✅ Decides | ❌ No input | User approval | +| Architecture pattern | ✅ Decides | Consults | User approval | +| API contract | ✅ Decides | ❌ No input | No (internal) | +| Error handling strategy | ✅ Decides | ❌ No input | No | +| Domain model design | ✅ Decides | Provides feedback | No | +| Variable naming | ❌ N/A | ✅ Decides | No | +| Algorithm choice | Consults | ✅ Decides | No | +| Test approach | ✅ Decides | ✅ Implements | No | +| File structure | ✅ Decides | ❌ No input | No | +| Code formatting | ❌ N/A | ✅ (cargo fmt) | No | + +## Communication Protocol + +### Template 1: Feature Assignment to Implementor + +```markdown +## Feature Assignment: [Feature Name] + +**Feature ID**: FEAT-XXX +**Priority**: High | Medium | Low +**Estimated Hours**: X + +### Design Documents +- Design: `docs/design/FEAT-XXX-[feature-name].md` +- Implementation Plan: `docs/plans/PLAN-XXX-[feature-name].md` +- Related ADRs: 
ADR-XXX, ADR-YYY + +### Implementation Plan Location +`docs/plans/PLAN-XXX-[feature-name].md` + +### Key Architectural Constraints +1. Must use Repository pattern for data access +2. All errors must use thiserror for domain layer +3. Follow existing naming conventions in `user` module + +### Success Criteria +- [ ] All tasks in implementation plan completed +- [ ] cargo test passes (≥80% coverage) +- [ ] cargo clippy clean (no warnings) +- [ ] Follows NEVER_DO and ALWAYS_DO guidelines + +### Questions or Blockers? +Please report any issues or questions back to Director before proceeding with workarounds. + +--- +**Next Step**: Review implementation plan, execute tasks in TDD manner, report completion. +``` + +### Template 2: Progress Check Request + +```markdown +## Progress Check: [Feature Name] + +**Feature ID**: FEAT-XXX +**Assigned**: [Date] + +### Status Update Requested +Please provide: +1. **Completed Tasks**: List task numbers from plan +2. **Current Task**: What you're working on now +3. **Blockers**: Any issues preventing progress +4. **Questions**: Architecture or design clarifications needed +5. **ETA**: Estimated completion date + +### Format +``` +- Completed: Tasks 1, 2, 3 +- Current: Task 4 (Password hashing) +- Blockers: None | [Describe blocker] +- Questions: [Any questions] +- ETA: [Date] | [X hours remaining] +``` + +--- +**Response Expected**: Within 24 hours or when blocked +``` + +### Template 3: Code Review Feedback + +```markdown +## Code Review: [Feature Name] + +**Feature ID**: FEAT-XXX +**Review Date**: [Date] +**Status**: ✅ Approved | ⚠️ Changes Requested | ❌ Rejected + +### Review Against Design +- [ ] Implementation matches design document +- [ ] All planned tasks completed +- [ ] API contracts followed +- [ ] Domain model correctly implemented + +### Guardrails Compliance +- [ ] No NEVER_DO violations detected +- [ ] ALWAYS_DO practices followed +- [ ] Error handling strategy correct (thiserror/anyhow) +- [ ] No blocking operations in async code + +### Code Quality +- [ ] Tests pass (cargo test) +- [ ] Clippy clean (cargo clippy) +- [ ] Formatted (cargo fmt) +- [ ] Test coverage ≥80% + +### Feedback + +#### ✅ Strengths +1. [Positive observation] +2. [Good practice noticed] + +#### ⚠️ Changes Requested +1. **Issue**: [Description] + **Location**: `src/path/file.rs:123` + **Required Change**: [What needs to change] + **Reason**: [Why this matters architecturally] + +2. [Additional issues...] + +#### 💡 Suggestions (Optional) +1. 
[Nice-to-have improvements] + +--- +**Next Step**: +- If Approved: Feature complete, merge approved +- If Changes Requested: Address issues, resubmit for review +- If Rejected: Schedule design discussion +``` + +### Template 4: Architecture Question Response + +```markdown +## Architecture Question Response + +**Question ID**: Q-XXX +**Feature**: [Feature Name] +**Asked By**: Implementor +**Date**: [Date] + +### Question +[Exact question from Implementor] + +### Answer +[Clear, specific answer] + +### Reasoning +[Why this approach is chosen] + +### Example +```rust +// Demonstrate the approach +[Code example if applicable] +``` + +### Related Documentation +- ADR-XXX: [Related decision] +- Design Doc: `docs/design/FEAT-XXX.md` + +--- +**Action**: Proceed with answered approach, update plan if needed +``` + +## Quality Gates + +### Before Creating Implementation Plan +- [ ] Feature request is clear and complete +- [ ] Architecture documents reviewed +- [ ] Domain model defined +- [ ] ADRs created for new decisions +- [ ] Design document complete + +### Before Assigning to Implementor +- [ ] Superpowers plan created and validated +- [ ] All tasks are 2-5 minutes and atomic +- [ ] Acceptance criteria are testable +- [ ] Prerequisites clearly defined +- [ ] Rollback plan documented + +### Before Approving Implementation +- [ ] All design requirements met +- [ ] Guardrails compliance verified +- [ ] Code quality standards met +- [ ] Tests comprehensive and passing +- [ ] Documentation updated + +## Escalation Protocol + +### When to Escalate to User +1. **Major Architecture Changes**: Framework swap, data model redesign +2. **Contradictory Requirements**: User requirements conflict +3. **Technical Limitations**: Can't meet requirements with current stack +4. **Security Concerns**: Potential vulnerability in design +5. **Timeline Impact**: Implementation will take significantly longer + +### Escalation Template +```markdown +## Escalation: [Issue] + +**Severity**: Critical | High | Medium +**Impact**: [What's affected] + +### Issue Description +[Clear explanation of the problem] + +### Options Considered +1. **Option A**: [Description] + - Pros: [List] + - Cons: [List] + - Timeline: [Impact] + +2. **Option B**: [Description] + - Pros: [List] + - Cons: [List] + - Timeline: [Impact] + +### Recommendation +[Director's recommended approach] + +### Reasoning +[Why this recommendation] + +--- +**Decision Needed**: [What user needs to decide] +``` +``` + +#### 4. IMPLEMENTOR_ROLE.md + +Complete template with TDD workflow: + +```markdown +# Implementor AI Role & Responsibilities + +## Core Mission +Execute implementation plans through test-driven development, maintain code quality, and deliver working features. 
+ +## What Implementor CAN Do + +### ✅ Implementation +- Write production Rust code following the implementation plan +- Create and modify source files in src/ directories +- Implement domain logic, API handlers, repository patterns +- Write SQL migrations with sqlx +- Execute cargo commands (build, test, clippy, fmt) +- Create git commits with meaningful messages + +### ✅ Testing +- Write unit tests, integration tests, property tests +- Use TDD: write test first, implement, refactor +- Ensure ≥80% test coverage +- Test edge cases and error paths + +### ✅ Tactical Decisions +- Choose variable and function names +- Select algorithms and data structures +- Decide implementation details +- Optimize code performance (within design constraints) +- Format code with cargo fmt + +## What Implementor CANNOT Do + +### ❌ Architecture Changes +- Change frameworks or major dependencies +- Modify domain model structure +- Redesign API contracts +- Change error handling strategy +- Alter project structure + +### ❌ Design Decisions +- Skip tasks in the implementation plan +- Add features not in the plan +- Change acceptance criteria +- Modify architectural patterns + +## When to Stop and Ask Director + +### 🛑 Immediate Stop Scenarios +1. **Implementation Plan Unclear**: Task description is ambiguous +2. **Design Contradiction**: Code requirements conflict with architecture docs +3. **Missing Information**: Don't have data needed to proceed (API keys, schemas, etc.) +4. **Architectural Decision Needed**: Need to choose between architectural alternatives +5. **Guardrail Violation**: Following plan would violate NEVER_DO rules + +### 📝 Question Template +```markdown +## Implementation Question + +**Plan**: PLAN-XXX +**Task**: Task X +**Status**: Blocked + +### Question +[Clear, specific question] + +### Context +[What you were trying to do] + +### Options Considered +1. **Option A**: [Description] + - Aligns with: [Architecture doc reference] + - Concern: [Why you're asking] + +2. **Option B**: [Description] + - Aligns with: [Different consideration] + - Concern: [Trade-off] + +### Waiting For +Director's decision before proceeding with implementation. 
+``` + +## TDD Workflow (Red-Green-Refactor) + +### Complete Example: Adding Password Validation + +#### Step 1: RED - Write Failing Test +```rust +// myapp_core/src/domain/password.rs +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_validate_password_too_short() { + let result = validate_password("short"); + assert!(result.is_err()); + assert!(matches!(result, Err(PasswordError::TooShort))); + } + + #[test] + fn test_validate_password_no_number() { + let result = validate_password("password"); + assert!(result.is_err()); + assert!(matches!(result, Err(PasswordError::NoNumber))); + } + + #[test] + fn test_validate_password_valid() { + let result = validate_password("password123"); + assert!(result.is_ok()); + } +} +``` + +**Run**: `cargo test` → Tests fail (function doesn't exist yet) ✅ RED + +#### Step 2: GREEN - Minimum Implementation +```rust +// myapp_core/src/domain/password.rs +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum PasswordError { + #[error("Password must be at least 8 characters")] + TooShort, + + #[error("Password must contain at least one number")] + NoNumber, +} + +pub fn validate_password(password: &str) -> Result<(), PasswordError> { + if password.len() < 8 { + return Err(PasswordError::TooShort); + } + + if !password.chars().any(|c| c.is_numeric()) { + return Err(PasswordError::NoNumber); + } + + Ok(()) +} +``` + +**Run**: `cargo test` → Tests pass ✅ GREEN + +#### Step 3: REFACTOR - Improve Code +```rust +// Refactor: Extract magic numbers as constants +const MIN_PASSWORD_LENGTH: usize = 8; + +pub fn validate_password(password: &str) -> Result<(), PasswordError> { + validate_length(password)?; + validate_contains_number(password)?; + Ok(()) +} + +fn validate_length(password: &str) -> Result<(), PasswordError> { + if password.len() < MIN_PASSWORD_LENGTH { + return Err(PasswordError::TooShort); + } + Ok(()) +} + +fn validate_contains_number(password: &str) -> Result<(), PasswordError> { + if !password.chars().any(char::is_numeric) { + return Err(PasswordError::NoNumber); + } + Ok(()) +} +``` + +**Run**: `cargo test` → Tests still pass ✅ REFACTOR COMPLETE + +#### Step 4: Quality Checks +```bash +# Run all quality checks before moving to next task +cargo test # ✅ All tests pass +cargo clippy -- -D warnings # ✅ No warnings +cargo fmt --all # ✅ Code formatted +``` + +#### Step 5: Commit +```bash +git add src/domain/password.rs +git commit -m "feat: add password validation + +- Validate minimum length (8 characters) +- Require at least one numeric character +- Return typed errors for validation failures + +Tests: Added unit tests for validation logic +Coverage: 100% for password module" +``` + +## Code Quality Checklist + +### Before Marking Task Complete +- [ ] All tests pass: `cargo test` +- [ ] No clippy warnings: `cargo clippy -- -D warnings` +- [ ] Code formatted: `cargo fmt --all` +- [ ] Test coverage ≥80% for new code +- [ ] Edge cases tested (empty, null, boundaries) +- [ ] Error paths tested +- [ ] Documentation comments for public APIs +- [ ] Acceptance criteria from plan met + +### Before Requesting Review +- [ ] All tasks in plan completed +- [ ] No NEVER_DO violations +- [ ] ALWAYS_DO practices followed +- [ ] Integration tests pass (if applicable) +- [ ] Migrations applied successfully (if DB changes) +- [ ] No TODO comments in production code +- [ ] Git commits are clean and descriptive + +## Progress Reporting + +### Daily Progress Template +```markdown +## Progress Update: [Feature Name] + +**Date**: [Date] +**Plan**: 
PLAN-XXX + +### Completed Today +- ✅ Task 1: Database schema (3 min actual) +- ✅ Task 2: User domain model (4 min actual) +- ✅ Task 3: Password hashing (6 min actual) + +### Currently Working On +- 🔄 Task 4: Repository implementation + +### Blockers +- None | [Describe blocker and question to Director] + +### Next Up +- Task 5: Integration tests + +### Notes +- All tests passing, coverage at 85% +- Found edge case in email validation, added test +``` + +## Common Mistakes to Avoid + +### ❌ Don't: Skip Tests +```rust +// Wrong: Implementing without test +fn calculate_discount(price: Decimal) -> Decimal { + price * Decimal::new(90, 2) // No test! +} +``` + +### ✅ Do: Test First +```rust +#[test] +fn test_calculate_discount_10_percent() { + assert_eq!(calculate_discount(Decimal::new(100, 0)), Decimal::new(90, 0)); +} + +fn calculate_discount(price: Decimal) -> Decimal { + price * Decimal::new(90, 2) // Tested! +} +``` + +### ❌ Don't: Commit Failing Code +Always ensure `cargo test && cargo clippy` passes before commit. + +### ✅ Do: Commit Working Code Only +```bash +cargo test && cargo clippy -- -D warnings && git commit +``` + +### ❌ Don't: Change Architecture +If you find an issue with the design, ask Director—don't fix it yourself. + +### ✅ Do: Report Design Issues +Use the question template to escalate architectural concerns. +``` + +#### 5. CODE_REVIEW_CHECKLIST.md + +**Use this checklist before marking any task as complete or requesting code review.** + +--- + +### ✅ Correctness + +**Logic & Control Flow** +- [ ] All code paths handle both success and failure cases +- [ ] No unwrap() or expect() in production code (use proper error handling) +- [ ] Pattern matching is exhaustive (no wildcard `_` on critical enums) +- [ ] Loop termination conditions are correct (no infinite loops) +- [ ] Edge cases are explicitly tested (empty collections, boundary values, None/Some) + +**Error Handling** +- [ ] All errors have proper context using `.context()` or `.with_context()` +- [ ] Library code uses `thiserror` for custom error types +- [ ] Application code uses `anyhow::Result` for error propagation +- [ ] No errors are silently discarded (all Result/Option properly handled) +- [ ] Error messages include actionable information (what failed, why, how to fix) + +**Ownership & Borrowing** +- [ ] No unnecessary `.clone()` calls (prefer borrowing) +- [ ] Lifetime annotations are minimal and necessary +- [ ] No dangling references or use-after-free scenarios +- [ ] Smart pointers (Arc, Rc, Box) are used appropriately, not by default + +--- + +### 💰 Financial Integrity (if applicable) + +**Decimal Types** +- [ ] All money calculations use `rust_decimal::Decimal` or `i64` (never f32/f64) +- [ ] Currency conversions preserve precision +- [ ] Rounding is explicit and documented with business justification +- [ ] Database columns use `NUMERIC` or `BIGINT`, never `REAL`/`DOUBLE` + +**Audit Trail** +- [ ] All financial transactions are logged with timestamp, user, amount +- [ ] Immutable audit log (append-only, never delete/update) +- [ ] Transaction IDs are unique and traceable +- [ ] Balance changes include before/after snapshots + +**Idempotency** +- [ ] Financial operations are idempotent (safe to retry) +- [ ] Duplicate transaction detection is in place +- [ ] Distributed transactions use proper isolation levels + +--- + +### 🛡️ Memory Safety + +**Unsafe Code** +- [ ] No `unsafe` blocks unless absolutely necessary +- [ ] Every `unsafe` block has a `// SAFETY:` comment explaining invariants +- [ ] Unsafe 
code is isolated in smallest possible scope +- [ ] Alternative safe solutions were considered and documented + +**Lifetime Correctness** +- [ ] No lifetime parameters unless necessary for API design +- [ ] Lifetime elision is used where possible +- [ ] References don't outlive the data they point to +- [ ] Self-referential structs use `Pin` if needed + +**Smart Pointer Usage** +- [ ] `Vec::with_capacity()` for known-size collections +- [ ] `Arc` only for shared ownership across threads +- [ ] `Rc` only for single-threaded shared ownership +- [ ] `Box` for heap allocation or trait objects +- [ ] Mutex/RwLock used appropriately (prefer message passing) + +--- + +### 🔐 Security + +**Input Validation** +- [ ] All user input is validated before processing +- [ ] String length limits are enforced +- [ ] Numeric inputs check min/max ranges +- [ ] Email/URL validation uses proper libraries +- [ ] File uploads check MIME type and size limits + +**SQL Injection Prevention** +- [ ] All database queries use parameterized queries (sqlx macros or `query!`) +- [ ] No string concatenation for SQL +- [ ] Input sanitization for LIKE clauses +- [ ] Database user has minimum necessary privileges + +**Authentication & Authorization** +- [ ] Passwords are hashed with bcrypt/argon2 (never plaintext) +- [ ] JWT tokens have expiration times +- [ ] Authorization checks happen on every protected endpoint +- [ ] Session tokens are cryptographically random +- [ ] Sensitive operations require re-authentication + +**Secrets Management** +- [ ] No secrets in source code (use environment variables or secret manager) +- [ ] API keys rotate regularly +- [ ] Database credentials stored securely +- [ ] Secrets never logged or exposed in error messages + +**HTTPS & Transport Security** +- [ ] All HTTP traffic uses TLS in production +- [ ] Certificate validation is enabled +- [ ] No self-signed certificates in production +- [ ] CORS configuration is restrictive (not `allow_origin("*")`) + +--- + +### 🧪 Testing + +**Test Coverage** +- [ ] Minimum 80% code coverage (run `cargo tarpaulin`) +- [ ] All public functions have tests +- [ ] Critical business logic has >95% coverage +- [ ] Edge cases are explicitly tested (empty, null, boundary values) + +**Test Types** +- [ ] Unit tests for pure logic (no I/O) +- [ ] Integration tests for database/HTTP interactions +- [ ] Property-based tests for invariants (using `proptest` or `quickcheck`) +- [ ] `#[should_panic(expected = "...")]` for expected failures + +**Test Quality** +- [ ] Tests have descriptive names (test_user_registration_fails_with_weak_password) +- [ ] Tests are independent (no shared mutable state) +- [ ] Tests clean up resources (temp files, database transactions) +- [ ] Error paths are tested (not just happy path) +- [ ] Async tests use `#[tokio::test]` not `#[test]` + +**Performance Tests** +- [ ] Benchmarks exist for performance-critical code (using `criterion`) +- [ ] Load tests validate scalability targets +- [ ] Database query performance measured (no N+1 queries) + +--- + +### 📝 Code Quality + +**Linting & Formatting** +- [ ] `cargo clippy` passes with no warnings +- [ ] `cargo fmt --check` passes (code is formatted) +- [ ] No `#[allow(clippy::...)]` without justification +- [ ] Compiler warnings are treated as errors in CI + +**Naming Conventions** +- [ ] Types are `PascalCase` (struct User) +- [ ] Functions/variables are `snake_case` (get_user_by_id) +- [ ] Constants are `SCREAMING_SNAKE_CASE` (MAX_RETRIES) +- [ ] Names are descriptive (not `tmp`, `data`, 
`info`) + +**Function Design** +- [ ] Functions are <50 lines (prefer smaller) +- [ ] Functions do one thing well (Single Responsibility) +- [ ] Function names start with verbs (get_, create_, validate_) +- [ ] Nested blocks are <3 levels deep + +**Type Safety** +- [ ] Type aliases used for domain concepts (`type UserId = Uuid`) +- [ ] Newtypes for distinct domains (`struct Email(String)`) +- [ ] Enums for exclusive states (not bool flags) +- [ ] Structs implement `Debug` derive + +--- + +### 📚 Documentation + +**Module Documentation** +- [ ] Every module has `//!` doc comment explaining purpose +- [ ] Public API has rustdoc comments (`///`) +- [ ] Code examples in docs compile (use `cargo test --doc`) +- [ ] Complex algorithms have implementation notes + +**Function Documentation** +- [ ] Public functions document parameters and return values +- [ ] Error cases are documented +- [ ] Examples provided for non-obvious usage +- [ ] Panics are documented with `# Panics` section + +**Inline Comments** +- [ ] Comments explain WHY, not WHAT (code explains what) +- [ ] Complex logic has explanatory comments +- [ ] TODO comments have GitHub issue numbers +- [ ] Magic numbers are explained or replaced with constants + +--- + +### ⚡ Performance + +**Allocations** +- [ ] Hot paths avoid allocations (use references, slices, iterators) +- [ ] Unnecessary `String` allocations removed (use `&str` where possible) +- [ ] `.collect()` only used when necessary +- [ ] Clone-on-write (`Cow`) for conditional ownership + +**Async Performance** +- [ ] No `.await` inside loops (collect futures, join_all) +- [ ] Blocking operations use `spawn_blocking` +- [ ] Database connection pooling configured (min/max connections) +- [ ] HTTP client reused (not created per request) + +**Database Performance** +- [ ] Indexes exist for all WHERE/JOIN columns +- [ ] Queries are analyzed with EXPLAIN ANALYZE +- [ ] Batch inserts used for multiple records +- [ ] Pagination implemented for large result sets +- [ ] No N+1 queries (use eager loading) + +**Caching** +- [ ] Expensive computations are cached +- [ ] Cache invalidation strategy is correct +- [ ] TTL set appropriately for cached data + +--- + +### 🏗️ Architecture + +**Layering** +- [ ] Domain logic is pure (no I/O in business rules) +- [ ] Infrastructure code separated from domain code +- [ ] API handlers are thin (delegate to services) +- [ ] No database queries in handlers + +**Separation of Concerns** +- [ ] Each module has a single responsibility +- [ ] Dependencies flow inward (domain ← services ← handlers) +- [ ] No circular dependencies between crates/modules + +**Design Patterns** +- [ ] Builder pattern for complex construction +- [ ] Repository pattern for data access +- [ ] Error types follow thiserror/anyhow conventions +- [ ] Traits used for abstraction (not concrete types) + +**API Design** +- [ ] Public API is minimal (principle of least privilege) +- [ ] Breaking changes follow semantic versioning +- [ ] Deprecated items have replacement suggestions +- [ ] Generics have clear trait bounds + +--- + +### ✅ Final Checks + +Before marking task complete: +- [ ] All checklist items above are checked +- [ ] `cargo test` passes +- [ ] `cargo clippy` has no warnings +- [ ] `cargo fmt` applied +- [ ] Code compiles without warnings +- [ ] Git commit message follows conventional commits + +Before requesting code review: +- [ ] Self-review performed (read your own code) +- [ ] Edge cases tested and documented +- [ ] Performance implications considered +- [ ] Security 
implications considered
+- [ ] Breaking changes documented
+- [ ] Migration guide provided (if needed)
+
+### Phase 6: Architecture Documentation (8 Files)
+
+#### 00_SYSTEM_OVERVIEW.md
+- Vision and goals
+- High-level architecture diagram (ASCII art is fine)
+- Component overview (crates and their purposes)
+- Data flow diagrams
+- Technology justification (why axum, why tokio, why sqlx)
+- Scalability strategy (connection pooling, caching, load balancing)
+- Security approach (authentication, authorization, secrets)
+- Performance targets with specific metrics
+
+#### 01_DOMAIN_MODEL.md
+- All domain entities with complete field definitions
+- Relationships between entities
+- Business rules and constraints
+- State machines (if applicable, with ASCII diagrams)
+- Use cases with concrete code examples
+- Entity lifecycle explanations
+
+Example entity:
+```rust
+use chrono::{DateTime, NaiveDate, Utc};
+use uuid::Uuid;
+
+#[derive(Debug, Clone)]
+pub struct Task {
+ pub id: Uuid, // Or use ULID: Ulid
+ pub project_id: Uuid,
+ pub title: String,
+ pub description: Option<String>,
+ pub status: TaskStatus, // Enum: Todo | InProgress | Blocked | Review | Done
+ pub priority: Priority, // Enum: Low | Medium | High | Urgent
+ pub assignee_id: Option<Uuid>,
+ pub due_date: Option<NaiveDate>,
+ pub estimated_hours: Option<i32>,
+ pub version: i32, // For optimistic locking
+ pub created_at: DateTime<Utc>,
+ pub updated_at: DateTime<Utc>,
+}
+
+impl Default for Task {
+ fn default() -> Self {
+ Self {
+ id: Uuid::new_v4(),
+ project_id: Uuid::new_v4(),
+ title: String::new(),
+ description: None,
+ status: TaskStatus::default(),
+ priority: Priority::default(),
+ assignee_id: None,
+ due_date: None,
+ estimated_hours: None,
+ version: 0,
+ created_at: Utc::now(),
+ updated_at: Utc::now(),
+ }
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub enum TaskStatus {
+ #[default]
+ Todo,
+ InProgress,
+ Blocked,
+ Review,
+ Done,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
+pub enum Priority {
+ Low,
+ #[default]
+ Medium,
+ High,
+ Urgent,
+}
+```
+
+#### 02_DATA_LAYER.md
+- Complete sqlx query patterns for all entities
+- PostgreSQL table schemas
+- Indexes and their justifications
+- Optimistic locking implementation (version fields)
+- Performance considerations (connection pooling, prepared statements)
+- Migration strategy
+
+Example sqlx pattern:
+```rust
+use sqlx::{PgPool, query_as, Type};
+use uuid::Uuid;
+
+// For sqlx query_as! to work with PostgreSQL enums, we need Type derivation
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Type)]
+#[sqlx(type_name = "task_status")] // PostgreSQL enum type name
+#[sqlx(rename_all = "lowercase")] // Convert variants to lowercase
+pub enum TaskStatus {
+ #[default]
+ Todo,
+ InProgress,
+ Blocked,
+ Review,
+ Done,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default, Type)]
+#[sqlx(type_name = "priority")]
+#[sqlx(rename_all = "lowercase")]
+pub enum Priority {
+ Low,
+ #[default]
+ Medium,
+ High,
+ Urgent,
+}
+
+// Corresponding PostgreSQL migration:
+/*
+-- migrations/YYYYMMDDHHMMSS_create_task_enums.sql
+
+-- Create custom enum types
+CREATE TYPE task_status AS ENUM ('todo', 'inprogress', 'blocked', 'review', 'done');
+CREATE TYPE priority AS ENUM ('low', 'medium', 'high', 'urgent');
+
+-- Create tasks table
+CREATE TABLE tasks (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ project_id UUID NOT NULL,
+ title TEXT NOT NULL,
+ description TEXT,
+ status task_status NOT NULL DEFAULT 'todo',
+ priority priority NOT NULL DEFAULT 'medium',
+ assignee_id UUID,
+ due_date DATE,
+ estimated_hours INTEGER CHECK (estimated_hours > 0),
+ version INTEGER NOT NULL DEFAULT 0,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+-- Create indexes
+CREATE INDEX idx_tasks_project_id ON tasks(project_id);
+CREATE INDEX idx_tasks_assignee_id ON tasks(assignee_id);
+CREATE INDEX idx_tasks_status ON tasks(status);
+CREATE INDEX idx_tasks_due_date ON tasks(due_date) WHERE due_date IS NOT NULL;
+*/
+
+pub struct TaskRepository {
+ pool: PgPool,
+}
+
+impl TaskRepository {
+ pub async fn find_by_id(&self, id: Uuid) -> Result<Option<Task>, sqlx::Error> {
+ query_as!(
+ Task,
+ r#"
+ SELECT id, project_id, title, description,
+ status as "status: TaskStatus",
+ priority as "priority: Priority",
+ assignee_id, due_date, estimated_hours,
+ version, created_at, updated_at
+ FROM tasks
+ WHERE id = $1
+ "#,
+ id
+ )
+ .fetch_optional(&self.pool)
+ .await
+ }
+
+ pub async fn update_with_version(
+ &self,
+ task: &Task,
+ old_version: i32,
+ ) -> Result<Task, TaskError> {
+ let updated = query_as!(
+ Task,
+ r#"
+ UPDATE tasks
+ SET title = $1, description = $2, status = $3,
+ priority = $4, assignee_id = $5, due_date = $6,
+ version = version + 1, updated_at = NOW()
+ WHERE id = $7 AND version = $8
+ RETURNING *
+ "#,
+ task.title,
+ task.description,
+ task.status as TaskStatus,
+ task.priority as Priority,
+ task.assignee_id,
+ task.due_date,
+ task.id,
+ old_version
+ )
+ .fetch_optional(&self.pool)
+ .await?
+ .ok_or(TaskError::VersionConflict)?;
+
+ Ok(updated)
+ }
+}
+```
+
+#### 03_CORE_LOGIC.md
+- Pure business logic patterns (no I/O, no side effects)
+- Core calculations (priorities, estimates, metrics)
+- Validation logic (state transitions, constraints)
+- Testing patterns for pure functions
+- Property test examples with proptest
+
+Example:
+```rust
+/// Pure functions for task business logic.
+/// No database access, no side effects.
+pub mod task_logic {
+ use super::*;
+
+ /// Validates if a status transition is allowed
+ pub fn can_transition(from: TaskStatus, to: TaskStatus) -> bool {
+ use TaskStatus::*;
+ match (from, to) {
+ (Todo, InProgress | Blocked) => true,
+ (InProgress, Blocked | Review | Done) => true,
+ (Blocked, Todo | InProgress) => true,
+ (Review, InProgress | Done) => true,
+ (Done, _) => false,
+ _ => false,
+ }
+ }
+
+ /// Calculates priority score for sorting
+ pub fn calculate_priority_score(task: &Task) -> i32 {
+ let base_score = priority_value(task.priority);
+ let urgency_bonus = days_until_due(task.due_date);
+ let blocker_penalty = if task.status == TaskStatus::Blocked { -10 } else { 0 };
+
+ base_score + urgency_bonus + blocker_penalty
+ }
+
+ fn priority_value(priority: Priority) -> i32 {
+ match priority {
+ Priority::Urgent => 100,
+ Priority::High => 75,
+ Priority::Medium => 50,
+ Priority::Low => 25,
+ }
+ }
+
+ fn days_until_due(due_date: Option<NaiveDate>) -> i32 {
+ let Some(due) = due_date else { return 0 };
+ let today = Utc::now().date_naive();
+ let diff = (due - today).num_days();
+
+ match diff {
+ d if d < 0 => 50, // Overdue
+ d if d <= 3 => 30, // Within 3 days
+ d if d <= 7 => 15, // Within a week
+ _ => 0,
+ }
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+
+ #[test]
+ fn test_valid_transitions() {
+ assert!(can_transition(TaskStatus::Todo, TaskStatus::InProgress));
+ assert!(!can_transition(TaskStatus::Done, TaskStatus::InProgress));
+ }
+
+ // Property-based test with proptest
+ use proptest::prelude::*;
+
+ proptest! {
+ #[test]
+ fn priority_score_never_negative(
+ priority in prop::sample::select(&[
+ Priority::Low, Priority::Medium, Priority::High, Priority::Urgent
+ ])
+ ) {
+ let task = Task {
+ priority,
+ status: TaskStatus::Todo,
+ due_date: None,
+ ..Task::default()
+ };
+ assert!(calculate_priority_score(&task) >= 0);
+ }
+ }
+ }
+}
+```
+
+#### 04_BOUNDARIES.md
+- Service orchestration layer
+- Transaction patterns (database transactions with sqlx)
+- Error handling strategies (anyhow for app, thiserror for libs)
+- Service composition patterns
+
+Example:
+```rust
+use anyhow::{Context, Result};
+use serde_json::json;
+use sqlx::PgPool;
+use uuid::Uuid;
+
+pub struct TaskService {
+ repo: TaskRepository,
+ activity_logger: ActivityLogger,
+ notifier: Notifier,
+}
+
+impl TaskService {
+ pub async fn transition_task(
+ &self,
+ task_id: Uuid,
+ new_status: TaskStatus,
+ notify: bool,
+ ) -> Result<Task> {
+ // Load task
+ let task = self.repo
+ .find_by_id(task_id)
+ .await
+ .context("Failed to load task")?
+ .ok_or_else(|| anyhow::anyhow!("Task not found: {}", task_id))?;
+
+ // Validate transition (pure function)
+ if !task_logic::can_transition(task.status, new_status) {
+ return Err(anyhow::anyhow!(
+ "Invalid transition from {:?} to {:?}",
+ task.status,
+ new_status
+ ));
+ }
+
+ // Begin transaction
+ let mut tx = self.repo.pool.begin().await?;
+
+ // Update task
+ let mut updated_task = task.clone();
+ updated_task.status = new_status;
+ let updated = self.repo
+ .update_with_version(&updated_task, task.version)
+ .await
+ .context("Failed to update task")?;
+
+ // Log activity
+ self.activity_logger
+ .log(&mut tx, task_id, "status_changed", json!({
+ "from": task.status,
+ "to": new_status,
+ }))
+ .await?;
+
+ // Commit transaction
+ tx.commit().await?;
+
+ // Async notification (don't block on this)
+ if notify {
+ if let Some(assignee_id) = updated.assignee_id {
+ let notifier = self.notifier.clone();
+ let task_clone = updated.clone();
+ tokio::spawn(async move {
+ let _ = notifier.send_notification(assignee_id, task_clone).await;
+ });
+ }
+ }
+
+ Ok(updated)
+ }
+}
+```
+
+#### 05_CONCURRENCY.md
+- Async/await patterns with tokio
+- Shared state management (Arc, RwLock, Mutex)
+- Channel patterns (mpsc, oneshot, broadcast)
+- Concurrent task spawning
+- Cancellation and timeouts
+
+Example:
+```rust
+use tokio::sync::{RwLock, mpsc};
+use sqlx::PgPool;
+use std::sync::Arc;
+use std::sync::atomic::AtomicU64;
+
+pub struct AppState {
+ /// Read-heavy: Use RwLock for config
+ pub config: Arc<RwLock<Config>>,
+
+ /// Lock-free counters: Use atomic types
+ pub request_count: Arc<AtomicU64>,
+
+ /// Connection pool: Already thread-safe
+ pub db: PgPool,
+}
+
+// Spawning concurrent tasks
+pub async fn process_batch(tasks: Vec<Task>) -> Vec<Result<()>> {
+ let handles: Vec<_> = tasks
+ .into_iter()
+ .map(|task| {
+ tokio::spawn(async move {
+ process_single_task(task).await
+ })
+ })
+ .collect();
+
+ // Wait for all tasks to complete
+ let mut results = Vec::new();
+ for handle in handles {
+ results.push(handle.await.unwrap());
+ }
+ results
+}
+
+// Using channels for communication
+pub async fn worker_pool(mut rx: mpsc::Receiver<Task>) {
+ while let Some(task) = rx.recv().await {
+ if let Err(e) = process_task(&task).await {
+ log::error!("Task processing failed: {}", e);
+ }
+ }
+}
+```
+
+#### 06_ASYNC_PATTERNS.md
+- Background task patterns with tokio
+- Retry strategies with exponential backoff
+- Circuit breaker implementation
+- Health checks and graceful shutdown
+- Async streams and futures
+
+Example:
+```rust
+use std::future::Future;
+use anyhow::Result;
+use tokio::time::{sleep, Duration};
+
+/// Retry with exponential backoff
+pub async fn retry_with_backoff<F, T, E>(
+ operation: F,
+ max_attempts: u32,
+) -> Result<T, E>
+where
+ F: Fn() -> futures::future::BoxFuture<'static, Result<T, E>>,
+{
+ let mut attempt = 0;
+ loop {
+ match operation().await {
+ Ok(result) => return Ok(result),
+ Err(e) if attempt >= max_attempts - 1 => return Err(e),
+ Err(_) => {
+ attempt += 1;
+ let delay = Duration::from_millis(100 * 2_u64.pow(attempt));
+ sleep(delay).await;
+ }
+ }
+ }
+}
+
+/// Background task that runs periodically
+pub async fn periodic_task<F, Fut>(
+ interval: Duration,
+ mut task: F,
+) -> Result<()>
+where
+ F: FnMut() -> Fut,
+ Fut: Future<Output = Result<()>> + Send,
+{
+ let mut interval_timer = tokio::time::interval(interval);
+ loop {
+ interval_timer.tick().await;
+ if let Err(e) = task().await {
+ log::error!("Periodic task failed: {}", e);
+ }
+ }
+}
+```
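+
+**Circuit Breaker Sketch:**
+
+The circuit breaker bullet above has no example in this document, so here is a minimal sketch. The struct name, thresholds, and cool-down handling are illustrative assumptions rather than a prescribed implementation; production code would usually also track half-open trial calls explicitly.
+
+```rust
+use std::time::{Duration, Instant};
+
+/// Minimal circuit breaker: opens after `max_failures` consecutive errors,
+/// then allows a trial call once `cool_down` has elapsed.
+pub struct CircuitBreaker {
+ max_failures: u32,
+ cool_down: Duration,
+ consecutive_failures: u32,
+ opened_at: Option<Instant>,
+}
+
+impl CircuitBreaker {
+ pub fn new(max_failures: u32, cool_down: Duration) -> Self {
+ Self { max_failures, cool_down, consecutive_failures: 0, opened_at: None }
+ }
+
+ /// Returns true if a call may be attempted right now
+ pub fn call_allowed(&self) -> bool {
+ match self.opened_at {
+ None => true, // Closed: traffic flows normally
+ Some(opened) => opened.elapsed() >= self.cool_down, // Half-open after cool-down
+ }
+ }
+
+ pub fn record_success(&mut self) {
+ self.consecutive_failures = 0;
+ self.opened_at = None;
+ }
+
+ pub fn record_failure(&mut self) {
+ self.consecutive_failures += 1;
+ if self.consecutive_failures >= self.max_failures {
+ self.opened_at = Some(Instant::now());
+ }
+ }
+}
+```
+
+Callers check `call_allowed()` before each outbound request and report the outcome with `record_success()`/`record_failure()`; wrapping the breaker in a `Mutex` or a dedicated task is left to the integration layer.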
+
+**Health Check Example:**
+```rust
+use axum::{
+ extract::State,
+ http::StatusCode,
+ response::{IntoResponse, Response},
+ routing::get,
+ Json, Router,
+};
+use serde::{Deserialize, Serialize};
+use sqlx::PgPool;
+use std::sync::Arc;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct HealthStatus {
+ pub status: String,
+ pub version: String,
+ pub checks: HealthChecks,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct HealthChecks {
+ pub database: CheckResult,
+ pub redis: CheckResult,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CheckResult {
+ pub status: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub message: Option<String>,
+ pub response_time_ms: u64,
+}
+
+#[derive(Clone)]
+pub struct AppState {
+ pub db_pool: PgPool,
+ pub version: String,
+}
+
+/// Liveness probe - returns 200 if service is running
+/// Use for Kubernetes livenessProbe
+pub async fn liveness() -> StatusCode {
+ StatusCode::OK
+}
+
+/// Readiness probe - returns 200 if service can handle traffic
+/// Checks database connection and other critical dependencies
+/// Use for Kubernetes readinessProbe
+pub async fn readiness(
+ State(state): State<Arc<AppState>>,
+) -> Response {
+ let start = std::time::Instant::now();
+
+ // Check database connection
+ let db_check = match sqlx::query("SELECT 1")
+ .execute(&state.db_pool)
+ .await
+ {
+ Ok(_) => CheckResult {
+ status: "healthy".to_string(),
+ message: None,
+ response_time_ms: start.elapsed().as_millis() as u64,
+ },
+ Err(e) => CheckResult {
+ status: "unhealthy".to_string(),
+ message: Some(e.to_string()),
+ response_time_ms: start.elapsed().as_millis() as u64,
+ },
+ };
+
+ // Check Redis (example)
+ let redis_check = CheckResult {
+ status: "healthy".to_string(),
+ message: None,
+ response_time_ms: 5,
+ };
+
+ let overall_healthy = db_check.status == "healthy"
+ && redis_check.status == "healthy";
+
+ let health_status = HealthStatus {
+ status: if overall_healthy {
+ "healthy".to_string()
+ } else {
+ "unhealthy".to_string()
+ },
+ version: state.version.clone(),
+ checks: HealthChecks {
+ database: db_check,
+ redis: redis_check,
+ },
+ };
+
+ let status_code = if overall_healthy {
+ StatusCode::OK
+ } else {
+ StatusCode::SERVICE_UNAVAILABLE
+ };
+
+ (status_code, Json(health_status)).into_response()
+}
+
+pub fn health_routes(state: Arc<AppState>) -> Router {
+ Router::new()
+ .route("/health/liveness", get(liveness))
+ .route("/health/readiness", get(readiness))
+ .with_state(state)
+}
+```
+
+**Graceful Shutdown Example:**
+```rust
+use axum::Router;
+use std::sync::Arc;
+use tokio::{
+ signal,
+ sync::watch,
+ time::{sleep, Duration},
+};
+use tracing::{info, warn};
+
+pub struct ShutdownCoordinator {
+ /// Notify all workers to start shutdown
+ shutdown_tx: watch::Sender<bool>,
+}
+
+impl ShutdownCoordinator {
+ pub fn new() -> (Self, watch::Receiver<bool>) {
+ let (shutdown_tx, shutdown_rx) = watch::channel(false);
+ (Self { shutdown_tx }, shutdown_rx)
+ }
+
+ pub fn trigger_shutdown(&self) {
+ let _ = self.shutdown_tx.send(true);
+ }
+}
+
+/// Listen for shutdown signals (SIGTERM, SIGINT)
+async fn shutdown_signal() {
+ let ctrl_c = async {
+ signal::ctrl_c()
+ .await
+ .expect("failed to install Ctrl+C handler");
+ };
+
+ #[cfg(unix)]
+ let terminate = async {
+ signal::unix::signal(signal::unix::SignalKind::terminate())
+ .expect("failed to install SIGTERM handler")
+ .recv()
+ .await;
+ };
+
+ #[cfg(not(unix))]
+ let terminate = std::future::pending::<()>();
+
+ tokio::select! {
+ _ = ctrl_c => {
+ info!("Received SIGINT (Ctrl+C), initiating graceful shutdown");
+ }
+ _ = terminate => {
+ info!("Received SIGTERM, initiating graceful shutdown");
+ }
+ }
+}
+
+/// Gracefully shutdown the application
+pub async fn run_with_graceful_shutdown(
+ app: Router,
+ port: u16,
+ state: Arc<AppState>,
+) -> anyhow::Result<()> {
+ let (coordinator, shutdown_rx) = ShutdownCoordinator::new();
+
+ // Spawn background tasks
+ let background_task = tokio::spawn({
+ let mut shutdown_rx = shutdown_rx.clone();
+ async move {
+ info!("Background task started");
+ loop {
+ tokio::select! {
+ _ = sleep(Duration::from_secs(60)) => {
+ info!("Background task running...");
+ }
+ _ = shutdown_rx.changed() => {
+ info!("Background task received shutdown signal");
+ break;
+ }
+ }
+ }
+ info!("Background task cleanup complete");
+ }
+ });
+
+ // Start HTTP server
+ let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port))
+ .await?;
+
+ info!("Server listening on {}", listener.local_addr()?);
+
+ // Serve with graceful shutdown
+ axum::serve(listener, app)
+ .with_graceful_shutdown(async move {
+ shutdown_signal().await;
+ coordinator.trigger_shutdown();
+ })
+ .await?;
+
+ info!("HTTP server stopped, waiting for background tasks...");
+
+ // Wait for background tasks with timeout
+ tokio::select! {
+ _ = background_task => {
+ info!("All background tasks completed");
+ }
+ _ = sleep(Duration::from_secs(30)) => {
+ warn!("Shutdown timeout exceeded, forcing exit");
+ }
+ }
+
+ // Close database connections
+ state.db_pool.close().await;
+ info!("Database connections closed");
+
+ info!("Graceful shutdown complete");
+ Ok(())
+}
+
+/// Example usage in main
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+ // Initialize tracing
+ tracing_subscriber::fmt::init();
+
+ // Setup database pool
+ let db_pool = sqlx::PgPool::connect("postgresql://localhost/mydb").await?;
+
+ let state = Arc::new(AppState {
+ db_pool,
+ version: env!("CARGO_PKG_VERSION").to_string(),
+ });
+
+ // Build application with health check routes
+ let app = Router::new()
+ .nest("/api", health_routes(state.clone()))
+ // ... other routes
+ .with_state(state.clone());
+
+ // Run with graceful shutdown
+ run_with_graceful_shutdown(app, 3000, state).await?;
+
+ Ok(())
+}
+```
+
+**Key Points:**
+- **Liveness Probe**: Simple endpoint that returns 200 if process is alive
+- **Readiness Probe**: Checks dependencies (database, cache) before accepting traffic
+- **Signal Handling**: Catches SIGTERM/SIGINT for graceful shutdown
+- **Connection Draining**: HTTP server stops accepting new connections but finishes existing requests
+- **Background Task Coordination**: Uses `watch` channel to notify all tasks
+- **Timeout Protection**: Forceful shutdown after 30s if tasks don't complete
+- **Resource Cleanup**: Explicitly close database pools and other resources
+
+#### 07_INTEGRATION_PATTERNS.md
+- HTTP client patterns with reqwest
+- Circuit breaker implementation
+- Retry logic with exponential backoff
+- Webhook handling (incoming and outgoing)
+- Event streaming patterns
+- External service integration patterns
+
+Example:
+```rust
+use reqwest::Client;
+use serde::de::DeserializeOwned;
+use std::time::Duration;
+use anyhow::{Context, Result};
+use tokio::time::sleep;
+
+pub struct HttpClient {
+ client: Client,
+ timeout: Duration,
+}
+
+impl HttpClient {
+ pub fn new(timeout: Duration) -> Result<Self> {
+ let client = Client::builder()
+ .timeout(timeout)
+ .build()
+ .context("Failed to build HTTP client")?;
+
+ Ok(Self { client, timeout })
+ }
+
+ pub async fn request_with_retry<T: DeserializeOwned>(
+ &self,
+ url: &str,
+ max_retries: u32,
+ ) -> Result<T> {
+ let mut attempt = 0;
+ loop {
+ match self.client
+ .get(url)
+ .timeout(self.timeout)
+ .send()
+ .await
+ {
+ Ok(resp) if resp.status().is_success() => {
+ return resp.json().await.context("Failed to parse response");
+ }
+ Ok(resp) if resp.status().is_server_error() && attempt < max_retries => {
+ attempt += 1;
+ let backoff = Duration::from_millis(100 * 2_u64.pow(attempt));
+ sleep(backoff).await;
+ continue;
+ }
+ Ok(resp) => {
+ return Err(anyhow::anyhow!(
+ "HTTP error: status {}",
+ resp.status()
+ ));
+ }
+ Err(_) if attempt < max_retries => {
+ attempt += 1;
+ let backoff = Duration::from_millis(100 * 2_u64.pow(attempt));
+ sleep(backoff).await;
+ continue;
+ }
+ Err(e) => return Err(e.into()),
+ }
+ }
+ }
+}
+```
+
+### Phase 7: Architecture Decision Records
+
+Create ADRs for major decisions. Template:
+
+```markdown
+# ADR-XXX: [Decision Title]
+
+**Status:** Accepted
+**Date:** YYYY-MM-DD
+**Deciders:** [Role]
+**Context:** [Brief context]
+
+## Context
+[Detailed explanation of the situation requiring a decision]
+
+## Decision
+[Clear statement of what was decided]
+
+## Rationale
+[Why this decision was made - include code examples, metrics, trade-offs]
+
+## Alternatives Considered
+
+### Alternative 1: [Name]
+**Implementation:**
+```rust
+// Example code
+```
+
+**Pros:**
+- Advantage 1
+- Advantage 2
+
+**Cons:**
+- Disadvantage 1
+- Disadvantage 2
+
+**Why Rejected:** [Clear explanation]
+
+### Alternative 2: [Name]
+[Same structure]
+
+## Consequences
+
+### Positive
+1. Benefit with explanation
+2. Another benefit
+
+### Negative
+1. Trade-off with mitigation strategy
+2.
Another trade-off + +## Implementation Guidelines + +### DO: [Pattern] +```rust +// Good example +``` + +### DON'T: [Anti-pattern] +```rust +// Bad example +``` + +## Validation +[How we'll verify this was the right choice] +- Metric 1: Target value +- Metric 2: Target value + +## References +- [Link 1] +- [Link 2] + +## Related ADRs +- ADR-XXX: Related Decision + +## Review Schedule +**Last Reviewed:** YYYY-MM-DD +**Next Review:** YYYY-MM-DD +``` + +**Minimum ADRs to create:** + +1. **ADR-001: Framework Choice** (axum vs actix-web vs warp vs rocket) +2. **ADR-002: Error Strategy** (anyhow vs thiserror usage patterns) +3. **ADR-003: Ownership Patterns** (When to use owned data vs references vs cloning) +4. **Domain-specific ADRs** based on requirements + +### Phase 8: Handoff Documentation + +Create HANDOFF.md with: + +1. **Overview** - Project status, location, ready state +2. **Project Structure** - Annotated directory tree +3. **Documentation Index** - What each file contains +4. **Workflow** - Director → Implementor → Review → Iterate cycle +5. **Implementation Phases** - Break project into 4-week phases +6. **Key Architectural Principles** - DO/DON'T examples +7. **Testing Strategy** - Unit/Integration/Property test patterns +8. **Commit Message Format** - Conventional commits structure +9. **Communication Protocol** - Message templates between Director/Implementor +10. **Troubleshooting** - Common issues and solutions +11. **Success Metrics** - Specific performance targets +12. **Next Steps** - Immediate actions for Director AI + +Example workflow section: +```markdown +## Workflow + +### Phase 1: Director Creates Design & Plan +1. Read feature request from user +2. Review architecture documents +3. Create design document in `docs/design/` +4. Create implementation plan in `docs/plans/` (Superpowers format) +5. Commit design + plan +6. Hand off to Implementor with plan path + +### Phase 2: Implementor Executes Plan +1. Read implementation plan +2. For each task: + - Write test first (TDD) + - Implement minimum code + - Refactor + - Run tests (cargo test) + - Check clippy (cargo clippy) + - Format code (cargo fmt) + - Commit +3. Report completion to Director + +### Phase 3: Director Reviews +1. Review committed code +2. Check against design +3. Verify guardrails followed +4. Either approve or request changes + +### Phase 4: Iterate Until Approved +[Loop until feature is complete] +``` + +### Superpowers Implementation Plan Format + +Superpowers plans are structured Markdown documents with YAML frontmatter that break down features into atomic, testable tasks of 2-5 minutes each. + +#### File Structure + +```markdown +--- +plan_id: "PLAN-001-user-authentication" +feature: "User Authentication System" +created: "2024-01-15" +author: "Director AI" +status: "approved" +estimated_hours: 8 +priority: "high" +dependencies: [] +--- + +# Implementation Plan: User Authentication System + +## Overview +Brief description of what this plan achieves and why it's necessary. + +## Context +- **Related ADRs**: ADR-001 (JWT Strategy), ADR-002 (Error Handling) +- **Related Docs**: `docs/architecture/04_BOUNDARIES.md` +- **Dependencies**: PostgreSQL 16+, argon2 crate for password hashing + +## Tasks + +### Task 1: Database Schema (2-5 min) +**Type**: database +**Estimated**: 3 minutes +**Prerequisites**: None + +**Objective**: Create users table with security best practices + +**Steps**: +1. Create migration file: `sqlx migrate add create_users_table` +2. 
Define schema with email, password_hash, created_at, updated_at
+3. Add unique constraint on email for login uniqueness
+4. Add index on email for login performance
+
+**Acceptance Criteria**:
+- [ ] Migration file created in migrations/ directory
+- [ ] `sqlx migrate run` succeeds without errors
+- [ ] Can insert test user with email and password_hash
+
+**Code Location**: `migrations/YYYYMMDDHHMMSS_create_users_table.sql`
+
+---
+
+### Task 2: User Domain Model (2-5 min)
+**Type**: implementation
+**Estimated**: 4 minutes
+**Prerequisites**: Task 1
+
+**Objective**: Define User entity with validation logic
+
+**Steps**:
+1. Create `myapp_core/src/domain/user.rs`
+2. Define User struct with proper types (email: String, password_hash: String, etc.)
+3. Implement email validation (regex for email format)
+4. Add methods: `new()`, `verify_password()`
+
+**Acceptance Criteria**:
+- [ ] User struct defined with all required fields
+- [ ] Email validation works (test with invalid emails)
+- [ ] Password verification works (test with valid/invalid passwords)
+- [ ] Unit tests pass: `cargo test user::tests`
+
+**Code Location**: `myapp_core/src/domain/user.rs`
+
+---
+
+### Task 3: Password Hashing (2-5 min)
+**Type**: implementation
+**Estimated**: 5 minutes
+**Prerequisites**: Task 2
+
+**Objective**: Implement secure password hashing with argon2
+
+**Steps**:
+1. Add argon2 to Cargo.toml: `argon2 = "0.5.3"`
+2. Create `myapp_core/src/domain/password.rs`
+3. Implement `hash_password(password: &str) -> Result<String>`
+4. Implement `verify_password(password: &str, hash: &str) -> Result<bool>`
+5. Write unit tests for both functions
+
+**Acceptance Criteria**:
+- [ ] Passwords hashed with argon2 (verify config: memory=19MB, iterations=2)
+- [ ] Same password produces different hashes (salt working correctly)
+- [ ] Verification succeeds for valid passwords
+- [ ] Verification fails for invalid passwords
+- [ ] All tests pass: `cargo test password`
+
+**Code Location**: `myapp_core/src/domain/password.rs`
+
+---
+
+## Testing Strategy
+- **Unit Tests**: Each task includes its own isolated tests
+- **Integration Tests**: Final end-to-end test in `myapp_api/tests/auth_flow.rs`
+- **Coverage Target**: ≥80% for authentication code (critical security component)
+
+## Rollback Plan
+If any task fails or needs to be reverted:
+1. Revert migrations: `sqlx migrate revert`
+2. Delete created files and restore from git
+3. Restore to last commit: `git reset --hard HEAD~1`
+4. Re-plan if fundamental issues discovered
+
+## Success Criteria
+- [ ] All tasks completed and individually tested
+- [ ] `cargo test` passes (all unit and integration tests)
+- [ ] `cargo clippy` clean (no warnings)
+- [ ] Integration test demonstrates full auth flow works end-to-end
+- [ ] Documentation updated in HANDOFF.md
+
+## Notes
+- Use `thiserror` for domain errors (library code following DDD)
+- Use `anyhow` for application errors (API layer convenience)
+- Never log passwords (even hashed ones in production logs)
+- Follow OWASP authentication guidelines
+```
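+
+For orientation only, a minimal sketch of what Task 3's `password.rs` might end up containing is shown below. It assumes the `argon2` crate's default Argon2id parameters and its `password_hash` error type; the module layout and error handling are illustrative, and the Implementor still works test-first from the plan.
+
+```rust
+use argon2::{
+ password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
+ Argon2,
+};
+
+pub fn hash_password(password: &str) -> Result<String, argon2::password_hash::Error> {
+ // A fresh random salt per password is what makes repeated hashes differ
+ let salt = SaltString::generate(&mut OsRng);
+ Ok(Argon2::default()
+ .hash_password(password.as_bytes(), &salt)?
+ .to_string())
+}
+
+pub fn verify_password(password: &str, hash: &str) -> Result<bool, argon2::password_hash::Error> {
+ let parsed = PasswordHash::new(hash)?;
+ Ok(Argon2::default()
+ .verify_password(password.as_bytes(), &parsed)
+ .is_ok())
+}
+```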
+
+#### Superpowers Plan Principles
+
+1. **Atomic Tasks**: Each task is independently completable in 2-5 minutes
+2. **Clear Prerequisites**: Explicit task dependencies prevent blocking
+3. **Testable Acceptance**: Every task has verifiable completion criteria
+4. **TDD Workflow**: Write test first, minimum implementation, then refactor
+5. **Rollback Safety**: Each task can be independently reverted if needed
+
+#### Task Types
+- `database`: Schema definitions, migrations, query optimization
+- `implementation`: Core logic, domain models, business rules
+- `api`: HTTP endpoints, handlers, middleware
+- `testing`: Test files, integration tests, property tests
+- `documentation`: Docs, inline comments, examples, README updates
+
+#### Task Metadata
+- **Type**: Categorizes the work for filtering and reporting
+- **Estimated**: Time estimate in minutes (2-5 minute range)
+- **Prerequisites**: Task IDs that must complete first
+- **Objective**: One-sentence goal of this task
+- **Steps**: Ordered list of concrete actions
+- **Acceptance Criteria**: Checkboxes for verification
+- **Code Location**: Where the changes will be made
+
+### Phase 9: Validate and Summarize
+
+Before finishing, verify:
+
+1. ✅ All directories created
+2. ✅ 20+ documentation files present
+3. ✅ All cross-references between docs work
+4. ✅ All code examples are valid Rust syntax
+5. ✅ Every architectural principle has concrete example
+6. ✅ ADRs include alternatives with rationale
+7. ✅ Guardrails have DO/DON'T code examples
+8. ✅ Domain-specific adaptations included
+
+Present summary:
+```markdown
+## Project Architecture Complete! 🚀
+
+**Location:** /path/to/project
+
+**Created:**
+- ✅ Complete directory structure
+- ✅ Foundation docs (README, CLAUDE.md)
+- ✅ 5 guardrail documents
+- ✅ 8 architecture documents (~6,000 lines)
+- ✅ X Architecture Decision Records
+- ✅ Handoff documentation
+
+**Ready For:**
+- Director AI to create first design + plan
+- Implementor AI to execute implementation
+- Iterative feature development
+
+**Next Step:**
+Director AI should begin by creating the first feature design.
+```
+
+## Domain-Specific Adaptations
+
+### For Web Services (axum/actix-web)
+
+Add emphasis on:
+
+1. **NEVER_DO.md** additions:
+ - Never block async runtime with std::thread::sleep (use tokio::time::sleep)
+ - Never use Arc<Mutex<T>> without justification (prefer message passing)
+ - Never unwrap in request handlers (return proper HTTP errors)
+ - Never store sessions in memory without justification (use database)
+
+2. **Domain Model** inclusions:
+ - HTTP request/response types
+ - Middleware patterns
+ - Authentication/authorization models
+ - State management with Arc
+
+3. **ADRs** to add:
+ - Web framework choice (axum vs actix-web)
+ - State sharing strategy
+ - Error response format (JSON API spec)
+ - Authentication method (JWT, sessions, OAuth)
+
+4. **Use Cases** examples:
+ - Handle HTTP request with validation
+ - Middleware for authentication
+ - Database query with connection pooling
+ - Background job spawning
+
+### For CLI Tools (clap)
+
+Add emphasis on:
+
+1. **Domain Model** additions:
+ - Command structure with clap
+ - Configuration file handling
+ - Progress indicators
+ - Error reporting to terminal
+
+2. **ADRs** to add:
+ - CLI argument parsing library choice
+ - Configuration file format (TOML, YAML, JSON)
+ - Error reporting strategy
+ - Output formatting approach
+
+3. **Use Cases** examples:
+ - Parse command-line arguments
+ - Read configuration file
+ - Execute subcommands
+ - Report progress and errors
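+
+A minimal `clap` derive sketch for the command structure above (the tool name, subcommands, and flags are placeholders, not part of any prescribed design):
+
+```rust
+use clap::{Parser, Subcommand};
+use std::path::PathBuf;
+
+#[derive(Parser)]
+#[command(name = "mytool", version, about = "Example CLI skeleton")]
+struct Cli {
+ /// Optional path to a configuration file
+ #[arg(long)]
+ config: Option<PathBuf>,
+
+ #[command(subcommand)]
+ command: Command,
+}
+
+#[derive(Subcommand)]
+enum Command {
+ /// Create a new item
+ Create { name: String },
+ /// Delete an existing item
+ Delete { name: String },
+}
+
+fn main() {
+ let cli = Cli::parse();
+ match cli.command {
+ Command::Create { name } => println!("creating {name}"),
+ Command::Delete { name } => println!("deleting {name}"),
+ }
+}
+```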
+
+### For Backend Services
+
+Add emphasis on:
+
+1. **Domain Model** additions:
+ - Background job patterns with tokio
+ - Event sourcing patterns
+ - CQRS implementation
+ - Message queue integration
+
+2. **Workers** to document:
+ - Background job processing
+ - Periodic tasks
+ - Event handlers
+ - Cleanup tasks
+
+3. **Integration Patterns**:
+ - Message queue clients (RabbitMQ, Kafka)
+ - Cache integration (Redis)
+ - External API clients
+
+## Critical Patterns and Best Practices
+
+### Ownership Patterns
+
+```rust
+// ✅ ALWAYS prefer borrowing over cloning
+fn count_words(text: &str) -> usize {
+ text.split_whitespace().count()
+}
+
+// ✅ Take ownership when you need to transform
+fn to_uppercase(mut s: String) -> String {
+ s.make_ascii_uppercase();
+ s
+}
+
+// ✅ Clone only when necessary (document why)
+fn store_in_cache(key: String, value: Data) {
+ // Need to clone because cache takes ownership
+ CACHE.insert(key.clone(), value); // Clone needed for concurrent access
+ log::info!("Stored {}", key); // Original key still available
+}
+```
+
+### Error Handling Patterns
+
+```rust
+// ✅ ALWAYS use thiserror for library errors
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum TaskError {
+ #[error("Database error: {0}")]
+ Database(#[from] sqlx::Error),
+
+ #[error("Task not found: {0}")]
+ NotFound(Uuid),
+
+ #[error("Invalid status transition from {from:?} to {to:?}")]
+ InvalidTransition { from: TaskStatus, to: TaskStatus },
+
+ #[error("Version conflict: expected {expected}, got {actual}")]
+ VersionConflict { expected: i32, actual: i32 },
+}
+
+// ✅ ALWAYS use anyhow for application errors
+use anyhow::{Context, Result};
+
+async fn process_request(id: Uuid) -> Result<Response> {
+ let task = repo.find_by_id(id)
+ .await
+ .context("Failed to query database")?
+ .ok_or_else(|| anyhow::anyhow!("Task {} not found", id))?;
+
+ Ok(Response::success(task))
+}
+```
+
+### Async Patterns
+
+```rust
+// ❌ NEVER block async runtime
+async fn bad_sleep() {
+ std::thread::sleep(Duration::from_secs(10)); // BLOCKS!
+}
+
+// ✅ ALWAYS use tokio::time::sleep
+async fn good_sleep() {
+ tokio::time::sleep(Duration::from_secs(10)).await;
+}
+
+// ✅ Spawn blocking for CPU-intensive work
+use tokio::task;
+use std::io;
+use anyhow::{Context, Result};
+
+#[derive(Debug)]
+struct Output {
+ result: String,
+}
+
+/// CPU-intensive synchronous computation
+fn expensive_computation(data: &[u8]) -> io::Result<Output> {
+ // Example: expensive string processing
+ let result = std::str::from_utf8(data)
+ .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?
+ .to_uppercase();
+
+ // Simulate heavy CPU work
+ // In real code: compression, encryption, image processing, etc.
+ Ok(Output { result })
+}
+
+async fn process_heavy_computation(data: Vec<u8>) -> Result<Output> {
+ // Move CPU-intensive work to dedicated blocking thread pool
+ let output = task::spawn_blocking(move || {
+ expensive_computation(&data)
+ })
+ .await // Wait for thread pool task (returns JoinError on panic)
+ .context("Background task panicked")? // Handle panic
+ .context("Computation failed")?; // Handle business error
+
+ Ok(output)
+}
+```
+
+### State Sharing Patterns
+
+```rust
+// ❌ DON'T: Overuse Arc<Mutex<T>>
+struct App {
+ counter: Arc<Mutex<i32>>, // Do you really need Arc<Mutex<T>>?
+}
+
+// ✅ DO: Use simpler alternatives first
+use std::sync::atomic::{AtomicI32, Ordering};
+
+struct App {
+ counter: AtomicI32, // Lock-free, faster
+}
+
+// ✅ DO: Only when truly needed
+struct App {
+ cache: Arc<RwLock<HashMap<String, String>>>, // Justified: shared mutable state
+}
+```
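+
+Several checklists above say to prefer message passing over `Arc<Mutex<T>>`; a minimal sketch of that alternative (the counter message type and channel capacity are illustrative) looks like this:
+
+```rust
+// ✅ DO: Prefer message passing for more complex shared state
+// A single task owns the state; everyone else sends it messages.
+use tokio::sync::{mpsc, oneshot};
+
+enum CounterMsg {
+ Increment,
+ Get { reply: oneshot::Sender<i64> },
+}
+
+fn spawn_counter() -> mpsc::Sender<CounterMsg> {
+ let (tx, mut rx) = mpsc::channel(32);
+ tokio::spawn(async move {
+ let mut count: i64 = 0; // owned by this task only, no lock needed
+ while let Some(msg) = rx.recv().await {
+ match msg {
+ CounterMsg::Increment => count += 1,
+ CounterMsg::Get { reply } => {
+ let _ = reply.send(count);
+ }
+ }
+ }
+ });
+ tx
+}
+```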
+
+## Common Mistakes to Avoid
+
+1. **Too Generic** - Always adapt to specific domain needs
+2. **Missing Examples** - Every principle needs concrete code
+3. **Unclear Boundaries** - Director vs Implementor roles must be explicit
+4. **No Trade-offs** - Always explain downsides of decisions in ADRs
+5. **Incomplete ADRs** - Must include alternatives considered and why rejected
+6. **Vague Metrics** - Use specific numbers (<10ms p50, >10K RPS, >80% coverage)
+7. **Unwrap Everywhere** - Return Result and use ? operator
+8. **Clone Without Justification** - Understand ownership patterns first
+
+## Quality Gates
+
+Before considering work complete:
+
+- [ ] All code examples use valid Rust syntax (verify with `cargo check`)
+- [ ] Every "NEVER DO" has a corresponding "ALWAYS DO"
+- [ ] Every ADR explains alternatives and why they were rejected
+- [ ] Domain model includes complete type definitions
+- [ ] Performance targets are specific and measurable
+- [ ] Guardrails have clear, executable examples
+- [ ] Communication protocol includes message templates
+- [ ] Testing strategy covers unit/integration/property tests
+- [ ] Integration patterns include retry/circuit breaker
+- [ ] All unsafe blocks have SAFETY comments
+
+## Success Criteria
+
+You've succeeded when:
+
+1. ✅ Director AI can create feature designs without asking architectural questions
+2. ✅ Implementor AI can write code without asking design questions
+3. ✅ All major decisions are documented with clear rationale
+4. ✅ Code examples are copy-paste ready and compile
+5. ✅ Domain-specific requirements are thoroughly addressed
+6. ✅ Performance targets are realistic and measurable
+7. ✅ The system can be built by following the documentation alone
+
+## Notes
+
+- **Empty directories** (docs/design/, docs/plans/, docs/api/) are intentional - Director fills these during feature work
+- **Superpowers format** for implementation plans: Markdown with YAML frontmatter, 2-5 minute tasks
+- **All code examples** must be valid Rust that could actually compile
+- **Consult experts** via Task agents - don't guess at best practices
+- **Cargo workspace** structure recommended for multi-crate projects (see decision matrix below)
+- **Zero-cost abstractions** - verify with benchmarks that high-level code is fast
+
+## Workspace Decision Matrix
+
+**Use this matrix to decide between single crate, binary+library, or multi-crate workspace.**
+
+### Decision Tree
+
+```
+Project Size & Complexity
+├─ Small (< 5K lines, 1-2 developers, simple domain)
+│ └─ Single Crate (src/main.rs or src/lib.rs)
+│
+├─ Medium (5K-20K lines, 2-5 developers, moderate domain)
+│ ├─ Library Reusable?
+│ │ ├─ Yes → Binary + Library (src/lib.rs + src/main.rs)
+│ │ └─ No → Single Crate with modules
+│ │
+│ └─ Multiple Services?
+│ └─ Yes → Multi-Crate Workspace +│ +└─ Large (> 20K lines, 5+ developers, complex domain) + └─ Multi-Crate Workspace (always) +``` + +### Structure Comparison + +| Criterion | Single Crate | Binary + Library | Multi-Crate Workspace | +|-----------|--------------|------------------|------------------------| +| **Lines of Code** | < 5K | 5K - 20K | > 20K or modular by design | +| **Team Size** | 1-2 developers | 2-5 developers | 5+ developers | +| **Build Time** | Fast (<30s) | Medium (30s-2min) | Slow (2min+) but parallelizable | +| **Code Reuse** | Internal only | Library can be published | Multiple reusable libraries | +| **Testing Strategy** | Unit + integration in one place | Separate lib tests from binary | Per-crate test isolation | +| **Compilation** | All-or-nothing | Incremental (lib + bin separate) | Incremental per crate | +| **Dependency Management** | Simple | Moderate | Complex (shared workspace deps) | +| **CI/CD Complexity** | Simple (1 target) | Moderate (2 targets) | Complex (selective builds) | +| **Refactoring Ease** | Easy | Moderate | Hard (API boundaries) | +| **Domain Boundaries** | Implicit (modules) | Moderate (lib/bin split) | Explicit (crate boundaries) | + +### When to Choose Each Structure + +#### ✅ Choose Single Crate When: +- **Prototyping** or MVP development +- **CLI tool** with straightforward logic +- **Script-like application** with limited scope +- **Learning project** or tutorial code +- Code size < 5K lines +- No plans to publish library +- Fast iteration is priority + +**Example:** +``` +my-cli-tool/ +├─ Cargo.toml +└─ src/ + ├─ main.rs # Entry point + ├─ config.rs # Configuration + ├─ commands/ # Command modules + │ ├─ mod.rs + │ ├─ create.rs + │ └─ delete.rs + └─ utils.rs # Utilities +``` + +#### ✅ Choose Binary + Library When: +- **Web service** where domain logic could be reused +- **Application** with testable business logic separate from I/O +- Want to **publish library** while providing reference binary +- Code size 5K-20K lines +- Clear separation between "what" (lib) and "how" (bin) + +**Example:** +``` +my-web-service/ +├─ Cargo.toml # [lib] and [[bin]] +├─ src/ +│ ├─ lib.rs # Public library API +│ ├─ domain/ # Domain models and logic +│ ├─ services/ # Business services +│ └─ infrastructure/ # Database, HTTP clients +├─ src/ +│ └─ main.rs # Binary entry point (axum server) +└─ tests/ + └─ integration_test.rs +``` + +**Cargo.toml:** +```toml +[package] +name = "my-web-service" +version = "0.1.0" +edition = "2021" + +[lib] +name = "my_web_service" +path = "src/lib.rs" + +[[bin]] +name = "server" +path = "src/main.rs" +``` + +#### ✅ Choose Multi-Crate Workspace When: +- **Microservices** architecture with shared code +- **Monorepo** with multiple related services +- **Plugin system** where plugins are separate crates +- **Domain-driven design** with bounded contexts +- Code size > 20K lines or growing rapidly +- Team > 5 developers working on different areas +- Different crates have **different release cycles** +- Want to **share dependencies** across crates + +**Example:** +``` +my-project/ +├─ Cargo.toml # Workspace root +├─ Cargo.lock # Shared lock file +│ +├─ crates/ +│ ├─ domain/ # Core domain logic (no I/O) +│ │ ├─ Cargo.toml +│ │ └─ src/ +│ │ ├─ lib.rs +│ │ ├─ user.rs +│ │ └─ order.rs +│ │ +│ ├─ infrastructure/ # Database, HTTP, external services +│ │ ├─ Cargo.toml +│ │ └─ src/ +│ │ ├─ lib.rs +│ │ ├─ database/ +│ │ └─ http_client/ +│ │ +│ ├─ api/ # HTTP API layer +│ │ ├─ Cargo.toml +│ │ └─ src/ +│ │ ├─ main.rs # Binary +│ │ ├─ routes/ 
+│ │ └─ handlers/
+│ │
+│ └─ worker/ # Background job processor
+│ ├─ Cargo.toml
+│ └─ src/
+│ └─ main.rs # Binary
+│
+└─ tests/ # Workspace-level integration tests
+ └─ e2e_test.rs
+```
+
+**Workspace Cargo.toml:**
+```toml
+[workspace]
+members = [
+ "crates/domain",
+ "crates/infrastructure",
+ "crates/api",
+ "crates/worker",
+]
+
+# Shared dependencies across all workspace members
+[workspace.dependencies]
+tokio = { version = "1.48", features = ["full"] }
+axum = "0.8"
+sqlx = { version = "0.8", features = ["postgres", "runtime-tokio", "tls-rustls"] }
+serde = { version = "1.0.228", features = ["derive"] }
+anyhow = "1.0.100"
+thiserror = "2.0"
+uuid = { version = "1.18", features = ["v4", "serde"] }
+chrono = { version = "0.4.42", features = ["serde"] }
+rust_decimal = "1.39"
+argon2 = "0.5.3"
+
+[workspace.package]
+version = "0.1.0"
+edition = "2021"
+license = "MIT"
+repository = "https://github.com/user/my-project"
+```
+
+**Member Crate Cargo.toml (domain/Cargo.toml):**
+```toml
+[package]
+name = "my-project-domain"
+version.workspace = true
+edition.workspace = true
+
+[dependencies]
+# Use workspace dependencies
+uuid.workspace = true
+serde.workspace = true
+anyhow.workspace = true
+
+# Crate-specific dependencies
+rust_decimal = "1.39"
+```
+
+### Workspace Organization Patterns
+
+#### Pattern 1: Layered Architecture (Clean Architecture)
+```
+workspace/
+├─ crates/
+│ ├─ domain/ # Pure business logic (no dependencies on infrastructure)
+│ ├─ application/ # Use cases, orchestration (depends on domain)
+│ ├─ infrastructure/# Database, HTTP, external services (depends on domain)
+│ └─ api/ # HTTP handlers (depends on application + infrastructure)
+```
+**Dependency Flow:** `domain ← application ← infrastructure ← api`
+
+#### Pattern 2: Service-Oriented
+```
+workspace/
+├─ crates/
+│ ├─ shared/ # Common utilities and types
+│ ├─ user-service/ # User management service
+│ ├─ order-service/ # Order processing service
+│ └─ notification-service/ # Notification sender
+```
+**Use When:** Multiple independent services sharing common code
+
+#### Pattern 3: Library + Multiple Binaries
+```
+workspace/
+├─ crates/
+│ ├─ core/ # Reusable library
+│ ├─ cli/ # Command-line interface (binary)
+│ ├─ server/ # Web server (binary)
+│ └─ worker/ # Background processor (binary)
+```
+**Use When:** Same core logic, different deployment modes
+
+### Migration Path
+
+**Start Simple → Grow Complex**
+
+1. **Phase 1: Single Crate** (0-5K lines)
+ - Fast iteration, minimal overhead
+ - Organize with modules (`mod.rs` files)
+
+2. **Phase 2: Binary + Library** (5K-20K lines)
+ - Extract reusable logic to `src/lib.rs`
+ - Keep I/O and main entry in `src/main.rs`
+ - Publish library if needed
+
+3. **Phase 3: Multi-Crate Workspace** (20K+ lines)
+ - Split by domain boundaries (DDD)
+ - Extract shared code to `shared` crate
+ - Separate services into independent crates
+ - Use workspace dependencies for version consistency
+
+### Red Flags: When NOT to Use Workspace
+
+❌ **Premature Optimization**
+- Don't start with workspace for MVP or prototype
+- Workspace adds complexity (build config, dependency management)
+- Wait until you have >20K lines or clear separation needs
+
+❌ **Over-Engineering**
+- Don't create crate for every module
+- Minimum crate size: ~1K-2K lines (unless reusable library)
+- Aim for 5-10 crates max, not 50 micro-crates
+
+❌ **Unclear Boundaries**
+- If you can't explain why a crate exists independently, it shouldn't
+- Crates should represent clear domain boundaries or deployment units
+
+### Decision Checklist
+
+Before creating a workspace, check:
+
+- [ ] **Size**: Is the project >20K lines or expected to grow there?
+- [ ] **Team**: Do you have >5 developers working concurrently?
+- [ ] **Modularity**: Do you have clear, independent domain boundaries?
+- [ ] **Reusability**: Are multiple binaries sharing common code?
+- [ ] **Deployment**: Do components deploy independently?
+- [ ] **Testing**: Would separate test suites improve clarity?
+- [ ] **Build Time**: Would parallel crate builds improve compile time?
+
+**If 3+ are YES → Use Workspace**
+**If 1-2 are YES → Consider Binary + Library**
+**If 0-1 are YES → Stick with Single Crate**
diff --git a/skills/skill-creator/SKILL.md b/skills/skill-creator/SKILL.md
new file mode 100644
index 0000000..75c478b
--- /dev/null
+++ b/skills/skill-creator/SKILL.md
@@ -0,0 +1,183 @@
+---
+name: skill-creator
+description: "Guide for creating Claude skills. This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude's capabilities with specialized knowledge, workflows, or tool integrations."
+---
+
+# Skill Creator
+
+This skill provides guidance for creating effective skills. Relative paths such as `scripts/` below refer to this skill-creator directory, not to the skill being created.
+
+## About Skills
+
+Skills are modular, self-contained packages that extend Claude's capabilities by providing
+specialized knowledge, workflows, and tools. Think of them as "onboarding guides" for specific
+domains or tasks—they transform Claude from a general-purpose agent into a specialized agent
+equipped with procedural knowledge that no model can fully possess.
+
+### What Skills Provide
+
+1. Specialized workflows - Multi-step procedures for specific domains
+2. Tool integrations - Instructions for working with specific file formats or APIs
+3. Domain expertise - Company-specific knowledge, schemas, business logic
+4. Bundled resources - Scripts, references, and assets for complex and repetitive tasks
+
+### Anatomy of a Skill
+
+Every skill consists of a required SKILL.md file and optional bundled resources:
+
+```
+skill-name/
+├── SKILL.md (required)
+│ ├── YAML frontmatter metadata (required)
+│ │ ├── name: (required)
+│ │ └── description: (required)
+│ └── Markdown instructions (required)
+└── Bundled Resources (optional)
+ ├── scripts/ - Executable code (Python/Bash/etc.)
+ ├── references/ - Documentation intended to be loaded into context as needed
+ └── assets/ - Files used in output (templates, icons, fonts, etc.)
+```
+
+#### SKILL.md (required)
+
+**Metadata Quality:** The `name` and `description` in YAML frontmatter determine when Claude will use the skill.
Be specific about what the skill does and when to use it. Use the third-person (e.g. "This skill should be used when..." instead of "Use this skill when..."). + +#### Bundled Resources (optional) + +##### Scripts (`scripts/`) + +Executable code (Python/Bash/etc.) for tasks that require deterministic reliability or are repeatedly rewritten. + +- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed +- **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks +- **Benefits**: Token efficient, deterministic, may be executed without loading into context +- **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments + +##### References (`references/`) + +Documentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking. + +- **When to include**: For documentation that Claude should reference while working +- **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications +- **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides +- **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed +- **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md +- **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files. + +##### Assets (`assets/`) + +Files not intended to be loaded into context, but rather used within the output Claude produces. + +- **When to include**: When the skill needs files that will be used in the final output +- **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography +- **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified +- **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context + +### Progressive Disclosure Design Principle + +Skills use a three-level loading system to manage context efficiently: + +1. **Metadata (name + description)** - Always in context (~100 words) +2. **SKILL.md body** - When skill triggers (<5k words) +3. **Bundled resources** - As needed by Claude (Unlimited\*) + +\*Unlimited because scripts can be executed without reading into context window. + +## Skill Creation Process + +To create a skill, follow the "Skill Creation Process" in order, skipping steps only if there is a clear reason why they are not applicable. + +### Step 1: Understanding the Skill with Concrete Examples + +Skip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill. + +To create an effective skill, clearly understand concrete examples of how the skill will be used. 
This understanding can come from either direct user examples or generated examples that are validated with user feedback.
+
+For example, when building an image-editor skill, relevant questions include:
+
+- "What functionality should the image-editor skill support? Editing, rotating, anything else?"
+- "Can you give some examples of how this skill would be used?"
+- "I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?"
+- "What would a user say that should trigger this skill?"
+
+To avoid overwhelming users, avoid asking too many questions in a single message. Start with the most important questions and follow up as needed.
+
+Conclude this step when there is a clear sense of the functionality the skill should support.
+
+### Step 2: Planning the Reusable Skill Contents
+
+To turn concrete examples into an effective skill, analyze each example by:
+
+1. Considering how to execute on the example from scratch
+2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly
+
+Example: When building a `pdf-editor` skill to handle queries like "Help me rotate this PDF," the analysis shows:
+
+1. Rotating a PDF requires re-writing the same code each time
+2. A `scripts/rotate_pdf.py` script would be helpful to store in the skill
+
+Example: When designing a `frontend-webapp-builder` skill for queries like "Build me a todo app" or "Build me a dashboard to track my steps," the analysis shows:
+
+1. Writing a frontend webapp requires the same boilerplate HTML/React each time
+2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill
+
+Example: When building a `big-query` skill to handle queries like "How many users have logged in today?" the analysis shows:
+
+1. Querying BigQuery requires re-discovering the table schemas and relationships each time
+2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill
+
+To establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets.
+
+### Step 3: Initializing the Skill
+
+At this point, it is time to actually create the skill.
+
+Skip this step only if the skill being developed already exists, and iteration or packaging is needed. In this case, continue to the next step.
+
+When creating a new skill from scratch, always run the `init_skill.py` script. The script conveniently generates a new template skill directory that automatically includes everything a skill requires, making the skill creation process much more efficient and reliable.
+
+Usage:
+
+```bash
+<path-to-skill-creator>/scripts/init_skill.py <skill-name> --path <path>
+```
+
+The script:
+
+- Creates the skill directory at the specified path
+- Generates a SKILL.md template with proper frontmatter and TODO placeholders
+- Creates example resource directories: `scripts/`, `references/`, and `assets/`
+- Adds example files in each directory that can be customized or deleted
+
+After initialization, customize or remove the generated SKILL.md and example files as needed.
+
+### Step 4: Edit the Skill
+
+When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Claude to use. Focus on including information that would be beneficial and non-obvious to Claude. Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively.
+
+#### Start with Reusable Skill Contents
+
+To begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files. Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`.
+
+Also, delete any example files and directories not needed for the skill. The initialization script creates example files in `scripts/`, `references/`, and `assets/` to demonstrate structure, but most skills won't need all of them.
+
+#### Update SKILL.md
+
+**Writing Style:** Write the entire skill using **imperative/infinitive form** (verb-first instructions), not second person. Use objective, instructional language (e.g., "To accomplish X, do Y" rather than "You should do X" or "If you need to do X"). This maintains consistency and clarity for AI consumption.
+
+To complete SKILL.md, answer the following questions:
+
+1. What is the purpose of the skill, in a few sentences?
+2. When should the skill be used?
+3. In practice, how should Claude use the skill? All reusable skill contents developed above should be referenced so that Claude knows how to use them.
+
+### Step 5: Iterate
+
+After testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed.
+
+**Iteration workflow:**
+
+1. Use the skill on real tasks
+2. Notice struggles or inefficiencies
+3. Identify how SKILL.md or bundled resources should be updated
+4. Implement changes and test again
diff --git a/skills/skill-creator/scripts/init_skill.py b/skills/skill-creator/scripts/init_skill.py
new file mode 100644
index 0000000..3ee639c
--- /dev/null
+++ b/skills/skill-creator/scripts/init_skill.py
@@ -0,0 +1,303 @@
+#!/usr/bin/env python3
+"""
+Skill Initializer - Creates a new skill from template
+
+Usage:
+ init_skill.py <skill-name> --path <path>
+
+Examples:
+ init_skill.py my-new-skill --path skills/public
+ init_skill.py my-api-helper --path skills/private
+ init_skill.py custom-skill --path /custom/location
+"""
+
+import sys
+from pathlib import Path
+
+
+SKILL_TEMPLATE = """---
+name: {skill_name}
+description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.]
+---
+
+# {skill_title}
+
+## Overview
+
+[TODO: 1-2 sentences explaining what this skill enables]
+
+## Structuring This Skill
+
+[TODO: Choose the structure that best fits this skill's purpose. Common patterns:
+
+**1. Workflow-Based** (best for sequential processes)
+- Works well when there are clear step-by-step procedures
+- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing"
+- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2...
+
+**2. Task-Based** (best for tool collections)
+- Works well when the skill offers different operations/capabilities
+- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text"
+- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2...
+
+**3.
Reference/Guidelines** (best for standards or specifications) +- Works well for brand guidelines, coding standards, or requirements +- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features" +- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage... + +**4. Capabilities-Based** (best for integrated systems) +- Works well when the skill provides multiple interrelated features +- Example: Product Management with "Core Capabilities" → numbered capability list +- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature... + +Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations). + +Delete this entire "Structuring This Skill" section when done - it's just guidance.] + +## [TODO: Replace with the first main section based on chosen structure] + +[TODO: Add content here. See examples in existing skills: +- Code samples for technical skills +- Decision trees for complex workflows +- Concrete examples with realistic user requests +- References to scripts/templates/references as needed] + +## Resources + +This skill includes example resource directories that demonstrate how to organize different types of bundled resources: + +### scripts/ +Executable code (Python/Bash/etc.) that can be run directly to perform specific operations. + +**Examples from other skills:** +- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation +- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing + +**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations. + +**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments. + +### references/ +Documentation and reference material intended to be loaded into context to inform Claude's process and thinking. + +**Examples from other skills:** +- Product management: `communication.md`, `context_building.md` - detailed workflow guides +- BigQuery: API reference documentation and query examples +- Finance: Schema documentation, company policies + +**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working. + +### assets/ +Files not intended to be loaded into context, but rather used within the output Claude produces. + +**Examples from other skills:** +- Brand styling: PowerPoint template files (.pptx), logo files +- Frontend builder: HTML/React boilerplate project directories +- Typography: Font files (.ttf, .woff2) + +**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output. + +--- + +**Any unneeded directories can be deleted.** Not every skill requires all three types of resources. +""" + +EXAMPLE_SCRIPT = '''#!/usr/bin/env python3 +""" +Example helper script for {skill_name} + +This is a placeholder script that can be executed directly. +Replace with actual implementation or delete if not needed. 
+ +Example real scripts from other skills: +- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields +- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images +""" + +def main(): + print("This is an example script for {skill_name}") + # TODO: Add actual script logic here + # This could be data processing, file conversion, API calls, etc. + +if __name__ == "__main__": + main() +''' + +EXAMPLE_REFERENCE = """# Reference Documentation for {skill_title} + +This is a placeholder for detailed reference documentation. +Replace with actual reference content or delete if not needed. + +Example real reference docs from other skills: +- product-management/references/communication.md - Comprehensive guide for status updates +- product-management/references/context_building.md - Deep-dive on gathering context +- bigquery/references/ - API references and query examples + +## When Reference Docs Are Useful + +Reference docs are ideal for: +- Comprehensive API documentation +- Detailed workflow guides +- Complex multi-step processes +- Information too lengthy for main SKILL.md +- Content that's only needed for specific use cases + +## Structure Suggestions + +### API Reference Example +- Overview +- Authentication +- Endpoints with examples +- Error codes +- Rate limits + +### Workflow Guide Example +- Prerequisites +- Step-by-step instructions +- Common patterns +- Troubleshooting +- Best practices +""" + +EXAMPLE_ASSET = """# Example Asset File + +This placeholder represents where asset files would be stored. +Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed. + +Asset files are NOT intended to be loaded into context, but rather used within +the output Claude produces. + +Example asset files from other skills: +- Brand guidelines: logo.png, slides_template.pptx +- Frontend builder: hello-world/ directory with HTML/React boilerplate +- Typography: custom-font.ttf, font-family.woff2 +- Data: sample_data.csv, test_dataset.json + +## Common Asset Types + +- Templates: .pptx, .docx, boilerplate directories +- Images: .png, .jpg, .svg, .gif +- Fonts: .ttf, .otf, .woff, .woff2 +- Boilerplate code: Project directories, starter files +- Icons: .ico, .svg +- Data files: .csv, .json, .xml, .yaml + +Note: This is a text placeholder. Actual assets can be any file type. +""" + + +def title_case_skill_name(skill_name): + """Convert hyphenated skill name to Title Case for display.""" + return " ".join(word.capitalize() for word in skill_name.split("-")) + + +def init_skill(skill_name, path): + """ + Initialize a new skill directory with template SKILL.md. 
+ + Args: + skill_name: Name of the skill + path: Path where the skill directory should be created + + Returns: + Path to created skill directory, or None if error + """ + # Determine skill directory path + skill_dir = Path(path).resolve() / skill_name + + # Check if directory already exists + if skill_dir.exists(): + print(f"❌ Error: Skill directory already exists: {skill_dir}") + return None + + # Create skill directory + try: + skill_dir.mkdir(parents=True, exist_ok=False) + print(f"✅ Created skill directory: {skill_dir}") + except Exception as e: + print(f"❌ Error creating directory: {e}") + return None + + # Create SKILL.md from template + skill_title = title_case_skill_name(skill_name) + skill_content = SKILL_TEMPLATE.format( + skill_name=skill_name, skill_title=skill_title + ) + + skill_md_path = skill_dir / "SKILL.md" + try: + skill_md_path.write_text(skill_content) + print("✅ Created SKILL.md") + except Exception as e: + print(f"❌ Error creating SKILL.md: {e}") + return None + + # Create resource directories with example files + try: + # Create scripts/ directory with example script + scripts_dir = skill_dir / "scripts" + scripts_dir.mkdir(exist_ok=True) + example_script = scripts_dir / "example.py" + example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name)) + example_script.chmod(0o755) + print("✅ Created scripts/example.py") + + # Create references/ directory with example reference doc + references_dir = skill_dir / "references" + references_dir.mkdir(exist_ok=True) + example_reference = references_dir / "api_reference.md" + example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title)) + print("✅ Created references/api_reference.md") + + # Create assets/ directory with example asset placeholder + assets_dir = skill_dir / "assets" + assets_dir.mkdir(exist_ok=True) + example_asset = assets_dir / "example_asset.txt" + example_asset.write_text(EXAMPLE_ASSET) + print("✅ Created assets/example_asset.txt") + except Exception as e: + print(f"❌ Error creating resource directories: {e}") + return None + + # Print next steps + print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}") + print("\nNext steps:") + print("1. Edit SKILL.md to complete the TODO items and update the description") + print( + "2. 
Customize or delete the example files in scripts/, references/, and assets/" + ) + + return skill_dir + + +def main(): + if len(sys.argv) < 4 or sys.argv[2] != "--path": + print("Usage: init_skill.py --path ") + print("\nSkill name requirements:") + print(" - Hyphen-case identifier (e.g., 'data-analyzer')") + print(" - Lowercase letters, digits, and hyphens only") + print(" - Max 40 characters") + print(" - Must match directory name exactly") + print("\nExamples:") + print(" init_skill.py my-new-skill --path skills/public") + print(" init_skill.py my-api-helper --path skills/private") + print(" init_skill.py custom-skill --path /custom/location") + sys.exit(1) + + skill_name = sys.argv[1] + path = sys.argv[3] + + print(f"🚀 Initializing skill: {skill_name}") + print(f" Location: {path}") + print() + + result = init_skill(skill_name, path) + + if result: + sys.exit(0) + else: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/skills/tdd-enforce/SKILL.md b/skills/tdd-enforce/SKILL.md new file mode 100644 index 0000000..152d351 --- /dev/null +++ b/skills/tdd-enforce/SKILL.md @@ -0,0 +1,60 @@ +--- +name: tdd-enforce +description: Enforce Test-Driven Development (TDD) workflow for implementation tasks +--- + +# Description + +The TDD workflow ensures high-quality implementation by enforcing a cycle of writing tests before code. This skill guides the agent to break down implementation tasks into small, verifiable steps, ensuring each step is verified by a test before moving forward. + +# When to use this skill + +- When the user asks to "implement" a feature or function. +- When the user agrees to a plan that involves coding. +- When you are about to use the `TodoWrite` tool for a coding task. +- When the user explicitly mentions "TDD" or "test driven". + +# Process + +1. **Plan with Todos**: Before coding, create a `TodoWrite` list where every implementation step is paired with a verification step (test). +2. **Red (Write Test)**: Create or update a test case that defines the expected behavior for the next small unit of work. Run it to confirm it fails (or verify it doesn't exist yet). +3. **Green (Implement)**: Write the minimum code necessary to pass the test. +4. **Verify**: Run the test again to confirm it passes. +5. **Refactor (Optional)**: Clean up the code if needed, ensuring tests still pass. +6. **Repeat**: Mark the todo item as done and move to the next pair of Test/Implement steps. + +# Examples + +## Example Todo List Structure + +When implementing a `RealInfluxClient`: + +```text +Todos: +1. RED: Write integration test for RealInfluxClient.health() with testcontainers +2. GREEN: Implement RealInfluxClient.health() to make test pass +3. VERIFY: Run test and ensure it passes +4. RED: Write integration test for RealInfluxClient.write_tick() +5. GREEN: Implement RealInfluxClient.write_tick() +6. VERIFY: Run test and ensure it passes +7. RED: Write integration test for RealInfluxClient.query_ticks() +8. GREEN: Implement RealInfluxClient.query_ticks() +9. VERIFY: Run test and ensure it passes +``` + +## Example Interaction + +**User**: "Implement a function to reverse a string." + +**Agent**: "I will use TDD to implement this. I'll start by creating a plan." + +*Calls `TodoWrite`*: +1. RED: Create test case `test_reverse_hello` expecting "olleh" +2. GREEN: Implement `reverse_string` function to pass the test +3. VERIFY: Run tests to verify + +**Agent**: "I've created the plan. First, I'll write the failing test." +*Writes test, runs it (fails)*. 
+"Now I will implement the logic." +*Writes code, runs test (passes)*. + diff --git a/skills/update-package-version/SKILL.md b/skills/update-package-version/SKILL.md new file mode 100644 index 0000000..235a644 --- /dev/null +++ b/skills/update-package-version/SKILL.md @@ -0,0 +1,69 @@ +--- +name: update-package-version +description: "This skill should be used when users need to update package versions in nix/pkgs directory with new release information including SHA256 checksums." +--- + +# Update Package Version + +This skill updates package versions in the nix/pkgs directory structure with new release information and checksums. + +## When to Use + +Use this skill when: +- User requests updating a package to a new version +- New release is available with updated binaries +- Package follows the nix/pkgs directory structure with versions.json and package-specific .nix files + +## How to Use + +To update a package version, provide: +1. Package name (as used in versions.json) +2. New version number +3. Release manifest URL (for SHA256 checksums) + +Example invocation: +"update rust-mcp-filesystem to 0.3.12 using manifest https://github.com/towry/rust-mcp-filesystem/releases/download/v0.3.12/dist-manifest.json" + +## Process + +1. **Fetch Release Manifest**: Use WebFetch to retrieve the dist-manifest.json from the release URL +2. **Extract Checksums**: Parse the manifest to get SHA256 checksums for each platform/architecture +3. **Update versions.json**: Update the package version in nix/pkgs/versions.json +4. **Update Package .nix**: Update the SHA256 hash(es) in the corresponding package .nix file +5. **Validate Changes**: Confirm all updates are applied correctly + +## Expected Directory Structure + +``` +nix/pkgs/ +├── versions.json # Contains package version mappings +└── {package}.nix # Package-specific nix expression with SHA256 hashes +``` + +## File Formats + +### versions.json +```json +{ + "package-name": "version.number" +} +``` + +### Package .nix files +- Use `sha256-map` with platform-specific hashes +- Support platforms: aarch64-darwin, x86_64-darwin, aarch64-linux, x86_64-linux +- Hash format: 64-character SHA256 string + +## Validation + +- Verify version number follows semantic versioning (x.y.z) +- Confirm SHA256 hashes are valid 64-character hexadecimal strings +- Ensure package exists in both versions.json and has corresponding .nix file +- Check that platform mappings match available binaries in manifest + +## Error Handling + +If any step fails: +- Report specific error (missing manifest, invalid checksum, file not found) +- Do not proceed with partial updates +- Suggest manual verification steps diff --git a/skills/zellij-config/SKILL.md b/skills/zellij-config/SKILL.md new file mode 100644 index 0000000..be9eae9 --- /dev/null +++ b/skills/zellij-config/SKILL.md @@ -0,0 +1,503 @@ +--- +name: zellij-config +description: "Comprehensive skill for managing Zellij terminal multiplexer configurations including setup, layouts, themes, keybindings, plugins, and web server configuration. Use this skill when users need to configure Zellij, create custom layouts, set up themes, manage keybindings, or configure web server access." +--- + +# Zellij Config + +## Overview + +This skill provides comprehensive Zellij terminal multiplexer configuration management. It enables users to set up Zellij from scratch, migrate configurations, create custom layouts and themes, manage keybindings, configure plugins, and set up web server access. 
+ +## Quick Start + +Choose your configuration task: + +1. **Setup Zellij** - Initialize configuration, create config directory, set basic settings +2. **Manage Layouts** - Create custom pane layouts, tab templates, swap layouts +3. **Configure Themes** - Set up custom themes, switch themes, convert theme formats +4. **Setup Keybindings** - Configure custom keybindings for different modes +5. **Plugin Management** - Load plugins, configure plugin aliases, set plugin options +6. **Web Server Setup** - Enable web access, configure SSL, set ports and IPs + +## Setup Zellij + +### Initialize Configuration Directory + +Create Zellij configuration directory and dump default config: + +```bash +mkdir -p ~/.config/zellij +zellij setup --dump-config > ~/.config/zellij/config.kdl +``` + +### Validate Configuration + +Check existing configuration for errors: + +```bash +zellij setup --check +``` + +### Clean Start + +Start Zellij with clean configuration (ignores existing config): + +```bash +zellij --clean +``` + +### Configuration File Location + +Specify custom configuration file: + +```bash +zellij --config /path/to/custom/config.kdl +# or via environment variable +export ZELLIJ_CONFIG_FILE=/path/to/custom/config.kdl +``` + +## Manage Layouts + +### Create Default Layout + +Generate a default layout template: + +```bash +zellij setup --dump-layout default > ~/.config/zellij/layouts/default.kdl +``` + +### Create Custom Layout + +Create a development layout with multiple panes: + +```kdl +layout { + default_tab_template { + pane size=1 borderless=true { + plugin location="zellij:tab-bar" + } + children + pane size=2 borderless=true { + plugin location="zellij:status-bar" + } + } + tab name="development" split_direction="vertical" { + pane size="70%" { + command "nvim" + cwd "~/project" + } + pane split_direction="horizontal" { + pane command="git" { + args "status" + cwd "~/project" + } + pane command="htop" + } + } +} +``` + +### Floating Layout Example + +Layout with floating panes for different pane counts: + +```kdl +layout { + swap_floating_layout { + floating_panes max_panes=1 { + pane + } + floating_panes max_panes=2 { + pane x=0 + pane x="50%" + } + floating_panes max_panes=3 { + pane x=0 width="25%" + pane x="25%" width="25%" + pane x="50%" + } + } +} +``` + +### Use Custom Layout + +Load a specific layout: + +```bash +zellij --layout /path/to/custom-layout.kdl +# or place in ~/.config/zellij/layouts/ and use default +``` + +## Configure Themes + +### Apply Built-in Theme + +Set theme in configuration file: + +```kdl +theme "default" +``` + +### Define Custom Theme (RGB) + +Create a custom theme with RGB values: + +```kdl +themes { + custom_theme { + fg 248 248 242 + bg 40 42 54 + black 0 0 0 + red 255 85 85 + green 80 250 123 + yellow 241 250 140 + blue 98 114 164 + magenta 255 121 198 + cyan 139 233 253 + white 255 255 255 + orange 255 184 108 + } +} +``` + +### Define Custom Theme (Hexadecimal) + +Create a theme with hex color codes: + +```kdl +themes { + nord { + fg "#D8DEE9" + bg "#2E3440" + black "#3B4252" + red "#BF616A" + green "#A3BE8C" + yellow "#EBCB8B" + blue "#81A1C1" + magenta "#B48EAD" + cyan "#88C0D0" + white "#E5E9F0" + orange "#D08770" + } +} +``` + +### Switch Theme from Command Line + +Temporarily use a theme: + +```bash +zellij --theme custom_theme +``` + +### Convert Legacy Theme + +Convert YAML theme to KDL format: + +```bash +zellij convert-theme /path/to/old-theme.yaml > /path/to/new-theme.kdl +``` + +## Setup Keybindings + +### Basic Keybinding Configuration + 
+Configure keybindings for different modes: + +```kdl +keybinds { + normal { + bind "Ctrl g" { SwitchToMode "locked"; } + bind "Ctrl p" { SwitchToMode "pane"; } + bind "Alt n" { NewPane; } + bind "Alt h" "Alt Left" { MoveFocusOrTab "Left"; } + bind "Ctrl Shift t" { NewTab; } + } + pane { + bind "h" "Left" { MoveFocus "Left"; } + bind "l" "Right" { MoveFocus "Right"; } + bind "j" "Down" { MoveFocus "Down"; } + bind "k" "Up" { MoveFocus "Up"; } + bind "p" { SwitchFocus; } + bind "Ctrl c" { CopySelection; } + } + locked { + bind "Ctrl g" { SwitchToMode "normal"; } + } + shared { + bind "Alt 1" { Run "git" "status"; } + bind "Alt 2" { Run "git" "diff"; } + bind "Alt 3" { Run "exa" "--color" "always"; } + } +} +``` + +### Keybinding Syntax Examples + +Different keybinding syntax patterns: + +```kdl +bind "a" // individual character +bind "Ctrl a" // with ctrl modifier +bind "Alt a" // with alt modifier +bind "Ctrl Alt a" // multiple modifiers +bind "F8" // function key +bind "Left" // arrow key +``` + +## Plugin Management + +### Load Plugins on Startup + +Configure plugins to load automatically: + +```kdl +load_plugins { + https://example.com/my-plugin.wasm + file:/path/to/my/plugin.kdl + my-plugin-alias +} +``` + +### Configure Plugin Aliases + +Set up common plugin aliases: + +```kdl +plugins { + tab-bar location="zellij:tab-bar" + status-bar location="zellij:status-bar" + strider location="zellij:strider" + compact-bar location="zellij:compact-bar" + session-manager location="zellij:session-manager" + welcome-screen location="zellij:session-manager" { + welcome_screen true + } + filepicker location="zellij:strider" { + cwd "/" + } +} +``` + +### Configure Plugin Options + +Pass configuration to plugins: + +```kdl +layout { + pane { + plugin location="file:/path/to/my/plugin.wasm" { + some_key "some_value" + another_key 1 + } + } +} +``` + +### Launch Plugin with Configuration + +Configure plugin via command line: + +```bash +zellij action launch-or-focus-plugin --configuration "some_key=some_value,another_key=1" +``` + +## Web Server Setup + +### Enable Web Server + +Start web server automatically: + +```kdl +web_server true +web_server_ip "0.0.0.0" +web_server_port 8082 +``` + +### Configure SSL + +Set up HTTPS with SSL certificates: + +```kdl +web_server true +web_server_ip "0.0.0.0" +web_server_port 443 +web_server_cert "/path/to/my/certs/localhost+3.pem" +web_server_key "/path/to/my/certs/localhost+3-key.pem" +enforce_https_on_localhost true +``` + +### Web Client Configuration + +Configure browser-based terminal appearance: + +```kdl +web_client { + font "Iosevka Term" + cursor_blink true + cursor_style "block" + cursor_inactive_style "outline" + mac_option_is_meta false + theme { + background 10 20 30 + foreground 10 20 30 + black 10 20 30 + blue 10 20 30 + bright_black 10 20 30 + bright_blue 10 20 30 + bright_cyan 10 20 30 + bright_green 10 20 30 + bright_magenta 10 20 30 + bright_red 10 20 30 + bright_white 10 20 30 + bright_yellow 10 20 30 + cursor 10 20 30 + cursor_accent 10 20 30 + cyan 10 20 30 + green 10 20 30 + magenta 10 20 30 + red 10 20 30 + white 10 20 30 + yellow 10 20 30 + selection_background 10 20 30 + selection_foreground 10 20 30 + selection_inactive_background 10 20 30 + } +} +``` + +## Environment Variables + +### Set Environment for Panes + +Configure environment variables for all panes: + +```kdl +env { + RUST_BACKTRACE 1 + FOO "bar" + EDITOR "nvim" +} +``` + +### Session Management + +Configure session persistence and resurrection: + +```kdl 
+session_serialization true +pane_viewport_serialization true +scrollback_lines_to_serialize 0 +default_layout "compact" +default_mode "locked" +``` + +## Common Workflows + +### Development Environment Setup + +Create a comprehensive development layout: + +```bash +# Create development layout +cat > ~/.config/zellij/layouts/dev.kdl << 'EOF' +layout { + default_tab_template { + pane size=1 borderless=true { + plugin location="zellij:tab-bar" + } + children + pane size=2 borderless=true { + plugin location="zellij:status-bar" + } + } + tab name="editor" cwd="~/project" focus=true { + pane command="nvim" size="80%" + pane size="20%" split_direction="vertical" { + pane command="git" { + args "status" + size="50%" + } + pane command="htop" + } + } + tab name="terminal" { + pane command="bash" + } + tab name="monitoring" split_direction="horizontal" { + pane command="htop" + pane command="btop" + } +} +EOF + +# Use the layout +zellij --layout dev +``` + +### Multiplayer Session Setup + +Configure colors for multiplayer sessions: + +```kdl +multiplayer_user_colors { + player_1 255 0 255 + player_2 0 217 227 + player_3 0 + player_4 255 230 0 + player_5 0 229 229 + player_6 0 + player_7 255 53 94 + player_8 0 + player_9 0 + player_10 0 +} +``` + +## Configuration Validation + +### Check Configuration + +Validate configuration file syntax: + +```bash +zellij setup --check +``` + +### Test Configuration + +Test new configuration without affecting existing session: + +```bash +zellij --config /path/to/test-config.kdl --session-name test-session +``` + +## Resources + +### scripts/ +Executable scripts for Zellij configuration management: + +- `setup_zellij.py` - Automates initial Zellij setup +- `create_layout.py` - Generates custom layouts from templates +- `convert_themes.py` - Converts legacy theme formats to KDL +- `validate_config.py` - Validates Zellij configuration syntax +- `backup_config.py` - Creates configuration backups + +### references/ +Comprehensive Zellij configuration documentation: + +- `configuration_options.md` - Complete reference of all Zellij options +- `layout_examples.md` - Collection of layout templates +- `theme_examples.md` - Custom theme examples and guidelines +- `keybinding_reference.md` - Complete keybinding syntax and actions +- `plugin_api.md` - Plugin development and configuration guide + +### assets/ +Configuration templates and example files: + +- `config_templates/` - Starter configuration files for different use cases +- `layout_templates/` - Common layout templates (development, monitoring, etc.) +- `theme_templates/` - Custom theme files (nord, dracula, etc.) 
+- `plugin_examples/` - Example plugin configurations diff --git a/skills/zellij-config/assets/config_templates/basic.kdl b/skills/zellij-config/assets/config_templates/basic.kdl new file mode 100644 index 0000000..ed01e71 --- /dev/null +++ b/skills/zellij-config/assets/config_templates/basic.kdl @@ -0,0 +1,49 @@ +# Basic Zellij Configuration Template +# Copy this to ~/.config/zellij/config.kdl and customize as needed + +# UI Settings +ui { + pane_frames { + rounded_corners true + } +} + +# Mouse Settings +mouse_mode true +copy_on_select false + +# Theme +theme "default" + +# Session Management +session_serialization true +pane_viewport_serialization false +default_layout "default" +default_mode "normal" + +# Keybindings - Basic Setup +keybinds { + normal { + bind "Ctrl g" { SwitchToMode "locked"; } + bind "Ctrl p" { SwitchToMode "pane"; } + bind "Alt n" { NewPane; } + bind "Alt h" "Alt Left" { MoveFocusOrTab "Left"; } + bind "Ctrl Shift t" { NewTab; } + } + pane { + bind "h" "Left" { MoveFocus "Left"; } + bind "l" "Right" { MoveFocus "Right"; } + bind "j" "Down" { MoveFocus "Down"; } + bind "k" "Up" { MoveFocus "Up"; } + bind "p" { SwitchFocus; } + bind "Ctrl c" { CopySelection; } + } + locked { + bind "Ctrl g" { SwitchToMode "normal"; } + } +} + +# Environment Variables +env { + EDITOR "nvim" +} \ No newline at end of file diff --git a/skills/zellij-config/assets/example_asset.txt b/skills/zellij-config/assets/example_asset.txt new file mode 100644 index 0000000..d0ac204 --- /dev/null +++ b/skills/zellij-config/assets/example_asset.txt @@ -0,0 +1,24 @@ +# Example Asset File + +This placeholder represents where asset files would be stored. +Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed. + +Asset files are NOT intended to be loaded into context, but rather used within +the output Claude produces. + +Example asset files from other skills: +- Brand guidelines: logo.png, slides_template.pptx +- Frontend builder: hello-world/ directory with HTML/React boilerplate +- Typography: custom-font.ttf, font-family.woff2 +- Data: sample_data.csv, test_dataset.json + +## Common Asset Types + +- Templates: .pptx, .docx, boilerplate directories +- Images: .png, .jpg, .svg, .gif +- Fonts: .ttf, .otf, .woff, .woff2 +- Boilerplate code: Project directories, starter files +- Icons: .ico, .svg +- Data files: .csv, .json, .xml, .yaml + +Note: This is a text placeholder. Actual assets can be any file type. diff --git a/skills/zellij-config/references/api_reference.md b/skills/zellij-config/references/api_reference.md new file mode 100644 index 0000000..b2c6344 --- /dev/null +++ b/skills/zellij-config/references/api_reference.md @@ -0,0 +1,34 @@ +# Reference Documentation for Zellij Config + +This is a placeholder for detailed reference documentation. +Replace with actual reference content or delete if not needed. 
+ +Example real reference docs from other skills: +- product-management/references/communication.md - Comprehensive guide for status updates +- product-management/references/context_building.md - Deep-dive on gathering context +- bigquery/references/ - API references and query examples + +## When Reference Docs Are Useful + +Reference docs are ideal for: +- Comprehensive API documentation +- Detailed workflow guides +- Complex multi-step processes +- Information too lengthy for main SKILL.md +- Content that's only needed for specific use cases + +## Structure Suggestions + +### API Reference Example +- Overview +- Authentication +- Endpoints with examples +- Error codes +- Rate limits + +### Workflow Guide Example +- Prerequisites +- Step-by-step instructions +- Common patterns +- Troubleshooting +- Best practices diff --git a/skills/zellij-config/references/configuration_options.md b/skills/zellij-config/references/configuration_options.md new file mode 100644 index 0000000..27666e1 --- /dev/null +++ b/skills/zellij-config/references/configuration_options.md @@ -0,0 +1,421 @@ +# Zellij Configuration Options Reference + +Complete reference of all Zellij configuration options with examples. + +## Core Configuration + +### `theme` +Sets the color theme to use. The theme must be defined in the configuration's 'themes' section or loaded from the themes folder. + +**Type:** String +**Default:** "default" + +```kdl +theme "nord" +theme "dracula" +theme "custom_theme" +``` + +### `default_layout` +Specifies the name of the layout file to load when Zellij starts. The layout must exist in the layouts folder. + +**Type:** String +**Default:** "default" + +```kdl +default_layout "compact" +default_layout "development" +``` + +### `default_mode` +Determines the mode Zellij starts in. + +**Type:** String +**Values:** "normal", "locked" +**Default:** "normal" + +```kdl +default_mode "locked" +``` + +### `layout_dir` +Sets the directory where Zellij searches for layout files. + +**Type:** String +**Default:** Subdirectory of config dir + +```kdl +layout_dir "/path/to/my/layout_dir" +``` + +### `theme_dir` +Sets the directory where Zellij searches for theme files. + +**Type:** String +**Default:** Subdirectory of config dir + +```kdl +theme_dir "/path/to/my/theme_dir" +``` + +## Session Management + +### `session_serialization` +Enables or disables Zellij session serialization. + +**Type:** Boolean +**Default:** true + +```kdl +session_serialization true +session_serialization false +``` + +### `pane_viewport_serialization` +When session serialization is enabled, allows serializing the pane viewport (visible terminal content). + +**Type:** Boolean +**Default:** false + +```kdl +pane_viewport_serialization true +``` + +### `scrollback_lines_to_serialize` +Number of scrollback lines to serialize when pane viewport serialization is enabled. Setting to 0 serializes all scrollback. + +**Type:** Integer +**Default:** 1000 + +```kdl +scrollback_lines_to_serialize 0 +scrollback_lines_to_serialize 500 +``` + +## UI Configuration + +### `ui` block +Contains UI-related settings. + +#### `pane_frames` +Controls pane frame display settings. + +##### `rounded_corners` +Determines whether pane frames should have rounded corners. + +**Type:** Boolean +**Default:** true + +```kdl +ui { + pane_frames { + rounded_corners true + } +} +``` + +### Mouse Configuration + +### `mouse_mode` +Sets handling of mouse events. 
+ +**Type:** Boolean +**Default:** true + +```kdl +mouse_mode true +mouse_mode false +``` + +### `copy_on_select` +Automatically copy text when selecting. + +**Type:** Boolean +**Default:** false + +```kdl +copy_on_select true +``` + +## Environment Variables + +### `env` block +Defines environment variables to be set for each terminal pane. + +**Type:** Map of String to String/Integer + +```kdl +env { + RUST_BACKTRACE 1 + EDITOR "nvim" + FOO "bar" + PATH "/usr/local/bin:/usr/bin" +} +``` + +## Plugin Configuration + +### `load_plugins` block +Plugins to load automatically when session starts. + +**Type:** List of URLs or aliases + +```kdl +load_plugins { + https://example.com/plugin.wasm + file:/path/to/local/plugin.wasm + plugin-alias +} +``` + +### `plugins` block +Plugin aliases with optional configurations. + +**Type:** Plugin configuration map + +```kdl +plugins { + tab-bar location="zellij:tab-bar" + status-bar location="zellij:status-bar" + custom-plugin location="file:/path/to/plugin.wasm" { + option1 "value1" + option2 42 + } +} +``` + +## Web Server Configuration + +### `web_server` +Enable/disable web server startup. + +**Type:** Boolean +**Default:** false + +```kdl +web_server true +``` + +### `web_server_ip` +IP address for web server to listen on. + +**Type:** String +**Default:** "127.0.0.1" + +```kdl +web_server_ip "0.0.0.0" +``` + +### `web_server_port` +Port for web server to listen on. + +**Type:** Integer +**Default:** 8082 + +```kdl +web_server_port 443 +web_server_port 8083 +``` + +### `web_server_cert` +Path to SSL certificate for HTTPS. + +**Type:** String +**Default:** None + +```kdl +web_server_cert "/path/to/cert.pem" +``` + +### `web_server_key` +Path to SSL private key for HTTPS. + +**Type:** String +**Default:** None + +```kdl +web_server_key "/path/to/key.pem" +``` + +### `enforce_https_on_localhost` +Enforce HTTPS certificate requirement even on localhost. + +**Type:** Boolean +**Default:** false + +```kdl +enforce_https_on_localhost true +``` + +## Web Client Configuration + +### `web_client` block +Settings for browser-based terminal client. + +#### `font` +Font for web client terminal. + +**Type:** String +**Default:** "monospace" + +```kdl +web_client { + font "Iosevka Term" +} +``` + +#### `cursor_blink` +Enable cursor blinking. + +**Type:** Boolean +**Default:** false + +```kdl +web_client { + cursor_blink true +} +``` + +#### `cursor_style` +Cursor style. + +**Type:** String +**Values:** "block", "bar", "underline" +**Default:** "block" + +```kdl +web_client { + cursor_style "underline" +} +``` + +#### `cursor_inactive_style` +Inactive cursor style. + +**Type:** String +**Values:** "outline", "block", "bar", "underline" +**Default:** "block" + +```kdl +web_client { + cursor_inactive_style "outline" +} +``` + +#### `mac_option_is_meta` +Treat Option key as Meta on macOS. + +**Type:** Boolean +**Default:** true + +```kdl +web_client { + mac_option_is_meta false +} +``` + +#### `theme` block +Web client terminal theme (separate from Zellij theme). + +**Type:** Color definitions in RGB format + +```kdl +web_client { + theme { + background 10 20 30 + foreground 248 248 242 + // ... more colors + } +} +``` + +## Multiplayer Configuration + +### `multiplayer_user_colors` +Colors for users in multiplayer sessions. + +**Type:** Map of player numbers to RGB values + +```kdl +multiplayer_user_colors { + player_1 255 0 255 + player_2 0 217 227 + // ... 
up to player_10 +} +``` + +## Auto Layout Configuration + +### `auto_layout` +Controls automatic pane arrangement. + +**Type:** Boolean +**Default:** true + +```kdl +auto_layout true +``` + +## Command Line Options + +All configuration options can be overridden via command line: + +```bash +# Override theme +zellij --theme nord + +# Override layout +zellij --layout development + +# Override config file +zellij --config /custom/path/config.kdl + +# Override default mode +zellij --default-mode locked + +# Set session name +zellij --session-name my-workspace + +# Disable mouse +zellij --disable-mouse-mode + +# Set custom shell +zellij --default-shell fish +``` + +## Configuration Validation + +Use built-in validation: + +```bash +# Check configuration syntax +zellij setup --check + +# Dump default configuration +zellij setup --dump-config + +# Dump default layout +zellij setup --dump-layout default +``` + +## File Locations + +- **Config Directory:** `~/.config/zellij/` +- **Layouts Directory:** `~/.config/zellij/layouts/` +- **Themes Directory:** `~/.config/zellij/themes/` +- **Default Config:** `~/.config/zellij/config.kdl` + +## Migration from YAML + +Convert legacy YAML configuration: + +```bash +# Convert config +zellij convert-config /path/to/config.yaml > /path/to/config.kdl + +# Convert theme +zellij convert-theme /path/to/theme.yaml > /path/to/theme.kdl + +# Convert layout +zellij convert-layout /path/to/layout.yaml > /path/to/layout.kdl +``` \ No newline at end of file diff --git a/skills/zellij-config/references/layout_examples.md b/skills/zellij-config/references/layout_examples.md new file mode 100644 index 0000000..d2cedcc --- /dev/null +++ b/skills/zellij-config/references/layout_examples.md @@ -0,0 +1,411 @@ +# Zellij Layout Examples + +Collection of layout templates for different use cases and workflows. 
+ +## Basic Layouts + +### Single Terminal +Simple terminal with full screen: + +```kdl +layout { + pane command="bash" +} +``` + +### Two Pane Horizontal +Two panes side by side: + +```kdl +layout { + pane split_direction="horizontal" { + pane size="50%" command="bash" + pane size="50%" command="bash" + } +} +``` + +### Two Pane Vertical +Two panes stacked vertically: + +```kdl +layout { + pane split_direction="vertical" { + pane size="50%" command="bash" + pane size="50%" command="bash" + } +} +``` + +## Development Layouts + +### Development Workspace +Editor with git and terminal panes: + +```kdl +layout { + default_tab_template { + pane size=1 borderless=true { + plugin location="zellij:tab-bar" + } + children + pane size=2 borderless=true { + plugin location="zellij:status-bar" + } + } + tab name="code" cwd="~/project" focus=true { + pane command="nvim" size="80%" + pane size="20%" split_direction="vertical" { + pane command="git" { + args "status" + size="50%" + } + pane command="htop" + } + } + tab name="terminal" { + pane command="bash" + } +} +``` + +### Full Development Setup +Complete development environment with monitoring: + +```kdl +layout { + default_tab_template { + pane size=1 borderless=true { + plugin location="zellij:tab-bar" + } + children + pane size=2 borderless=true { + plugin location="zellij:status-bar" + } + } + tab name="editor" cwd="~/project" focus=true { + pane command="nvim" size="70%" + pane split_direction="horizontal" { + pane size="50%" split_direction="vertical" { + pane command="git" { + args "status" + cwd "~/project" + } + pane command="cargo" { + args "test" + cwd "~/project" + } + } + pane command="htop" + } + } + tab name="server" cwd="~/server" { + pane command="nvim" size="60%" + pane split_direction="vertical" { + pane command="tail" { + args "-f" "log/production.log" + cwd "~/server" + size="40%" + } + pane command="btop" size="40%" + } + } + tab name="database" { + pane command="psql" { + args "-U" "postgres" + cwd "~/server" + } + } +} +``` + +## Monitoring Layouts + +### System Monitoring +Multiple monitoring tools: + +```kdl +layout { + tab name="monitoring" split_direction="horizontal" { + pane command="htop" + pane command="btop" + pane command="iotop" + pane command="nethogs" + } +} +``` + +### Resource Monitoring +CPU, memory, disk, and network monitoring: + +```kdl +layout { + tab name="resources" split_direction="horizontal" { + pane split_direction="vertical" { + pane command="htop" + pane command="df" "-h" + } + pane split_direction="vertical" { + pane command="btop" + pane command="iotop" + } + pane command="nethogs" + } +} +``` + +### Log Monitoring +Monitor multiple log files: + +```kdl +layout { + tab name="logs" split_direction="horizontal" { + pane command="tail" { + args "-f" "/var/log/syslog" + } + pane command="tail" { + args "-f" "/var/log/nginx/access.log" + } + pane command="journalctl" { + args "-f" + } + } +} +``` + +## Tab Templates + +### Default Tab Template with Plugins +Standard tab bar and status bar: + +```kdl +layout { + default_tab_template { + pane size=1 borderless=true { + plugin location="zellij:tab-bar" + } + children + pane size=2 borderless=true { + plugin location="zellij:status-bar" + } + } + // All tabs will use this template unless overridden +} +``` + +### Compact Tab Template +Minimal UI with compact bar: + +```kdl +layout { + default_tab_template { + pane size=1 borderless=true { + plugin location="zellij:compact-bar" + } + children + } +} +``` + +## Floating Layouts + +### Swap Floating Layout 
+Different arrangements based on pane count: + +```kdl +layout { + swap_floating_layout { + floating_panes max_panes=1 { + pane x="25%" y="25%" width="50%" height="50%" + } + floating_panes max_panes=2 { + pane x="10%" y="25%" width="35%" height="50%" + pane x="55%" y="25%" width="35%" height="50%" + } + floating_panes max_panes=3 { + pane x="0%" y="0%" width="33%" height="50%" + pane x="33%" y="0%" width="33%" height="50%" + pane x="66%" y="0%" width="33%" height="50%" + } + } +} +``` + +### Specific Floating Layout +Predefined floating pane positions: + +```kdl +layout { + pane { + x="10%" + y="20%" + width="80%" + height="60%" + focus=true + command="nvim" + } + pane { + x="15%" + y="70%" + width="70%" + height="25%" + command="htop" + } +} +``` + +## Advanced Layouts + +### Multi-Project Setup +Work on multiple projects simultaneously: + +```kdl +layout { + tab name="frontend" cwd="~/projects/frontend" { + pane command="npm" { + args "start" + cwd "~/projects/frontend" + } + pane command="nvim" { + cwd "~/projects/frontend" + } + } + tab name="backend" cwd="~/projects/backend" { + pane command="npm" { + args "start" + cwd "~/projects/backend" + } + pane command="nvim" { + cwd "~/projects/backend" + } + } + tab name="docs" cwd="~/projects/docs" { + pane command="mkdocs" { + args "serve" + cwd "~/projects/docs" + } + } +} +``` + +### Database Layout +Development with database management: + +```kdl +layout { + tab name="app" cwd="~/project" { + pane command="npm" { + args "start" + size="60%" + } + pane split_direction="vertical" { + pane command="nvim" { + size="40%" + } + pane command="psql" { + args "-U" "postgres" + size="60%" + } + } + } + tab name="database-tools" { + pane command="pgadmin" { + size="50%" + } + pane command="dbeaver" { + size="50%" + } + } +} +``` + +## Special Purpose Layouts + +### Git Workflow +Git operations with diff viewer: + +```kdl +layout { + tab name="git" cwd="~/project" { + pane command="git" { + args "status" + size="30%" + } + pane command="git" { + args "log" "--oneline" "-10" + size="30%" + } + pane split_direction="horizontal" { + pane command="git" { + args "diff" "--cached" + size="40%" + } + pane command="git" { + args "diff" "--cached" "HEAD~1" + size="40%" + } + } + } +} +``` + +### Container Development +Docker and development tools: + +```kdl +layout { + tab name="containers" { + pane command="docker" { + args "ps" + size="40%" + } + pane command="docker-compose" { + args "ps" + size="30%" + } + pane split_direction="vertical" { + pane command="docker" { + args "stats" + size="50%" + } + pane command="lazydocker" + size="50%" + } + } + } + tab name="k8s" { + pane command="kubectl" { + args "get" "pods" "--watch" + size="50%" + } + pane command="k9s" { + size="50%" + } + } +} +``` + +## Layout Tips + +### Size Specifications +- **Fixed sizes:** `size=10` (exact lines/columns) +- **Percentage sizes:** `size="50%"` (relative to container) +- **Mixed sizing:** Use fixed and percentage as needed + +### Split Directions +- **Vertical:** `split_direction="vertical"` (stack top to bottom) +- **Horizontal:** `split_direction="horizontal"` (side by side) + +### Common Patterns +1. **Focus on startup:** Add `focus=true` to important panes +2. **Set working directories:** Use `cwd="/path"` for project-specific panes +3. **Naming:** Use descriptive tab names for organization +4. **Templates:** Use `default_tab_template` for consistent UI +5. 
**Plugins:** Integrate tab-bar and status-bar for better UX + +### Best Practices +- Keep layouts readable with proper indentation +- Use consistent naming conventions +- Test layouts before deploying +- Document custom layouts for team sharing +- Consider different screen sizes when designing layouts \ No newline at end of file diff --git a/skills/zellij-config/references/theme_examples.md b/skills/zellij-config/references/theme_examples.md new file mode 100644 index 0000000..cc13280 --- /dev/null +++ b/skills/zellij-config/references/theme_examples.md @@ -0,0 +1,399 @@ +# Zellij Theme Examples + +Collection of custom themes and theme creation guidelines. + +## Built-in Themes + +### Default Theme +Standard light theme: + +```kdl +theme "default" +``` + +### Nord Theme +Popular dark theme based on Nordic colors: + +```kdl +themes { + nord { + fg "#D8DEE9" + bg "#2E3440" + black "#3B4252" + red "#BF616A" + green "#A3BE8C" + yellow "#EBCB8B" + blue "#81A1C1" + magenta "#B48EAD" + cyan "#88C0D0" + white "#E5E9F0" + orange "#D08770" + } +} +``` + +### Dracula Theme +Popular dark theme: + +```kdl +themes { + dracula { + fg 248 248 242 + bg 40 42 54 + black 0 0 0 + red 255 85 85 + green 80 250 123 + yellow 241 250 140 + blue 98 114 164 + magenta 255 121 198 + cyan 139 233 253 + white 255 255 255 + orange 255 184 108 + } +} +``` + +## Custom Themes + +### Custom RGB Theme +Theme using RGB color values: + +```kdl +themes { + my_custom_theme { + fg 200 200 200 + bg 30 30 30 + black 40 40 40 + red 255 100 100 + green 100 255 100 + yellow 255 255 100 + blue 100 100 255 + magenta 255 100 255 + cyan 100 255 255 + white 255 255 255 + orange 255 200 100 + } +} +``` + +### Custom Hex Theme +Theme using hexadecimal color codes: + +```kdl +themes { + cyberpunk { + fg "#00ff00" + bg "#0a0a0a" + black "#1a1a1a" + red "#ff0000" + green "#00ff00" + yellow "#ffff00" + blue "#0080ff" + magenta "#ff00ff" + cyan "#00ffff" + white "#ffffff" + orange "#ff8000" + } +} +``` + +### Solarized Dark +Classic solarized color scheme: + +```kdl +themes { + solarized_dark { + fg "#839496" + bg "#002b36" + black "#073642" + red "#dc322f" + green "#859900" + yellow "#b58900" + blue "#268bd2" + magenta "#d33682" + cyan "#2aa198" + white "#eee8d5" + orange "#cb4b16" + } +} +``` + +### Gruvbox Dark +Popular dark theme for developers: + +```kdl +themes { + gruvbox_dark { + fg "#ebdbb2" + bg "#282828" + black "#1d2021" + red "#cc241d" + green "#98971a" + yellow "#d79921" + blue "#83a598" + magenta "#d3869b" + cyan "#8ec07c" + white "#ebdbb2" + orange "#fe8019" + } +} +``` + +### Monokai +Clean, minimal dark theme: + +```kdl +themes { + monokai { + fg "#ffffff" + bg "#272822" + black "#272822" + red "#ff5555" + green "#50fa7b" + yellow "#f1fa8c" + blue "#8be9fd" + magenta "#bd93f9" + cyan "#8fa8c0" + white "#f8f8f2" + orange "#ff6b6b" + } +} +``` + +### Tokyo Night +Modern dark theme with blue tones: + +```kdl +themes { + tokyo_night { + fg "#c0caf5" + bg "#1a1b26" + black "#393939" + red "#f7768e" + green "#9ece6a" + yellow "#e0af68" + blue "#7aa2f7" + magenta "#bb9af7" + cyan "#89dceb" + white "#d9d7d8" + orange "#e06c75" + } +} +``` + +## Theme Component Colors + +### Ribbon UI Colors +Some themes support detailed ribbon component colors: + +```kdl +themes { + detailed_theme { + ribbon_unselected { + base 10 10 10 + background 50 50 50 + emphasis_0 200 200 200 + emphasis_1 220 220 220 + emphasis_2 240 240 240 + emphasis_3 255 255 255 + } + ribbon_selected { + base 20 20 20 + background 60 60 60 + emphasis_0 210 210 210 + 
emphasis_1 230 230 230 + emphasis_2 250 250 250 + emphasis_3 255 255 255 + } + } +} +``` + +### Multiplayer Colors +Configure colors for multiplayer sessions: + +```kdl +multiplayer_user_colors { + player_1 255 0 255 // Magenta + player_2 0 255 255 // Blue + player_3 0 0 0 // Black + player_4 255 255 0 // Red + player_5 0 255 0 // Black + player_6 255 255 255 // White + player_7 255 0 255 // Magenta + player_8 0 255 0 // Black + player_9 0 200 0 // Green + player_10 0 0 255 // Blue +} +``` + +## Theme Creation Guidelines + +### Color Format Options + +#### RGB Format +Use three space-separated values (0-255): + +```kdl +color_name 200 150 100 // R=200, G=150, B=100 +``` + +#### Hexadecimal Format +Use standard hex color codes with # prefix: + +```kdl +color_name "#ff6b6b" // Orange-red +``` + +#### Mixed Format +You can mix formats in the same theme: + +```kdl +themes { + mixed_theme { + fg "#ffffff" // Hex for foreground + bg 40 42 54 // RGB for background + red "#cc241d" // Hex for accent + green 80 250 123 // RGB for success + yellow 241 250 140 // RGB for warning + } +} +``` + +### Theme Design Best Practices + +1. **Contrast is Key** + - Ensure text remains readable against background + - Test with different terminal profiles + - Consider accessibility (WCAG contrast ratios) + +2. **Consistent Color Palette** + - Limit to 8-12 colors for consistency + - Use semantic naming (success, warning, error, etc.) + - Maintain harmony across color choices + +3. **Test Across Applications** + - Verify colors work in nvim, vim, tmux, etc. + - Test with syntax highlighting themes + - Check compatibility with common tools + +4. **Consider Environment** + - Account for different lighting conditions + - Support both dark and light variants + - Provide high contrast options + +### Common Color Values + +#### Standard Colors +```kdl +// Pure colors +black 0 0 0 // #000000 +white 255 255 255 // #ffffff +red 255 0 0 // #ff0000 +green 0 255 0 // #00ff00 +blue 0 0 255 // #0000ff +yellow 255 255 0 // #ffff00 +magenta 255 0 255 // #ff00ff +cyan 0 255 255 // #00ffff +``` + +#### Gray Scale +```kdl +// Gray variations +gray_25 25 25 25 +gray_50 50 50 50 +gray_75 75 75 75 +gray_100 100 100 100 +gray_125 125 125 125 +gray_150 150 150 150 +gray_175 175 175 175 +gray_200 200 200 200 +``` + +#### Popular Theme Colors +```kdl +// Nord theme palette +nord_frost "#D8DEE9" // Light blue +nord_snow "#E5E9F0" // Pure white +nord_polar "#2E3440" // Dark blue-gray +nord_night "#3B4252" // Dark gray-blue +nord_aurora "#88C0D0" // Cyan + +// Dracula theme palette +dracula_bg "#282a36" // Dark purple +dracula_fg "#f8f8f2" // Light gray +dracula_pink "#ff79c6" // Bright pink +dracula_cyan "#8be9fd" // Bright cyan +dracula_green "#50fa7b" // Bright green +dracula_orange "#ffb86c" // Bright orange +``` + +## Theme Testing + +### Validate Theme Colors +Check theme works properly: + +```bash +# Test theme in new session +zellij --theme your_theme --session-name test-theme + +# Keep session open for inspection +``` + +### Preview Multiple Themes +Quickly switch between themes for testing: + +```bash +# Test multiple themes +for theme in nord dracula tokyo_night; do + zellij --theme "$theme" --session-name "test-$theme" + echo "Theme $theme ready for testing" +done +``` + +### Create Theme Variants +Generate light/dark variants of base theme: + +```kdl +themes { + my_theme_dark { + fg "#e0e0e0" + bg "#000000" + // ... other colors + } + my_theme_light { + fg "#000000" + bg "#ffffff" + // ... 
other colors (inverted/light versions) + } +} +``` + +## Integration with Editors + +### Vim Integration +Theme compatibility with vim color schemes: + +```kdl +themes { + vim_compatible { + // Use colors that match popular vim themes + fg "#abb2bf" // Similar to 'morning' vim theme + bg "#1a1b26" // Similar to 'morning' vim theme background + // Match other colors accordingly + } +} +``` + +### Neovim Integration +Modern editor theme compatibility: + +```kdl +themes { + nvim_compatible { + fg "#b0e1e6" // Similar to 'tokyonight' nvim theme + bg "#16161d" // Similar to 'tokyonight' nvim theme background + // Match other accent colors + } +} +``` \ No newline at end of file diff --git a/skills/zellij-config/scripts/convert_themes.py b/skills/zellij-config/scripts/convert_themes.py new file mode 100644 index 0000000..2c3be87 --- /dev/null +++ b/skills/zellij-config/scripts/convert_themes.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +""" +Zellij Theme Converter - Convert themes between formats and create custom themes + +Usage: + convert_themes.py [COMMAND] [OPTIONS] + +Commands: + create - Create a new custom theme + convert - Convert legacy YAML theme to KDL + list - List available theme templates + +Examples: + convert_themes.py create --name mytheme --rgb + convert_themes.py convert /path/to/theme.yaml + convert_themes.py list +""" + +import argparse +import json +import sys +from pathlib import Path + + +THEME_TEMPLATES = { + "nord": { + "fg": "#D8DEE9", + "bg": "#2E3440", + "black": "#3B4252", + "red": "#BF616A", + "green": "#A3BE8C", + "yellow": "#EBCB8B", + "blue": "#81A1C1", + "magenta": "#B48EAD", + "cyan": "#88C0D0", + "white": "#E5E9F0", + "orange": "#D08770" + }, + "dracula": { + "fg": "248 248 242", + "bg": "40 42 54", + "black": "0 0 0", + "red": "255 85 85", + "green": "80 250 123", + "yellow": "241 250 140", + "blue": "98 114 164", + "magenta": "255 121 198", + "cyan": "139 233 253", + "white": "255 255 255", + "orange": "255 184 108" + }, + "gruvbox-dark": { + "fg": "#ebdbb2", + "bg": "#282828", + "black": "#1d2021", + "red": "#cc241d", + "green": "#98971a", + "yellow": "#d79921", + "blue": "#83a598", + "magenta": "#d3869b", + "cyan": "#8ec07c", + "white": "#ebdbb2", + "orange": "#fe8019" + } +} + + +def create_theme(name, use_rgb=True): + """Create a new custom theme interactively.""" + theme_data = {} + + print(f"🎨 Creating theme: {name}") + print("Enter theme colors (press Enter for defaults):") + + colors = ["fg", "bg", "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "orange"] + + for color in colors: + if use_rgb: + print(f"{color} (RGB format: r g b):") + value = input(f" {color}: ").strip() + if value: + # Parse RGB values + parts = value.split() + if len(parts) == 3 and all(p.isdigit() for p in parts): + theme_data[color] = f"{parts[0]} {parts[1]} {parts[2]}" + else: + print(f" Using default {color}") + else: + print(f"{color} (hex format: #RRGGBB):") + value = input(f" {color}: ").strip() + if value: + theme_data[color] = value + + return theme_data + + +def generate_kdl_theme(name, theme_data, output_path): + """Generate KDL format theme file.""" + content = f"themes {{\n {name} {{\n" + + for color, value in theme_data.items(): + content += f' {color} {value}\n' + + content += " }}\n}}\n" + + try: + with open(output_path, 'w') as f: + f.write(content) + return True + except Exception as e: + print(f"❌ Error writing theme file: {e}") + return False + + +def convert_yaml_to_kdl(yaml_path): + """Convert legacy YAML theme to KDL format.""" + try: 
+ import yaml + with open(yaml_path, 'r') as f: + theme_data = yaml.safe_load(f) + + if not theme_data: + print("❌ No theme data found in YAML file") + return False + + # Extract theme name from filename + theme_name = Path(yaml_path).stem + output_path = Path(yaml_path).with_suffix('.kdl') + + return generate_kdl_theme(theme_name, theme_data, output_path) + except ImportError: + print("❌ PyYAML not installed. Install with: pip install pyyaml") + return False + except Exception as e: + print(f"❌ Error converting YAML theme: {e}") + return False + + +def list_templates(): + """List available theme templates.""" + print("📋 Available theme templates:") + for name, colors in THEME_TEMPLATES.items(): + print(f" {name}:") + if isinstance(colors["fg"], str): + print(f" Type: Hexadecimal") + print(f" Preview: FG={colors['fg']}, BG={colors['bg']}") + else: + print(f" Type: RGB") + print(f" Preview: FG=({colors['fg']}), BG=({colors['bg']})") + + +def main(): + parser = argparse.ArgumentParser(description="Manage Zellij themes") + subparsers = parser.add_subparsers(dest='command', help='Available commands') + + # Create command + create_parser = subparsers.add_parser('create', help='Create new custom theme') + create_parser.add_argument('--name', required=True, help='Theme name') + create_parser.add_argument('--hex', action='store_true', help='Use hexadecimal color format (default: RGB)') + create_parser.add_argument('--output', help='Output file path (default: ~/.config/zellij/themes/name.kdl)') + + # Convert command + convert_parser = subparsers.add_parser('convert', help='Convert YAML theme to KDL') + convert_parser.add_argument('yaml_path', help='Path to YAML theme file') + + # List command + list_parser = subparsers.add_parser('list', help='List theme templates') + + args = parser.parse_args() + + if args.command == 'create': + theme_data = create_theme(args.name, not args.hex) + output_path = args.output or Path.home() / ".config" / "zellij" / "themes" / f"{args.name}.kdl" + + if generate_kdl_theme(args.name, theme_data, output_path): + print(f"✅ Theme created: {output_path}") + else: + sys.exit(1) + + elif args.command == 'convert': + if convert_yaml_to_kdl(args.yaml_path): + print("✅ Theme conversion complete") + else: + sys.exit(1) + + elif args.command == 'list': + list_templates() + + else: + parser.print_help() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/skills/zellij-config/scripts/create_layout.py b/skills/zellij-config/scripts/create_layout.py new file mode 100644 index 0000000..f1a50c9 --- /dev/null +++ b/skills/zellij-config/scripts/create_layout.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +Zellij Layout Creator - Generate custom layouts from templates or parameters + +Usage: + create_layout.py [LAYOUT_TYPE] [OPTIONS] + +Layout Types: + dev - Development layout with editor and git + monitor - Monitoring layout with system metrics + terminal - Simple terminal layout + custom - Interactive custom layout creation + +Examples: + create_layout.py dev --name myproject + create_layout.py monitor --theme nord + create_layout.py terminal --horizontal-split 2 + create_layout.py custom --panes 3 --direction vertical +""" + +import argparse +import os +import sys +from pathlib import Path + + +LAYOUT_TEMPLATES = { + "dev": """layout { + default_tab_template {{ + pane size=1 borderless=true {{ + plugin location="zellij:tab-bar" + }} + children + pane size=2 borderless=true {{ + plugin location="zellij:status-bar" + }} + }} + tab name="{name}" 
cwd="{cwd}" focus=true {{ + pane command="nvim" size="80%" + pane size="20%" split_direction="vertical" {{ + pane command="git" {{ + args "status" + size="50%" + }} + pane command="htop" + }} + }} + tab name="terminal" {{ + pane command="bash" + }} +}}""", + + "monitor": """layout {{ + default_tab_template {{ + pane size=1 borderless=true {{ + plugin location="zellij:tab-bar" + }} + children + pane size=2 borderless=true {{ + plugin location="zellij:status-bar" + }} + }} + tab name="monitoring" split_direction="horizontal" {{ + pane command="htop" + pane command="btop" + pane command="iotop" + pane command="nethogs" + }} +}}""", + + "terminal": """layout {{ + tab name="main" {{ + pane command="bash"{split} + }} +}}""" +} + + +def create_custom_layout(panes, direction, name): + """Create a custom layout with specified number of panes.""" + if direction == "horizontal": + split_attr = 'split_direction="horizontal"' + else: + split_attr = 'split_direction="vertical"' + + layout = f'''layout {{ + tab name="{name}" {{ + pane command="bash" + {split_attr} {{ +''' + + # Add panes + for i in range(1, panes): + if i == panes: + layout += f' pane command="bash"\n' + else: + layout += f' pane command="bash"\n' + + layout += f' }}\n }}\n}}''' + + return layout + + +def get_layout_path(layouts_dir, layout_name): + """Get full path for layout file.""" + return layouts_dir / f"{layout_name}.kdl" + + +def write_layout_file(layout_path, content): + """Write layout content to file.""" + try: + layout_path.parent.mkdir(parents=True, exist_ok=True) + with open(layout_path, 'w') as f: + f.write(content) + return True + except Exception as e: + print(f"❌ Error writing layout file: {e}") + return False + + +def get_layout_cwd(): + """Get current working directory for layout.""" + return os.getcwd() + + +def main(): + parser = argparse.ArgumentParser(description="Create Zellij layouts from templates") + parser.add_argument("layout_type", choices=["dev", "monitor", "terminal", "custom"], + help="Type of layout to create") + parser.add_argument("--name", default="workspace", + help="Name for the layout (default: workspace)") + parser.add_argument("--cwd", help="Working directory for layout (default: current directory)") + parser.add_argument("--theme", help="Theme to apply (e.g., nord, dracula)") + + # Custom layout options + parser.add_argument("--panes", type=int, default=2, + help="Number of panes for custom layout (default: 2)") + parser.add_argument("--direction", choices=["horizontal", "vertical"], default="horizontal", + help="Split direction for custom layout (default: horizontal)") + + args = parser.parse_args() + + # Determine layouts directory + layouts_dir = Path.home() / ".config" / "zellij" / "layouts" + + # Get layout name and cwd + layout_name = args.name + cwd = args.cwd or get_layout_cwd() + + print(f"🚀 Creating {args.layout_type} layout...") + + # Generate layout content + if args.layout_type == "custom": + content = create_custom_layout(args.panes, args.direction, layout_name) + elif args.layout_type in LAYOUT_TEMPLATES: + content = LAYOUT_TEMPLATES[args.layout_type].format( + name=layout_name, + cwd=cwd + ) + else: + print(f"❌ Unknown layout type: {args.layout_type}") + sys.exit(1) + + # Add theme if specified + if args.theme: + content += f'\ntheme "{args.theme}"\n' + + # Write layout file + layout_path = get_layout_path(layouts_dir, layout_name) + if write_layout_file(layout_path, content): + print(f"✅ Layout created: {layout_path}") + print(f"💡 Use with: zellij --layout {layout_path}") + 
+    else:
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/skills/zellij-config/scripts/example.py b/skills/zellij-config/scripts/example.py
new file mode 100755
index 0000000..4ec9043
--- /dev/null
+++ b/skills/zellij-config/scripts/example.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+"""
+Example helper script for zellij-config
+
+This is a placeholder script that can be executed directly.
+Replace with actual implementation or delete if not needed.
+
+Example real scripts from other skills:
+- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
+- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
+"""
+
+def main():
+    print("This is an example script for zellij-config")
+    # TODO: Add actual script logic here
+    # This could be data processing, file conversion, API calls, etc.
+
+if __name__ == "__main__":
+    main()
diff --git a/skills/zellij-config/scripts/setup_zellij.py b/skills/zellij-config/scripts/setup_zellij.py
new file mode 100644
index 0000000..0a427e1
--- /dev/null
+++ b/skills/zellij-config/scripts/setup_zellij.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+"""
+Zellij Setup Script - Automates initial Zellij configuration setup
+
+Usage:
+    setup_zellij.py [--theme THEME] [--layout LAYOUT] [--keybindings] [--clean]
+
+Examples:
+    setup_zellij.py --theme nord --layout dev
+    setup_zellij.py --clean
+    setup_zellij.py --keybindings
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+
+def create_config_directory():
+    """Create Zellij configuration directory if it doesn't exist."""
+    config_dir = Path.home() / ".config" / "zellij"
+    config_dir.mkdir(parents=True, exist_ok=True)
+    return config_dir
+
+
+def dump_default_config(config_path):
+    """Dump default Zellij configuration to file."""
+    import subprocess
+    try:
+        # check=False: failures are reported via the returncode check below instead of raising
+        result = subprocess.run(
+            ["zellij", "setup", "--dump-config"],
+            capture_output=True,
+            text=True,
+            check=False
+        )
+        if result.returncode == 0:
+            with open(config_path, 'w') as f:
+                f.write(result.stdout)
+            print(f"✅ Default config written to {config_path}")
+            return True
+        else:
+            print(f"❌ Failed to dump default config: {result.stderr}")
+            return False
+    except FileNotFoundError:
+        print("❌ zellij command not found. Please install Zellij first.")
+        return False
+    except Exception as e:
+        print(f"❌ Error running zellij: {e}")
+        return False
+
+
+def create_layout_directory(config_dir):
+    """Create layouts directory within config directory."""
+    layouts_dir = config_dir / "layouts"
+    layouts_dir.mkdir(exist_ok=True)
+    return layouts_dir
+
+
+def setup_theme(theme_name, config_path):
+    """Set theme in configuration file."""
+    if not theme_name:
+        return True
+
+    try:
+        with open(config_path, 'r') as f:
+            content = f.read()
+
+        # Check if a theme node exists; if not, append one
+        if 'theme "' not in content:
+            content += '\ntheme "' + theme_name + '"\n'
+        else:
+            # Replace existing theme
+            import re
+            content = re.sub(r'theme\s+"[^"]*"', f'theme "{theme_name}"', content)
+
+        with open(config_path, 'w') as f:
+            f.write(content)
+
+        print(f"✅ Theme set to: {theme_name}")
+        return True
+    except Exception as e:
+        print(f"❌ Error setting theme: {e}")
+        return False
+
+
+def create_default_layout(layout_name, layouts_dir):
+    """Create a default layout template."""
+    import subprocess
+    try:
+        result = subprocess.run(
+            ["zellij", "setup", "--dump-layout", "default"],
+            capture_output=True,
+            text=True,
+            check=False
+        )
+        if result.returncode == 0:
+            layout_path = layouts_dir / f"{layout_name}.kdl"
+            with open(layout_path, 'w') as f:
+                f.write(result.stdout)
+            print(f"✅ Default layout created: {layout_path}")
+            return True
+        else:
+            print(f"❌ Failed to create layout: {result.stderr}")
+            return False
+    except Exception as e:
+        print(f"❌ Error creating layout: {e}")
+        return False
+
+
+def setup_keybindings_hint():
+    """Provide hints for setting up keybindings."""
+    print("""
+📝 Keybinding Setup Tips:
+
+1. Edit ~/.config/zellij/config.kdl
+2. Add a keybinds section:
+   keybinds {
+       normal {
+           bind "Ctrl g" { SwitchToMode "locked"; }
+           bind "Ctrl p" { SwitchToMode "pane"; }
+           // ... add more bindings
+       }
+   }
+3. Common modes: normal, locked, pane, tab, resize, session
+4. Use 'zellij setup --check' to validate
+    """)
+
+
+def validate_config(config_path):
+    """Validate Zellij configuration."""
+    import subprocess
+    try:
+        # 'zellij setup --check' validates the config in Zellij's default location,
+        # which is where config_path is expected to live
+        result = subprocess.run(
+            ["zellij", "setup", "--check"],
+            capture_output=True,
+            text=True,
+            check=False
+        )
+        if result.returncode == 0:
+            print("✅ Configuration is valid")
+            return True
+        else:
+            print(f"❌ Configuration errors: {result.stderr}")
+            return False
+    except Exception as e:
+        print(f"❌ Error validating config: {e}")
+        return False
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Setup Zellij configuration with optional theme and layout",
+        formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--theme", help="Set theme (e.g., nord, dracula, default)")
+    parser.add_argument("--layout", help="Create default layout with specified name")
+    parser.add_argument("--keybindings", action="store_true", help="Show keybinding setup hints")
+    parser.add_argument("--clean", action="store_true", help="Start fresh (clean setup)")
+    parser.add_argument("--validate", action="store_true", help="Validate existing configuration")
+
+    args = parser.parse_args()
+
+    # Create config directory
+    config_dir = create_config_directory()
+    config_path = config_dir / "config.kdl"
+
+    print("🚀 Setting up Zellij configuration...")
+    print(f"   Config directory: {config_dir}")
+
+    if args.clean:
+        print("Starting with clean configuration...")
+        if dump_default_config(config_path):
+            print("✅ Clean setup complete")
+        else:
+            sys.exit(1)
+        return
+
+    if args.validate:
+        if config_path.exists():
+            validate_config(config_path)
+        else:
+            print("❌ No configuration found to validate")
+        return
+
+    # Setup basic config if it doesn't exist
+    if not config_path.exists():
+        print("Creating default configuration...")
+        if not dump_default_config(config_path):
+            sys.exit(1)
+
+    # Create layouts directory
+    layouts_dir = create_layout_directory(config_dir)
+
+    # Set theme if specified
+    if args.theme:
+        setup_theme(args.theme, config_path)
+
+    # Create default layout if specified
+    if args.layout:
+        create_default_layout(args.layout, layouts_dir)
+
+    # Show keybinding hints if requested
+    if args.keybindings:
+        setup_keybindings_hint()
+
+    # Validate final configuration
+    if config_path.exists():
+        validate_config(config_path)
+
+    print("✅ Zellij setup complete!")
+    print(f"📁 Configuration file: {config_path}")
+    print(f"📁 Layouts directory: {layouts_dir}")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
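
As a rough illustration of the files these scripts write (an illustrative sketch, assuming the default options): `setup_zellij.py --theme nord` leaves ~/.config/zellij/config.kdl with a single top-level theme node, e.g.

    theme "nord"

and `create_layout.py custom --panes 2 --direction vertical` writes ~/.config/zellij/layouts/workspace.kdl along the lines of

    layout {
        tab name="workspace" {
            pane split_direction="vertical" {
                pane command="bash"
                pane command="bash"
            }
        }
    }

which can then be opened with `zellij --layout ~/.config/zellij/layouts/workspace.kdl`.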