commit 38e80921c837a7f13b97f456407db887ca10a566 Author: Zhongwei Li Date: Sat Nov 29 17:51:59 2025 +0800 Initial commit diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..1e86378 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,32 @@ +{ + "name": "navigator", + "description": "Complete framework for context-efficient AI development - Philosophy, Metrics, Education, Skills, Agents, and Multi-Claude Workflows. Master context engineering principles through interactive learning. Achieve 92% token reduction with proven strategies. Multi-Claude reliability: 90%+ success rate with automatic retry, timeout monitoring, and workflow resume. Production-ready orchestration.", + "version": "4.5.0", + "author": { + "name": "Aleks Petrov", + "email": "aleks@example.com", + "url": "https://github.com/alekspetrov" + }, + "skills": [ + "./skills/nav-init", + "./skills/nav-start", + "./skills/nav-stats", + "./skills/nav-update-claude", + "./skills/nav-upgrade", + "./skills/nav-install-multi-claude", + "./skills/nav-marker", + "./skills/nav-markers", + "./skills/nav-compact", + "./skills/nav-task", + "./skills/nav-sop", + "./skills/nav-skill-creator", + "./skills/plugin-slash-command", + "./skills/product-design", + "./skills/visual-regression", + "./skills/frontend-component", + "./skills/backend-endpoint", + "./skills/database-migration", + "./skills/backend-test", + "./skills/frontend-test" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..6dfb58e --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# navigator + +Complete framework for context-efficient AI development - Philosophy, Metrics, Education, Skills, Agents, and Multi-Claude Workflows. Master context engineering principles through interactive learning. Achieve 92% token reduction with proven strategies. Multi-Claude reliability: 90%+ success rate with automatic retry, timeout monitoring, and workflow resume. Production-ready orchestration. diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..80710a3 --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,385 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:alekspetrov/navigator:", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "2a670086a00daa26359425b953efe9fb12342881", + "treeHash": "bca5f3bcdc22270c15b1e98ff72e84bf1f5e162d01d3fbb2acbde439f1256685", + "generatedAt": "2025-11-28T10:13:08.150334Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "navigator", + "description": "Complete framework for context-efficient AI development - Philosophy, Metrics, Education, Skills, Agents, and Multi-Claude Workflows. Master context engineering principles through interactive learning. Achieve 92% token reduction with proven strategies. Multi-Claude reliability: 90%+ success rate with automatic retry, timeout monitoring, and workflow resume. 
Production-ready orchestration.", + "version": "4.5.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "755bdd5dd8a953fe473cfa3c17c923ea45e99ac24b920c648ed9093f1610d1e3" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "d3bc1b45b2b50fda48735df5a0cdb9dcc90b999a18690a1f0647cef1e97f6e67" + }, + { + "path": "skills/nav-skill-creator/SKILL.md", + "sha256": "c15fb1a881c210990ab12cc3a29d3cecc86dd4329349bae1ab39bbf79ab47343" + }, + { + "path": "skills/nav-skill-creator/examples/example-generated-skill.md", + "sha256": "d6982507362b434058e33e5a92bca90ca338e652513c607c6db0f1505e2bd031" + }, + { + "path": "skills/nav-skill-creator/functions/skill_generator.py", + "sha256": "d18e703a8be195e7e0af1622b59b266db23a946d8c89128c4147571719a232c0" + }, + { + "path": "skills/nav-skill-creator/templates/skill-template.md", + "sha256": "a76fe020fad3cffceb881cf1f9afbd9f627d88282cd30658d405a43f351934f3" + }, + { + "path": "skills/nav-stats/SKILL.md", + "sha256": "534fdddf7028538dba06aebd55a07bf26a7587bc151eb4c4ff8737c19678f3c8" + }, + { + "path": "skills/nav-stats/functions/report_formatter.py", + "sha256": "99ec3e3035559a676b335d398cbd2342f3dcb4b4a21fa1dcc0ac082152418070" + }, + { + "path": "skills/nav-stats/functions/efficiency_scorer.py", + "sha256": "10046a504afce8cee8c93ff90885784921d4f4cc87c4fd6de8f2c692d901ab75" + }, + { + "path": "skills/plugin-slash-command/SKILL.md", + "sha256": "0f8ba819d15cf3a59169eece43a4a81fff66fa9d61f69cafe15e2419e8b341ca" + }, + { + "path": "skills/plugin-slash-command/examples/simple-command-example.md", + "sha256": "59ef48a4fdd34a29e18c8152ac2303877e684cd39b9fb08759f2bf88c7473c8a" + }, + { + "path": "skills/plugin-slash-command/examples/medium-command-example.md", + "sha256": "ae39c9e3fb9b02163733eb7dca360e5f3c47f389e675e55f8f7f04bd9102e540" + }, + { + "path": "skills/plugin-slash-command/functions/command_generator.py", + "sha256": "454e28e5c82edb9d69b72e2b8e506983c65782933a3b9aed3ab62e7ead4715de" + }, + { + "path": "skills/plugin-slash-command/functions/command_validator.py", + "sha256": "039b12ff91b2814be96daddd6f76f867e7a9533278a0f8124cd4f7659c7cc47a" + }, + { + "path": "skills/plugin-slash-command/templates/command-template.md", + "sha256": "47194fa4b114778e16ebd82933c3b61671edd3f55bd3f049f14c5f17934caef6" + }, + { + "path": "skills/nav-install-multi-claude/SKILL.md", + "sha256": "19b50790a977991b7c7aa8c941a9f3bf6c8a7c97b690a5423a210029e29ce4d4" + }, + { + "path": "skills/nav-task/SKILL.md", + "sha256": "276be68a250c3162091bed33d9cb45fc00d22e53f857218c0a2c45594c74965b" + }, + { + "path": "skills/nav-task/functions/task_id_generator.py", + "sha256": "fbfe2405567fb5f99ffe32bff9682d5f3159624cd6113b3d2abe97999200446b" + }, + { + "path": "skills/nav-task/functions/index_updater.py", + "sha256": "d0d968e8a944ccd5d606a9469b520422c6bd38ebb5fe4b76a1da1b972838b132" + }, + { + "path": "skills/nav-task/functions/task_formatter.py", + "sha256": "29a4131b6693d05b89ab61fd21a870756d1b1f745b427c9a6259a66030eb0f38" + }, + { + "path": "skills/backend-test/SKILL.md", + "sha256": "8f1ac1a94009d42deb88953c8311141589b5170ef7175ca913d4f8132ae4db6f" + }, + { + "path": "skills/frontend-component/SKILL.md", + "sha256": "f856e69435dfd3ef237677b9cf5248cbb68858ee28b4fc93b6d719a9f46060fa" + }, + { + "path": "skills/frontend-component/examples/SearchBar.tsx", + "sha256": "e9b49d60a2ef3ed774fdedf029c40638d120c67f23902b3e50598c10eb8600f4" + }, + { + "path": "skills/frontend-component/examples/Button.tsx", + "sha256": 
"f89a43aad9c559f96d88585f6f1b4264ae04d9cce02329b4f5360a3f47e40667" + }, + { + "path": "skills/frontend-component/functions/style_generator.py", + "sha256": "e5e227710e92615539341d60b7f45b8aa21da24a38890e0429607c1d3c006870" + }, + { + "path": "skills/frontend-component/functions/test_generator.py", + "sha256": "315f244784ae8c5cafa5ecf280d2618ccbfb111e7bb1db4de5be16029ecd01fb" + }, + { + "path": "skills/frontend-component/functions/component_generator.py", + "sha256": "0b32f43545418353534d8e04f01001ba20ffe104d1db45c6e4c98ca85493cae0" + }, + { + "path": "skills/frontend-component/functions/props_interface_generator.py", + "sha256": "0b0d955010072b030f234ff522233e831ed66774cff3f287a50d0c48f9886294" + }, + { + "path": "skills/frontend-component/functions/name_validator.py", + "sha256": "ff3b9af4d6529e5e44281fb3086a9ee9b139296c160431aed6b68991c0fc139f" + }, + { + "path": "skills/frontend-component/templates/style-template.module.css", + "sha256": "83dd5fcda9580897f75a622eeb9e5f9f58f4a16b261d22b8a56a98318be8fe18" + }, + { + "path": "skills/frontend-component/templates/test-template.test.tsx", + "sha256": "29eb16e6ce287ae78c65e7d078287269456a52c96472649bba0654368c4512fb" + }, + { + "path": "skills/frontend-component/templates/component-simple-template.tsx", + "sha256": "53a2542967aafbeecb3e134554ef6bdb0aef14a88dea3b7d4c452b062a255fe8" + }, + { + "path": "skills/nav-marker/SKILL.md", + "sha256": "1bb94c16f68cb2bd88250569afd3eccb893ac28c3a9f2d897206559d23e0bf8a" + }, + { + "path": "skills/nav-marker/functions/marker_compressor.py", + "sha256": "ab140cce6e2ce493bd07e9590fa84e20745fa43fd8092d593d372af1847e5a13" + }, + { + "path": "skills/nav-start/SKILL.md", + "sha256": "1aa99920ab102a80d27ee63201093837dabf041fb670320962b70e7006b8c315" + }, + { + "path": "skills/nav-start/scripts/otel_session_stats.py", + "sha256": "d718cc9da568993b1872a51da90e145036368821ed312acdcf3255c7df9cc043" + }, + { + "path": "skills/nav-init/SKILL.md", + "sha256": "1c0fcf18c0c5defce78700aedee5545819935f8071ea3e0b5f0080920e3212fd" + }, + { + "path": "skills/nav-init/functions/template_customizer.py", + "sha256": "90743872ca63c030306232492237fd4ac6675b39c6d5c2ddbbebc682389087e2" + }, + { + "path": "skills/nav-init/functions/project_detector.py", + "sha256": "9d564a4e021f1ff5ad6b4c557419e06688d057b1560786a6aa74084f0f51783d" + }, + { + "path": "skills/visual-regression/SKILL.md", + "sha256": "8bda8d76ccaba6581805e50d04fc2469b883445298ef2ff141142a8c2e6dc52c" + }, + { + "path": "skills/visual-regression/examples/existing-storybook-vr.md", + "sha256": "d38511f8bacf31e4b673867202589c2fce874fc1309c3d24ead2360d5999cb62" + }, + { + "path": "skills/visual-regression/examples/design-system-vr.md", + "sha256": "e407669b0e5e8f70245b8ac25da68166c5e26baeeaeff943e60bdc5a70f641be" + }, + { + "path": "skills/visual-regression/examples/simple-component-vr.md", + "sha256": "4cfb24208fcfa37ca84774425d5733097c36a62a3da446cd30a414d5529fbaf1" + }, + { + "path": "skills/visual-regression/functions/ci_workflow_generator.py", + "sha256": "cc35c2078a60b90ce1ac1f9d7ee95cfab49692186e41cc69ac499d27fcc9f659" + }, + { + "path": "skills/visual-regression/functions/vr_setup_validator.py", + "sha256": "29fe58801220543abdb89143c6690f59b88db161f54a5c4dfee4596a6e73cc6b" + }, + { + "path": "skills/visual-regression/functions/story_generator.py", + "sha256": "23e94d1f3b90bac41db048f2106dc24c4358392c15f7fe5f6b6154656aef2e28" + }, + { + "path": "skills/visual-regression/functions/chromatic_config_generator.py", + "sha256": 
"3580e46d4d0f661b0ef4a5c4ee495f076624f44a72e00968d74d6a1909bbc5a6" + }, + { + "path": "skills/visual-regression/templates/story-template.tsx.j2", + "sha256": "9676b0ea728ccaa9b20e30c1b49efd8e3aa30d21be0b93f1fcbf71f7e2b062fb" + }, + { + "path": "skills/visual-regression/templates/storybook-main.js.j2", + "sha256": "f9aeb31f259da52410f6214b572bc9e5d97e2fcbbea57224bc2603cfbc843df5" + }, + { + "path": "skills/visual-regression/templates/chromatic-config.json.j2", + "sha256": "27ce72227f3bb43f7f716f355b78dd9480f94c20061a9d3aaba2daf326461ef8" + }, + { + "path": "skills/visual-regression/templates/gitlab-ci.yml.j2", + "sha256": "254797adb3e2c1989d654fece32630944205b61b873e73bd6ee54c4855bb1fcb" + }, + { + "path": "skills/visual-regression/templates/github-workflow.yml.j2", + "sha256": "5fefaed91b618b1d7489096bfcf22827b9489a1f8fffb16de69f1683073fd67b" + }, + { + "path": "skills/product-design/setup.sh", + "sha256": "7bd95360bbe007586f3b0ee460e27db8d4e5690079cd2a33d21c8255f84b06e4" + }, + { + "path": "skills/product-design/requirements.txt", + "sha256": "b34e308918170dffb16040dc950d539b59fa5d388c5ee468ee2370208ab9f5d6" + }, + { + "path": "skills/product-design/INSTALL.md", + "sha256": "deb6ffc250aeeb803ea8f72c7e8b1c968a0fc3be4c1a3e74679441d2cd79d17d" + }, + { + "path": "skills/product-design/GETTING-STARTED.md", + "sha256": "27197461f24ffacbcc3c87d55e06e9a4acfc3c46ae3ed4b01be31b475d522412" + }, + { + "path": "skills/product-design/README.md", + "sha256": "253c86d61bedf1ab52216c871fb9ec40cf016083fa1c3326d771133ae2801b2d" + }, + { + "path": "skills/product-design/SKILL.md", + "sha256": "db28fc8abe9a62ffa6ad2e17d9cfbf4e11350d7a3d70a08d0719af9fc5878381" + }, + { + "path": "skills/product-design/examples/dashboard-redesign-review.md", + "sha256": "67adacf562438c913c4eb0790a955d75e24afacae80b50e24257c10104dc8f31" + }, + { + "path": "skills/product-design/functions/figma_mcp_client.py", + "sha256": "28987212ffff2e93e3e363a15718e70352ee2b502fb6e62e2ef7c5e17abd276c" + }, + { + "path": "skills/product-design/functions/implementation_planner.py", + "sha256": "70187427d2488b948159642e594e6be48571842acdfa15d4452144be8077933f" + }, + { + "path": "skills/product-design/functions/design_analyzer.py", + "sha256": "085b97f64c53c238a617236fe0113e5f202c5f663b2071bfdec03c35d7ba798d" + }, + { + "path": "skills/product-design/functions/token_extractor.py", + "sha256": "8341e4dcae63f06891383a3b0bf065d9e4397b76b7cd40f9e4419525a49a1680" + }, + { + "path": "skills/product-design/functions/design_system_auditor.py", + "sha256": "9739aefc05b8023749cc21ffcbd1728835e53bb7a4fb08f0a78f9d842b91e02e" + }, + { + "path": "skills/product-design/functions/component_mapper.py", + "sha256": "2f5b78e3e4af13d45b0a0b9cb4b24c263016b624d73f383749c2a76981f706a3" + }, + { + "path": "skills/product-design/functions/test_mcp_connection.py", + "sha256": "ca708b916997a0ec557059bd16327ada53ac0ae05582810011433d4ed13073b0" + }, + { + "path": "skills/product-design/templates/design-review-report.md", + "sha256": "2ed909cd56c2418309a47d0ccd7c63ddf1752cbe9c973856fc04652f3ca3fb79" + }, + { + "path": "skills/database-migration/SKILL.md", + "sha256": "213c941367aea241f483382ee54b558e4a6595ea5443c2a7bdfb1bc73c3dae00" + }, + { + "path": "skills/nav-markers/SKILL.md", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "skills/nav-update-claude/skill.md", + "sha256": "e595c3204429a2cea7c7133c2505ca3e19b4faf1741d8c48e33e64bb9a29a74f" + }, + { + "path": "skills/nav-update-claude/functions/version_detector.py", + 
"sha256": "720b1c733354e4cbb4f69ab903b4851e8b7ff9fb2f94ba6c3e929f0805278ebd" + }, + { + "path": "skills/nav-update-claude/functions/claude_updater.py", + "sha256": "e9ce74d873b05ce43fc2116f2b31b18bd0e73b906ff3902b957e04ed0989f07c" + }, + { + "path": "skills/backend-endpoint/SKILL.md", + "sha256": "6a8324efebcecef5221a5ff7f7c5f44e5c472fc93d6dabe9ec16f1513ea89551" + }, + { + "path": "skills/backend-endpoint/examples/users-get.ts", + "sha256": "12a508d0731b99566dfabc53ca13b50e03449b6bf3a912db95e7f9c6fce7d8ef" + }, + { + "path": "skills/backend-endpoint/examples/users-post.ts", + "sha256": "00ebf0c846265fc178e704b35917ce3f5c21922537e9b750644c70a80d3e69ee" + }, + { + "path": "skills/backend-endpoint/functions/validation_generator.py", + "sha256": "c90652bc689304f481cacba5093d3ea228ff3a5f118c77b68c096dc64e328daf" + }, + { + "path": "skills/backend-endpoint/functions/route_validator.py", + "sha256": "1a1a0740645d17f9937db64ccdb7d311fb661daeca010c1da6d782778e857e6b" + }, + { + "path": "skills/backend-endpoint/functions/endpoint_generator.py", + "sha256": "42af2625d719b995c15c9cac698fe910036c4b110a8e6dce6fb2305044fd698d" + }, + { + "path": "skills/backend-endpoint/templates/express-route-template.ts", + "sha256": "c6ef46babd7742436aae9ad8c41ff1542c0d57f2ad0d85ddc16d77a0857e0393" + }, + { + "path": "skills/backend-endpoint/templates/endpoint-test-template.spec.ts", + "sha256": "b912c12c0ec2fc6750d06896b33d4ea202bb4f7fa238415db8024e2ae536c7ce" + }, + { + "path": "skills/nav-sop/SKILL.md", + "sha256": "6afa4ec7db2bcea42009948b007fc379e29f7a8a4c0d2051a04f839a36abdedf" + }, + { + "path": "skills/nav-sop/functions/sop_formatter.py", + "sha256": "f485099128b352ae2119e1281380dca7f33c69333dce1f59e5f4fbadbeec8d0f" + }, + { + "path": "skills/nav-compact/SKILL.md", + "sha256": "8dd5a33676282e26156dbe179e978e92884ff799d5bdde6951c23a5d4c60f35f" + }, + { + "path": "skills/nav-upgrade/SKILL.md", + "sha256": "066822ed7db81777a2f1f0f29ba2eb3b63a59bccfebddde73c8d6d7663dda729" + }, + { + "path": "skills/nav-upgrade/functions/plugin_updater.py", + "sha256": "c22ca7f0703a6f49b782bf34297ad5f7c72244f32ceb0c6ea6ea823bb2fdaac3" + }, + { + "path": "skills/nav-upgrade/functions/plugin_verifier.py", + "sha256": "b99d7576dcb7afdee0f67650d33cf33e3e33f8b2b3c061e9b8b00182917970f8" + }, + { + "path": "skills/nav-upgrade/functions/version_detector.py", + "sha256": "a5f93c3f091ccbde8c17460681bea4fe33f0cb5c50a9893a972adc9fe7e492db" + }, + { + "path": "skills/frontend-test/SKILL.md", + "sha256": "58ba419449dff06b691362f416687555158cdc98acaa7c1d2b893d125f22c958" + } + ], + "dirSha256": "bca5f3bcdc22270c15b1e98ff72e84bf1f5e162d01d3fbb2acbde439f1256685" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/backend-endpoint/SKILL.md b/skills/backend-endpoint/SKILL.md new file mode 100644 index 0000000..c9ec7c8 --- /dev/null +++ b/skills/backend-endpoint/SKILL.md @@ -0,0 +1,472 @@ +--- +name: backend-endpoint +description: Create REST/GraphQL API endpoint with validation, error handling, and tests. Auto-invoke when user says "add endpoint", "create API", "new route", or "add route". +allowed-tools: Read, Write, Edit, Grep, Glob, Bash +version: 1.0.0 +--- + +# Backend API Endpoint Generator + +Generate production-ready REST or GraphQL endpoints with request validation, error handling, and comprehensive tests. 
+
+## When to Invoke
+
+Auto-invoke when user mentions:
+- "Add endpoint"
+- "Create API"
+- "New route"
+- "Add route"
+- "Create API endpoint for [resource]"
+
+## What This Does
+
+1. Generates route handler with proper HTTP methods
+2. Adds request validation (body, params, query)
+3. Implements error handling
+4. Creates test file with request/response tests
+5. Follows REST/GraphQL conventions
+6. Includes authentication middleware (if needed)
+
+## Execution Steps
+
+### Step 1: Gather Endpoint Requirements
+
+**Ask user for endpoint details**:
+```
+Endpoint path: [e.g., /api/users/:id]
+HTTP method: [GET, POST, PUT, PATCH, DELETE]
+Resource name: [e.g., User, Post, Product]
+
+Framework:
+  - express (default)
+  - fastify
+  - nestjs
+  - graphql
+
+Authentication required: [yes/no]
+Request validation needed: [yes/no]
+```
+
+**Validate endpoint path**:
+- Use predefined function: `functions/route_validator.py`
+- Ensure RESTful conventions
+- Check path parameters syntax
+- No trailing slashes
+
+### Step 2: Generate Route Handler
+
+**Based on HTTP method and framework**:
+
+Use predefined function: `functions/endpoint_generator.py`
+
+```bash
+python3 functions/endpoint_generator.py \
+  --path "/api/users/:id" \
+  --method "GET" \
+  --resource "User" \
+  --framework "express" \
+  --auth true \
+  --validation true \
+  --template "templates/express-route-template.ts" \
+  --output "src/routes/users.ts"
+```
+
+**Template includes**:
+- Route definition
+- Request validation middleware
+- Controller/handler function
+- Error handling
+- Response formatting
+- TypeScript types
+
+### Step 3: Generate Validation Schema
+
+**Use predefined function**: `functions/validation_generator.py`
+
+```bash
+python3 functions/validation_generator.py \
+  --resource "User" \
+  --method "POST" \
+  --fields "name:string:required,email:email:required,age:number:optional" \
+  --library "zod" \
+  --output "src/validators/user.validator.ts"
+```
+
+**Supported validation libraries**:
+- Zod (default, TypeScript-first)
+- Joi (JavaScript schema)
+- Yup (object schema)
+- Express-validator (middleware-based)
+
+**Output example (Zod)**:
+```typescript
+import { z } from 'zod';
+
+export const createUserSchema = z.object({
+  name: z.string().min(1),
+  email: z.string().email(),
+  age: z.number().optional(),
+});
+
+export type CreateUserInput = z.infer<typeof createUserSchema>;
+```
+
+### Step 4: Generate Error Handling Middleware
+
+**Use predefined function**: `functions/error_handler_generator.py`
+
+```bash
+python3 functions/error_handler_generator.py \
+  --framework "express" \
+  --template "templates/error-handler-template.ts" \
+  --output "src/middleware/errorHandler.ts"
+```
+
+**Error handler includes**:
+- HTTP status code mapping
+- Error response formatting
+- Logging integration
+- Development vs production modes
+- Validation error handling
+
+### Step 5: Generate Test File
+
+**Use predefined function**: `functions/test_generator.py`
+
+```bash
+python3 functions/test_generator.py \
+  --endpoint "/api/users/:id" \
+  --method "GET" \
+  --framework "express" \
+  --template "templates/endpoint-test-template.spec.ts" \
+  --output "tests/routes/users.test.ts"
+```
+
+**Test template includes**:
+- Success case (200/201)
+- Validation errors (400)
+- Not found (404)
+- Unauthorized (401)
+- Server errors (500)
+- Edge cases
+
+**Example test**:
+```typescript
+describe('GET /api/users/:id', () => {
+  it('returns user when found', async () => {
+    const response = await request(app)
+      .get('/api/users/123')
+      .expect(200);
+ + expect(response.body).toMatchObject({ + id: '123', + name: expect.any(String), + }); + }); + + it('returns 404 when user not found', async () => { + await request(app) + .get('/api/users/999') + .expect(404); + }); +}); +``` + +### Step 6: Generate API Documentation Comment + +**JSDoc or OpenAPI annotation**: + +```typescript +/** + * @route GET /api/users/:id + * @description Get user by ID + * @access Private + * @param {string} id - User ID + * @returns {User} User object + * @throws {404} User not found + * @throws {401} Unauthorized + */ +``` + +### Step 7: Show Endpoint Summary + +**Display generated files and usage**: + +``` +✅ Endpoint Created: GET /api/users/:id + +Structure: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📁 src/ + ├── routes/users.ts (Route handler) + ├── validators/user.validator.ts (Request validation) + └── middleware/errorHandler.ts (Error handling) + +📁 tests/ + └── routes/users.test.ts (Integration tests) +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Route Registration: +import { userRoutes } from './routes/users'; +app.use('/api', userRoutes); + +Test: +curl http://localhost:3000/api/users/123 +# or +npm test -- users.test.ts + +Next Steps: +1. Implement business logic in controller +2. Connect to database/service layer +3. Run tests: npm test +4. Test with Postman/Thunder Client +``` + +--- + +## Predefined Functions + +### 1. route_validator.py + +Validates route path follows REST conventions. + +**Usage**: +```bash +python3 functions/route_validator.py --path "/api/users/:id" --method "GET" +``` + +**Checks**: +- RESTful naming (plural resources) +- Path parameter syntax (`:id` or `{id}`) +- No trailing slashes +- HTTP method matches intent + +**Returns**: Valid path or error message + +--- + +### 2. endpoint_generator.py + +Generates endpoint handler from template. + +**Usage**: +```bash +python3 functions/endpoint_generator.py \ + --path "/api/users/:id" \ + --method "GET" \ + --resource "User" \ + --framework "express" \ + --auth true \ + --validation true \ + --template "templates/express-route-template.ts" +``` + +**Parameters**: +- `--path`: API endpoint path +- `--method`: HTTP method +- `--resource`: Resource name (singular, PascalCase) +- `--framework`: Backend framework +- `--auth`: Include auth middleware +- `--validation`: Include validation middleware +- `--template`: Template file path + +**Returns**: Generated endpoint code + +--- + +### 3. validation_generator.py + +Generates request validation schema. + +**Usage**: +```bash +python3 functions/validation_generator.py \ + --resource "User" \ + --method "POST" \ + --fields "name:string:required,email:email:required" \ + --library "zod" +``` + +**Supported field types**: +- `string`, `number`, `boolean` +- `email`, `url`, `uuid` +- `array`, `object` +- `date`, `datetime` + +**Returns**: Validation schema code + +--- + +### 4. error_handler_generator.py + +Generates error handling middleware. + +**Usage**: +```bash +python3 functions/error_handler_generator.py \ + --framework "express" \ + --template "templates/error-handler-template.ts" +``` + +**Returns**: Error handler middleware code + +--- + +### 5. test_generator.py + +Generates endpoint integration tests. 
+ +**Usage**: +```bash +python3 functions/test_generator.py \ + --endpoint "/api/users/:id" \ + --method "GET" \ + --framework "express" \ + --template "templates/endpoint-test-template.spec.ts" +``` + +**Generates tests for**: +- Success responses +- Validation errors +- Authentication errors +- Not found errors +- Server errors + +**Returns**: Generated test code + +--- + +## Templates + +### express-route-template.ts + +Express.js route handler template. + +**Placeholders**: +- `${ROUTE_PATH}` - API endpoint path +- `${HTTP_METHOD}` - HTTP method (lowercase) +- `${RESOURCE_NAME}` - Resource name (PascalCase) +- `${VALIDATION_MIDDLEWARE}` - Validation middleware +- `${AUTH_MIDDLEWARE}` - Authentication middleware + +### fastify-route-template.ts + +Fastify route handler template (alternative). + +### graphql-resolver-template.ts + +GraphQL resolver template (alternative). + +### validation-zod-template.ts + +Zod validation schema template. + +### endpoint-test-template.spec.ts + +Integration test template with supertest. + +**Placeholders**: +- `${ENDPOINT_PATH}` - Endpoint to test +- `${HTTP_METHOD}` - HTTP method +- `${TEST_CASES}` - Generated test cases + +--- + +## Examples + +See `examples/` directory for reference implementations: + +1. **users-get.ts** - GET endpoint with auth +2. **users-post.ts** - POST endpoint with validation +3. **graphql-resolver.ts** - GraphQL mutation example + +Each example includes: +- Route/resolver implementation +- Validation schema +- Error handling +- Test file +- Usage documentation + +--- + +## Best Practices + +### REST API Design +- **Use plural nouns** for resources (`/users`, not `/user`) +- **Use HTTP methods correctly** (GET=read, POST=create, PUT/PATCH=update, DELETE=remove) +- **Nest resources properly** (`/users/:userId/posts/:postId`) +- **Return proper status codes** (200, 201, 400, 401, 404, 500) + +### Request Validation +- **Validate all inputs** (body, params, query) +- **Fail fast** (validate before business logic) +- **Clear error messages** (tell user what's wrong) +- **Sanitize inputs** (prevent injection attacks) + +### Error Handling +- **Centralized error handler** (DRY principle) +- **Consistent error format** (always same structure) +- **Don't expose internals** (sanitize stack traces in production) +- **Log errors** (for debugging) + +### Security +- **Authentication** (verify identity) +- **Authorization** (check permissions) +- **Rate limiting** (prevent abuse) +- **Input sanitization** (prevent XSS, SQL injection) + +### Testing +- **Test happy path** (success cases) +- **Test error cases** (validation, auth, not found) +- **Test edge cases** (empty data, large data) +- **Mock external dependencies** (database, APIs) + +--- + +## Troubleshooting + +### Route Not Found (404) + +**Problem**: Endpoint returns 404 even though route is defined + +**Solutions**: +1. Check route registration order (specific before generic) +2. Verify path matches exactly (case-sensitive) +3. Check middleware isn't blocking request +4. Validate HTTP method matches + +### Validation Always Fails + +**Problem**: Valid requests fail validation + +**Solutions**: +1. Check field names match exactly +2. Verify data types are correct +3. Check required vs optional fields +4. Inspect validation error message + +### Tests Failing + +**Problem**: Integration tests don't pass + +**Solutions**: +1. Ensure test database is seeded +2. Check test fixtures are correct +3. Verify mocks are set up properly +4. 
Run tests with `--verbose` flag
+
+---
+
+## Success Criteria
+
+**This skill succeeds when**:
+- [ ] Endpoint responds with correct status codes
+- [ ] Request validation catches invalid inputs
+- [ ] Error handling works consistently
+- [ ] Tests cover success and error cases
+- [ ] Code follows REST/GraphQL conventions
+- [ ] Documentation is clear and complete
+
+---
+
+**Auto-invoke this skill when creating API endpoints to ensure consistency and security** 🔒
diff --git a/skills/backend-endpoint/examples/users-get.ts b/skills/backend-endpoint/examples/users-get.ts
new file mode 100644
index 0000000..7ff3de1
--- /dev/null
+++ b/skills/backend-endpoint/examples/users-get.ts
@@ -0,0 +1,54 @@
+/**
+ * User Routes - GET endpoint example
+ *
+ * @route GET /api/users/:id
+ * @access Private
+ */
+
+import { Router, Request, Response, NextFunction } from 'express';
+import { authMiddleware } from '../middleware/auth';
+
+const router = Router();
+
+/**
+ * GET /api/users/:id
+ * @description Get user by ID
+ * @access Private
+ */
+router.get(
+  '/api/users/:id',
+  authMiddleware,
+  async (req: Request, res: Response, next: NextFunction) => {
+    try {
+      const { id } = req.params;
+
+      // TODO: Fetch user from database
+      const user = await getUserById(id);
+
+      if (!user) {
+        return res.status(404).json({
+          success: false,
+          error: 'User not found',
+        });
+      }
+
+      res.status(200).json({
+        success: true,
+        data: user,
+      });
+    } catch (error) {
+      next(error);
+    }
+  }
+);
+
+export default router;
+
+// Mock function (replace with actual database query)
+async function getUserById(id: string) {
+  return {
+    id,
+    name: 'John Doe',
+    email: 'john@example.com',
+  };
+}
diff --git a/skills/backend-endpoint/examples/users-post.ts b/skills/backend-endpoint/examples/users-post.ts
new file mode 100644
index 0000000..fe6b610
--- /dev/null
+++ b/skills/backend-endpoint/examples/users-post.ts
@@ -0,0 +1,59 @@
+/**
+ * User Routes - POST endpoint example with validation
+ *
+ * @route POST /api/users
+ * @access Private
+ */
+
+import { Router, Request, Response, NextFunction } from 'express';
+import { z } from 'zod';
+import { authMiddleware } from '../middleware/auth';
+import { validateRequest } from '../middleware/validation';
+
+const router = Router();
+
+// Validation schema
+const createUserSchema = z.object({
+  name: z.string().min(1).max(100),
+  email: z.string().email(),
+  age: z.number().int().min(18).optional(),
+});
+
+type CreateUserInput = z.infer<typeof createUserSchema>;
+
+/**
+ * POST /api/users
+ * @description Create new user
+ * @access Private
+ */
+router.post(
+  '/api/users',
+  authMiddleware,
+  validateRequest(createUserSchema),
+  async (req: Request, res: Response, next: NextFunction) => {
+    try {
+      const userData: CreateUserInput = req.body;
+
+      // TODO: Save user to database
+      const newUser = await createUser(userData);
+
+      res.status(201).json({
+        success: true,
+        data: newUser,
+      });
+    } catch (error) {
+      next(error);
+    }
+  }
+);
+
+export default router;
+
+// Mock function (replace with actual database insert)
+async function createUser(data: CreateUserInput) {
+  return {
+    id: '123',
+    ...data,
+    createdAt: new Date(),
+  };
+}
diff --git a/skills/backend-endpoint/functions/endpoint_generator.py b/skills/backend-endpoint/functions/endpoint_generator.py
new file mode 100755
index 0000000..b3813ab
--- /dev/null
+++ b/skills/backend-endpoint/functions/endpoint_generator.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+"""
+Generate backend API endpoint from template with substitutions.
+
+Creates route handlers with authentication, validation, and error handling.
+"""
+
+import sys
+import argparse
+import os
+
+def read_template(template_path: str) -> str:
+    """Read template file content."""
+    try:
+        with open(template_path, 'r') as f:
+            return f.read()
+    except FileNotFoundError:
+        raise FileNotFoundError(f"Template file not found: {template_path}")
+
+def generate_endpoint(
+    path: str,
+    method: str,
+    resource: str,
+    framework: str,
+    template_content: str,
+    auth: bool = False,
+    validation: bool = False
+) -> str:
+    """
+    Generate endpoint code by substituting placeholders in template.
+
+    Args:
+        path: API endpoint path
+        method: HTTP method (GET, POST, etc.)
+        resource: Resource name (PascalCase)
+        framework: Backend framework (express, fastify, etc.)
+        template_content: Template file content
+        auth: Include authentication middleware
+        validation: Include validation middleware
+
+    Returns:
+        str: Generated endpoint code
+    """
+    # Convert method to lowercase for handler name
+    method_lower = method.lower()
+
+    # Generate middleware chain
+    middlewares = []
+    if auth:
+        middlewares.append('authMiddleware')
+    if validation:
+        validator_name = f'validate{resource}'
+        middlewares.append(validator_name)
+
+    # Append a trailing comma when middleware is present so the template's
+    # ${MIDDLEWARE_CHAIN} slot stays syntactically valid whether or not any
+    # middleware is generated.
+    middleware_chain = ', '.join(middlewares) + ',' if middlewares else ''
+
+    # Convert resource to different cases
+    resource_lower = resource.lower()
+    resource_plural = resource.lower() + 's'  # Simple pluralization
+
+    # Perform substitutions
+    substitutions = {
+        '${ROUTE_PATH}': path,
+        '${HTTP_METHOD}': method.upper(),
+        '${HTTP_METHOD_LOWER}': method_lower,
+        '${RESOURCE_NAME}': resource,
+        '${RESOURCE_NAME_LOWER}': resource_lower,
+        '${RESOURCE_NAME_PLURAL}': resource_plural,
+        '${VALIDATION_MIDDLEWARE}': f'validate{resource}' if validation else '',
+        '${AUTH_MIDDLEWARE}': 'authMiddleware' if auth else '',
+        '${MIDDLEWARE_CHAIN}': middleware_chain,
+    }
+
+    result = template_content
+    for placeholder, value in substitutions.items():
+        result = result.replace(placeholder, value)
+
+    return result
+
+def main():
+    parser = argparse.ArgumentParser(description='Generate backend API endpoint from template')
+    parser.add_argument('--path', required=True, help='API endpoint path')
+    parser.add_argument('--method', required=True, choices=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'], help='HTTP method')
+    parser.add_argument('--resource', required=True, help='Resource name (PascalCase)')
+    parser.add_argument('--framework', default='express', choices=['express', 'fastify', 'nestjs'], help='Backend framework')
+    parser.add_argument('--auth', action='store_true', help='Include authentication middleware')
+    parser.add_argument('--validation', action='store_true', help='Include validation middleware')
+    parser.add_argument('--template', required=True, help='Template file path')
+    parser.add_argument('--output', help='Output file path (optional, prints to stdout if not provided)')
+
+    args = parser.parse_args()
+
+    try:
+        # Read template
+        template_content = read_template(args.template)
+
+        # Generate endpoint
+        endpoint_code = generate_endpoint(
+            args.path,
+            args.method,
+            args.resource,
+            args.framework,
+            template_content,
+            args.auth,
+            args.validation
+        )
+
+        # Output
+        if args.output:
+            os.makedirs(os.path.dirname(args.output), exist_ok=True)
+            with open(args.output, 'w') as f:
+                f.write(endpoint_code)
+            print(f"✅ Endpoint generated: {args.output}")
+        else:
+            print(endpoint_code)
+
+        sys.exit(0)
+    except Exception as e:
+        print(f"❌ Error: {e}", file=sys.stderr)
+        sys.exit(1)
+ +if __name__ == '__main__': + main() diff --git a/skills/backend-endpoint/functions/route_validator.py b/skills/backend-endpoint/functions/route_validator.py new file mode 100755 index 0000000..863902a --- /dev/null +++ b/skills/backend-endpoint/functions/route_validator.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +""" +Validate API route path follows REST conventions. + +Ensures routes are RESTful, properly formatted, and follow best practices. +""" + +import sys +import re +import argparse + +# HTTP methods and their typical use cases +HTTP_METHODS = { + 'GET': 'Retrieve resource(s)', + 'POST': 'Create new resource', + 'PUT': 'Replace entire resource', + 'PATCH': 'Update part of resource', + 'DELETE': 'Remove resource', +} + +def validate_route_path(path, method=None): + """ + Validate route path against REST conventions. + + Args: + path: API route path + method: HTTP method (optional) + + Returns: + tuple: (is_valid: bool, error_message: str or None) + """ + # Check path starts with / + if not path.startswith('/'): + return False, "Route path must start with '/'" + + # Check no trailing slash (except for root) + if len(path) > 1 and path.endswith('/'): + return False, "Route path should not end with '/' (except root '/')" + + # Check for double slashes + if '//' in path: + return False, "Route path contains double slashes '//'" + + # Check path segments + segments = [s for s in path.split('/') if s] + + # Check resource naming (should be plural for collections) + for i, segment in enumerate(segments): + # Skip API prefix and version + if segment in ('api', 'v1', 'v2', 'v3'): + continue + + # Skip path parameters + if segment.startswith(':') or (segment.startswith('{') and segment.endswith('}')): + continue + + # Check resource naming + if not segment.islower(): + return False, f"Resource '{segment}' should be lowercase" + + # Check for underscores vs hyphens (prefer hyphens) + if '_' in segment: + suggested = segment.replace('_', '-') + return False, f"Use hyphens instead of underscores: '{segment}' → '{suggested}'" + + # Method-specific validation + if method: + method = method.upper() + if method not in HTTP_METHODS: + return False, f"Invalid HTTP method: {method}. Use: {', '.join(HTTP_METHODS.keys())}" + + # Check method matches path intent + if method == 'POST' and segments and segments[-1].startswith(':'): + return False, "POST endpoints should target collections, not specific resources (remove :id)" + + if method in ('PUT', 'PATCH', 'DELETE'): + # These methods typically need an ID parameter + if not any(s.startswith(':') or (s.startswith('{') and s.endswith('}')) for s in segments): + return False, f"{method} endpoints typically need a resource ID parameter (e.g., /:id)" + + return True, None + +def suggest_valid_path(path): + """ + Suggest a valid route path if the provided one is invalid. 
+ + Args: + path: Invalid route path + + Returns: + str: Suggested valid path + """ + # Remove trailing slash + if path.endswith('/') and len(path) > 1: + path = path.rstrip('/') + + # Fix double slashes + while '//' in path: + path = path.replace('//', '/') + + # Convert to lowercase and replace underscores + segments = path.split('/') + fixed_segments = [] + for segment in segments: + if segment.startswith(':') or (segment.startswith('{') and segment.endswith('}')): + fixed_segments.append(segment) + else: + fixed_segments.append(segment.lower().replace('_', '-')) + + suggested = '/'.join(fixed_segments) + + # Ensure starts with / + if not suggested.startswith('/'): + suggested = '/' + suggested + + return suggested + +def main(): + parser = argparse.ArgumentParser(description='Validate REST API route path') + parser.add_argument('--path', required=True, help='Route path to validate') + parser.add_argument('--method', help='HTTP method (GET, POST, PUT, PATCH, DELETE)') + parser.add_argument('--suggest', action='store_true', help='Suggest a valid path if invalid') + + args = parser.parse_args() + + is_valid, error = validate_route_path(args.path, args.method) + + if is_valid: + print(f"✅ '{args.path}' is a valid route path") + if args.method: + print(f" Method: {args.method.upper()} - {HTTP_METHODS[args.method.upper()]}") + sys.exit(0) + else: + print(f"❌ Invalid route path: {error}", file=sys.stderr) + + if args.suggest: + suggested = suggest_valid_path(args.path) + print(f"💡 Suggested path: {suggested}", file=sys.stderr) + + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/skills/backend-endpoint/functions/validation_generator.py b/skills/backend-endpoint/functions/validation_generator.py new file mode 100755 index 0000000..7820151 --- /dev/null +++ b/skills/backend-endpoint/functions/validation_generator.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +""" +Generate request validation schema. + +Creates Zod/Joi/Yup validation schemas for API endpoints. +""" + +import sys +import argparse + +# Type mapping for different validation libraries +ZOD_TYPE_MAP = { + 'string': 'z.string()', + 'number': 'z.number()', + 'boolean': 'z.boolean()', + 'email': 'z.string().email()', + 'url': 'z.string().url()', + 'uuid': 'z.string().uuid()', + 'date': 'z.date()', + 'array': 'z.array(z.any())', + 'object': 'z.object({})', +} + +def parse_field_spec(field_spec: str): + """ + Parse field specification. + + Format: "fieldName:type:required" or "fieldName:type:optional" + + Args: + field_spec: Field specification string + + Returns: + tuple: (field_name, field_type, is_required) + """ + parts = field_spec.strip().split(':') + + if len(parts) < 2: + raise ValueError(f"Invalid field spec: '{field_spec}'. Expected format: 'name:type' or 'name:type:required'") + + field_name = parts[0].strip() + field_type = parts[1].strip().lower() + is_required = len(parts) < 3 or parts[2].strip().lower() not in ('optional', 'opt', '?', 'false') + + return field_name, field_type, is_required + +def generate_zod_schema(resource: str, method: str, fields: list) -> str: + """ + Generate Zod validation schema. 
+
+    Args:
+        resource: Resource name (PascalCase)
+        method: HTTP method
+        fields: List of field specifications
+
+    Returns:
+        str: Zod schema code
+    """
+    schema_name = f"{method.lower()}{resource}Schema"
+    type_name = f"{method.capitalize()}{resource}Input"
+
+    lines = [
+        "import { z } from 'zod';\n",
+        f"export const {schema_name} = z.object({{",
+    ]
+
+    for field_spec in fields:
+        if not field_spec.strip():
+            continue
+
+        field_name, field_type, is_required = parse_field_spec(field_spec)
+        zod_type = ZOD_TYPE_MAP.get(field_type, 'z.any()')
+
+        if not is_required:
+            zod_type += '.optional()'
+
+        lines.append(f"  {field_name}: {zod_type},")
+
+    lines.append("});\n")
+    lines.append(f"export type {type_name} = z.infer<typeof {schema_name}>;")
+
+    return '\n'.join(lines)
+
+def main():
+    parser = argparse.ArgumentParser(description='Generate request validation schema')
+    parser.add_argument('--resource', required=True, help='Resource name (PascalCase)')
+    parser.add_argument('--method', required=True, help='HTTP method (GET, POST, etc.)')
+    parser.add_argument('--fields', required=True, help='Comma-separated field specifications')
+    parser.add_argument('--library', default='zod', choices=['zod', 'joi', 'yup'], help='Validation library')
+    parser.add_argument('--output', help='Output file path (optional, prints to stdout if not provided)')
+
+    args = parser.parse_args()
+
+    # Parse field specifications
+    field_specs = [f.strip() for f in args.fields.split(',') if f.strip()]
+
+    try:
+        if args.library == 'zod':
+            schema_code = generate_zod_schema(args.resource, args.method, field_specs)
+        else:
+            print(f"❌ Library '{args.library}' not yet implemented. Use 'zod' for now.", file=sys.stderr)
+            sys.exit(1)
+
+        # Output
+        if args.output:
+            import os
+            os.makedirs(os.path.dirname(args.output), exist_ok=True)
+            with open(args.output, 'w') as f:
+                f.write(schema_code)
+            print(f"✅ Validation schema generated: {args.output}")
+        else:
+            print(schema_code)
+
+        sys.exit(0)
+    except ValueError as e:
+        print(f"❌ Error: {e}", file=sys.stderr)
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
diff --git a/skills/backend-endpoint/templates/endpoint-test-template.spec.ts b/skills/backend-endpoint/templates/endpoint-test-template.spec.ts
new file mode 100644
index 0000000..4326570
--- /dev/null
+++ b/skills/backend-endpoint/templates/endpoint-test-template.spec.ts
@@ -0,0 +1,6 @@
+import request from 'supertest';
+import app from '../app';
+
+describe('${HTTP_METHOD} ${ENDPOINT_PATH}', () => {
+  ${TEST_CASES}
+});
diff --git a/skills/backend-endpoint/templates/express-route-template.ts b/skills/backend-endpoint/templates/express-route-template.ts
new file mode 100644
index 0000000..04cfaa6
--- /dev/null
+++ b/skills/backend-endpoint/templates/express-route-template.ts
@@ -0,0 +1,32 @@
+/**
+ * ${RESOURCE_NAME} Routes
+ *
+ * @route ${HTTP_METHOD} ${ROUTE_PATH}
+ */
+
+import { Router, Request, Response, NextFunction } from 'express';
+
+const router = Router();
+
+/**
+ * ${HTTP_METHOD} ${ROUTE_PATH}
+ * @description ${HTTP_METHOD} ${RESOURCE_NAME_LOWER}
+ */
+router.${HTTP_METHOD_LOWER}(
+  '${ROUTE_PATH}',
+  ${MIDDLEWARE_CHAIN}
+  async (req: Request, res: Response, next: NextFunction) => {
+    try {
+      // TODO: Implement ${RESOURCE_NAME_LOWER} ${HTTP_METHOD_LOWER} logic
+
+      res.status(200).json({
+        success: true,
+        data: {}, // Replace with actual data
+      });
+    } catch (error) {
+      next(error);
+    }
+  }
+);
+
+export default router;
diff --git a/skills/backend-test/SKILL.md b/skills/backend-test/SKILL.md
new file mode 100644
index 0000000..afe907c
--- /dev/null
+++ b/skills/backend-test/SKILL.md
@@ -0,0 +1,37 @@
+---
+name: backend-test
+description: Generate backend tests (unit, integration, mocks). Auto-invoke when user says "write test for", "add test", "test this", or "create test".
+allowed-tools: Read, Write, Edit, Grep, Glob, Bash
+version: 1.0.0
+---
+
+# Backend Test Generator
+
+Generate comprehensive backend tests with Jest/Vitest including fixtures and mocks.
+
+## When to Invoke
+
+Auto-invoke when user mentions:
+- "Write test for"
+- "Add test"
+- "Test this"
+- "Create test"
+- "Test [component/function]"
+
+## What This Does
+
+1. Generates test file with describe/it blocks
+2. Creates test fixtures
+3. Generates mocks for dependencies
+4. Includes edge cases
+5. Follows testing best practices
+
+## Success Criteria
+
+- [ ] Test file generated with proper structure
+- [ ] Tests cover happy path and error cases
+- [ ] Mocks isolate unit under test
+- [ ] Fixtures provide test data
+- [ ] Tests are runnable and pass
+
+**Auto-invoke when writing backend tests** 🧪
diff --git a/skills/database-migration/SKILL.md b/skills/database-migration/SKILL.md
new file mode 100644
index 0000000..cd6d060
--- /dev/null
+++ b/skills/database-migration/SKILL.md
@@ -0,0 +1,37 @@
+---
+name: database-migration
+description: Create database migration with schema changes and rollback. Auto-invoke when user says "create migration", "add table", "modify schema", or "change database".
+allowed-tools: Read, Write, Edit, Grep, Glob, Bash
+version: 1.0.0
+---
+
+# Database Migration Generator
+
+Generate database migrations with rollback capability for schema changes.
+
+## When to Invoke
+
+Auto-invoke when user mentions:
+- "Create migration"
+- "Add table"
+- "Modify schema"
+- "Change database"
+- "Database migration for [change]"
+
+## What This Does
+
+1. Generates migration file with timestamp
+2. Creates schema change SQL (up migration)
+3. Creates rollback SQL (down migration)
+4. Validates SQL syntax
+5. Follows migration tool conventions (Knex, Prisma, TypeORM)
+
+## Success Criteria
+
+- [ ] Migration file generated with unique timestamp
+- [ ] Up migration creates/modifies schema correctly
+- [ ] Down migration rolls back changes
+- [ ] SQL syntax is valid
+- [ ] Follows naming conventions
+
+**Auto-invoke when creating database schema changes** 🗄️
diff --git a/skills/frontend-component/SKILL.md b/skills/frontend-component/SKILL.md
new file mode 100644
index 0000000..79c1123
--- /dev/null
+++ b/skills/frontend-component/SKILL.md
@@ -0,0 +1,492 @@
+---
+name: frontend-component
+description: Create React/Vue component with TypeScript, tests, and styles. Auto-invoke when user says "create component", "add component", "new component", or "build component".
+allowed-tools: Read, Write, Edit, Grep, Glob, Bash
+version: 1.0.0
+---
+
+# Frontend Component Generator
+
+Generate production-ready React/Vue components with TypeScript, tests, and styles following modern best practices.
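+
+For orientation, the end product of the steps below looks roughly like the following for a simple component — a sketch assembled from the documented template placeholders, assuming the default CSS Modules styling; it is not the template's verbatim output:
+
+```typescript
+import React from 'react';
+import styles from './UserProfile.module.css';
+
+interface UserProfileProps {
+  userId: string;
+  onUpdate?: () => void;
+  isActive?: boolean;
+  className?: string;
+}
+
+export const UserProfile: React.FC<UserProfileProps> = ({
+  userId,
+  onUpdate,
+  isActive = false,
+  className,
+}) => {
+  return (
+    <div className={className ? `${styles.container} ${className}` : styles.container}>
+      {/* TODO: render real user data for userId */}
+      <span className={styles.title}>User {userId}</span>
+      {isActive && onUpdate && (
+        <button type="button" onClick={onUpdate}>
+          Update
+        </button>
+      )}
+    </div>
+  );
+};
+```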
+ +## When to Invoke + +Auto-invoke when user mentions: +- "Create a component" +- "Add a component" +- "New component" +- "Build a component" +- "Generate component for [feature]" + +## What This Does + +1. Generates component file with TypeScript and props interface +2. Creates test file with React Testing Library +3. Generates CSS module for styling +4. Creates barrel export (index.ts) +5. Validates naming conventions +6. Follows project patterns + +## Execution Steps + +### Step 1: Gather Component Requirements + +**Ask user for component details**: +``` +Component name: [PascalCase name, e.g., UserProfile] +Component type: + - simple (basic functional component) + - with-hooks (useState, useEffect, etc.) + - container (data fetching component) + +Styling approach: + - css-modules (default) + - styled-components + - tailwind + +Props needed: [Optional: describe expected props] +``` + +**Validate component name**: +- Use predefined function: `functions/name_validator.py` +- Ensure PascalCase format +- No reserved words +- Descriptive and specific + +### Step 2: Generate Props Interface + +**Based on component type and requirements**: + +Use predefined function: `functions/props_interface_generator.py` + +```python +# Generates TypeScript interface based on component requirements +python3 functions/props_interface_generator.py \ + --name "UserProfile" \ + --props "userId:string,onUpdate:function,isActive:boolean" +``` + +**Output**: +```typescript +interface UserProfileProps { + userId: string; + onUpdate?: () => void; + isActive?: boolean; + children?: React.ReactNode; + className?: string; +} +``` + +### Step 3: Generate Component File + +**Use appropriate template based on type**: + +**Simple component**: +``` +Use template: templates/component-simple-template.tsx +``` + +**Component with hooks**: +``` +Use template: templates/component-with-hooks-template.tsx +``` + +**Container component**: +``` +Use template: templates/component-container-template.tsx +``` + +**Use predefined function**: `functions/component_generator.py` + +```bash +python3 functions/component_generator.py \ + --name "UserProfile" \ + --type "simple" \ + --props-interface "UserProfileProps" \ + --template "templates/component-simple-template.tsx" \ + --output "src/components/UserProfile/UserProfile.tsx" +``` + +**Template substitutions**: +- `${COMPONENT_NAME}` → Component name (PascalCase) +- `${PROPS_INTERFACE}` → Generated props interface +- `${STYLE_IMPORT}` → CSS module import +- `${DESCRIPTION}` → Brief component description + +### Step 4: Generate Test File + +**Use predefined function**: `functions/test_generator.py` + +```bash +python3 functions/test_generator.py \ + --component-name "UserProfile" \ + --component-path "src/components/UserProfile/UserProfile.tsx" \ + --template "templates/test-template.test.tsx" \ + --output "src/components/UserProfile/UserProfile.test.tsx" +``` + +**Test template includes**: +- Basic rendering test +- Props validation test +- Event handler tests (if applicable) +- Accessibility tests + +**Template substitutions**: +- `${COMPONENT_NAME}` → Component name +- `${IMPORT_PATH}` → Relative import path +- `${TEST_CASES}` → Generated test cases based on props + +### Step 5: Generate Style File + +**Use predefined function**: `functions/style_generator.py` + +```bash +python3 functions/style_generator.py \ + --name "UserProfile" \ + --approach "css-modules" \ + --template "templates/style-template.module.css" \ + --output "src/components/UserProfile/UserProfile.module.css" +``` + 
+**CSS Modules template**:
+```css
+.container {
+  /* Component wrapper styles */
+}
+
+.title {
+  /* Title styles */
+}
+
+/* Add more classes as needed */
+```
+
+**Styled Components alternative**:
+```typescript
+// Generated if --approach "styled-components"
+import styled from 'styled-components';
+
+export const Container = styled.div`
+  /* Component wrapper styles */
+`;
+
+export const Title = styled.h2`
+  /* Title styles */
+`;
+```
+
+### Step 6: Generate Barrel Export
+
+**Create index.ts for clean imports**:
+
+```bash
+Write(
+  file_path: "src/components/UserProfile/index.ts",
+  content: "export { UserProfile } from './UserProfile';\nexport type { UserProfileProps } from './UserProfile';\n"
+)
+```
+
+**Allows usage**:
+```typescript
+import { UserProfile } from '@/components/UserProfile';
+```
+
+### Step 7: Show Component Summary
+
+**Display generated files and usage**:
+
+```
+✅ Component Created: UserProfile
+
+Structure:
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+📁 src/components/UserProfile/
+  ├── UserProfile.tsx          (Component)
+  ├── UserProfile.test.tsx     (Tests)
+  ├── UserProfile.module.css   (Styles)
+  └── index.ts                 (Exports)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+Props Interface:
+interface UserProfileProps {
+  userId: string;
+  onUpdate?: () => void;
+  isActive?: boolean;
+}
+
+Usage:
+import { UserProfile } from '@/components/UserProfile';
+
+<UserProfile
+  userId="123"
+  onUpdate={() => console.log('Updated')}
+  isActive={true}
+/>
+
+Next Steps:
+1. Customize component implementation
+2. Run tests: npm test UserProfile
+3. Import and use in your feature
+```
+
+---
+
+## Predefined Functions
+
+### 1. name_validator.py
+
+Validates component naming conventions.
+
+**Usage**:
+```bash
+python3 functions/name_validator.py --name "UserProfile"
+```
+
+**Checks**:
+- PascalCase format
+- Not a reserved word (e.g., Component, Element, etc.)
+- Descriptive (length > 2 chars)
+- No special characters
+
+**Returns**: Valid name or error message
+
+---
+
+### 2. props_interface_generator.py
+
+Generates TypeScript props interface from user input.
+
+**Usage**:
+```bash
+python3 functions/props_interface_generator.py \
+  --name "UserProfile" \
+  --props "userId:string,onUpdate:function,isActive:boolean"
+```
+
+**Supported types**:
+- `string`, `number`, `boolean`
+- `function` (becomes `() => void`)
+- `array` (becomes `any[]`)
+- `object` (becomes `Record<string, unknown>`)
+- `react-node` (becomes `React.ReactNode`)
+
+**Returns**: TypeScript interface string
+
+---
+
+### 3. component_generator.py
+
+Generates component file from template with substitutions.
+
+**Usage**:
+```bash
+python3 functions/component_generator.py \
+  --name "UserProfile" \
+  --type "simple" \
+  --props-interface "UserProfileProps" \
+  --template "templates/component-simple-template.tsx" \
+  --output "src/components/UserProfile/UserProfile.tsx"
+```
+
+**Parameters**:
+- `--name`: Component name (PascalCase)
+- `--type`: Component type (simple/with-hooks/container)
+- `--props-interface`: Props interface name
+- `--template`: Template file path
+- `--output`: Output file path
+
+**Returns**: Generated component code
+
+---
+
+### 4. test_generator.py
+
+Generates test file with React Testing Library.
+ +**Usage**: +```bash +python3 functions/test_generator.py \ + --component-name "UserProfile" \ + --component-path "src/components/UserProfile/UserProfile.tsx" \ + --template "templates/test-template.test.tsx" \ + --output "src/components/UserProfile/UserProfile.test.tsx" +``` + +**Generates tests for**: +- Component rendering +- Props validation +- Event handlers +- Accessibility attributes + +**Returns**: Generated test code + +--- + +### 5. style_generator.py + +Generates style file (CSS Modules or Styled Components). + +**Usage**: +```bash +python3 functions/style_generator.py \ + --name "UserProfile" \ + --approach "css-modules" \ + --template "templates/style-template.module.css" \ + --output "src/components/UserProfile/UserProfile.module.css" +``` + +**Supported approaches**: +- `css-modules` (default) +- `styled-components` +- `tailwind` (generates className utilities) + +**Returns**: Generated style code + +--- + +## Templates + +### component-simple-template.tsx + +Basic functional component template. + +**Placeholders**: +- `${COMPONENT_NAME}` - Component name +- `${PROPS_INTERFACE}` - Props interface definition +- `${STYLE_IMPORT}` - CSS import statement +- `${DESCRIPTION}` - Component description + +### component-with-hooks-template.tsx + +Component template with useState, useEffect examples. + +**Additional placeholders**: +- `${HOOKS}` - Hook declarations +- `${HANDLERS}` - Event handler functions + +### component-container-template.tsx + +Container component template with data fetching. + +**Additional placeholders**: +- `${API_IMPORT}` - API function import +- `${DATA_TYPE}` - Data type definition +- `${FETCH_LOGIC}` - Data fetching implementation + +### test-template.test.tsx + +React Testing Library test template. + +**Placeholders**: +- `${COMPONENT_NAME}` - Component name +- `${IMPORT_PATH}` - Import path +- `${TEST_CASES}` - Generated test cases + +### style-template.module.css + +CSS Modules template. + +**Placeholders**: +- `${COMPONENT_NAME_KEBAB}` - Component name in kebab-case +- `${BASE_STYLES}` - Base container styles + +--- + +## Examples + +See `examples/` directory for reference implementations: + +1. **Button.tsx** - Simple component with variants +2. **SearchBar.tsx** - Component with hooks (useState, useEffect) +3. **UserProfile.tsx** - Container component with data fetching + +Each example includes: +- Component implementation +- Test file +- Style file +- Usage documentation + +--- + +## Best Practices + +### Component Design +- Keep components **small and focused** (single responsibility) +- **Compose** complex UIs from simple components +- **Lift state up** only when necessary +- Use **descriptive names** (UserProfile, not UP) + +### TypeScript +- **Define prop interfaces** explicitly +- **Avoid `any`** type (use `unknown` if needed) +- **Export types** for consumers +- **Use strict mode** + +### Testing +- **Test user behavior**, not implementation +- **Query by role/text**, not test IDs +- **Test accessible attributes** +- **Mock external dependencies** + +### Styling +- **CSS Modules** for scoped styles +- **BEM or descriptive class names** +- **Mobile-first** responsive design +- **Use CSS custom properties** for theming + +### Accessibility +- **Semantic HTML** (button, nav, main, etc.) +- **ARIA labels** when needed +- **Keyboard navigation** support +- **Focus management** in modals/dropdowns + +--- + +## Troubleshooting + +### Component Not Rendering + +**Problem**: Generated component throws errors + +**Solutions**: +1. 
Check TypeScript compilation errors
+2. Verify all imports are correct
+3. Check props interface matches usage
+4. Validate JSX syntax
+
+### Tests Failing
+
+**Problem**: Generated tests don't pass
+
+**Solutions**:
+1. Ensure React Testing Library is installed
+2. Check test queries match component output
+3. Verify mocks are set up correctly
+4. Run tests with `--verbose` flag
+
+### Styles Not Applying
+
+**Problem**: CSS modules not loading
+
+**Solutions**:
+1. Check CSS module import syntax
+2. Verify webpack/vite config supports CSS modules
+3. Check className is applied to element
+4. Inspect browser devtools for loaded styles
+
+---
+
+## Success Criteria
+
+**This skill succeeds when**:
+- [ ] Component file generated with valid TypeScript
+- [ ] Test file created with passing tests
+- [ ] Style file generated with scoped styles
+- [ ] Barrel export allows clean imports
+- [ ] Props interface matches requirements
+- [ ] Code follows React best practices
+- [ ] Accessibility attributes included
+
+---
+
+**Auto-invoke this skill when creating React components to ensure consistency and save time** ⚛️
diff --git a/skills/frontend-component/examples/Button.tsx b/skills/frontend-component/examples/Button.tsx
new file mode 100644
index 0000000..1724b03
--- /dev/null
+++ b/skills/frontend-component/examples/Button.tsx
@@ -0,0 +1,40 @@
+/**
+ * Button - Simple button component with variants
+ *
+ * @example
+ * <Button variant="primary" onClick={() => console.log('clicked')}>
+ *   Click me
+ * </Button>
+ */
+
+import React from 'react';
+import styles from './Button.module.css';
+
+interface ButtonProps {
+  children: React.ReactNode;
+  onClick?: () => void;
+  variant?: 'primary' | 'secondary' | 'danger';
+  disabled?: boolean;
+  type?: 'button' | 'submit' | 'reset';
+  className?: string;
+}
+
+export const Button: React.FC<ButtonProps> = ({
+  children,
+  onClick,
+  variant = 'primary',
+  disabled = false,
+  type = 'button',
+  className,
+}) => {
+  return (
+    <button
+      type={type}
+      className={`${styles.button} ${styles[variant]} ${className || ''}`}
+      onClick={onClick}
+      disabled={disabled}
+    >
+      {children}
+    </button>
+  );
+};
diff --git a/skills/frontend-component/examples/SearchBar.tsx b/skills/frontend-component/examples/SearchBar.tsx
new file mode 100644
index 0000000..cd0d4b1
--- /dev/null
+++ b/skills/frontend-component/examples/SearchBar.tsx
@@ -0,0 +1,52 @@
+/**
+ * SearchBar - Search input with debounced onChange
+ *
+ * @example
+ * <SearchBar
+ *   onSearch={(query) => console.log('Search:', query)}
+ *   placeholder="Search users..."
+ * />
+ */
+
+import React, { useState, useEffect, useCallback } from 'react';
+import styles from './SearchBar.module.css';
+
+interface SearchBarProps {
+  onSearch: (query: string) => void;
+  placeholder?: string;
+  debounceMs?: number;
+  className?: string;
+}
+
+export const SearchBar: React.FC<SearchBarProps> = ({
+  onSearch,
+  placeholder = 'Search...',
+  debounceMs = 300,
+  className,
+}) => {
+  const [query, setQuery] = useState('');
+
+  const handleSearch = useCallback(() => {
+    if (query.trim()) {
+      onSearch(query);
+    }
+  }, [query, onSearch]);
+
+  useEffect(() => {
+    const timer = setTimeout(handleSearch, debounceMs);
+    return () => clearTimeout(timer);
+  }, [query, debounceMs, handleSearch]);
+
+  return (
+    <div className={`${styles.searchBar} ${className || ''}`}>
+      <input
+        type="text"
+        value={query}
+        onChange={(e) => setQuery(e.target.value)}
+        placeholder={placeholder}
+        className={styles.input}
+        aria-label="Search"
+      />
+    </div>
+ ); +}; diff --git a/skills/frontend-component/functions/component_generator.py b/skills/frontend-component/functions/component_generator.py new file mode 100755 index 0000000..5154cf6 --- /dev/null +++ b/skills/frontend-component/functions/component_generator.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +""" +Generate React component file from template with substitutions. + +Replaces placeholders in template with component-specific values. +""" + +import sys +import argparse +import os + +def read_template(template_path: str) -> str: + """Read template file content.""" + try: + with open(template_path, 'r') as f: + return f.read() + except FileNotFoundError: + raise FileNotFoundError(f"Template file not found: {template_path}") + +def generate_component(name: str, props_interface: str, template_content: str, description: str = None) -> str: + """ + Generate component code by substituting placeholders in template. + + Args: + name: Component name (PascalCase) + props_interface: Props interface name + template_content: Template file content + description: Brief component description + + Returns: + str: Generated component code + """ + # Convert PascalCase to kebab-case for file names + kebab_name = ''.join(['-' + c.lower() if c.isupper() else c for c in name]).lstrip('-') + + # Perform substitutions + substitutions = { + '${COMPONENT_NAME}': name, + '${PROPS_INTERFACE}': props_interface, + '${STYLE_IMPORT}': f"import styles from './{name}.module.css';", + '${DESCRIPTION}': description or f"{name} component", + '${COMPONENT_NAME_KEBAB}': kebab_name, + } + + result = template_content + for placeholder, value in substitutions.items(): + result = result.replace(placeholder, value) + + return result + +def main(): + parser = argparse.ArgumentParser(description='Generate React component from template') + parser.add_argument('--name', required=True, help='Component name (PascalCase)') + parser.add_argument('--type', default='simple', choices=['simple', 'with-hooks', 'container'], help='Component type') + parser.add_argument('--props-interface', required=True, help='Props interface name') + parser.add_argument('--template', required=True, help='Template file path') + parser.add_argument('--output', help='Output file path (optional, prints to stdout if not provided)') + parser.add_argument('--description', help='Component description') + + args = parser.parse_args() + + try: + # Read template + template_content = read_template(args.template) + + # Generate component + component_code = generate_component( + args.name, + args.props_interface, + template_content, + args.description + ) + + # Output + if args.output: + os.makedirs(os.path.dirname(args.output), exist_ok=True) + with open(args.output, 'w') as f: + f.write(component_code) + print(f"✅ Component generated: {args.output}") + else: + print(component_code) + + sys.exit(0) + except Exception as e: + print(f"❌ Error: {e}", file=sys.stderr) + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/skills/frontend-component/functions/name_validator.py b/skills/frontend-component/functions/name_validator.py new file mode 100755 index 0000000..ac0ea45 --- /dev/null +++ b/skills/frontend-component/functions/name_validator.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Validate component naming conventions. + +Ensures component names follow PascalCase, are descriptive, and avoid reserved words. 
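+
+Example (flags as defined in main() below):
+    python3 name_validator.py --name "UserProfile"             # valid, exits 0
+    python3 name_validator.py --name "user-profile" --suggest  # invalid, suggests "UserProfile"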
+""" + +import sys +import re +import argparse + +# Reserved component names that should be avoided +RESERVED_WORDS = { + 'Component', 'Element', 'Node', 'React', 'ReactNode', 'Fragment', + 'Props', 'State', 'Context', 'Provider', 'Consumer', 'Children', + 'Ref', 'Key', 'Type', 'Class', 'Function', 'Object', 'Array', + 'String', 'Number', 'Boolean', 'Symbol', 'Null', 'Undefined' +} + +def is_pascal_case(name): + """ + Check if name is in PascalCase format. + + Args: + name: String to validate + + Returns: + bool: True if PascalCase, False otherwise + """ + # PascalCase: starts with uppercase, contains only alphanumeric + pattern = r'^[A-Z][a-zA-Z0-9]*$' + return bool(re.match(pattern, name)) + +def validate_component_name(name): + """ + Validate component name against conventions. + + Args: + name: Component name to validate + + Returns: + tuple: (is_valid: bool, error_message: str or None) + """ + # Check length + if len(name) < 2: + return False, "Component name must be at least 2 characters long" + + # Check for special characters + if not name.replace('_', '').isalnum(): + return False, "Component name should only contain alphanumeric characters" + + # Check PascalCase + if not is_pascal_case(name): + return False, f"Component name '{name}' must be in PascalCase (e.g., UserProfile, TodoList)" + + # Check reserved words + if name in RESERVED_WORDS: + return False, f"'{name}' is a reserved word. Choose a more descriptive name." + + # Check descriptiveness (not too generic) + if len(name) < 4: + return False, f"Component name '{name}' is too short. Use a more descriptive name (e.g., UserCard, not UC)" + + # Check doesn't start with common anti-patterns + anti_patterns = ['My', 'The', 'New', 'Test'] + if any(name.startswith(pattern) for pattern in anti_patterns): + return False, f"Avoid starting component names with '{name[:3]}...'. Be more specific about what it does." + + return True, None + +def suggest_valid_name(name): + """ + Suggest a valid component name if the provided one is invalid. 
+
+    Args:
+        name: Invalid component name
+
+    Returns:
+        str: Suggested valid name
+    """
+    # Convert to PascalCase
+    suggested = ''.join(word.capitalize() for word in re.split(r'[-_\s]+', name))
+
+    # Remove special characters
+    suggested = re.sub(r'[^a-zA-Z0-9]', '', suggested)
+
+    # Ensure starts with uppercase
+    if suggested and not suggested[0].isupper():
+        suggested = suggested.capitalize()
+
+    return suggested if suggested else "MyComponent"
+
+def main():
+    parser = argparse.ArgumentParser(description='Validate React component naming conventions')
+    parser.add_argument('--name', required=True, help='Component name to validate')
+    parser.add_argument('--suggest', action='store_true', help='Suggest a valid name if invalid')
+
+    args = parser.parse_args()
+
+    is_valid, error = validate_component_name(args.name)
+
+    if is_valid:
+        print(f"✅ '{args.name}' is a valid component name")
+        sys.exit(0)
+    else:
+        print(f"❌ Invalid component name: {error}", file=sys.stderr)
+
+        if args.suggest:
+            suggested = suggest_valid_name(args.name)
+            print(f"💡 Suggested name: {suggested}", file=sys.stderr)
+
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
diff --git a/skills/frontend-component/functions/props_interface_generator.py b/skills/frontend-component/functions/props_interface_generator.py
new file mode 100755
index 0000000..92501b2
--- /dev/null
+++ b/skills/frontend-component/functions/props_interface_generator.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+"""
+Generate TypeScript props interface from user input.
+
+Converts simple prop specifications into proper TypeScript interface definitions.
+"""
+
+import sys
+import argparse
+from typing import List, Tuple
+
+# Type mapping from simple names to TypeScript types
+TYPE_MAPPING = {
+    'string': 'string',
+    'str': 'string',
+    'number': 'number',
+    'num': 'number',
+    'int': 'number',
+    'boolean': 'boolean',
+    'bool': 'boolean',
+    'function': '() => void',
+    'func': '() => void',
+    'callback': '() => void',
+    'array': 'any[]',
+    'arr': 'any[]',
+    'object': 'Record<string, any>',
+    'obj': 'Record<string, any>',
+    'react-node': 'React.ReactNode',
+    'node': 'React.ReactNode',
+    'children': 'React.ReactNode',
+    'element': 'React.ReactElement',
+    'style': 'React.CSSProperties',
+    'class': 'string',
+    'classname': 'string',
+}
+
+def parse_prop_spec(prop_spec: str) -> Tuple[str, str, bool]:
+    """
+    Parse a single prop specification.
+
+    Format: "propName:type" or "propName:type:optional"
+
+    Args:
+        prop_spec: Prop specification string
+
+    Returns:
+        tuple: (prop_name, ts_type, is_optional)
+    """
+    parts = prop_spec.strip().split(':')
+
+    if len(parts) < 2:
+        raise ValueError(f"Invalid prop specification: '{prop_spec}'. Expected format: 'propName:type' or 'propName:type:optional'")
+
+    prop_name = parts[0].strip()
+    type_name = parts[1].strip().lower()
+    is_optional = len(parts) > 2 and parts[2].strip().lower() in ('optional', 'opt', '?', 'true')
+
+    # Map to TypeScript type
+    ts_type = TYPE_MAPPING.get(type_name, type_name)
+
+    return prop_name, ts_type, is_optional
+
+def generate_props_interface(name: str, props: List[str], include_common: bool = True) -> str:
+    """
+    Generate TypeScript props interface.
+
+    Args:
+        name: Component name (will become {name}Props)
+        props: List of prop specifications
+        include_common: Whether to include common props (children, className, etc.)
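+
+    Example:
+        generate_props_interface("UserCard", ["userId:string", "onSelect:function:optional"])
+        returns:
+            interface UserCardProps {
+              userId: string;
+              onSelect?: () => void;
+              children?: React.ReactNode;
+              className?: string;
+            }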
+ + Returns: + str: TypeScript interface definition + """ + interface_name = f"{name}Props" + lines = [f"interface {interface_name} {{"] + + # Add custom props + for prop_spec in props: + if not prop_spec.strip(): + continue + + prop_name, ts_type, is_optional = parse_prop_spec(prop_spec) + optional_marker = '?' if is_optional else '' + lines.append(f" {prop_name}{optional_marker}: {ts_type};") + + # Add common props if requested + if include_common: + # Only add children if not already specified + if not any('children' in prop for prop in props): + lines.append(" children?: React.ReactNode;") + + # Only add className if not already specified + if not any('className' in prop or 'class' in prop.lower() for prop in props): + lines.append(" className?: string;") + + lines.append("}") + + return '\n'.join(lines) + +def main(): + parser = argparse.ArgumentParser(description='Generate TypeScript props interface') + parser.add_argument('--name', required=True, help='Component name') + parser.add_argument('--props', required=True, help='Comma-separated prop specifications (e.g., "userId:string,onUpdate:function,isActive:boolean:optional")') + parser.add_argument('--no-common', action='store_true', help='Do not include common props (children, className)') + + args = parser.parse_args() + + # Parse prop specifications + prop_specs = [p.strip() for p in args.props.split(',') if p.strip()] + + try: + interface = generate_props_interface( + args.name, + prop_specs, + include_common=not args.no_common + ) + print(interface) + sys.exit(0) + except ValueError as e: + print(f"❌ Error: {e}", file=sys.stderr) + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/skills/frontend-component/functions/style_generator.py b/skills/frontend-component/functions/style_generator.py new file mode 100755 index 0000000..c3aa75f --- /dev/null +++ b/skills/frontend-component/functions/style_generator.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +""" +Generate style file (CSS Modules or Styled Components). + +Creates scoped styles for React components. +""" + +import sys +import argparse +import os + +def read_template(template_path: str) -> str: + """Read template file content.""" + try: + with open(template_path, 'r') as f: + return f.read() + except FileNotFoundError: + raise FileNotFoundError(f"Template file not found: {template_path}") + +def generate_style(name: str, approach: str, template_content: str) -> str: + """ + Generate style code by substituting placeholders in template. 
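+
+    Example: name "UserProfile" fills ${COMPONENT_NAME} with "UserProfile" and
+    ${COMPONENT_NAME_KEBAB} with "user-profile" before the stylesheet is written.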
+ + Args: + name: Component name (PascalCase) + approach: Styling approach (css-modules, styled-components, tailwind) + template_content: Template file content + + Returns: + str: Generated style code + """ + # Convert PascalCase to kebab-case + kebab_name = ''.join(['-' + c.lower() if c.isupper() else c for c in name]).lstrip('-') + + # Perform substitutions + substitutions = { + '${COMPONENT_NAME}': name, + '${COMPONENT_NAME_KEBAB}': kebab_name, + '${BASE_STYLES}': """ display: flex; + flex-direction: column; + gap: 1rem;""", + } + + result = template_content + for placeholder, value in substitutions.items(): + result = result.replace(placeholder, value) + + return result + +def main(): + parser = argparse.ArgumentParser(description='Generate React component style file') + parser.add_argument('--name', required=True, help='Component name (PascalCase)') + parser.add_argument('--approach', default='css-modules', choices=['css-modules', 'styled-components', 'tailwind'], help='Styling approach') + parser.add_argument('--template', required=True, help='Style template file path') + parser.add_argument('--output', help='Output file path (optional, prints to stdout if not provided)') + + args = parser.parse_args() + + try: + # Read template + template_content = read_template(args.template) + + # Generate style + style_code = generate_style( + args.name, + args.approach, + template_content + ) + + # Output + if args.output: + os.makedirs(os.path.dirname(args.output), exist_ok=True) + with open(args.output, 'w') as f: + f.write(style_code) + print(f"✅ Style file generated: {args.output}") + else: + print(style_code) + + sys.exit(0) + except Exception as e: + print(f"❌ Error: {e}", file=sys.stderr) + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/skills/frontend-component/functions/test_generator.py b/skills/frontend-component/functions/test_generator.py new file mode 100755 index 0000000..db8f6c9 --- /dev/null +++ b/skills/frontend-component/functions/test_generator.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +""" +Generate test file with React Testing Library. + +Creates comprehensive test suite for React components. +""" + +import sys +import argparse +import os + +def read_template(template_path: str) -> str: + """Read template file content.""" + try: + with open(template_path, 'r') as f: + return f.read() + except FileNotFoundError: + raise FileNotFoundError(f"Template file not found: {template_path}") + +def generate_test(component_name: str, component_path: str, template_content: str) -> str: + """ + Generate test code by substituting placeholders in template. 
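+
+    Example: component_name "UserProfile" yields a describe('UserProfile', ...)
+    suite containing render(<UserProfile />) plus children and className cases.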
+
+    Args:
+        component_name: Component name (PascalCase)
+        component_path: Path to component file
+        template_content: Template file content
+
+    Returns:
+        str: Generated test code
+    """
+    # Calculate relative import path
+    import_path = f'./{component_name}'
+
+    # Basic test cases (can be expanded based on props analysis)
+    test_cases = f"""
+  it('renders without crashing', () => {{
+    render(<{component_name} />);
+  }});
+
+  it('renders children correctly', () => {{
+    render(<{component_name}>Test Content</{component_name}>);
+    expect(screen.getByText('Test Content')).toBeInTheDocument();
+  }});
+
+  it('applies custom className', () => {{
+    const {{ container }} = render(<{component_name} className="custom-class" />);
+    expect(container.firstChild).toHaveClass('custom-class');
+  }});
+""".strip()
+
+    # Perform substitutions
+    substitutions = {
+        '${COMPONENT_NAME}': component_name,
+        '${IMPORT_PATH}': import_path,
+        '${TEST_CASES}': test_cases,
+    }
+
+    result = template_content
+    for placeholder, value in substitutions.items():
+        result = result.replace(placeholder, value)
+
+    return result
+
+def main():
+    parser = argparse.ArgumentParser(description='Generate React component test file')
+    parser.add_argument('--component-name', required=True, help='Component name (PascalCase)')
+    parser.add_argument('--component-path', required=True, help='Path to component file')
+    parser.add_argument('--template', required=True, help='Test template file path')
+    parser.add_argument('--output', help='Output file path (optional, prints to stdout if not provided)')
+
+    args = parser.parse_args()
+
+    try:
+        # Read template
+        template_content = read_template(args.template)
+
+        # Generate test
+        test_code = generate_test(
+            args.component_name,
+            args.component_path,
+            template_content
+        )
+
+        # Output
+        if args.output:
+            os.makedirs(os.path.dirname(args.output), exist_ok=True)
+            with open(args.output, 'w') as f:
+                f.write(test_code)
+            print(f"✅ Test file generated: {args.output}")
+        else:
+            print(test_code)
+
+        sys.exit(0)
+    except Exception as e:
+        print(f"❌ Error: {e}", file=sys.stderr)
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
diff --git a/skills/frontend-component/templates/component-simple-template.tsx b/skills/frontend-component/templates/component-simple-template.tsx
new file mode 100644
index 0000000..77a96c5
--- /dev/null
+++ b/skills/frontend-component/templates/component-simple-template.tsx
@@ -0,0 +1,20 @@
+/**
+ * ${COMPONENT_NAME} - ${DESCRIPTION}
+ */
+
+import React from 'react';
+${STYLE_IMPORT}
+
+${PROPS_INTERFACE}
+
+export const ${COMPONENT_NAME}: React.FC<${PROPS_INTERFACE}> = ({
+  children,
+  className,
+  ...props
+}) => {
+  return (
+    <div className={`${styles.container} ${className || ''}`} {...props}>
+      {children}
+    </div>
+ ); +}; diff --git a/skills/frontend-component/templates/style-template.module.css b/skills/frontend-component/templates/style-template.module.css new file mode 100644 index 0000000..952f438 --- /dev/null +++ b/skills/frontend-component/templates/style-template.module.css @@ -0,0 +1,15 @@ +/* ${COMPONENT_NAME} Styles */ + +.container { +${BASE_STYLES} +} + +.title { + font-size: 1.5rem; + font-weight: 600; + margin-bottom: 0.5rem; +} + +.content { + flex: 1; +} diff --git a/skills/frontend-component/templates/test-template.test.tsx b/skills/frontend-component/templates/test-template.test.tsx new file mode 100644 index 0000000..ea23492 --- /dev/null +++ b/skills/frontend-component/templates/test-template.test.tsx @@ -0,0 +1,7 @@ +import React from 'react'; +import { render, screen } from '@testing-library/react'; +import { ${COMPONENT_NAME} } from '${IMPORT_PATH}'; + +describe('${COMPONENT_NAME}', () => { + ${TEST_CASES} +}); diff --git a/skills/frontend-test/SKILL.md b/skills/frontend-test/SKILL.md new file mode 100644 index 0000000..153aaab --- /dev/null +++ b/skills/frontend-test/SKILL.md @@ -0,0 +1,37 @@ +--- +name: frontend-test +description: Generate frontend component tests (unit, snapshot, e2e). Auto-invoke when user says "test this component", "write component test", or "add component test". +allowed-tools: Read, Write, Edit, Grep, Glob, Bash +version: 1.0.0 +--- + +# Frontend Test Generator + +Generate React/Vue component tests with React Testing Library including user interactions. + +## When to Invoke + +Auto-invoke when user mentions: +- "Test this component" +- "Write component test" +- "Test component" +- "Add component test" +- "Component tests for [name]" + +## What This Does + +1. Generates test file with RTL utilities +2. Tests component rendering +3. Tests user interactions (click, type, etc.) +4. Tests accessibility +5. Generates snapshot tests + +## Success Criteria + +- [ ] Test file generated with RTL imports +- [ ] Tests render component correctly +- [ ] User interactions are tested +- [ ] Accessibility attributes validated +- [ ] Tests follow React Testing Library best practices + +**Auto-invoke when writing frontend component tests** ⚛️ diff --git a/skills/nav-compact/SKILL.md b/skills/nav-compact/SKILL.md new file mode 100644 index 0000000..abbf116 --- /dev/null +++ b/skills/nav-compact/SKILL.md @@ -0,0 +1,274 @@ +--- +name: nav-compact +description: Clear conversation context while preserving knowledge via context marker. Use when user says "clear context", "start fresh", "done with this task", or when approaching token limits. +allowed-tools: Read, Write, Bash +version: 1.0.0 +--- + +# Navigator Compact Skill + +Clear your conversation context while preserving all knowledge in a context marker. Like git commit before switching branches - save your state, then start fresh. 
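+
+A minimal sketch of the save/restore cycle (the marker filename format and `.active` convention are the ones used in the steps below; `summary.md` stands in for the generated marker content):
+
+```bash
+# Save state before clearing
+cp summary.md .agent/.context-markers/2025-10-16-1430_before-refactor.md
+echo "2025-10-16-1430_before-refactor.md" > .agent/.context-markers/.active
+
+# In a fresh conversation, restore from the active marker
+cat ".agent/.context-markers/$(cat .agent/.context-markers/.active)"
+```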
+ +## When to Invoke + +Invoke this skill when the user: +- Says "clear context", "start fresh", "reset conversation" +- Says "I'm done with this task", "moving to next feature" +- Mentions "approaching token limit", "context getting full" +- Says "compact", "clean up context" +- After completing isolated sub-task + +**DO NOT invoke** if: +- User is in middle of implementation +- Context is needed for next immediate step +- Less than 20 messages in conversation (not much to gain) + +## Execution Steps + +### Step 1: Check If Worth Compacting + +Estimate conversation size: +- If < 20 messages: Suggest waiting +- If 20-50 messages: Safe to compact +- If > 50 messages: Highly recommended + +Show message: +``` +📊 Current session: ~[N] messages + +Compacting will: +- Create marker with current state +- Clear conversation history +- Free up ~[X]k tokens for new work + +Continue? [Y/n]: +``` + +### Step 2: Create Automatic Marker + +Invoke `nav-marker` skill (or create marker directly) with auto-generated name: + +``` +Marker name: before-compact-{YYYY-MM-DD}-{HHmm} +Note: "Auto-created before compact" +``` + +This marker should capture: +- Last 10-15 messages summary +- Files modified +- Technical decisions +- Current progress +- Next steps + +### Step 3: Set Active Marker + +Write the marker filename to `.active` file: + +```bash +echo "{marker-filename}.md" > .agent/.context-markers/.active +``` + +This tells `nav-start` to offer restoration on next session. + +### Step 4: Display Compact Instructions + +Show user how to compact: + +``` +╔══════════════════════════════════════════════════════╗ +║ ║ +║ 🗜️ Navigator Compact ║ +║ ║ +╚══════════════════════════════════════════════════════╝ + +✅ Context marker created: + .agent/.context-markers/{marker-filename}.md + +📋 Marker contains: + - Conversation summary (last 15 messages) + - Files you modified + - Technical decisions made + - Current progress & next steps + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +🔹 TO COMPACT: + +Claude Code doesn't allow me to clear the conversation +programmatically. You need to do it manually: + +**Method 1: New conversation** +1. Start a new conversation +2. Say "load my context" or "start my session" +3. I'll detect the active marker and restore your state + +**Method 2: /clear command** (if available) +1. Type /clear +2. Say "load my context" +3. Active marker will be restored + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +💡 Your progress is saved. You won't lose any context. + +Next session: +1. Say "start my session" or run /nav:start +2. I'll detect the marker: "{marker-filename}" +3. Confirm to restore, and you'll be back where you left off + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +``` + +### Step 5: Confirm Marker Location + +Verify marker was created: + +```bash +ls -lh .agent/.context-markers/{marker-filename}.md +``` + +Show file size and confirm success: +``` +✅ Marker saved successfully + File: {marker-filename}.md + Size: {X} KB (~{Y} tokens) + +Your context is preserved! +``` + +## Important Notes + +**Why manual compact?** + +Claude Code conversations cannot be programmatically cleared from within a conversation. The user must: +- Start a new conversation, OR +- Use `/clear` command (if available) + +This skill creates the marker and sets up auto-restoration, but the actual clearing must be done by the user. 
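+
+A minimal sketch of the check `nav-start` can run on the next session, assuming the `.active` file convention described above:
+
+```bash
+ACTIVE=".agent/.context-markers/.active"
+if [ -f "$ACTIVE" ]; then
+  MARKER=".agent/.context-markers/$(cat "$ACTIVE")"
+  echo "Unrestored marker found: $MARKER"
+  # Offer restoration; remove .active once the marker is loaded
+fi
+```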
+ +**Auto-restoration workflow:** + +``` +Session 1: + User: "Clear context" + → nav-compact creates marker + → Sets .active file + → Instructs user how to clear + +Session 2 (new conversation): + User: "Start my session" + → nav-start detects .active file + → Offers to restore marker + → User confirms + → Context restored! +``` + +## Common Use Cases + +### After Completing Feature +``` +User: "Feature complete, clear context for next task" +→ Creates marker: "before-compact-2025-10-16-1430" +→ Captures: Feature implementation details +→ User starts new conversation +→ Restores marker, begins next feature +``` + +### Approaching Token Limit +``` +User: "Context getting full, let's compact" +→ Creates marker: "before-compact-2025-10-16-1500" +→ Preserves: All current work +→ User clears conversation +→ Continues with fresh context +``` + +### Switching Between Tasks +``` +User: "Done with auth, moving to payments" +→ Creates marker: "auth-feature-complete" +→ Clear context +→ New session: Fresh start for payments +→ Can restore auth marker later if needed +``` + +## Error Handling + +**Marker creation fails**: +``` +❌ Failed to create marker + +Cannot compact without preserving context. +Fix marker creation first. +``` + +**Not enough context to preserve**: +``` +⚠️ Very little context (< 10 messages) + +Compacting now won't save much. Consider: +- Continue working +- Compact after more progress + +Continue anyway? [y/N]: +``` + +**Active marker already exists**: +``` +⚠️ Active marker already exists: + .agent/.context-markers/.active + +This means you have an unrestored marker from previous compact. + +Options: +1. Load that marker first (recommended) +2. Overwrite with new marker +3. Cancel compact + +Your choice [1-3]: +``` + +## Success Criteria + +Compact is successful when: +- [ ] Context marker created successfully +- [ ] Marker contains comprehensive summary +- [ ] `.active` file created (for auto-restoration) +- [ ] User knows how to clear conversation +- [ ] User knows marker will auto-restore on next session + +## Scripts + +**compact.py**: Automated compact workflow +- Create marker +- Set active file +- Generate restore instructions + +## Best Practices + +**When to compact:** +- ✅ After completing isolated feature/sub-task +- ✅ After major documentation update +- ✅ Before switching to unrelated work +- ✅ When approaching 70%+ token usage +- ❌ In middle of implementation +- ❌ When context needed for next step +- ❌ After every few messages (wasteful) + +**Compact frequency:** +- Small task (30 min): No compact needed +- Medium task (2-3 hours): Compact after completion +- Large task (full day): Compact at logical breakpoints +- Multi-day task: Compact at end of each session + +## Notes + +This skill automates the preparation for compacting but cannot clear the conversation itself (Claude Code limitation). + +The value is in: +1. Automatic marker creation +2. Setting up auto-restoration +3. Guiding user through process +4. Preserving context seamlessly + +This provides same functionality as `/nav:compact` command but with natural language invocation. diff --git a/skills/nav-init/SKILL.md b/skills/nav-init/SKILL.md new file mode 100644 index 0000000..1fa1a89 --- /dev/null +++ b/skills/nav-init/SKILL.md @@ -0,0 +1,311 @@ +--- +name: nav-init +description: Initialize Navigator documentation structure in a project. Auto-invokes when user says "Initialize Navigator", "Set up Navigator", "Create Navigator structure", or "Bootstrap Navigator". 
+allowed-tools: Write, Bash, Read, Glob +version: 1.0.0 +auto-invoke: true +triggers: + - "initialize navigator" + - "init navigator" + - "set up navigator" + - "setup navigator" + - "create navigator structure" + - "bootstrap navigator" + - "start navigator project" +--- + +# Navigator Initialization Skill + +## Purpose + +Creates the Navigator documentation structure (`.agent/`) in a new project, copies templates, and sets up initial configuration. + +## When This Skill Auto-Invokes + +- "Initialize Navigator in this project" +- "Set up Navigator documentation structure" +- "Create .agent folder for Navigator" +- "Bootstrap Navigator for my project" + +## What This Skill Does + +1. **Checks if already initialized**: Prevents overwriting existing structure +2. **Creates `.agent/` directory structure**: + ``` + .agent/ + ├── DEVELOPMENT-README.md + ├── .nav-config.json + ├── tasks/ + ├── system/ + ├── sops/ + │ ├── integrations/ + │ ├── debugging/ + │ ├── development/ + │ └── deployment/ + └── grafana/ + ├── docker-compose.yml + ├── prometheus.yml + ├── grafana-datasource.yml + ├── grafana-dashboards.yml + ├── navigator-dashboard.json + └── README.md + ``` +3. **Copies templates**: DEVELOPMENT-README.md, config, Grafana setup +4. **Auto-detects project info**: Name, tech stack (from package.json if available) +5. **Updates CLAUDE.md**: Adds Navigator-specific instructions to project +6. **Creates .gitignore entries**: Excludes temporary Navigator files + +## Execution Steps + +### 1. Check if Already Initialized + +```bash +if [ -d ".agent" ]; then + echo "✅ Navigator already initialized in this project" + echo "" + echo "To start a session: 'Start my Navigator session'" + echo "To view documentation: Read .agent/DEVELOPMENT-README.md" + exit 0 +fi +``` + +### 2. Detect Project Information + +Read `package.json`, `pyproject.toml`, `go.mod`, `Cargo.toml`, or similar to extract: +- Project name +- Tech stack +- Dependencies + +**Fallback**: Use current directory name if no config found. + +### 3. Create Directory Structure + +Use Write tool to create: +``` +.agent/ +.agent/tasks/ +.agent/system/ +.agent/sops/integrations/ +.agent/sops/debugging/ +.agent/sops/development/ +.agent/sops/deployment/ +.agent/grafana/ +``` + +### 4. 
Copy Templates + +Copy from plugin's `templates/` directory to `.agent/`: + +**DEVELOPMENT-README.md**: +- Replace `${PROJECT_NAME}` with detected project name +- Replace `${TECH_STACK}` with detected stack +- Replace `${DATE}` with current date + +**`.nav-config.json`**: +```json +{ + "version": "4.5.0", + "project_name": "${PROJECT_NAME}", + "tech_stack": "${TECH_STACK}", + "project_management": "none", + "task_prefix": "TASK", + "team_chat": "none", + "auto_load_navigator": true, + "compact_strategy": "conservative" +} +``` + +**Grafana Setup**: +Copy all Grafana dashboard files to enable metrics visualization: + +```bash +# Find plugin installation directory +PLUGIN_DIR="${HOME}/.claude/plugins/marketplaces/jitd-marketplace" + +# Copy Grafana files if plugin has them +if [ -d "${PLUGIN_DIR}/.agent/grafana" ]; then + cp -r "${PLUGIN_DIR}/.agent/grafana/"* .agent/grafana/ + echo "✓ Grafana dashboard installed" +else + echo "⚠️ Grafana files not found in plugin" +fi +``` + +Files copied: +- docker-compose.yml (Grafana + Prometheus stack) +- prometheus.yml (scrape config for Claude Code metrics) +- grafana-datasource.yml (Prometheus datasource config) +- grafana-dashboards.yml (dashboard provider config) +- navigator-dashboard.json (10-panel Navigator metrics dashboard) +- README.md (setup instructions) + +### 5. Update Project CLAUDE.md + +If `CLAUDE.md` exists: +- Append Navigator-specific sections +- Keep existing project customizations + +If `CLAUDE.md` doesn't exist: +- Copy `templates/CLAUDE.md` to project root +- Customize with project info + +### 6. Create .gitignore Entries + +Add to `.gitignore` if not present: +``` +# Navigator context markers +.context-markers/ + +# Navigator temporary files +.agent/.nav-temp/ +``` + +### 7. Success Message + +``` +✅ Navigator Initialized Successfully! + +Created structure: + 📁 .agent/ Navigator documentation + 📁 .agent/tasks/ Implementation plans + 📁 .agent/system/ Architecture docs + 📁 .agent/sops/ Standard procedures + 📁 .agent/grafana/ Metrics dashboard + 📄 .agent/.nav-config.json Configuration + 📄 CLAUDE.md Updated with Navigator workflow + +Next steps: + 1. Start session: "Start my Navigator session" + 2. Optional: Enable metrics - see .agent/sops/integrations/opentelemetry-setup.md + 3. Optional: Launch Grafana - cd .agent/grafana && docker compose up -d + +Documentation: Read .agent/DEVELOPMENT-README.md +``` + +## Error Handling + +**If `.agent/` exists**: +- Don't overwrite +- Show message: "Already initialized" + +**If templates not found**: +- Error: "Navigator plugin templates missing. Reinstall plugin." + +**If no write permissions**: +- Error: "Cannot create .agent/ directory. Check permissions." + +## Predefined Functions + +### `project_detector.py` + +```python +def detect_project_info(cwd: str) -> dict: + """ + Detect project name and tech stack from config files. + + Checks (in order): + 1. package.json (Node.js) + 2. pyproject.toml (Python) + 3. go.mod (Go) + 4. Cargo.toml (Rust) + 5. composer.json (PHP) + 6. Gemfile (Ruby) + + Returns: + { + "name": "project-name", + "tech_stack": "Next.js, TypeScript, Prisma", + "detected_from": "package.json" + } + """ +``` + +### `template_customizer.py` + +```python +def customize_template(template_content: str, project_info: dict) -> str: + """ + Replace placeholders in template with project-specific values. + + Placeholders: + - ${PROJECT_NAME} + - ${TECH_STACK} + - ${DATE} + - ${YEAR} + + Returns customized template content. 
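+
+    Example:
+        customize_template("Project: ${PROJECT_NAME}", {"name": "user_management", ...})
+        -> "Project: User Management"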
+ """ +``` + +## Examples + +### Example 1: New Next.js Project + +**User says**: "Initialize Navigator in this project" + +**Skill detects**: +- `package.json` exists +- Name: "my-saas-app" +- Dependencies: next, typescript, prisma + +**Result**: +- `.agent/` created +- DEVELOPMENT-README.md shows: "Project: My SaaS App" +- DEVELOPMENT-README.md shows: "Tech Stack: Next.js, TypeScript, Prisma" +- .nav-config.json has project_name: "my-saas-app" + +### Example 2: Python Project + +**User says**: "Set up Navigator" + +**Skill detects**: +- `pyproject.toml` exists +- Name: "ml-pipeline" +- Dependencies: fastapi, pydantic, sqlalchemy + +**Result**: +- `.agent/` created +- Tech stack: "FastAPI, Pydantic, SQLAlchemy" + +### Example 3: Already Initialized + +**User says**: "Initialize Navigator" + +**Skill checks**: +- `.agent/` directory exists + +**Result**: +``` +✅ Navigator already initialized in this project + +To start a session: 'Start my Navigator session' +``` + +## Integration with Other Skills + +**nav-start skill**: +- Checks for `.agent/DEVELOPMENT-README.md` +- If missing, suggests: "Initialize Navigator first" + +**nav-task skill**: +- Creates tasks in `.agent/tasks/` +- Requires initialization + +**nav-sop skill**: +- Creates SOPs in `.agent/sops/` +- Requires initialization + +## Version History + +- **1.0.0** (2025-01-20): Initial implementation + - Auto-detection of project info + - Template customization + - Grafana setup included + - Error handling for existing installations + +## Notes + +- This skill replaces the deleted `/nav:init` command from v2.x +- Templates are copied from plugin installation directory +- Project info detection is best-effort (falls back to directory name) +- Safe to run multiple times (won't overwrite existing structure) diff --git a/skills/nav-init/functions/project_detector.py b/skills/nav-init/functions/project_detector.py new file mode 100644 index 0000000..a9cf30a --- /dev/null +++ b/skills/nav-init/functions/project_detector.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 +""" +Project information detection for Navigator initialization. + +Detects project name and tech stack from various config files. +""" + +import json +import os +import re +from pathlib import Path +from typing import Dict, Optional + + +def detect_project_info(cwd: str = ".") -> Dict[str, str]: + """ + Detect project name and tech stack from config files. 
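+
+    Example: a package.json declaring next, typescript, and prisma yields
+    {"name": "my-saas-app", "tech_stack": "Next.js, TypeScript, Prisma",
+     "detected_from": "package.json"}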
+ + Args: + cwd: Current working directory (default: ".") + + Returns: + Dictionary with keys: + - name: Project name + - tech_stack: Comma-separated technologies + - detected_from: Source file used for detection + """ + cwd_path = Path(cwd).resolve() + + # Try detection methods in order + detectors = [ + _detect_from_package_json, + _detect_from_pyproject_toml, + _detect_from_go_mod, + _detect_from_cargo_toml, + _detect_from_composer_json, + _detect_from_gemfile, + ] + + for detector in detectors: + result = detector(cwd_path) + if result: + return result + + # Fallback: use directory name + return { + "name": cwd_path.name, + "tech_stack": "Unknown", + "detected_from": "directory_name", + } + + +def _detect_from_package_json(cwd: Path) -> Optional[Dict[str, str]]: + """Detect from package.json (Node.js/JavaScript).""" + package_json = cwd / "package.json" + if not package_json.exists(): + return None + + try: + with open(package_json) as f: + data = json.load(f) + + name = data.get("name", cwd.name) + deps = {**data.get("dependencies", {}), **data.get("devDependencies", {})} + + # Detect framework/stack + stack_parts = [] + + if "next" in deps: + stack_parts.append("Next.js") + elif "react" in deps: + stack_parts.append("React") + elif "vue" in deps: + stack_parts.append("Vue") + elif "angular" in deps: + stack_parts.append("Angular") + elif "svelte" in deps: + stack_parts.append("Svelte") + elif "express" in deps: + stack_parts.append("Express") + elif "fastify" in deps: + stack_parts.append("Fastify") + + if "typescript" in deps: + stack_parts.append("TypeScript") + + if "prisma" in deps: + stack_parts.append("Prisma") + elif "mongoose" in deps: + stack_parts.append("MongoDB") + elif "pg" in deps or "postgres" in deps: + stack_parts.append("PostgreSQL") + + tech_stack = ", ".join(stack_parts) if stack_parts else "Node.js" + + return { + "name": name, + "tech_stack": tech_stack, + "detected_from": "package.json", + } + except (json.JSONDecodeError, IOError): + return None + + +def _detect_from_pyproject_toml(cwd: Path) -> Optional[Dict[str, str]]: + """Detect from pyproject.toml (Python).""" + pyproject = cwd / "pyproject.toml" + if not pyproject.exists(): + return None + + try: + content = pyproject.read_text() + + # Extract name + name_match = re.search(r'name\s*=\s*["\']([^"\']+)["\']', content) + name = name_match.group(1) if name_match else cwd.name + + # Detect framework/stack + stack_parts = [] + + if "fastapi" in content.lower(): + stack_parts.append("FastAPI") + elif "django" in content.lower(): + stack_parts.append("Django") + elif "flask" in content.lower(): + stack_parts.append("Flask") + + if "sqlalchemy" in content.lower(): + stack_parts.append("SQLAlchemy") + if "pydantic" in content.lower(): + stack_parts.append("Pydantic") + if "pytest" in content.lower(): + stack_parts.append("Pytest") + + tech_stack = ", ".join(stack_parts) if stack_parts else "Python" + + return { + "name": name, + "tech_stack": tech_stack, + "detected_from": "pyproject.toml", + } + except IOError: + return None + + +def _detect_from_go_mod(cwd: Path) -> Optional[Dict[str, str]]: + """Detect from go.mod (Go).""" + go_mod = cwd / "go.mod" + if not go_mod.exists(): + return None + + try: + content = go_mod.read_text() + + # Extract module name + module_match = re.search(r'module\s+([^\s]+)', content) + name = module_match.group(1).split("/")[-1] if module_match else cwd.name + + # Detect framework/stack + stack_parts = ["Go"] + + if "gin-gonic/gin" in content: + stack_parts.append("Gin") + elif 
"gorilla/mux" in content: + stack_parts.append("Gorilla Mux") + elif "fiber" in content: + stack_parts.append("Fiber") + + if "gorm" in content: + stack_parts.append("GORM") + + tech_stack = ", ".join(stack_parts) + + return { + "name": name, + "tech_stack": tech_stack, + "detected_from": "go.mod", + } + except IOError: + return None + + +def _detect_from_cargo_toml(cwd: Path) -> Optional[Dict[str, str]]: + """Detect from Cargo.toml (Rust).""" + cargo_toml = cwd / "Cargo.toml" + if not cargo_toml.exists(): + return None + + try: + content = cargo_toml.read_text() + + # Extract name + name_match = re.search(r'name\s*=\s*["\']([^"\']+)["\']', content) + name = name_match.group(1) if name_match else cwd.name + + # Detect framework/stack + stack_parts = ["Rust"] + + if "actix-web" in content: + stack_parts.append("Actix Web") + elif "rocket" in content: + stack_parts.append("Rocket") + elif "axum" in content: + stack_parts.append("Axum") + + if "diesel" in content: + stack_parts.append("Diesel") + elif "sqlx" in content: + stack_parts.append("SQLx") + + tech_stack = ", ".join(stack_parts) + + return { + "name": name, + "tech_stack": tech_stack, + "detected_from": "Cargo.toml", + } + except IOError: + return None + + +def _detect_from_composer_json(cwd: Path) -> Optional[Dict[str, str]]: + """Detect from composer.json (PHP).""" + composer_json = cwd / "composer.json" + if not composer_json.exists(): + return None + + try: + with open(composer_json) as f: + data = json.load(f) + + name = data.get("name", cwd.name).split("/")[-1] + deps = {**data.get("require", {}), **data.get("require-dev", {})} + + # Detect framework/stack + stack_parts = [] + + if any("laravel" in dep for dep in deps): + stack_parts.append("Laravel") + elif any("symfony" in dep for dep in deps): + stack_parts.append("Symfony") + + tech_stack = ", ".join(stack_parts) if stack_parts else "PHP" + + return { + "name": name, + "tech_stack": tech_stack, + "detected_from": "composer.json", + } + except (json.JSONDecodeError, IOError): + return None + + +def _detect_from_gemfile(cwd: Path) -> Optional[Dict[str, str]]: + """Detect from Gemfile (Ruby).""" + gemfile = cwd / "Gemfile" + if not gemfile.exists(): + return None + + try: + content = gemfile.read_text() + + name = cwd.name + + # Detect framework/stack + stack_parts = [] + + if "rails" in content.lower(): + stack_parts.append("Ruby on Rails") + elif "sinatra" in content.lower(): + stack_parts.append("Sinatra") + else: + stack_parts.append("Ruby") + + tech_stack = ", ".join(stack_parts) + + return { + "name": name, + "tech_stack": tech_stack, + "detected_from": "Gemfile", + } + except IOError: + return None + + +if __name__ == "__main__": + # Test detection + info = detect_project_info() + print(json.dumps(info, indent=2)) diff --git a/skills/nav-init/functions/template_customizer.py b/skills/nav-init/functions/template_customizer.py new file mode 100644 index 0000000..433d1a1 --- /dev/null +++ b/skills/nav-init/functions/template_customizer.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +""" +Template customization for Navigator initialization. + +Replaces placeholders in templates with project-specific values. +""" + +import re +from datetime import datetime +from typing import Dict + + +def customize_template(template_content: str, project_info: Dict[str, str]) -> str: + """ + Replace placeholders in template with project-specific values. 
+ + Args: + template_content: Template file content with placeholders + project_info: Dictionary from project_detector.py + + Returns: + Customized template content + + Placeholders: + ${PROJECT_NAME} - Project name (capitalized) + ${project_name} - Project name (lowercase) + ${TECH_STACK} - Technology stack + ${DATE} - Current date (YYYY-MM-DD) + ${YEAR} - Current year + ${DETECTED_FROM} - Source of detection + """ + now = datetime.now() + + # Prepare replacement values + project_name = project_info.get("name", "My Project") + tech_stack = project_info.get("tech_stack", "Unknown") + detected_from = project_info.get("detected_from", "manual") + + # Create title-cased version for display + project_name_title = _title_case(project_name) + + replacements = { + "${PROJECT_NAME}": project_name_title, + "${project_name}": project_name.lower(), + "${TECH_STACK}": tech_stack, + "${DATE}": now.strftime("%Y-%m-%d"), + "${YEAR}": str(now.year), + "${DETECTED_FROM}": detected_from, + } + + # Apply replacements + result = template_content + for placeholder, value in replacements.items(): + result = result.replace(placeholder, value) + + return result + + +def _title_case(text: str) -> str: + """ + Convert kebab-case, snake_case, or camelCase to Title Case. + + Examples: + my-saas-app -> My SaaS App + user_management -> User Management + myAwesomeProject -> My Awesome Project + """ + # Replace separators with spaces + text = re.sub(r'[-_]', ' ', text) + + # Add spaces before capitals in camelCase + text = re.sub(r'([a-z])([A-Z])', r'\1 \2', text) + + # Title case + return text.title() + + +def validate_customization(content: str) -> bool: + """ + Check if template was properly customized (no placeholders remaining). + + Args: + content: Template content after customization + + Returns: + True if no placeholders found, False otherwise + """ + placeholder_pattern = r'\$\{[A-Z_]+\}' + return not bool(re.search(placeholder_pattern, content)) + + +if __name__ == "__main__": + # Test customization + template = """ +# ${PROJECT_NAME} - Development Documentation + +**Project**: ${PROJECT_NAME} +**Tech Stack**: ${TECH_STACK} +**Last Updated**: ${DATE} + +Detected from: ${DETECTED_FROM} +""" + + project_info = { + "name": "my-saas-app", + "tech_stack": "Next.js, TypeScript, Prisma", + "detected_from": "package.json", + } + + result = customize_template(template, project_info) + print(result) + print(f"\nValid: {validate_customization(result)}") diff --git a/skills/nav-install-multi-claude/SKILL.md b/skills/nav-install-multi-claude/SKILL.md new file mode 100644 index 0000000..3d2792d --- /dev/null +++ b/skills/nav-install-multi-claude/SKILL.md @@ -0,0 +1,384 @@ +--- +name: nav-install-multi-claude +description: Install Navigator multi-Claude workflow orchestration scripts. Auto-invokes when user says "install multi-Claude workflows", "set up multi-Claude", or "enable parallel execution". +allowed-tools: Bash, Read, Write +version: 1.0.0 +--- + +# Navigator Multi-Claude Workflow Installer + +Install multi-Claude orchestration scripts for parallel AI execution. 
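+
+Once installed, the scripts are plain executables on `$PATH`; a typical first run (commands repeated from the verification and next-steps sections below):
+
+```bash
+export PATH="$HOME/bin:$PATH"   # if $HOME/bin is not already on PATH
+navigator-multi-claude-poc.sh "Add hello world function"
+```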
+ +## When to Invoke + +Auto-invoke when user says: +- "Install multi-Claude workflows" +- "Set up multi-Claude orchestration" +- "Enable parallel execution" +- "Complete Navigator 4.3.0 installation" +- "Install Navigator workflows" + +**DO NOT invoke** if: +- Scripts already installed (check with `which navigator-multi-claude.sh`) +- User is just asking about multi-Claude (informational) +- Navigator plugin not installed + +## What This Installs + +**Scripts installed to `$HOME/bin/`**: +- `navigator-multi-claude.sh` - Full 6-phase workflow orchestration +- `navigator-multi-claude-poc.sh` - Simple 3-phase POC +- `install-multi-claude.sh` - This installer (for future updates) + +**Why needed**: Plugin installation only copies skills/templates. Multi-Claude scripts live outside plugin structure and require separate installation. + +## Execution Steps + +### Step 1: Check if Already Installed + +```bash +if command -v navigator-multi-claude.sh &> /dev/null; then + INSTALLED_PATH=$(which navigator-multi-claude.sh) + INSTALLED_VERSION=$(grep -o 'VERSION=.*' "$INSTALLED_PATH" | head -1 | cut -d'=' -f2 | tr -d '"' || echo "unknown") + + echo "✅ Multi-Claude workflows already installed" + echo "" + echo "Location: $INSTALLED_PATH" + echo "Version: $INSTALLED_VERSION" + echo "" + echo "To reinstall/update:" + echo " rm $INSTALLED_PATH" + echo " 'Install multi-Claude workflows'" + + exit 0 +fi +``` + +### Step 2: Verify Prerequisites + +```bash +# Check Claude CLI +if ! command -v claude &> /dev/null; then + echo "❌ Claude Code CLI not found in PATH" + echo "" + echo "Multi-Claude workflows require Claude Code CLI to spawn sub-Claude instances." + echo "" + echo "Install Claude Code first, then retry:" + echo " https://docs.claude.com/claude-code/installation" + exit 1 +fi + +# Check Navigator plugin installed +PLUGIN_PATHS=( + "$HOME/.claude/plugins/marketplaces/navigator-marketplace" + "$HOME/.config/claude/plugins/navigator" + "$HOME/.claude/plugins/navigator" +) + +PLUGIN_FOUND=false +for path in "${PLUGIN_PATHS[@]}"; do + if [ -d "$path" ]; then + PLUGIN_FOUND=true + PLUGIN_PATH="$path" + break + fi +done + +if [ "$PLUGIN_FOUND" = false ]; then + echo "❌ Navigator plugin not found" + echo "" + echo "Install Navigator plugin first:" + echo " /plugin marketplace add alekspetrov/navigator" + echo " /plugin install navigator" + exit 1 +fi + +echo "✅ Prerequisites verified" +echo " - Claude CLI: $(which claude)" +echo " - Navigator plugin: $PLUGIN_PATH" +echo "" +``` + +### Step 3: Download Latest Scripts from GitHub + +```bash +echo "📥 Downloading multi-Claude scripts from GitHub..." 
+echo "" + +# Detect installed plugin version +if [ -f "$PLUGIN_PATH/.claude-plugin/plugin.json" ]; then + PLUGIN_VERSION=$(grep -o '"version": "[^"]*"' "$PLUGIN_PATH/.claude-plugin/plugin.json" | head -1 | cut -d'"' -f4) + VERSION_TAG="v$PLUGIN_VERSION" + echo " Plugin version: $PLUGIN_VERSION" + echo " Fetching matching scripts: $VERSION_TAG" +else + # Fallback to latest stable if version detection fails + VERSION_TAG="main" + echo " ⚠️ Could not detect plugin version" + echo " Fetching from: main branch (latest stable)" +fi + +echo "" + +# Clone repository to temp location +TEMP_DIR="/tmp/navigator-install-$$" +if git clone --depth 1 --branch "$VERSION_TAG" https://github.com/alekspetrov/navigator.git "$TEMP_DIR" 2>&1; then + echo "✅ Downloaded Navigator repository" +else + echo "❌ Failed to download from GitHub" + echo "" + echo "Possible causes:" + echo " - No internet connection" + echo " - Version tag $VERSION_TAG doesn't exist" + echo " - GitHub rate limit exceeded" + echo "" + echo "Retry with main branch? [y/N]" + exit 1 +fi + +echo "" +``` + +### Step 4: Run Installation Script + +```bash +echo "📦 Installing multi-Claude scripts..." +echo "" + +cd "$TEMP_DIR" + +if [ -f "scripts/install-multi-claude.sh" ]; then + # Run the installer + chmod +x scripts/install-multi-claude.sh + ./scripts/install-multi-claude.sh + + INSTALL_EXIT=$? + + if [ $INSTALL_EXIT -eq 0 ]; then + echo "" + echo "✅ Multi-Claude workflows installed successfully" + else + echo "" + echo "❌ Installation failed with exit code $INSTALL_EXIT" + echo "" + echo "Check the output above for errors." + exit 1 + fi +else + echo "❌ install-multi-claude.sh not found in repository" + echo "" + echo "This version may not support multi-Claude workflows." + echo "Upgrade to Navigator v4.3.0+ for multi-Claude features." + exit 1 +fi + +echo "" +``` + +### Step 5: Verify Installation + +```bash +echo "🔍 Verifying installation..." 
+echo "" + +# Check if scripts are in PATH +if command -v navigator-multi-claude.sh &> /dev/null; then + INSTALLED_PATH=$(which navigator-multi-claude.sh) + echo "✅ navigator-multi-claude.sh: $INSTALLED_PATH" +else + echo "⚠️ navigator-multi-claude.sh not in PATH" + echo " May need to restart terminal or run:" + echo " export PATH=\"\$HOME/bin:\$PATH\"" +fi + +if command -v navigator-multi-claude-poc.sh &> /dev/null; then + INSTALLED_PATH=$(which navigator-multi-claude-poc.sh) + echo "✅ navigator-multi-claude-poc.sh: $INSTALLED_PATH" +else + echo "⚠️ navigator-multi-claude-poc.sh not in PATH" +fi + +echo "" +``` + +### Step 6: Cleanup and Next Steps + +```bash +# Cleanup temp directory +rm -rf "$TEMP_DIR" +echo "🧹 Cleaned up temporary files" +echo "" + +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "✅ Multi-Claude Workflows Ready" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" +echo "Test with simple task:" +echo " navigator-multi-claude-poc.sh \"Add hello world function\"" +echo "" +echo "Full 6-phase workflow:" +echo " navigator-multi-claude.sh \"Implement user authentication\"" +echo "" +echo "Documentation:" +echo " - Release notes: RELEASE-NOTES-v4.3.0.md" +echo " - POC learnings: scripts/POC-LEARNINGS.md" +echo "" +echo "Status: Experimental (30% success rate)" +echo "Recommendation: Use for simple features, monitor output closely" +echo "" +``` + +## Error Handling + +### Git Clone Fails + +``` +❌ Failed to download from GitHub + +Possible causes: + - No internet connection + - Version tag v4.3.1 doesn't exist + - GitHub rate limit exceeded + +Manual installation: + 1. Download: https://github.com/alekspetrov/navigator/archive/refs/heads/main.zip + 2. Extract and cd to directory + 3. Run: ./scripts/install-multi-claude.sh +``` + +### Version Mismatch + +``` +⚠️ Plugin version: 4.3.1 + Latest release: 4.3.0 + Installing from: main branch + +This may include unreleased changes. +Continue? 
[y/N] +``` + +### Already Installed + +``` +✅ Multi-Claude workflows already installed + +Location: /Users/username/bin/navigator-multi-claude.sh +Version: 4.3.0 + +To reinstall/update: + rm /Users/username/bin/navigator-multi-claude.sh + 'Install multi-Claude workflows' +``` + +### Permission Denied + +``` +❌ Permission denied: /usr/local/bin/ + +Installation requires write access to: + - $HOME/bin/ (recommended) + - /usr/local/bin/ (requires sudo) + +Fix: + mkdir -p $HOME/bin + export PATH="$HOME/bin:$PATH" + +Then retry: 'Install multi-Claude workflows' +``` + +## Success Criteria + +Installation successful when: +- [ ] Scripts downloaded from GitHub +- [ ] install-multi-claude.sh executed without errors +- [ ] Scripts added to PATH (verified with `which`) +- [ ] Version matches plugin version (or explicit override) +- [ ] User can invoke `navigator-multi-claude-poc.sh --help` + +## Rollback Procedure + +If installation fails or causes issues: + +```bash +# Remove installed scripts +rm -f $HOME/bin/navigator-multi-claude.sh +rm -f $HOME/bin/navigator-multi-claude-poc.sh +rm -f $HOME/bin/install-multi-claude.sh + +# Verify removal +which navigator-multi-claude.sh +# Should output: navigator-multi-claude.sh not found +``` + +## Notes + +**Why separate installation**: +- Plugin system only copies skills/templates from `.claude-plugin/` +- Multi-Claude scripts are executable Bash files that need to be in PATH +- Installation location varies by system ($HOME/bin vs /usr/local/bin) +- Scripts need `chmod +x` for execution + +**Version matching**: +- Always fetches scripts matching installed plugin version +- Prevents version drift (v4.3.1 plugin with v4.3.0 scripts) +- Falls back to main branch if version tag doesn't exist + +**What gets installed**: +``` +$HOME/bin/ +├── navigator-multi-claude.sh # Full 6-phase workflow +├── navigator-multi-claude-poc.sh # 3-phase POC +└── install-multi-claude.sh # Reinstaller +``` + +## Related Skills + +- **nav-start**: Detects missing workflows and prompts installation +- **nav-upgrade**: Updates plugin (workflows need separate reinstall) +- **nav-stats**: Shows multi-Claude workflow efficiency metrics + +## Examples + +### Example 1: Fresh Installation + +User: "Install multi-Claude workflows" + +Assistant executes: +1. Checks prerequisites (Claude CLI, Navigator plugin) +2. Downloads from GitHub (v4.3.1 tag) +3. Runs install-multi-claude.sh +4. Verifies installation +5. Shows test commands + +Output: +``` +✅ Multi-Claude Workflows Ready + +Test with simple task: + navigator-multi-claude-poc.sh "Add hello world function" +``` + +### Example 2: Already Installed + +User: "Set up multi-Claude" + +Assistant checks: +```bash +which navigator-multi-claude.sh +# Found at: /Users/alex/bin/navigator-multi-claude.sh +``` + +Output: +``` +✅ Multi-Claude workflows already installed + +Location: /Users/alex/bin/navigator-multi-claude.sh +Version: 4.3.0 + +Already ready to use! +``` + +### Example 3: After Plugin Update + +User updates plugin 4.3.0 → 4.3.1, then: +"Install multi-Claude workflows" \ No newline at end of file diff --git a/skills/nav-marker/SKILL.md b/skills/nav-marker/SKILL.md new file mode 100644 index 0000000..fbab80a --- /dev/null +++ b/skills/nav-marker/SKILL.md @@ -0,0 +1,300 @@ +--- +name: nav-marker +description: Create context save points to preserve conversation state before breaks, risky changes, or compaction. Use when user says "save my progress", "create checkpoint", "mark this point", or before clearing context. 
+allowed-tools: Read, Write, Bash +version: 1.0.0 +--- + +# Navigator Marker Skill + +Create context markers - save points that preserve conversation state so you can resume work later without re-explaining everything. + +## When to Invoke + +Invoke this skill when the user: +- Says "save my progress", "create checkpoint", "mark this" +- Says "before I take a break", "save before lunch" +- Mentions "risky refactor ahead", "experiment with new approach" +- Says "end of day", "stopping for today" +- Before compacting context + +**DO NOT invoke** if: +- User is asking about existing markers (use listing, not creation) +- Context is fresh (< 5 messages exchanged) + +## Execution Steps + +### Step 1: Check Navigator Structure + +Verify `.agent/.context-markers/` directory exists: + +```bash +mkdir -p .agent/.context-markers +``` + +### Step 2: Determine Marker Name + +**If user provided name**: +- Use their name (sanitize: lowercase, hyphens for spaces) +- Example: "Before Big Refactor" → "before-big-refactor" + +**If no name provided**: +- Auto-generate with timestamp: `marker-{YYYY-MM-DD}-{HHmm}` +- Example: `marker-2025-10-16-1430` + +**Ask user for optional note**: +``` +Creating marker: [name] + +Add a note? (optional - helps remember context later) +Example: "OAuth working, need to add tests" + +Note: +``` + +### Step 3: Generate Marker Content + +Create marker document with this structure: + +```markdown +# Context Marker: [name] + +**Created**: [YYYY-MM-DD HH:MM] +**Note**: [user's note or "No note provided"] + +--- + +## Conversation Summary + +[Summarize last 10-15 messages: +- What user was working on +- Key decisions made +- Problems solved +- Current progress state +] + +## Documentation Loaded + +[List docs that were Read during session: +- Navigator: ✅ .agent/DEVELOPMENT-README.md +- Task: TASK-XX-feature.md +- System: project-architecture.md +- SOPs: [if any] +] + +## Files Modified + +[List files with Write/Edit calls: +- src/auth/login.ts (implemented OAuth) +- src/routes/auth.ts (added endpoints) +- tests/auth.test.ts (created tests) +] + +## Current Focus + +[What user is working on right now: +- Feature: Authentication with OAuth +- Phase: Integration complete, testing pending +- Blockers: [if any] +] + +## Technical Decisions + +[Key architectural choices: +- Using passport.js over next-auth (better control) +- JWT tokens in httpOnly cookies (XSS protection) +- Redis for session storage (scalability) +] + +## Next Steps + +[What to do after restore: +1. Finish writing tests for OAuth flow +2. Add error handling for failed logins +3. 
Document setup in README +] + +## Restore Instructions + +To restore this marker: +\```bash +Read .agent/.context-markers/[filename] +\``` + +Or use: `/nav:markers` and select this marker +``` + +### Step 4: Save Marker File + +Write marker to file: + +``` +Write( + file_path: ".agent/.context-markers/[timestamp]_[name].md", + content: [generated marker content] +) +``` + +Filename format: `{YYYY-MM-DD-HHmm}_{name}.md` +Example: `2025-10-16-1430_before-big-refactor.md` + +### Step 4.5: Verify Marker Creation + +After creating marker, verify it was written successfully: + +```bash +# Verify file exists and is non-empty +if [ -f ".agent/.context-markers/[filename]" ] && [ -s ".agent/.context-markers/[filename]" ]; then + # Calculate checksum for verification + checksum=$(md5 -q ".agent/.context-markers/[filename]" 2>/dev/null || md5sum ".agent/.context-markers/[filename]" | cut -d' ' -f1) + + # Log to central marker log + echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] ✅ Marker created: [filename] (checksum: $checksum)" >> .agent/.marker-log + + echo "✅ Marker verified successfully" +else + echo "❌ Marker creation failed - file missing or empty" + exit 1 +fi +``` + +Marker verification ensures: +- File exists on disk +- File has content (non-empty) +- Checksum logged for integrity verification +- Creation event logged to central log + +### Step 5: Confirm Creation + +Show success message with verification details: + +``` +✅ Context marker created! + +Marker: [name] +File: .agent/.context-markers/[filename] +Size: [X] KB (~[Y] tokens) +Checksum: [md5-hash] +Verified: ✅ + +This marker captures: +- Last [N] messages of conversation +- Files you were working on +- Technical decisions made +- Next steps to continue + +To restore later: +- Start new session +- Say "load marker [name]" +- Or use /nav:markers to list all markers + +Logged to: .agent/.marker-log +``` + +## Scripts + +**create_marker.py**: Generates marker content from conversation analysis +- Input: Conversation history (from Claude) +- Output: Formatted markdown marker + +## Common Use Cases + +### Before Lunch Break +``` +User: "Save my progress, taking lunch" +→ Creates marker: "lunch-break-2025-10-16" +→ Captures current state +→ User resumes after lunch: "Load my lunch marker" +``` + +### Before Risky Refactor +``` +User: "Mark this before I refactor routing" +→ Creates marker: "before-routing-refactor" +→ If refactor fails, restore marker +→ If refactor succeeds, delete marker +``` + +### End of Day +``` +User: "End of day checkpoint" +→ Creates marker: "eod-2025-10-16" +→ Note: "OAuth done, tests tomorrow" +→ Next morning: "Load yesterday's marker" +``` + +### Before Context Compact +``` +Automatic (via nav-compact skill): +→ Creates marker: "before-compact-2025-10-16-1500" +→ Compact clears conversation +→ Marker preserves knowledge +→ Next session: Auto-offers to restore +``` + +## Marker Best Practices + +**Good marker names**: +- `lunch-break` (clear when/why) +- `before-api-refactor` (indicates purpose) +- `feature-complete` (marks milestone) +- `eod-friday` (specific timing) + +**Bad marker names**: +- `temp` (not descriptive) +- `marker1` (meaningless) +- `test` (confusing) + +**When to create markers**: +- ✅ Before breaks (lunch, EOD) +- ✅ Before risky changes +- ✅ Before context compact +- ✅ At milestones (feature complete) +- ❌ After every single message (noise) +- ❌ When context is fresh (< 5 messages) + +## Error Handling + +**Marker directory missing**: +``` +Creating .agent/.context-markers/ directory... 
+✅ Ready to save markers
+```
+
+**Duplicate marker name**:
+```
+⚠️ Marker "[name]" already exists
+
+Options:
+1. Overwrite (replace existing)
+2. Append timestamp (create "[name]-v2")
+3. Choose different name
+
+Your choice [1-3]:
+```
+
+**Insufficient context**:
+```
+⚠️ Very little context to save (< 5 messages)
+
+Markers work best when there's significant progress to preserve.
+Continue anyway? [y/N]:
+```
+
+## Success Criteria
+
+Marker creation is successful when:
+- [ ] Marker file created in `.agent/.context-markers/`
+- [ ] Filename is unique and descriptive
+- [ ] Content includes: summary, loaded docs, files modified, next steps
+- [ ] User knows how to restore marker later
+- [ ] Marker is 2-5k tokens (comprehensive but efficient)
+
+## Notes
+
+- Markers are **git-ignored** (personal session save points)
+- Team members don't see each other's markers
+- Markers can be deleted anytime with `/nav:markers clean`
+- Typical marker size: 2-5k tokens (97.7% compression from 130k conversation)
+
+This skill provides the same functionality as the `/nav:marker` command but with natural language invocation.
diff --git a/skills/nav-marker/functions/marker_compressor.py b/skills/nav-marker/functions/marker_compressor.py
new file mode 100755
index 0000000..969e1e9
--- /dev/null
+++ b/skills/nav-marker/functions/marker_compressor.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+"""
+Compress conversation context into a concise marker summary.
+"""
+
+import sys
+import argparse
+from datetime import datetime
+
+def compress_context(context_text, max_length=5000):
+    """
+    Compress conversation context while preserving key information.
+
+    Args:
+        context_text: Full conversation context
+        max_length: Maximum compressed length (default: 5000 chars)
+
+    Returns:
+        str: Compressed summary
+    """
+    # In a real implementation, this would use AI summarization
+    # For now, we'll use simple truncation with smart extraction
+
+    # Extract key sections (simplified for v2.0)
+    lines = context_text.split('\n')
+
+    # Priority extraction:
+    # 1. Code blocks
+    # 2. File paths mentioned
+    # 3. Error messages
+    # 4. Task descriptions
+    # 5. Recent conversation
+
+    code_blocks = []
+    file_paths = []
+    errors = []
+    recent_context = []
+
+    in_code_block = False
+    code_buffer = []
+
+    for line in lines[-200:]:  # Focus on recent 200 lines
+        # Extract code blocks
+        if line.strip().startswith('```'):
+            if in_code_block:
+                code_blocks.append('\n'.join(code_buffer))
+                code_buffer = []
+            in_code_block = not in_code_block
+        elif in_code_block:
+            code_buffer.append(line)
+
+        # Extract file paths
+        if '.md' in line or '.py' in line or '.json' in line or '.sh' in line:
+            file_paths.append(line.strip())
+
+        # Extract errors
+        if 'error' in line.lower() or 'failed' in line.lower():
+            errors.append(line.strip())
+
+        # Keep every line in the window (the summary
+        # below slices out the most recent 20 lines)
+        recent_context.append(line)
+
+    # Build compressed summary
+    summary_parts = []
+
+    if file_paths:
+        summary_parts.append("**Files Modified**:\n" + '\n'.join(list(dict.fromkeys(file_paths))[:10]))  # dedupe, preserving order
+
+    if code_blocks:
+        summary_parts.append("**Code Snippets**:\n```\n" + '\n\n'.join(code_blocks[:3]) + "\n```")
+
+    if errors:
+        summary_parts.append("**Errors/Issues**:\n" + '\n'.join(list(dict.fromkeys(errors))[:5]))  # dedupe, preserving order
+
+    summary_parts.append("**Recent Context**:\n" + '\n'.join(recent_context[-20:]))
+
+    compressed = '\n\n---\n\n'.join(summary_parts)
+
+    # Ensure within max_length
+    if len(compressed) > max_length:
+        compressed = compressed[:max_length] + "\n\n[... 
truncated ...]" + + return compressed + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Compress conversation context for markers") + parser.add_argument("--input", help="Input file (default: stdin)") + parser.add_argument("--max-length", type=int, default=5000, help="Max compressed length") + + args = parser.parse_args() + + if args.input: + with open(args.input, 'r') as f: + context = f.read() + else: + context = sys.stdin.read() + + compressed = compress_context(context, args.max_length) + print(compressed) diff --git a/skills/nav-markers/SKILL.md b/skills/nav-markers/SKILL.md new file mode 100644 index 0000000..e69de29 diff --git a/skills/nav-skill-creator/SKILL.md b/skills/nav-skill-creator/SKILL.md new file mode 100644 index 0000000..09b001e --- /dev/null +++ b/skills/nav-skill-creator/SKILL.md @@ -0,0 +1,532 @@ +--- +name: nav-skill-creator +description: Analyze codebase patterns and create custom skills for repetitive workflows. Use when project needs automation or pattern enforcement. Auto-invoke when user says "create a skill for...", "automate this workflow", or "we keep doing X manually". +allowed-tools: Read, Write, Edit, Grep, Glob, Bash, Task +version: 1.0.0 +--- + +# Navigator Skill Creator + +Create project-specific skills by analyzing codebase patterns and automating repetitive workflows. + +## When to Invoke + +Auto-invoke when user mentions: +- "Create a skill for [pattern]" +- "Automate this workflow" +- "We keep doing X manually" +- "Enforce this pattern" +- "Generate boilerplate for [feature type]" +- "We need consistency for [task type]" + +## What This Does + +1. Analyzes codebase to understand project patterns +2. Identifies best practices from existing code +3. Generates skill with: + - Auto-invocation triggers + - Predefined functions + - Templates + - Examples +4. Tests the generated skill +5. Documents the new skill + +## Execution Steps + +### Step 1: Understand Skill Request + +Ask clarifying questions: +- What pattern/workflow to automate? +- What triggers should invoke this skill? +- What output format is expected? +- Are there existing examples in the codebase? + +**Example dialogue**: +``` +User: "Create a skill for adding React components" +Assistant: "I'll analyze your codebase to understand React component patterns. +- What directory are components in? +- Do you use TypeScript or JavaScript? +- Do you want tests generated automatically? +- Are there style files (CSS/SCSS) per component?" +``` + +### Step 2: Analyze Codebase Patterns + +**Use Task agent to explore** (saves 60-80% tokens): +``` +Use Task agent with subagent_type=Explore: +"Find existing [pattern type] in codebase: + - Locate all [files matching pattern] + - Identify common structure + - Extract best practices + - Find configuration files + - Return summary of findings" +``` + +**What to look for**: +- File naming conventions (kebab-case, PascalCase, etc.) 
+- Directory structure patterns +- Import/export patterns +- Testing patterns +- Configuration patterns +- Documentation patterns + +**Example for React components**: +``` +Task agent finds: +- Components in src/components/ +- PascalCase naming (UserProfile.tsx) +- Co-located tests (UserProfile.test.tsx) +- Props interfaces defined above component +- Export default at bottom +``` + +### Step 3: Design Skill Structure + +**Determine skill metadata**: +```yaml +name: [project]-[pattern-type] +description: [When to auto-invoke + what it does] +allowed-tools: [Read, Write, Edit, Grep, Glob, Bash, Task] +version: 1.0.0 +``` + +**Plan directory structure**: +``` +skills/[skill-name]/ +├── SKILL.md # Main instructions +├── functions/ # Python helper scripts +│ └── [generator].py +├── examples/ # Reference implementations +│ └── [example].[ext] +└── templates/ # Output format templates + └── [template].[ext] +``` + +**Design predefined functions**: +- What repetitive logic can be automated? +- What validation should be enforced? +- What formatting ensures consistency? + +**Example functions for frontend-component skill**: +- `component_generator.py` - Generate component boilerplate +- `test_generator.py` - Generate test file +- `style_generator.py` - Generate style file +- `name_validator.py` - Validate component naming + +### Step 4: Generate Skill Files + +**4.1 Create SKILL.md** + +```markdown +--- +name: [skill-name] +description: [Auto-invocation triggers + purpose] +allowed-tools: [List of tools] +version: 1.0.0 +--- + +# [Skill Title] + +[Brief description of what this skill does] + +## When to Invoke + +Auto-invoke when user says: +- "[trigger phrase 1]" +- "[trigger phrase 2]" +- "[trigger phrase 3]" + +## What This Does + +1. [Step 1 overview] +2. [Step 2 overview] +3. [Step 3 overview] + +## Execution Steps + +### Step 1: [Step Name] + +[Detailed instructions for this step] + +**Use predefined function**: `functions/[function-name].py` +``` + +**4.2 Create Predefined Functions** + +```python +# functions/[generator].py + +def generate_[output](name, config): + """ + Generate [output type] based on project patterns. + + Args: + name: [Description] + config: [Description] + + Returns: + [output]: [Description] + """ + # Implementation based on codebase analysis + pass +``` + +**4.3 Create Examples** + +``` +examples/ +└── [reference-implementation].[ext] + - Real example from codebase (best practice) + - Shows expected structure + - Demonstrates conventions +``` + +**4.4 Create Templates** + +``` +templates/ +└── [output-template].[ext] + - Skeleton structure with placeholders + - ${VAR_NAME} for substitution + - Comments explaining sections +``` + +### Step 5: Test Generated Skill + +**5.1 Verify skill loads**: +```bash +# In project root +grep -r "name: [skill-name]" skills/ +``` + +**5.2 Test auto-invocation**: +``` +In Claude Code conversation: +"[Use one of the auto-invoke trigger phrases]" + +Expected: Skill should be detected and loaded +``` + +**5.3 Test execution**: +- Run through skill steps +- Verify functions work correctly +- Check output matches template +- Validate generated code follows patterns + +**5.4 Iterate if needed**: +- Fix function bugs +- Improve templates +- Add missing examples +- Clarify instructions + +### Step 6: Document New Skill + +**Update project documentation**: + +1. **CLAUDE.md** - Add to skills section: +```markdown +#### [Skill Name] +**Auto-invoke**: "[trigger phrase]" +**Purpose**: [What it does] +**Generates**: [Output type] +``` + +2. 
**README.md** - Add to skills list: +```markdown +- **[skill-name]**: [Brief description] +``` + +3. **.agent/system/plugin-patterns.md** - Add to skill registry: +```markdown +### [Skill Name] +**Created**: [Date] +**Pattern**: [What pattern it enforces] +**Functions**: [List of predefined functions] +``` + +**Register in plugin.json** (if applicable): +```json +{ + "skills": [ + { + "name": "[skill-name]", + "path": "skills/[skill-name]/SKILL.md" + } + ] +} +``` + +--- + +## Example Workflows + +### Example 1: Create Skill for Adding API Endpoints + +**User**: "Create a skill for adding REST API endpoints" + +**Execution**: + +1. **Clarify**: + - Which framework? (Express, Fastify, etc.) + - Where are routes defined? + - Authentication required? + - Testing strategy? + +2. **Analyze** (via Task agent): + ``` + Find existing API endpoints: + - Routes in api/routes/ + - Controllers in api/controllers/ + - Middleware in api/middleware/ + - Tests in tests/api/ + ``` + +3. **Design**: + ```yaml + name: backend-api-endpoint + description: Add new REST API endpoint following project conventions. Use when user says "add endpoint", "create API", or "new route". + allowed-tools: Read, Write, Edit, Grep, Glob, Bash + ``` + +4. **Generate**: + ``` + skills/backend-api-endpoint/ + ├── SKILL.md + ├── functions/ + │ ├── endpoint_generator.py + │ └── route_validator.py + ├── examples/ + │ └── user-endpoint.ts + └── templates/ + ├── route-template.ts + └── test-template.spec.ts + ``` + +5. **Test**: + ``` + User: "Add a POST /posts endpoint" + Skill: Auto-invoked, generates route + controller + test + Verify: Files follow project conventions + ``` + +6. **Document**: Update CLAUDE.md, README.md, plugin-patterns.md + +### Example 2: Create Skill for React Components + +**User**: "Automate creating new React components" + +**Execution**: + +1. **Clarify**: + - TypeScript or JavaScript? + - Functional or class components? + - Style approach? (CSS modules, styled-components, etc.) + - Test library? (Jest, React Testing Library, etc.) + +2. **Analyze** (via Task agent): + ``` + Find React components: + - Components in src/components/ + - PascalCase naming + - TypeScript (.tsx) + - CSS modules (.module.css) + - Tests with RTL + ``` + +3. **Design**: + ```yaml + name: frontend-component + description: Create new React component with TypeScript, styles, and tests. Use when user says "create component", "add component", or "new React component". + allowed-tools: Read, Write, Edit, Grep, Glob, Bash + ``` + +4. **Generate**: + ``` + skills/frontend-component/ + ├── SKILL.md + ├── functions/ + │ ├── component_generator.py + │ ├── test_generator.py + │ └── style_generator.py + ├── examples/ + │ ├── Button.tsx + │ └── Button.test.tsx + └── templates/ + ├── component-template.tsx + ├── test-template.test.tsx + └── style-template.module.css + ``` + +5. **Test**: + ``` + User: "Create a UserProfile component" + Skill: Auto-invoked, generates component + test + styles + Verify: Props interface, exports, naming correct + ``` + +6. 
**Document**: Update project docs + +--- + +## Output Format + +**After generating skill, show summary**: + +``` +✅ Skill Created: [skill-name] + +Structure: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📁 skills/[skill-name]/ + ├── SKILL.md + ├── functions/ + │ └── [N functions created] + ├── examples/ + │ └── [N examples added] + └── templates/ + └── [N templates created] +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Auto-Invocation Triggers: +- "[trigger 1]" +- "[trigger 2]" +- "[trigger 3]" + +Next Steps: +1. Test the skill: "[example trigger phrase]" +2. Iterate if needed +3. Documentation updated + +Try it now: "[example usage]" +``` + +--- + +## Best Practices + +### Pattern Analysis +- Use Task agent for codebase exploration (saves 60-80% tokens) +- Look at 3-5 examples minimum (find patterns vs outliers) +- Identify conventions explicitly followed +- Note edge cases in comments + +### Skill Design +- Keep skills focused (one pattern per skill) +- Clear auto-invocation triggers (3-5 phrases) +- Minimal tools needed (add only what's required) +- Progressive disclosure (details in functions, not main instructions) + +### Function Creation +- One function = one responsibility +- Type hints and docstrings required +- Handle errors gracefully +- Return structured data (not print statements) + +### Template Design +- Use clear placeholders (${VAR_NAME}) +- Include comments explaining sections +- Follow project style guide +- Provide sensible defaults + +### Testing +- Test with real project context +- Verify auto-invocation works +- Check output against best practices +- Iterate based on actual usage + +--- + +## Common Patterns to Automate + +### Backend Patterns +- REST API endpoints +- GraphQL resolvers +- Database migrations +- Background jobs +- Middleware functions +- Authentication guards + +### Frontend Patterns +- React/Vue/Svelte components +- Redux/Vuex store modules +- API client functions +- Form validation schemas +- Route definitions +- Style component creation + +### Infrastructure Patterns +- Docker service configs +- CI/CD pipeline steps +- Deployment scripts +- Environment configs +- Monitoring setup + +### Documentation Patterns +- API documentation +- Component documentation +- Architecture decision records (ADRs) +- Runbook entries +- Changelog entries + +--- + +## Troubleshooting + +### Skill Not Auto-Invoking + +**Problem**: Skill created but doesn't trigger automatically + +**Solutions**: +1. Check description has clear trigger phrases +2. Verify `plugin.json` includes skill registration +3. Reload Claude Code to refresh skill index +4. Test with exact trigger phrase from description + +### Functions Not Executing + +**Problem**: Predefined functions throw errors + +**Solutions**: +1. Check Python syntax is valid +2. Verify function imports are correct +3. Test function independently first +4. Check error messages in execution logs + +### Templates Not Matching Output + +**Problem**: Generated code doesn't match project conventions + +**Solutions**: +1. Re-analyze codebase for missed patterns +2. Update templates with correct structure +3. Add more examples showing variations +4. Validate against linter/formatter + +### Skill Too Broad + +**Problem**: Skill tries to do too much + +**Solutions**: +1. Split into multiple focused skills +2. Remove optional features to separate skills +3. Keep core pattern simple +4. 
Add extensions as separate skills + +--- + +## Success Criteria + +**This skill succeeds when**: +- [ ] New skill auto-invokes correctly +- [ ] Generated output follows project conventions +- [ ] Functions execute without errors +- [ ] Templates produce valid code +- [ ] Examples are clear and relevant +- [ ] Documentation is updated +- [ ] Skill saves time vs manual work + +--- + +**The skill-creator is Navigator's self-improving engine - it learns your patterns and automates them** 🔄 \ No newline at end of file diff --git a/skills/nav-skill-creator/examples/example-generated-skill.md b/skills/nav-skill-creator/examples/example-generated-skill.md new file mode 100644 index 0000000..33ba268 --- /dev/null +++ b/skills/nav-skill-creator/examples/example-generated-skill.md @@ -0,0 +1,94 @@ +--- +name: example-feature-generator +description: Generate boilerplate for new features following project conventions. Use when user says "create feature", "add feature", or "new feature scaffolding". +allowed-tools: Read, Write, Edit, Grep, Glob, Bash +version: 1.0.0 +--- + +# Example Feature Generator + +This is an example of a generated skill created by nav-skill-creator. + +## When to Invoke + +Auto-invoke when user says: +- "Create a new feature" +- "Add feature scaffolding" +- "Generate feature boilerplate" + +## What This Does + +1. Asks for feature name and type +2. Analyzes existing features for patterns +3. Generates feature files following project conventions +4. Creates tests and documentation + +## Execution Steps + +### Step 1: Gather Feature Requirements + +Ask user: +- Feature name (kebab-case) +- Feature type (API, UI, background job, etc.) +- Dependencies needed +- Testing requirements + +### Step 2: Analyze Existing Patterns + +Use Task agent to explore codebase: +``` +"Find existing features similar to [feature-type]: + - Locate feature files + - Identify structure patterns + - Extract naming conventions + - Find test patterns" +``` + +### Step 3: Generate Feature Files + +Use predefined function: `functions/feature_generator.py` + +```python +# Generates feature structure based on analysis +generate_feature(name, feature_type, config) +``` + +Creates: +- Feature implementation file +- Test file +- Configuration file (if needed) +- Documentation stub + +### Step 4: Validate Generated Files + +Check: +- [ ] Files follow naming conventions +- [ ] Imports are correct +- [ ] Tests are generated +- [ ] Documentation is created + +### Step 5: Show Summary + +Display created files and next steps for user. + +--- + +## Output Format + +``` +✅ Feature Created: [feature-name] + +Files generated: +- features/[feature-name]/index.ts +- features/[feature-name]/[feature-name].test.ts +- features/[feature-name]/README.md + +Next steps: +1. Implement feature logic in index.ts +2. Add test cases in [feature-name].test.ts +3. Document usage in README.md +``` + +--- + +**This is an example - actual generated skills will vary based on project patterns** diff --git a/skills/nav-skill-creator/functions/skill_generator.py b/skills/nav-skill-creator/functions/skill_generator.py new file mode 100644 index 0000000..ab6ed59 --- /dev/null +++ b/skills/nav-skill-creator/functions/skill_generator.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +Skill Generator - Example predefined function for nav-skill-creator + +This is a reference implementation showing how predefined functions work. +Actual implementations will vary based on the skill being created. 
+""" + +from typing import Dict, List, Optional + + +def generate_skill_structure( + skill_name: str, + description: str, + triggers: List[str], + tools: List[str] = None +) -> Dict[str, str]: + """ + Generate basic skill structure with YAML frontmatter and markdown body. + + Args: + skill_name: Name of the skill (kebab-case) + description: When to auto-invoke and what the skill does + triggers: List of phrases that should auto-invoke the skill + tools: List of allowed tools (default: Read, Write, Edit, Grep, Glob, Bash) + + Returns: + Dictionary with 'frontmatter' and 'body' keys containing the generated content + + Example: + >>> generate_skill_structure( + ... "example-skill", + ... "Example skill for demo", + ... ["create example", "add example"] + ... ) + {'frontmatter': '---\\nname: example-skill\\n...', 'body': '# Example Skill\\n...'} + """ + if tools is None: + tools = ["Read", "Write", "Edit", "Grep", "Glob", "Bash"] + + # Generate YAML frontmatter + frontmatter = f"""--- +name: {skill_name} +description: {description} +allowed-tools: {', '.join(tools)} +version: 1.0.0 +---""" + + # Generate markdown body + trigger_list = '\n'.join([f'- "{trigger}"' for trigger in triggers]) + + body = f""" +# {skill_name.replace('-', ' ').title()} + +[Brief description of what this skill does] + +## When to Invoke + +Auto-invoke when user says: +{trigger_list} + +## What This Does + +1. [Step 1 overview] +2. [Step 2 overview] +3. [Step 3 overview] + +## Execution Steps + +### Step 1: [Step Name] + +[Detailed instructions for this step] + +### Step 2: [Step Name] + +[Detailed instructions for this step] + +### Step 3: [Step Name] + +[Detailed instructions for this step] + +--- + +## Output Format + +``` +✅ [Task Complete] + +[Summary of what was generated or accomplished] +``` + +--- + +## Best Practices + +- [Best practice 1] +- [Best practice 2] +- [Best practice 3] + +--- + +**[Closing statement about the skill]** +""" + + return { + 'frontmatter': frontmatter, + 'body': body.strip(), + 'full': f"{frontmatter}\n\n{body.strip()}" + } + + +def validate_skill_name(name: str) -> tuple[bool, Optional[str]]: + """ + Validate skill name follows conventions. + + Args: + name: Skill name to validate + + Returns: + Tuple of (is_valid, error_message) + + Example: + >>> validate_skill_name("my-skill") + (True, None) + >>> validate_skill_name("MySkill") + (False, "Skill name must be kebab-case") + """ + import re + + if not name: + return False, "Skill name cannot be empty" + + if not re.match(r'^[a-z][a-z0-9]*(-[a-z0-9]+)*$', name): + return False, "Skill name must be kebab-case (lowercase, hyphens only)" + + if len(name) > 50: + return False, "Skill name too long (max 50 characters)" + + return True, None + + +def format_tool_list(tools: List[str]) -> str: + """ + Format list of tools for YAML frontmatter. 
+ + Args: + tools: List of tool names + + Returns: + Comma-separated string of tools + + Example: + >>> format_tool_list(["Read", "Write", "Edit"]) + 'Read, Write, Edit' + """ + return ', '.join(tools) + + +if __name__ == "__main__": + # Example usage + result = generate_skill_structure( + skill_name="example-generator", + description="Generate examples following project patterns", + triggers=["create example", "add example", "new example"] + ) + + print("Generated Skill:") + print("=" * 50) + print(result['full']) + print("=" * 50) + + # Validate some names + test_names = ["my-skill", "MySkill", "my_skill", "skill-123"] + print("\nValidation Tests:") + for name in test_names: + valid, error = validate_skill_name(name) + status = "✅" if valid else "❌" + print(f"{status} {name}: {error or 'Valid'}") diff --git a/skills/nav-skill-creator/templates/skill-template.md b/skills/nav-skill-creator/templates/skill-template.md new file mode 100644 index 0000000..0b1b28d --- /dev/null +++ b/skills/nav-skill-creator/templates/skill-template.md @@ -0,0 +1,126 @@ +--- +name: ${SKILL_NAME} +description: ${DESCRIPTION} +allowed-tools: ${ALLOWED_TOOLS} +version: 1.0.0 +--- + +# ${SKILL_TITLE} + +${BRIEF_DESCRIPTION} + +## When to Invoke + +Auto-invoke when user says: +- "${TRIGGER_1}" +- "${TRIGGER_2}" +- "${TRIGGER_3}" + +## What This Does + +1. ${STEP_1_OVERVIEW} +2. ${STEP_2_OVERVIEW} +3. ${STEP_3_OVERVIEW} + +## Execution Steps + +### Step 1: ${STEP_1_NAME} + +${STEP_1_INSTRUCTIONS} + +**Use predefined function** (if applicable): `functions/${FUNCTION_1_NAME}.py` + +### Step 2: ${STEP_2_NAME} + +${STEP_2_INSTRUCTIONS} + +**Use predefined function** (if applicable): `functions/${FUNCTION_2_NAME}.py` + +### Step 3: ${STEP_3_NAME} + +${STEP_3_INSTRUCTIONS} + +--- + +## Example Workflows + +### Example 1: ${EXAMPLE_1_TITLE} + +**User**: "${EXAMPLE_1_USER_INPUT}" + +**Execution**: + +1. ${EXAMPLE_1_STEP_1} +2. ${EXAMPLE_1_STEP_2} +3. ${EXAMPLE_1_STEP_3} + +**Output**: +``` +${EXAMPLE_1_OUTPUT} +``` + +--- + +## Output Format + +**After completing task, show summary**: + +``` +✅ ${SUCCESS_MESSAGE} + +${OUTPUT_SUMMARY} + +Next Steps: +1. ${NEXT_STEP_1} +2. ${NEXT_STEP_2} +3. ${NEXT_STEP_3} +``` + +--- + +## Best Practices + +### ${BEST_PRACTICE_CATEGORY_1} +- ${BEST_PRACTICE_1} +- ${BEST_PRACTICE_2} +- ${BEST_PRACTICE_3} + +### ${BEST_PRACTICE_CATEGORY_2} +- ${BEST_PRACTICE_4} +- ${BEST_PRACTICE_5} +- ${BEST_PRACTICE_6} + +--- + +## Troubleshooting + +### ${PROBLEM_1} + +**Problem**: ${PROBLEM_1_DESCRIPTION} + +**Solutions**: +1. ${SOLUTION_1_1} +2. ${SOLUTION_1_2} +3. ${SOLUTION_1_3} + +### ${PROBLEM_2} + +**Problem**: ${PROBLEM_2_DESCRIPTION} + +**Solutions**: +1. ${SOLUTION_2_1} +2. ${SOLUTION_2_2} + +--- + +## Success Criteria + +**This skill succeeds when**: +- [ ] ${SUCCESS_CRITERION_1} +- [ ] ${SUCCESS_CRITERION_2} +- [ ] ${SUCCESS_CRITERION_3} +- [ ] ${SUCCESS_CRITERION_4} + +--- + +**${CLOSING_STATEMENT}** diff --git a/skills/nav-sop/SKILL.md b/skills/nav-sop/SKILL.md new file mode 100644 index 0000000..bf3488c --- /dev/null +++ b/skills/nav-sop/SKILL.md @@ -0,0 +1,549 @@ +--- +name: nav-sop +description: Create Standard Operating Procedures after solving novel issues, establishing patterns, or documenting workflows. Use when user says "document this solution", "save this for next time", "create SOP". 
+allowed-tools: Read, Write, Bash +version: 1.0.0 +--- + +# Navigator SOP Creator Skill + +Create Standard Operating Procedures (SOPs) - reusable documentation for processes, integrations, debugging solutions, and workflows. + +## When to Invoke + +Invoke this skill when the user: +- Says "document this solution", "save this for next time" +- Says "create SOP", "make this reusable" +- Solves a novel issue and mentions "don't want to hit this again" +- Sets up integration and says "document the setup" +- Establishes pattern and mentions "team should follow this" + +**DO NOT invoke** if: +- Creating task documentation (use nav-task skill) +- Updating architecture docs (different purpose) +- Simple bug fix with no reusable pattern + +## Execution Steps + +### Step 1: Determine SOP Category + +Ask user which category (or infer from context): + +**Categories**: +1. **integrations** - Third-party service setups +2. **debugging** - Common issues & solutions +3. **development** - Dev workflows, patterns +4. **deployment** - Deploy procedures, CI/CD + +**Examples**: +- "How to set up Stripe webhooks" → `integrations/` +- "Fixing CORS errors" → `debugging/` +- "Testing authenticated routes" → `development/` +- "Deploy to production" → `deployment/` + +### Step 2: Determine SOP Name + +**If user provided name**: +- Use their name (sanitize: lowercase, hyphens) +- Example: "Stripe Payment Setup" → "stripe-payment-setup" + +**If no name provided**: +- Generate from context: `{service}-{action}` +- Example: "github-oauth-integration" +- Example: "cors-proxy-errors" + +### Step 3: Check if SOP Already Exists + +Check existing SOPs in category: + +```bash +ls .agent/sops/{category}/*.md 2>/dev/null +``` + +**If similar SOP exists**: +``` +⚠️ Similar SOP found: + .agent/sops/{category}/{similar-name}.md + +Options: +1. Read existing SOP (don't duplicate) +2. Update existing SOP (add to it) +3. Create new SOP (different enough) + +Your choice [1-3]: +``` + +### Step 4: Generate SOP Content + +Create SOP document from conversation: + +```markdown +# {SOP Title} + +**Category**: {integrations|debugging|development|deployment} +**Created**: {YYYY-MM-DD} +**Last Updated**: {YYYY-MM-DD} + +--- + +## Context + +**When to use this SOP**: +[Describe the scenario where this applies] + +**Problem it solves**: +[What issue does this address?] + +**Prerequisites**: +- [Requirement 1] +- [Requirement 2] + +--- + +## The Problem + +### Symptoms +[What does the issue look like?] +- Error message: `{specific error}` +- Behavior: [Unexpected behavior] +- Impact: [What breaks] + +### Root Cause +[Why does this happen? Technical explanation] + +--- + +## The Solution + +### Step 1: {Action} + +**Do this**: +```bash +# Command or code +npm install stripe +``` + +**Why**: +[Explanation of what this accomplishes] + +**Expected output**: +``` ++ stripe@12.0.0 +added 1 package +``` + +### Step 2: {Next Action} + +**Do this**: +```typescript +// Code example +import Stripe from 'stripe'; + +const stripe = new Stripe(process.env.STRIPE_SECRET_KEY); +``` + +**Why**: +[Explanation] + +**Configuration**: +Add to `.env`: +``` +STRIPE_SECRET_KEY=sk_test_... +STRIPE_WEBHOOK_SECRET=whsec_... +``` + +### Step 3: {Continue...} +... 
+ +--- + +## Complete Example + +### Full Working Code + +**File**: `src/services/stripe.ts` +```typescript +import Stripe from 'stripe'; + +export class StripeService { + private stripe: Stripe; + + constructor() { + this.stripe = new Stripe(process.env.STRIPE_SECRET_KEY!, { + apiVersion: '2023-10-16', + }); + } + + async createPaymentIntent(amount: number) { + return await this.stripe.paymentIntents.create({ + amount: amount * 100, // Convert to cents + currency: 'usd', + }); + } +} +``` + +**File**: `src/routes/webhook.ts` +```typescript +export async function handleStripeWebhook(req: Request, res: Response) { + const sig = req.headers['stripe-signature']; + + try { + const event = stripe.webhooks.constructEvent( + req.body, + sig, + process.env.STRIPE_WEBHOOK_SECRET! + ); + + // Handle event + switch (event.type) { + case 'payment_intent.succeeded': + // Process successful payment + break; + } + + res.json({ received: true }); + } catch (err) { + res.status(400).send(`Webhook Error: ${err.message}`); + } +} +``` + +--- + +## Testing + +### Verify It Works + +**Test 1: Create payment intent** +```bash +curl -X POST http://localhost:3000/api/create-payment \ + -H "Content-Type: application/json" \ + -d '{"amount": 10}' +``` + +**Expected result**: +```json +{ + "clientSecret": "pi_xxx_secret_yyy" +} +``` + +**Test 2: Webhook delivery** +```bash +stripe listen --forward-to localhost:3000/webhook +``` + +**Expected result**: +``` +Ready! You are using Stripe API Version [2023-10-16] +``` + +--- + +## Prevention + +**How to avoid this issue in future**: +- [Prevention strategy 1] +- [Prevention strategy 2] + +**Red flags to watch for**: +- [Warning sign 1] +- [Warning sign 2] + +--- + +## Troubleshooting + +### Issue: Webhook signature verification fails + +**Symptoms**: +``` +Error: No signatures found matching the expected signature +``` + +**Cause**: Webhook secret mismatch or body already parsed + +**Fix**: +```typescript +// Use raw body for webhook verification +app.post('/webhook', express.raw({type: 'application/json'}), handleStripeWebhook); +``` + +### Issue: Payment amount incorrect + +**Symptoms**: Charged wrong amount + +**Cause**: Forgot to convert to cents + +**Fix**: Always multiply by 100 for Stripe amounts + +--- + +## Related Documentation + +**Stripe Docs**: +- [Payment Intents API](https://stripe.com/docs/api/payment_intents) +- [Webhooks Guide](https://stripe.com/docs/webhooks) + +**Our Docs**: +- Task: `.agent/tasks/TASK-04-stripe-integration.md` +- System: `.agent/system/project-architecture.md` (payments section) + +**External**: +- [Stripe Testing Cards](https://stripe.com/docs/testing) + +--- + +## Maintenance Notes + +**Update when**: +- Stripe API version changes +- Payment flow changes +- New webhook events added + +**Owner**: [Team or person responsible] + +--- + +**Last Updated**: {YYYY-MM-DD} +**Tested With**: Stripe API v2023-10-16, Node.js v18+ +``` + +### Step 5: Save SOP File + +Write to appropriate category: + +``` +Write( + file_path: ".agent/sops/{category}/{name}.md", + content: [generated SOP] +) +``` + +Filename: `.agent/sops/{category}/{name}.md` + +### Step 6: Update Navigator Index + +Edit `.agent/DEVELOPMENT-README.md` to add SOP to index: + +```markdown +## Standard Operating Procedures + +### Integrations +- **{Service}**: `.agent/sops/integrations/{name}.md` - {One-line description} + +### Debugging +- **{Issue}**: `.agent/sops/debugging/{name}.md` - {Description} + +### Development +... + +### Deployment +... 
+``` + +### Step 7: Link to Related Task (If Applicable) + +If SOP came from specific task, add reference: + +**In task doc**: +```markdown +## Related SOPs + +- `.agent/sops/integrations/stripe-payment-setup.md` +``` + +**In SOP**: +```markdown +## Related Documentation + +- Task: `.agent/tasks/TASK-04-stripe-integration.md` +``` + +Cross-linking helps discoverability. + +### Step 8: Confirm Success + +Show completion message: + +``` +✅ SOP created successfully! + +Title: {SOP Title} +Category: {category} +File: .agent/sops/{category}/{name}.md +Size: {X} KB (~{Y} tokens) + +📚 SOP includes: +- Problem description & symptoms +- Step-by-step solution +- Complete code examples +- Testing instructions +- Troubleshooting guide + +🔗 Navigator index updated +[If linked: Linked to TASK-{XX}] + +To reference later: +Read .agent/sops/{category}/{name}.md +``` + +## SOP Categories Explained + +### 1. integrations/ +**Purpose**: How to set up third-party services + +**Examples**: +- `stripe-payment-setup.md` +- `github-oauth-integration.md` +- `sendgrid-email-config.md` +- `redis-session-store.md` + +**Structure**: Setup steps + Configuration + Testing + +### 2. debugging/ +**Purpose**: How to solve common issues + +**Examples**: +- `cors-proxy-errors.md` +- `jwt-token-expiration.md` +- `database-connection-timeout.md` +- `build-errors-typescript.md` + +**Structure**: Symptoms + Root cause + Fix + Prevention + +### 3. development/ +**Purpose**: Development workflows & patterns + +**Examples**: +- `testing-authenticated-routes.md` +- `adding-new-api-endpoint.md` +- `database-migration-workflow.md` +- `component-testing-patterns.md` + +**Structure**: When to use + Steps + Example + Best practices + +### 4. deployment/ +**Purpose**: Deploy, CI/CD, infrastructure + +**Examples**: +- `deploy-to-production.md` +- `rollback-failed-deploy.md` +- `setup-github-actions.md` +- `environment-variables.md` + +**Structure**: Prerequisites + Steps + Verification + Rollback + +## Common Use Cases + +### After Solving Tricky Bug +``` +User: "Finally fixed CORS issue, save this so we don't hit it again" +→ Creates: .agent/sops/debugging/cors-proxy-errors.md +→ Captures: Error, root cause, fix, prevention +→ Team won't repeat mistake +``` + +### After Integration Setup +``` +User: "Stripe webhooks working, document the setup" +→ Creates: .agent/sops/integrations/stripe-webhooks.md +→ Captures: All config steps, code, testing +→ Next integration is copy-paste +``` + +### Establishing Team Pattern +``` +User: "Document how we test protected routes" +→ Creates: .agent/sops/development/testing-auth-routes.md +→ Captures: Pattern, examples, best practices +→ Team follows consistent approach +``` + +## Error Handling + +**Category directory doesn't exist**: +``` +Creating category: .agent/sops/{category}/ +✅ Directory created +``` + +**SOPs directory missing entirely**: +``` +❌ Navigator not initialized + +Run /nav:init to create .agent/ structure. +``` + +**Duplicate SOP name**: +``` +⚠️ SOP already exists: {name}.md + +Options: +1. Read existing (don't duplicate) +2. Update existing (add new info) +3. 
Rename new SOP ({name}-v2.md) + +Your choice [1-3]: +``` + +## Success Criteria + +SOP creation is successful when: +- [ ] SOP file created in correct category +- [ ] Contains all required sections +- [ ] Includes working code examples +- [ ] Testing instructions provided +- [ ] Navigator index updated +- [ ] Linked to related task (if applicable) + +## Scripts + +**generate_sop.py**: Create SOP from conversation +- Input: Conversation, category, name +- Output: Formatted SOP markdown + +## Best Practices + +**Good SOP names**: +- `stripe-payment-integration` (specific, descriptive) +- `cors-proxy-configuration` (clear purpose) +- `jwt-token-refresh` (explains what) + +**Bad SOP names**: +- `fix` (too vague) +- `integration` (not specific) +- `sop1` (meaningless) + +**When to create SOPs**: +- ✅ Solved novel issue (will happen again) +- ✅ Set up integration (reusable process) +- ✅ Established pattern (team should follow) +- ✅ Complex workflow (needs documentation) +- ❌ One-off bug (not reusable) +- ❌ Obvious solution (don't over-document) + +**SOP quality checklist**: +- [ ] Clear problem description +- [ ] Step-by-step solution +- [ ] Complete code examples (copy-paste ready) +- [ ] Testing instructions +- [ ] Troubleshooting common issues + +## Notes + +SOPs are **living documents**: +- Created when pattern established +- Updated when solution improves +- Referenced frequently by team +- Prevent repeated mistakes + +They transform: +- Individual knowledge → Team knowledge +- One-time solution → Reusable process +- Tribal knowledge → Documented procedure + +**Impact**: Zero repeated mistakes over time + +This skill provides same functionality as `/nav:doc sop` command but with natural language invocation. diff --git a/skills/nav-sop/functions/sop_formatter.py b/skills/nav-sop/functions/sop_formatter.py new file mode 100755 index 0000000..0bc450c --- /dev/null +++ b/skills/nav-sop/functions/sop_formatter.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +""" +Format Standard Operating Procedure markdown with proper structure. +""" + +import sys +import argparse +from datetime import datetime + +def format_sop(title, category, problem="", solution="", when_to_use=""): + """ + Generate formatted SOP markdown. + + Args: + title: SOP title (e.g., "Linear MCP Setup") + category: SOP category (integrations, debugging, development, deployment) + problem: Problem description + solution: Solution steps + when_to_use: When to use this SOP + + Returns: + str: Formatted markdown content + """ + today = datetime.now().strftime("%Y-%m-%d") + + template = f"""# {title} + +**Category**: {category} +**Created**: {today} +**Last Updated**: {today} + +--- + +## When to Use This SOP + +{when_to_use or "[Describe when this SOP applies]"} + +**Triggers**: +- [Situation 1] +- [Situation 2] +- [Situation 3] + +--- + +## Problem Statement + +{problem or "[Describe the problem this SOP solves]"} + +**Symptoms**: +- [Symptom 1] +- [Symptom 2] + +**Root Cause**: [Why does this problem occur?] + +--- + +## Solution + +### Prerequisites + +- [Requirement 1] +- [Requirement 2] + +### Step-by-Step Instructions + +#### Step 1: [Action Name] + +```bash +# Example command +command --flag value +``` + +**Expected Output**: +``` +[Show what success looks like] +``` + +**If this fails**: +- Check [common issue 1] +- Verify [common issue 2] + +#### Step 2: [Next Action] + +[Continue with detailed steps...] 
+ +--- + +## Verification + +**How to verify the solution worked**: + +```bash +# Verification command +test-command +``` + +**Expected Result**: [What you should see] + +--- + +## Troubleshooting + +### Issue: [Common Problem] + +**Symptoms**: [How you know this is happening] + +**Solution**: +1. [Fix step 1] +2. [Fix step 2] + +### Issue: [Another Problem] + +**Symptoms**: [Indicators] + +**Solution**: [How to fix] + +--- + +## Related SOPs + +- [Link to related procedure 1] +- [Link to related procedure 2] + +--- + +## Notes + +- [Important considerations] +- [Edge cases to be aware of] +- [Future improvements needed] + +--- + +**Created**: {today} +**Category**: {category} +**Maintained By**: Navigator System +""" + return template + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Format Navigator SOP markdown") + parser.add_argument("--title", required=True, help="SOP title") + parser.add_argument("--category", required=True, + choices=["integrations", "debugging", "development", "deployment"], + help="SOP category") + parser.add_argument("--problem", default="", help="Problem description") + parser.add_argument("--solution", default="", help="Solution steps") + parser.add_argument("--when", default="", help="When to use this SOP") + + args = parser.parse_args() + + output = format_sop( + title=args.title, + category=args.category, + problem=args.problem, + solution=args.solution, + when_to_use=args.when + ) + + print(output) diff --git a/skills/nav-start/SKILL.md b/skills/nav-start/SKILL.md new file mode 100644 index 0000000..66980d4 --- /dev/null +++ b/skills/nav-start/SKILL.md @@ -0,0 +1,342 @@ +--- +name: nav-start +description: Load Navigator documentation navigator when starting development session, resuming work, or beginning new feature. Use when user mentions starting work, beginning session, resuming after break, or checking project status. +allowed-tools: Read, Bash +version: 1.0.0 +--- + +# Navigator Navigator Skill + +Load the Navigator documentation navigator to start your development session with optimized context. + +## When to Invoke + +Invoke this skill when the user: +- Says "start my session", "begin work", "start working" +- Says "load the navigator", "show me the docs" +- Asks "what should I work on?" +- Mentions "resume work", "continue from where I left off" +- Asks about project structure or current tasks + +**DO NOT invoke** if: +- User already ran `/nav:start` command this conversation +- Navigator already loaded (check conversation history) +- User is in middle of implementation (only invoke at session start) + +## Execution Steps + +### Step 1: Check Navigator Version + +Check if user is running latest Navigator version: + +```bash +# Run version checker (optional - doesn't block session start) +if [ -f "scripts/check-version.sh" ]; then + bash scripts/check-version.sh + + # Note: Exit code 1 means update available, but don't block session + # Exit code 0 means up to date + # Exit code 2 means cannot check (network issue) +fi +``` + +**Version check behavior**: +- If update available: Show notification, continue session +- If up to date: Show ✅, continue session +- If cannot check: Skip silently, continue session + +**Never block session start** due to version check. + +### Step 2: Check Navigator Initialization + +Check if `.agent/DEVELOPMENT-README.md` exists: + +```bash +if [ ! 
-f ".agent/DEVELOPMENT-README.md" ]; then + echo "❌ Navigator not initialized in this project" + echo "" + echo "Run /nav:init to set up Navigator structure first." + exit 1 +fi +``` + +If not found, inform user to run `/nav:init` first. + +### Step 3: Load Documentation Navigator + +Read the navigator file: + +``` +Read( + file_path: ".agent/DEVELOPMENT-README.md" +) +``` + +This is the lightweight index (~2k tokens) that tells you: +- What documentation exists +- When to load specific docs +- Current task focus +- Project structure overview + +### Step 4: Check for Active Context Marker + +Check if there's an active marker from previous `/nav:compact`: + +```bash +if [ -f ".agent/.context-markers/.active" ]; then + marker_file=$(cat .agent/.context-markers/.active) + echo "🔄 Active context marker detected!" + echo "" + echo "Marker: $marker_file" + echo "" + echo "This marker was saved during your last /nav:compact." + echo "Load it to continue where you left off?" + echo "" + echo "[Y/n]:" +fi +``` + +If user confirms (Y or Enter): +- Read the marker file: `Read(file_path: ".agent/.context-markers/{marker_file}")` +- Delete `.active` file: `rm .agent/.context-markers/.active` +- Show confirmation: "✅ Context restored from marker!" + +If user declines (n): +- Delete `.active` file +- Show: "Skipping marker load. You can load it later with /nav:markers" + +### Step 5: Load Navigator Configuration + +Read configuration: + +``` +Read( + file_path: ".agent/.nav-config.json" +) +``` + +Parse: +- `project_management`: Which PM tool (linear, github, jira, none) +- `task_prefix`: Task ID format (TASK, GH, LIN, etc.) +- `team_chat`: Team notifications (slack, discord, none) + +### Step 6: Check PM Tool for Assigned Tasks + +**If PM tool is Linear**: +```bash +# Check if Linear MCP available +# Try to list assigned issues +``` + +**If PM tool is GitHub**: +```bash +gh issue list --assignee @me --limit 10 2>/dev/null +``` + +**If PM tool is none**: +Skip task checking. 
+ +### Step 7: Display Session Statistics (OpenTelemetry) + +Run the OpenTelemetry session statistics script: + +```bash +# Get the skill's base directory (passed via SKILL_BASE_DIR) +SKILL_DIR="${SKILL_BASE_DIR:-$HOME/.claude/plugins/marketplaces/jitd-marketplace/skills/nav-start}" +python3 "$SKILL_DIR/scripts/otel_session_stats.py" +``` + +This script: +- **If OTel enabled**: Shows real-time metrics from Claude Code + - Real token usage (input/output/cache) + - Cache hit rate (CLAUDE.md caching performance) + - Session cost (actual USD spent) + - Active time (seconds of work) + - Context availability +- **If OTel disabled**: Shows setup instructions +- **If no metrics yet**: Shows "waiting for export" message + +**Benefits of OTel integration**: +- Real data (not file-size estimates) +- Cache performance validation +- Cost tracking for ROI measurement +- Official API (won't break on updates) + +### Step 8: Display Session Summary + +Show this formatted summary: + +``` +╔══════════════════════════════════════════════════════╗ +║ ║ +║ 🚀 Navigator Session Started ║ +║ ║ +╚══════════════════════════════════════════════════════╝ + +📖 Documentation Navigator: Loaded +🎯 Project Management: [PM tool or "Manual"] +✅ Token Optimization: Active + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +📊 DOCUMENTATION LOADED (MEASURED) +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Navigator (.agent/DEVELOPMENT-README.md): + Size: [nav_bytes] bytes = [nav_tokens] tokens + +CLAUDE.md (auto-loaded): + Size: [claude_bytes] bytes = [claude_tokens] tokens + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Total documentation: [total_tokens] tokens +Available for work: [available] tokens ([percent]%) +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +💡 On-demand loading strategy: + Load task doc when needed: +3-5k tokens + Load system doc if needed: +4-6k tokens + Load SOP if helpful: +2-3k tokens + + Total with all docs: ~[total + 15]k tokens + + vs Traditional (all upfront): ~150k tokens + Savings: ~[150 - total - 15]k tokens + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +🔹 Navigator WORKFLOW REMINDER + +1. Navigator-first loading + - ✅ Loaded: .agent/DEVELOPMENT-README.md + - Next: Load ONLY relevant task/system docs + +2. Use agents for research + - Multi-file searches: Use Task agent (saves 60-80% tokens) + - Code exploration: Use Explore agent + - NOT manual Read of many files + +3. Task documentation + - After features: Use nav-task-manager skill + - After bugs: Use nav-sop-creator skill + +4. Context management + - Run nav-compact skill after isolated sub-tasks + - Context markers save your progress + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +[MULTI-CLAUDE WORKFLOWS CHECK - v4.3.0+] + +Check if multi-Claude workflows installed: +```bash +if ! command -v navigator-multi-claude.sh &> /dev/null; then + echo "" + echo "⚡ Multi-Claude Workflows Available (v4.3.0+)" + echo "" + echo " Enable parallel AI execution for complex tasks." 
+ echo " Status: Not installed" + echo "" + echo " Install: 'Install multi-Claude workflows'" + echo " Learn more: See RELEASE-NOTES-v4.3.0.md" + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +fi +``` + +Only show this prompt: +- If plugin version >= 4.3.0 +- If scripts not installed +- Once per session (set flag in memory) + +Do NOT show if: +- Scripts already installed +- Plugin version < 4.3.0 +- User explicitly dismissed before + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +[If tasks found from PM tool, list them here] + +[If no tasks found:] +No active tasks found. What would you like to work on? +``` + +## Predefined Functions + +### scripts/otel_session_stats.py + +**Purpose**: Display real-time session statistics via OpenTelemetry + +**When to call**: After loading navigator, before presenting session summary + +**Requirements**: +- CLAUDE_CODE_ENABLE_TELEMETRY=1 (optional - shows setup if disabled) +- Metrics available from current session (shows waiting message if not) + +**Execution**: +```bash +SKILL_DIR="${SKILL_BASE_DIR:-$HOME/.claude/plugins/marketplaces/jitd-marketplace/skills/nav-start}" +python3 "$SKILL_DIR/scripts/otel_session_stats.py" +``` + +**Output**: Formatted statistics with: +- Token usage breakdown (input/output/cache) +- Cache hit rate percentage +- Session cost in USD +- Active time +- Context availability + +**Error Handling**: +- If OTel not enabled: Shows setup instructions +- If no metrics yet: Shows "waiting for export" message +- Never crashes - always displays helpful guidance + +## Reference Files + +This skill uses: +- **otel_session_stats.py**: Real-time session stats via OpenTelemetry +- **.agent/DEVELOPMENT-README.md**: Navigator content +- **.agent/.nav-config.json**: Configuration +- **.agent/.context-markers/.active**: Active marker check + +## Error Handling + +**Navigator not found**: +``` +❌ Navigator not initialized + +Run /nav:init to create .agent/ structure first. +``` + +**PM tool configured but not working**: +``` +⚠️ [PM Tool] configured but not accessible + +Check authentication or run setup guide. +``` + +**Config file malformed**: +``` +⚠️ .agent/.nav-config.json is invalid JSON + +Fix syntax or run /nav:init to regenerate. +``` + +## Success Criteria + +Session start is successful when: +- [ ] Navigator loaded successfully +- [ ] Token usage calculated and displayed +- [ ] PM tool status checked (if configured) +- [ ] User knows what to work on next +- [ ] Navigator workflow context set + +## Notes + +This skill provides the same functionality as `/nav:start` command but with: +- Natural language invocation (no need to remember `/` syntax) +- Auto-detection based on user intent +- Composable with other Navigator skills + +If user prefers manual invocation, they can still use `/nav:start` command (both work in hybrid mode). diff --git a/skills/nav-start/scripts/otel_session_stats.py b/skills/nav-start/scripts/otel_session_stats.py new file mode 100755 index 0000000..a535404 --- /dev/null +++ b/skills/nav-start/scripts/otel_session_stats.py @@ -0,0 +1,447 @@ +#!/usr/bin/env python3 +""" +Navigator Session Statistics (OpenTelemetry-powered) + +Queries real token usage from Claude Code OpenTelemetry metrics. 
+Requires CLAUDE_CODE_ENABLE_TELEMETRY=1 + +Usage: + python3 otel_session_stats.py + +Environment Variables Required: + CLAUDE_CODE_ENABLE_TELEMETRY=1 + OTEL_METRICS_EXPORTER=console (or otlp) +""" + +import os +import sys +import json +import subprocess +from typing import Dict, Optional + + +def check_otel_enabled() -> bool: + """Check if Claude Code telemetry is enabled.""" + return os.getenv("CLAUDE_CODE_ENABLE_TELEMETRY") == "1" + + +def get_otel_metrics() -> Optional[Dict]: + """ + Get OpenTelemetry metrics from Claude Code. + + Strategy: Read from OpenTelemetry SDK's metric reader if available. + + Returns: + Dict with raw metrics data or None if unavailable + """ + # Try to access metrics from OpenTelemetry SDK + try: + from opentelemetry import metrics as otel_metrics + + # Get the global meter provider + meter_provider = otel_metrics.get_meter_provider() + + # Check if metrics are available + if hasattr(meter_provider, '_sdk_config'): + # This would contain the metrics if SDK is properly configured + # For now, we don't have direct access to metric values + # They're exported to console/OTLP but not easily queryable + pass + + except ImportError: + # OpenTelemetry SDK not installed - expected in most cases + pass + + # Alternative: Check if Prometheus exporter is running + exporter_type = os.getenv("OTEL_METRICS_EXPORTER", "") + + if exporter_type == "prometheus": + # Try to query Prometheus endpoint + try: + import urllib.request + response = urllib.request.urlopen("http://localhost:9464/metrics", timeout=1) + prometheus_data = response.read().decode('utf-8') + return {"source": "prometheus", "data": prometheus_data} + except Exception: + pass + + # For console exporter, metrics go to stderr and aren't easily captured + # In a real implementation, we'd need to: + # 1. Store metrics in a shared location + # 2. Use a metrics backend (Prometheus/OTLP collector) + # 3. Query from Claude Code's internal metrics store + + return None + + +def parse_prometheus_metrics(prometheus_data: str) -> Optional[Dict]: + """ + Parse Prometheus format metrics from Claude Code. 
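+
+    Matching is by substring, so an illustrative input line such as
+        claude_code_token_usage{type="input",session_id="abc"} 1234
+    is attributed to input tokens for session "abc".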
+ + Args: + prometheus_data: Raw Prometheus metrics text + + Returns: + Parsed metrics dictionary or None + """ + # First, find the most recent session_id + current_session_id = None + session_count_max = 0 + + for line in prometheus_data.split('\n'): + # Skip comments + if line.startswith('#'): + continue + + if 'claude_code_session_count_total' in line: + parts = line.split() + if len(parts) >= 2: + try: + count = float(parts[-1]) + if count >= session_count_max: + session_count_max = count + # Extract session_id from labels + if 'session_id="' in line: + session_start = line.find('session_id="') + 12 + session_end = line.find('"', session_start) + if session_end > session_start: + current_session_id = line[session_start:session_end] + except ValueError: + # Skip lines that don't have numeric values + continue + + if not current_session_id: + # Fallback: use any session if we can't determine current + pass + + metrics = { + "input_tokens": 0, + "output_tokens": 0, + "cache_read_tokens": 0, + "cache_creation_tokens": 0, + "cost_usd": 0.0, + "active_time_seconds": 0, + "model": "unknown", + "session_id": current_session_id or "unknown" + } + + try: + for line in prometheus_data.split('\n'): + # Skip comments and empty lines + if line.startswith('#') or not line.strip(): + continue + + # Filter by current session_id for accurate stats + if current_session_id and f'session_id="{current_session_id}"' not in line: + continue + + # Parse token usage metrics + if 'claude_code_token_usage' in line and not line.startswith('#'): + parts = line.split() + if len(parts) >= 2: + value = float(parts[-1]) + + if 'type="input"' in line: + metrics["input_tokens"] += int(value) + elif 'type="output"' in line: + metrics["output_tokens"] += int(value) + elif 'type="cacheRead"' in line: + metrics["cache_read_tokens"] += int(value) + elif 'type="cacheCreation"' in line: + metrics["cache_creation_tokens"] += int(value) + + # Extract model + if 'model="' in line: + model_start = line.find('model="') + 7 + model_end = line.find('"', model_start) + if model_end > model_start: + metrics["model"] = line[model_start:model_end] + + # Parse cost metrics + elif 'claude_code_cost_usage' in line: + parts = line.split() + if len(parts) >= 2: + metrics["cost_usd"] += float(parts[-1]) + + # Parse active time + elif 'claude_code_active_time_total' in line: + parts = line.split() + if len(parts) >= 2: + metrics["active_time_seconds"] = int(float(parts[-1])) + + # Return metrics only if we have actual data + if metrics["input_tokens"] > 0 or metrics["output_tokens"] > 0: + return metrics + + # If current session has no data, try without session filter (most recent data) + if current_session_id: + # Retry without session filter + metrics_fallback = { + "input_tokens": 0, + "output_tokens": 0, + "cache_read_tokens": 0, + "cache_creation_tokens": 0, + "cost_usd": 0.0, + "active_time_seconds": 0, + "model": "unknown", + "session_id": None # No specific session (aggregate) + } + + for line in prometheus_data.split('\n'): + if line.startswith('#') or not line.strip(): + continue + + # No session filtering - aggregate all + if 'claude_code_token_usage' in line: + parts = line.split() + if len(parts) >= 2: + value = float(parts[-1]) + if 'type="input"' in line: + metrics_fallback["input_tokens"] += int(value) + elif 'type="output"' in line: + metrics_fallback["output_tokens"] += int(value) + elif 'type="cacheRead"' in line: + metrics_fallback["cache_read_tokens"] += int(value) + elif 'type="cacheCreation"' in line: + 
metrics_fallback["cache_creation_tokens"] += int(value) + + if 'model="' in line: + model_start = line.find('model="') + 7 + model_end = line.find('"', model_start) + if model_end > model_start: + metrics_fallback["model"] = line[model_start:model_end] + + elif 'claude_code_cost_usage' in line: + parts = line.split() + if len(parts) >= 2: + metrics_fallback["cost_usd"] += float(parts[-1]) + + elif 'claude_code_active_time_total' in line: + parts = line.split() + if len(parts) >= 2: + metrics_fallback["active_time_seconds"] = int(float(parts[-1])) + + if metrics_fallback["input_tokens"] > 0 or metrics_fallback["output_tokens"] > 0: + return metrics_fallback + + except Exception as e: + print(f"Error parsing Prometheus metrics: {e}", file=sys.stderr) + + return None + + +def query_session_metrics() -> Optional[Dict]: + """ + Query current session metrics from OpenTelemetry. + + Returns: + { + "input_tokens": int, + "output_tokens": int, + "cache_read_tokens": int, + "cache_creation_tokens": int, + "cost_usd": float, + "active_time_seconds": int, + "model": str + } + or None if metrics unavailable + """ + metrics_data = get_otel_metrics() + + if not metrics_data: + return None + + # Parse based on source + if metrics_data.get("source") == "prometheus": + return parse_prometheus_metrics(metrics_data.get("data", "")) + + # For console exporter, we'd need to implement JSON parsing + # This is more complex as it requires capturing stderr output + + return None + + +def display_setup_instructions(): + """Display setup instructions when OTel is not configured.""" + print("⚠️ OpenTelemetry Not Enabled") + print() + print("Navigator can show real-time session statistics with OpenTelemetry.") + print() + print("Quick Setup:") + print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") + print() + print(" # Add to ~/.zshrc or ~/.bashrc:") + print(" export CLAUDE_CODE_ENABLE_TELEMETRY=1") + print(" export OTEL_METRICS_EXPORTER=console") + print() + print(" # Then restart your shell:") + print(" source ~/.zshrc") + print() + print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") + print() + print("What you'll get:") + print(" • Real token usage (not estimates)") + print(" • Cache hit rates (CLAUDE.md caching performance)") + print(" • Session costs (actual USD spent)") + print(" • Active time tracking") + print() + print("For detailed setup: .agent/sops/integrations/opentelemetry-setup.md") + print() + + +def display_no_metrics_message(): + """Display message when OTel is enabled but no metrics available yet.""" + exporter = os.getenv("OTEL_METRICS_EXPORTER", "console") + + print("📊 OpenTelemetry Enabled") + print() + + if exporter == "console": + print("⚠️ Console exporter detected") + print() + print("Console exporter writes metrics to stderr (not queryable by this script).") + print() + print("To see formatted metrics, switch to Prometheus exporter:") + print() + print(" 1. Update ~/.zshrc:") + print(" export OTEL_METRICS_EXPORTER=prometheus") + print() + print(" 2. Restart terminal:") + print(" exec zsh") + print() + print(" 3. Start Claude Code:") + print(" claude") + print() + print(" 4. 
Run this script again:") + print(" python3 scripts/otel_session_stats.py") + print() + print("Prometheus metrics will be available at: http://localhost:9464/metrics") + else: + print(f"Exporter: {exporter}") + print() + print("Metrics export every 60 seconds by default.") + print("Continue working - stats will appear after first export.") + print() + print("For faster metrics (development):") + print(" export OTEL_METRIC_EXPORT_INTERVAL=10000 # 10 seconds") + print() + + +def display_navigator_stats(metrics: Dict): + """ + Display Navigator-optimized session statistics. + + Args: + metrics: Dictionary with session metrics from OTel + """ + print("📊 Navigator Session Statistics (Real-time via OTel)") + print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") + print() + if metrics.get("session_id"): + print(f"Session: {metrics['session_id'][:8]}...") + else: + print("⚠️ Showing cumulative stats across all recent sessions") + print() + + # Token usage breakdown + input_tokens = metrics["input_tokens"] + output_tokens = metrics["output_tokens"] + cache_read = metrics["cache_read_tokens"] + cache_creation = metrics["cache_creation_tokens"] + + # Calculate totals + total_tokens = input_tokens + output_tokens + cache_read + cache_creation + charged_tokens = input_tokens + output_tokens + + # Visual token distribution bar + bar_width = 50 + if total_tokens > 0: + input_bars = int((input_tokens / total_tokens) * bar_width) + output_bars = int((output_tokens / total_tokens) * bar_width) + cache_read_bars = int((cache_read / total_tokens) * bar_width) + cache_creation_bars = bar_width - input_bars - output_bars - cache_read_bars + + print("Token Distribution:") + print("┌" + "─" * bar_width + "┐") + bar_content = ("🟦" * input_bars + + "🟩" * output_bars + + "🟨" * cache_read_bars + + "🟧" * cache_creation_bars) + print(f"│{bar_content}│") + print("└" + "─" * bar_width + "┘") + print(" 🟦 Input 🟩 Output 🟨 Cache Read (free) 🟧 Cache Creation") + print() + + print(f"📥 Input: {input_tokens:,}") + print(f"📤 Output: {output_tokens:,}") + print(f"💾 Cache Read: {cache_read:,} (free)") + print(f"🔧 Cache Creation: {cache_creation:,}") + print() + + print(f"📊 Total Tokens: {total_tokens:,}") + print(f" ├─ Charged: {charged_tokens:,}") + print(f" └─ Free (cache): {cache_read:,}") + print() + + # Cache efficiency (if cache was used) + if cache_read > 0: + cache_percentage = (cache_read / total_tokens) * 100 + print(f"⚡ Cache Efficiency: {cache_percentage:.1f}% of total tokens") + print() + + # Cost and efficiency analysis + active_seconds = metrics['active_time_seconds'] + minutes = active_seconds // 60 + seconds = active_seconds % 60 + + print(f"💰 Session Cost: ${metrics['cost_usd']:.4f}") + print(f"⏱️ Active Time: {minutes}m {seconds}s") + + # Calculate efficiency metrics + if active_seconds > 0: + cost_per_min = (metrics['cost_usd'] / active_seconds) * 60 + tokens_per_min = (total_tokens / active_seconds) * 60 + print(f"📈 Cost Rate: ${cost_per_min:.4f}/min") + print(f"⚡ Token Rate: {int(tokens_per_min):,} tokens/min") + print() + + # Context availability (only charged tokens count toward window) + context_used = charged_tokens + total_context = 200000 + available = total_context - context_used + percent_available = int((available / total_context) * 100) + + print(f"📦 Context Window:") + print(f" ├─ Used: {context_used:,} tokens") + print(f" └─ Available: {available:,} tokens ({percent_available}%)") + print() + + print(f"🤖 Model: {metrics['model']}") + print() + 
print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") + print() + + +def main(): + """Main entry point for session statistics.""" + + # Check if OTel is enabled + if not check_otel_enabled(): + display_setup_instructions() + return 0 + + # Try to query metrics + metrics = query_session_metrics() + + if not metrics: + # OTel enabled but no metrics exported yet + display_no_metrics_message() + return 0 + + # Display real statistics + display_navigator_stats(metrics) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/skills/nav-stats/SKILL.md b/skills/nav-stats/SKILL.md new file mode 100644 index 0000000..0286a88 --- /dev/null +++ b/skills/nav-stats/SKILL.md @@ -0,0 +1,288 @@ +--- +name: nav-stats +description: Display session efficiency report showing token savings, cache performance, and optimization recommendations. Use when user asks "show my stats", "how efficient am I?", "show session metrics", or wants to see Navigator's impact. +allowed-tools: Bash, Read +version: 1.0.0 +--- + +# Navigator Session Statistics Skill + +Show real-time efficiency reporting with baseline comparisons, making Navigator's value quantifiable and shareable. + +## When to Invoke + +Invoke this skill when the user: +- Says "show my stats", "show session stats", "show metrics" +- Asks "how efficient am I?", "how much did I save?" +- Says "show my Navigator report", "efficiency report" +- Wants to see token savings or session performance +- Says "show impact", "prove Navigator works" + +**DO NOT invoke** if: +- User just started session (< 5 messages) +- Navigator not initialized in project +- User asking about specific metrics only (answer directly) + +## Execution Steps + +### Step 1: Check Navigator Initialized + +Verify Navigator is set up: + +```bash +if [ ! -f ".agent/DEVELOPMENT-README.md" ]; then + echo "❌ Navigator not initialized in this project" + echo "Run 'Initialize Navigator' first" + exit 1 +fi +``` + +### Step 2: Run Enhanced Session Stats + +Execute the enhanced session statistics script: + +```bash +# Check if enhanced script exists +if [ ! 
-f "scripts/session-stats.sh" ]; then + echo "❌ Session stats script not found" + echo "This feature requires Navigator v3.5.0+" + exit 1 +fi + +# Run stats script +bash scripts/session-stats.sh +``` + +This script outputs shell-parseable variables: +- `BASELINE_TOKENS` - Total size of all .agent/ docs +- `LOADED_TOKENS` - Actually loaded in session (estimated) +- `TOKENS_SAVED` - Difference +- `SAVINGS_PERCENT` - Percentage saved +- `EFFICIENCY_SCORE` - 0-100 score +- `CACHE_EFFICIENCY` - From OpenTelemetry +- `CONTEXT_USAGE_PERCENT` - Estimated context fill +- `TIME_SAVED_MINUTES` - Estimated time saved + +### Step 3: Calculate Efficiency Score + +Use predefined function to calculate score: + +```bash +# Extract metrics from session-stats.sh +source <(bash scripts/session-stats.sh) + +# Calculate efficiency score using predefined function +EFFICIENCY_SCORE=$(python3 skills/nav-stats/functions/efficiency_scorer.py \ + --tokens-saved-percent ${SAVINGS_PERCENT} \ + --cache-efficiency ${CACHE_EFFICIENCY} \ + --context-usage ${CONTEXT_USAGE_PERCENT}) +``` + +### Step 4: Format and Display Report + +Use predefined function to format visual report: + +```bash +# Generate formatted report +python3 skills/nav-stats/functions/report_formatter.py \ + --baseline ${BASELINE_TOKENS} \ + --loaded ${LOADED_TOKENS} \ + --saved ${TOKENS_SAVED} \ + --savings-percent ${SAVINGS_PERCENT} \ + --cache-efficiency ${CACHE_EFFICIENCY} \ + --context-usage ${CONTEXT_USAGE_PERCENT} \ + --efficiency-score ${EFFICIENCY_SCORE} \ + --time-saved ${TIME_SAVED_MINUTES} +``` + +**Output Format**: +``` +╔══════════════════════════════════════════════════════╗ +║ NAVIGATOR EFFICIENCY REPORT ║ +╚══════════════════════════════════════════════════════╝ + +📊 TOKEN USAGE +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Documentation loaded: 12,000 tokens +Baseline (all docs): 150,000 tokens +Tokens saved: 138,000 tokens (92% ↓) + +💾 CACHE PERFORMANCE +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Cache efficiency: 100.0% (perfect) + +📈 SESSION METRICS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Context usage: 35% (excellent) +Efficiency score: 94/100 (excellent) + +⏱️ TIME SAVED +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Estimated time saved: ~42 minutes + +💡 WHAT THIS MEANS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Navigator loaded 92% fewer tokens than loading all docs. +Your context window is 65% available for actual work. + +🎯 RECOMMENDATIONS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +✅ Excellent efficiency - keep using lazy-loading strategy +✅ Context usage healthy - plenty of room for work + +Share your efficiency: Take a screenshot! #ContextEfficiency +``` + +### Step 5: Add Context-Specific Recommendations + +Based on efficiency score, provide actionable advice: + +**If efficiency_score < 70**: +``` +⚠️ RECOMMENDATIONS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +⚠️ Token savings below target (70%+) +→ Check: Are you loading more docs than needed? 
+→ Tip: Use navigator to find docs, don't load all upfront + +Read more: .agent/philosophy/CONTEXT-EFFICIENCY.md +``` + +**If context_usage > 80%**: +``` +⚠️ RECOMMENDATIONS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +⚠️ Context usage high (80%+) +→ Consider: Create context marker and compact +→ Tip: Compact after completing sub-tasks + +Read more: .agent/philosophy/ANTI-PATTERNS.md +``` + +**If cache_efficiency < 80%**: +``` +⚠️ RECOMMENDATIONS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +⚠️ Cache efficiency low (<80%) +→ Check: CLAUDE.md properly configured? +→ Tip: Ensure prompt caching enabled + +Read more: .agent/philosophy/PATTERNS.md (Caching pattern) +``` + +## Predefined Functions + +### `efficiency_scorer.py` + +Calculate Navigator efficiency score (0-100) based on: +- Token savings (40 points) +- Cache efficiency (30 points) +- Context usage (30 points) + +**Usage**: +```bash +python3 skills/nav-stats/functions/efficiency_scorer.py \ + --tokens-saved-percent 92 \ + --cache-efficiency 100 \ + --context-usage 35 +``` + +**Output**: `94` (integer score) + +### `report_formatter.py` + +Format efficiency metrics into visual, shareable report. + +**Usage**: +```bash +python3 skills/nav-stats/functions/report_formatter.py \ + --baseline 150000 \ + --loaded 12000 \ + --saved 138000 \ + --savings-percent 92 \ + --cache-efficiency 100 \ + --context-usage 35 \ + --efficiency-score 94 \ + --time-saved 42 +``` + +**Output**: Formatted ASCII report (see Step 4) + +## Philosophy Integration + +**Context Engineering Principle**: Measurement validates optimization + +From `.agent/philosophy/PATTERNS.md`: +> "Measure to validate. Navigator tracks real metrics, not estimates." + +This skill proves: +- **Token savings** are real (baseline comparison) +- **Cache efficiency** works (OpenTelemetry data) +- **Context usage** is healthy (window not overloaded) +- **Time saved** is quantifiable (6s per 1k tokens) + +## User Experience + +**User says**: "Show my stats" + +**Skill displays**: +1. Visual efficiency report +2. Clear metrics (tokens, cache, context) +3. Interpretation ("What this means") +4. Actionable recommendations + +**User can**: +- Screenshot and share (#ContextEfficiency) +- Understand Navigator's impact +- Optimize workflow based on recommendations +- Validate context engineering principles + +## Example Output Scenarios + +### Scenario 1: Excellent Efficiency (Score 94) + +User following lazy-loading pattern, cache working perfectly: +- 92% token savings ✅ +- 100% cache efficiency ✅ +- 35% context usage ✅ +- Score: 94/100 + +**Recommendation**: Keep it up! Share your efficiency. + +### Scenario 2: Fair Efficiency (Score 72) + +User loading too many docs upfront: +- 65% token savings ⚠️ +- 95% cache efficiency ✅ +- 55% context usage ✅ +- Score: 72/100 + +**Recommendation**: Review lazy-loading strategy. Load docs on-demand. + +### Scenario 3: Poor Efficiency (Score 48) + +User not using Navigator patterns: +- 45% token savings ❌ +- 70% cache efficiency ⚠️ +- 85% context usage ❌ +- Score: 48/100 + +**Recommendation**: Read philosophy docs. Consider /nav:compact. Review CLAUDE.md. 
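+
+The ratings above come from the score bands implemented in `efficiency_scorer.py`
+(included later in this diff); a minimal sketch of the banding:
+
+```python
+def interpret_score(score: int) -> str:
+    """Map a 0-100 efficiency score to a rating band."""
+    if score >= 90:
+        return "excellent"  # Scenario 1 (94)
+    elif score >= 80:
+        return "good"
+    elif score >= 70:
+        return "fair"       # Scenario 2 (72)
+    else:
+        return "poor"       # Scenario 3 (48)
+```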
+ +## Success Metrics + +**After using this skill, users should**: +- Understand their efficiency score +- See quantified token savings +- Know what to improve (if anything) +- Feel motivated to share results + +**Long-term impact**: +- Users screenshot reports and share +- "Navigator saved me 138k tokens" becomes common +- Efficiency becomes visible, not abstract +- Continuous improvement through measurement + +--- + +**This skill makes Navigator's value tangible and shareable.** diff --git a/skills/nav-stats/functions/efficiency_scorer.py b/skills/nav-stats/functions/efficiency_scorer.py new file mode 100755 index 0000000..b8233ef --- /dev/null +++ b/skills/nav-stats/functions/efficiency_scorer.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +""" +Calculate Navigator efficiency score (0-100). + +Weights: +- Token savings: 40 points (85%+ = max) +- Cache efficiency: 30 points (100% = max) +- Context usage: 30 points (<40% = max, >80% = 0) +""" + +import sys +import argparse + +def calculate_efficiency_score( + tokens_saved_percent: float, + cache_efficiency: float, + context_usage_percent: float +) -> int: + """ + Calculate Navigator efficiency score (0-100). + + Args: + tokens_saved_percent: Percentage of tokens saved vs baseline (0-100) + cache_efficiency: Cache hit rate (0-100) + context_usage_percent: Percentage of context window used (0-100) + + Returns: + int: Efficiency score (0-100) + """ + # Token savings (40 points max) + # 85%+ savings = 40 points, linear scale below + token_score = min(40, (tokens_saved_percent / 85) * 40) + + # Cache efficiency (30 points max) + # 100% = 30 points, linear scale + cache_score = (cache_efficiency / 100) * 30 + + # Context usage (30 points max) + # <40% = 30 points (excellent) + # 40-80% = linear from 30 to 0 (good → fair) + # >80% = 0 points (poor - context overloaded) + if context_usage_percent < 40: + context_score = 30 + elif context_usage_percent <= 80: + # Linear decay from 30 (at 40%) to 0 (at 80%) + context_score = 30 - ((context_usage_percent - 40) / 40) * 30 + else: + context_score = 0 + + total_score = int(token_score + cache_score + context_score) + + # Ensure score is in valid range + return max(0, min(100, total_score)) + +def interpret_score(score: int) -> str: + """ + Interpret efficiency score into human-readable rating. 
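+
+    Example: 94 -> "excellent", 85 -> "good", 72 -> "fair", 48 -> "poor".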
+ + Args: + score: Efficiency score (0-100) + + Returns: + str: Rating (excellent, good, fair, poor) + """ + if score >= 90: + return "excellent" + elif score >= 80: + return "good" + elif score >= 70: + return "fair" + else: + return "poor" + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Calculate Navigator efficiency score" + ) + parser.add_argument( + "--tokens-saved-percent", + type=float, + required=True, + help="Percentage of tokens saved vs baseline (0-100)" + ) + parser.add_argument( + "--cache-efficiency", + type=float, + required=True, + help="Cache hit rate percentage (0-100)" + ) + parser.add_argument( + "--context-usage", + type=float, + required=True, + help="Context window usage percentage (0-100)" + ) + parser.add_argument( + "--verbose", + action="store_true", + help="Show detailed breakdown" + ) + + args = parser.parse_args() + + score = calculate_efficiency_score( + args.tokens_saved_percent, + args.cache_efficiency, + args.context_usage + ) + + if args.verbose: + rating = interpret_score(score) + print(f"Efficiency Score: {score}/100 ({rating})") + print(f" Token savings: {args.tokens_saved_percent}%") + print(f" Cache efficiency: {args.cache_efficiency}%") + print(f" Context usage: {args.context_usage}%") + else: + # Output just the score (parseable) + print(score) + + sys.exit(0) diff --git a/skills/nav-stats/functions/report_formatter.py b/skills/nav-stats/functions/report_formatter.py new file mode 100755 index 0000000..65af4b4 --- /dev/null +++ b/skills/nav-stats/functions/report_formatter.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +""" +Format Navigator efficiency metrics into visual, shareable report. +""" + +import sys +import argparse + +def format_number(num: int) -> str: + """Format number with commas for readability.""" + return f"{num:,}" + +def interpret_score(score: int) -> str: + """Get rating label for score.""" + if score >= 90: + return "excellent" + elif score >= 80: + return "good" + elif score >= 70: + return "fair" + else: + return "poor" + +def get_recommendations( + savings_percent: int, + cache_efficiency: float, + context_usage: int, + efficiency_score: int +) -> list: + """ + Generate actionable recommendations based on metrics. 
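+
+    Thresholds (mirroring the checks below): warnings for savings < 70%,
+    cache efficiency < 80%, or context usage > 80%; positive notes for
+    savings >= 85%, cache >= 95%, or context usage < 40%.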
+ + Returns: + list: List of recommendation strings + """ + recs = [] + + # Check token savings + if savings_percent < 70: + recs.append(("⚠️", "Token savings below target (70%+)")) + recs.append(("→", "Check: Are you loading more docs than needed?")) + recs.append(("→", "Tip: Use navigator to find docs, don't load all upfront")) + recs.append(("", "Read more: .agent/philosophy/CONTEXT-EFFICIENCY.md")) + elif savings_percent >= 85: + recs.append(("✅", "Excellent token savings - keep using lazy-loading strategy")) + + # Check cache efficiency + if cache_efficiency < 80: + recs.append(("⚠️", "Cache efficiency low (<80%)")) + recs.append(("→", "Check: CLAUDE.md properly configured?")) + recs.append(("→", "Tip: Ensure prompt caching enabled")) + recs.append(("", "Read more: .agent/philosophy/PATTERNS.md (Caching pattern)")) + elif cache_efficiency >= 95: + recs.append(("✅", "Cache working perfectly - no optimization needed")) + + # Check context usage + if context_usage > 80: + recs.append(("⚠️", "Context usage high (80%+)")) + recs.append(("→", "Consider: Create context marker and compact")) + recs.append(("→", "Tip: Compact after completing sub-tasks")) + recs.append(("", "Read more: .agent/philosophy/ANTI-PATTERNS.md")) + elif context_usage < 40: + recs.append(("✅", "Context usage healthy - plenty of room for work")) + + # Default excellent message + if not recs and efficiency_score >= 90: + recs.append(("✅", "Excellent efficiency - keep it up!")) + recs.append(("", "")) + recs.append(("", "Share your efficiency: Take a screenshot! #ContextEfficiency")) + + return recs + +def format_report( + baseline: int, + loaded: int, + saved: int, + savings_percent: int, + cache_efficiency: float, + context_usage: int, + efficiency_score: int, + time_saved: int +) -> str: + """ + Format efficiency report. + + Returns: + str: Formatted report + """ + rating = interpret_score(efficiency_score) + recs = get_recommendations(savings_percent, cache_efficiency, context_usage, efficiency_score) + + report = f"""╔══════════════════════════════════════════════════════╗ +║ NAVIGATOR EFFICIENCY REPORT ║ +╚══════════════════════════════════════════════════════╝ + +📊 TOKEN USAGE +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Documentation loaded: {format_number(loaded):>12} tokens +Baseline (all docs): {format_number(baseline):>12} tokens +Tokens saved: {format_number(saved):>12} tokens ({savings_percent}% ↓) + +💾 CACHE PERFORMANCE +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Cache efficiency: {cache_efficiency:>16.1f}% ({"perfect" if cache_efficiency >= 99 else "good" if cache_efficiency >= 90 else "fair"}) + +📈 SESSION METRICS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Context usage: {context_usage:>16}% ({rating}) +Efficiency score: {efficiency_score:>12}/100 ({rating}) + +⏱️ TIME SAVED +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Estimated time saved: {time_saved:>13} minutes + +💡 WHAT THIS MEANS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Navigator loaded {savings_percent}% fewer tokens than loading all docs. +Your context window is {100 - context_usage}% available for actual work. 
+""" + + # Add recommendations section + if recs: + report += "\n🎯 RECOMMENDATIONS\n" + report += "━" * 54 + "\n" + for icon, text in recs: + if icon: + report += f"{icon} {text}\n" + else: + report += f"{text}\n" + + return report + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Format Navigator efficiency report" + ) + parser.add_argument("--baseline", type=int, required=True, help="Baseline tokens (all docs)") + parser.add_argument("--loaded", type=int, required=True, help="Actually loaded tokens") + parser.add_argument("--saved", type=int, required=True, help="Tokens saved") + parser.add_argument("--savings-percent", type=int, required=True, help="Savings percentage") + parser.add_argument("--cache-efficiency", type=float, required=True, help="Cache efficiency %") + parser.add_argument("--context-usage", type=int, required=True, help="Context usage %") + parser.add_argument("--efficiency-score", type=int, required=True, help="Efficiency score (0-100)") + parser.add_argument("--time-saved", type=int, required=True, help="Time saved (minutes)") + + args = parser.parse_args() + + report = format_report( + args.baseline, + args.loaded, + args.saved, + args.savings_percent, + args.cache_efficiency, + args.context_usage, + args.efficiency_score, + args.time_saved + ) + + print(report) + sys.exit(0) diff --git a/skills/nav-task/SKILL.md b/skills/nav-task/SKILL.md new file mode 100644 index 0000000..282c5e9 --- /dev/null +++ b/skills/nav-task/SKILL.md @@ -0,0 +1,441 @@ +--- +name: nav-task +description: Manage Navigator task documentation - create implementation plans, archive completed tasks, update task index. Use when user starts new feature, completes work, or says "document this feature". +allowed-tools: Read, Write, Edit, Bash +version: 1.0.0 +--- + +# Navigator Task Manager Skill + +Create and manage task documentation - implementation plans that capture what was built, how, and why. + +## When to Invoke + +Invoke this skill when the user: +- Says "document this feature", "archive this task" +- Says "create task doc for...", "document what I built" +- Completes a feature and mentions "done", "finished", "complete" +- Starts new feature and says "create implementation plan" + +**DO NOT invoke** if: +- User is asking about existing tasks (use Read, not creation) +- Creating SOPs (that's nav-sop skill) +- Updating system docs (different skill) + +## Execution Steps + +### Step 1: Determine Task ID + +**If user provided task ID** (e.g., "TASK-01", "GH-123"): +- Use their ID directly + +**If no ID provided**: +- Read `.agent/.nav-config.json` for `task_prefix` +- Check existing tasks: `ls .agent/tasks/*.md` +- Generate next number: `{prefix}-{next-number}` +- Example: Last task is TASK-05, create TASK-06 + +### Step 2: Determine Action (Create vs Archive) + +**Creating new task** (starting feature): +``` +User: "Create task doc for OAuth implementation" +→ Action: CREATE +→ Generate empty implementation plan template +``` + +**Archiving completed task** (feature done): +``` +User: "Document this OAuth feature I just built" +→ Action: ARCHIVE +→ Generate implementation plan from conversation +``` + +### Step 3A: Create New Task (If Starting Feature) + +Generate task document from template: + +```markdown +# TASK-{XX}: {Feature Name} + +**Status**: 🚧 In Progress +**Created**: {YYYY-MM-DD} +**Assignee**: {from PM tool or "Manual"} + +--- + +## Context + +**Problem**: +[What problem does this solve?] + +**Goal**: +[What are we building?] 
+ +**Success Criteria**: +- [ ] [Specific measurable outcome] +- [ ] [Another outcome] + +--- + +## Implementation Plan + +### Phase 1: {Name} +**Goal**: [What this phase accomplishes] + +**Tasks**: +- [ ] [Specific task] +- [ ] [Another task] + +**Files**: +- `path/to/file.ts` - [Purpose] + +### Phase 2: {Name} +... + +--- + +## Technical Decisions + +| Decision | Options Considered | Chosen | Reasoning | +|----------|-------------------|--------|-----------| +| [What] | [Option A, B, C] | [Chosen] | [Why] | + +--- + +## Dependencies + +**Requires**: +- [ ] {prerequisite task or setup} + +**Blocks**: +- [ ] {tasks waiting on this} + +--- + +## Notes + +[Any additional context, links, references] + +--- + +## Completion Checklist + +Before marking complete: +- [ ] Implementation finished +- [ ] Tests written and passing +- [ ] Documentation updated +- [ ] Code reviewed (if team) +- [ ] Deployed/merged + +--- + +**Last Updated**: {YYYY-MM-DD} +``` + +Save to: `.agent/tasks/TASK-{XX}-{slug}.md` + +### Step 3B: Archive Completed Task (If Feature Done) + +Generate task document from conversation: + +1. **Analyze conversation** (last 30-50 messages): + - What was built? + - How was it implemented? + - What decisions were made? + - What files were modified? + +2. **Generate implementation plan**: + +```markdown +# TASK-{XX}: {Feature Name} + +**Status**: ✅ Completed +**Created**: {YYYY-MM-DD} +**Completed**: {YYYY-MM-DD} + +--- + +## What Was Built + +[1-2 paragraph summary of the feature] + +--- + +## Implementation + +### Phase 1: {Actual phase completed} +**Completed**: {Date} + +**Changes**: +- Created `src/auth/oauth.ts` - OAuth provider integration +- Modified `src/routes/auth.ts` - Added login/logout endpoints +- Updated `src/config/passport.ts` - Passport configuration + +**Key Code**: +```typescript +// Example of key implementation +export const oauthLogin = async (req, res) => { + // Implementation details +}; +``` + +### Phase 2: {Next phase} +... 
+ +--- + +## Technical Decisions + +| Decision | Options | Chosen | Reasoning | +|----------|---------|--------|-----------| +| Auth library | next-auth, passport.js, auth0 | passport.js | Better control over OAuth flow, smaller bundle | +| Token storage | localStorage, cookies, sessionStorage | httpOnly cookies | XSS protection, automatic transmission | +| Session store | memory, Redis, PostgreSQL | Redis | Fast, scalable, separate from DB | + +--- + +## Files Modified + +- `src/auth/oauth.ts` (created) - OAuth integration +- `src/routes/auth.ts` (modified) - Added auth endpoints +- `src/config/passport.ts` (created) - Passport setup +- `tests/auth.test.ts` (created) - Auth tests +- `README.md` (updated) - OAuth setup instructions + +--- + +## Challenges & Solutions + +**Challenge**: OAuth callback URL mismatch +- **Problem**: Redirects failed in production +- **Solution**: Added environment-specific callback URLs +- **Commit**: abc1234 + +**Challenge**: Session persistence across restarts +- **Problem**: Users logged out on server restart +- **Solution**: Redis session store +- **Commit**: def5678 + +--- + +## Testing + +- ✅ Unit tests: `src/auth/*.test.ts` (15 tests, 100% coverage) +- ✅ Integration tests: OAuth flow end-to-end +- ✅ Manual testing: Tested with Google, GitHub providers + +--- + +## Documentation + +- ✅ README updated with OAuth setup instructions +- ✅ Environment variables documented in `.env.example` +- ✅ API endpoints documented in `docs/api.md` + +--- + +## Related + +**SOPs Created**: +- `.agent/sops/integrations/oauth-setup.md` + +**System Docs Updated**: +- `.agent/system/project-architecture.md` (added auth section) + +--- + +**Completed**: {YYYY-MM-DD} +**Implementation Time**: {X hours/days} +``` + +Save to: `.agent/tasks/TASK-{XX}-{slug}.md` + +### Step 4: Update Navigator Index + +Edit `.agent/DEVELOPMENT-README.md` to add task to index: + +```markdown +## Active Tasks + +- **TASK-{XX}**: {Feature Name} (Status: In Progress/Completed) + - File: `.agent/tasks/TASK-{XX}-{slug}.md` + - Started: {Date} + - [Completed: {Date}] +``` + +Keep index organized (active tasks first, completed below). + +### Step 5: Update PM Tool (If Configured) + +**If PM tool is Linear**: +```typescript +create_comment({ + issueId: "TASK-XX", + body: "📚 Implementation plan documented: .agent/tasks/TASK-XX-feature.md" +}) +``` + +**If PM tool is GitHub**: +```bash +gh issue comment {ISSUE-NUMBER} -b "📚 Implementation plan: .agent/tasks/TASK-XX-feature.md" +``` + +**If PM tool is none**: +Skip PM update. + +### Step 6: Confirm Success + +Show completion message: + +``` +✅ Task documentation created! + +Task: TASK-{XX} - {Feature Name} +File: .agent/tasks/TASK-{XX}-{slug}.md +Size: {X} KB (~{Y} tokens) + +📋 Contains: +- Implementation phases +- Technical decisions +- Files modified +- [If archived: Challenges & solutions] +- [If archived: Testing & documentation] + +🔗 Navigator index updated +[If PM tool: PM tool comment added] + +To reference later: +Read .agent/tasks/TASK-{XX}-{slug}.md +``` + +## Task Document Template Structure + +### For New Tasks (Planning) +1. Context (problem/goal) +2. Implementation plan (phases) +3. Technical decisions (to be made) +4. Dependencies +5. Completion checklist + +### For Completed Tasks (Archive) +1. What was built (summary) +2. Implementation (actual phases) +3. Technical decisions (what was chosen) +4. Files modified +5. Challenges & solutions +6. 
Testing & documentation
+
+## Common Use Cases
+
+### Starting New Feature
+```
+User: "Create task doc for payments integration"
+→ Generates TASK-07-payments.md
+→ Empty template for planning
+→ User fills in as they work
+```
+
+### Completing Feature
+```
+User: "Document the auth feature I just finished"
+→ Analyzes conversation
+→ Generates TASK-06-auth.md
+→ Complete implementation record
+→ Archives for future reference
+```
+
+### Mid-Feature Update
+```
+User: "Update TASK-05 with OAuth decision"
+→ Reads existing TASK-05-auth.md
+→ Adds to Technical Decisions section
+→ Preserves rest of document
+```
+
+## Error Handling
+
+**Navigator not initialized**:
+```
+❌ .agent/tasks/ directory not found
+
+Run /nav:init to set up Navigator structure first.
+```
+
+**Task ID already exists (for creation)**:
+```
+⚠️ TASK-{XX} already exists
+
+Options:
+1. Read existing task
+2. Use a different ID
+3. Archive/overwrite existing
+
+Your choice [1-3]:
+```
+
+**Insufficient context to archive**:
+```
+⚠️ Not enough conversation context to generate implementation plan
+
+Consider:
+- Provide more details about what was built
+- Manually create task doc
+- Skip archiving
+
+Continue with template? [y/N]:
+```
+
+## Success Criteria
+
+Task documentation is successful when:
+- [ ] Task file created in `.agent/tasks/`
+- [ ] Filename follows convention: `TASK-{XX}-{slug}.md`
+- [ ] Contains all required sections
+- [ ] Navigator index updated
+- [ ] PM tool updated (if configured)
+- [ ] User can reference task later
+
+## Predefined Functions
+
+**task_id_generator.py**: Generate next sequential task ID
+- Input: `.agent` directory path, task prefix
+- Output: Next ID (e.g., TASK-10)
+
+**task_formatter.py**: Generate formatted task markdown
+- Input: Title, task ID, priority, complexity, status
+- Output: Formatted task markdown
+
+**index_updater.py**: Update DEVELOPMENT-README.md task index
+- Input: Task filename, status, description
+- Output: Updated index
+
+## Best Practices
+
+**Good task slugs**:
+- `oauth-implementation` (descriptive)
+- `stripe-payment-flow` (clear purpose)
+- `user-profile-page` (specific feature)
+
+**Bad task slugs**:
+- `feature` (too vague)
+- `fix` (not descriptive)
+- `task1` (meaningless)
+
+**When to create task docs**:
+- ✅ Starting a major feature (> 1 day of work)
+- ✅ Completing any feature (archive)
+- ✅ Complex implementation (capture decisions)
+- ❌ Tiny bug fixes (use SOPs instead)
+- ❌ Exploratory work (wait until the direction is clear)
+
+## Notes
+
+Task docs are **living documents**:
+- Created when starting a feature (template)
+- Updated during implementation (decisions)
+- Finalized when complete (archive)
+
+They serve as:
+- Planning tool (before implementation)
+- Progress tracker (during implementation)
+- Historical record (after completion)
+- Knowledge base (for team/future)
+
+This skill provides the same functionality as the `/nav:doc feature` command, with natural language invocation.
diff --git a/skills/nav-task/functions/index_updater.py b/skills/nav-task/functions/index_updater.py
new file mode 100755
index 0000000..c3e6931
--- /dev/null
+++ b/skills/nav-task/functions/index_updater.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+"""
+Update DEVELOPMENT-README.md task index with new task entry.
+"""
+
+import os
+import sys
+import re
+from datetime import datetime
+
+def update_task_index(task_file, status="Planning", description=""):
+    """
+    Add task entry to DEVELOPMENT-README.md index.
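+
+    Example:
+        update_task_index("TASK-10-feature-name.md", status="In Progress",
+                          description="OAuth provider integration")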
+ + Args: + task_file: Task filename (e.g., TASK-10-feature-name.md) + status: Task status (Planning, In Progress, Completed) + description: Short task description + + Returns: + bool: True if updated successfully + """ + readme_path = ".agent/DEVELOPMENT-README.md" + + if not os.path.exists(readme_path): + print(f"Error: {readme_path} not found", file=sys.stderr) + return False + + # Extract task ID and title from filename + match = re.match(r'(TASK-\d+)-(.*?)\.md', task_file) + if not match: + print(f"Error: Invalid task filename format: {task_file}", file=sys.stderr) + return False + + task_id = match.group(1) + task_slug = match.group(2).replace('-', ' ').title() + + # Read current README + with open(readme_path, 'r') as f: + content = f.read() + + # Find the task index section + task_section_pattern = r'(### Implementation Plans \(`tasks/`\).*?)(###|\Z)' + task_section_match = re.search(task_section_pattern, content, re.DOTALL) + + if not task_section_match: + print("Error: Could not find task index section", file=sys.stderr) + return False + + # Create new task entry + status_emoji = { + "Planning": "📋", + "In Progress": "🚧", + "Completed": "✅" + }.get(status, "📋") + + today = datetime.now().strftime("%Y-%m-%d") + + new_entry = f""" +#### [{task_id}: {task_slug}](./tasks/{task_file}) +**Status**: {status_emoji} {status} +**Created**: {today} + +**What**: {description or "Description pending"} + +--- +""" + + # Insert before the next section marker + task_section = task_section_match.group(1) + rest_of_doc = content[task_section_match.end(1):] + + # Add new entry at the end of task section + updated_section = task_section.rstrip() + "\n" + new_entry + updated_content = content[:task_section_match.start(1)] + updated_section + rest_of_doc + + # Write back + with open(readme_path, 'w') as f: + f.write(updated_content) + + print(f"✅ Added {task_id} to DEVELOPMENT-README.md index") + return True + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Usage: index_updater.py [status] [description]", file=sys.stderr) + sys.exit(1) + + task_file = sys.argv[1] + status = sys.argv[2] if len(sys.argv) > 2 else "Planning" + description = sys.argv[3] if len(sys.argv) > 3 else "" + + success = update_task_index(task_file, status, description) + sys.exit(0 if success else 1) diff --git a/skills/nav-task/functions/task_formatter.py b/skills/nav-task/functions/task_formatter.py new file mode 100755 index 0000000..40cf3cd --- /dev/null +++ b/skills/nav-task/functions/task_formatter.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" +Format task markdown with proper structure and metadata. +""" + +import sys +import argparse +from datetime import datetime + +def format_task(title, task_id, priority="Medium", complexity="Medium", status="Planning"): + """ + Generate formatted task markdown. + + Args: + title: Task title + task_id: Task ID (e.g., TASK-10) + priority: Priority level (Low, Medium, High, Critical) + complexity: Complexity level (Low, Medium, High) + status: Task status (Planning, In Progress, Completed) + + Returns: + str: Formatted markdown content + """ + today = datetime.now().strftime("%Y-%m-%d") + + template = f"""# {task_id}: {title} + +**Created**: {today} +**Status**: {status} +**Priority**: {priority} +**Complexity**: {complexity} + +--- + +## Context + +[Describe the problem, feature request, or improvement needed] + +**Problem**: [What needs to be solved?] + +**Goal**: [What should be achieved?] 
+ +--- + +## Implementation Plan + +### Phase 1: [Phase Name] + +**Tasks**: +- [ ] Task 1 +- [ ] Task 2 +- [ ] Task 3 + +**Expected Outcome**: [What this phase delivers] + +--- + +## Success Metrics + +**Functionality**: +- [ ] Feature works as expected +- [ ] Edge cases handled +- [ ] Error handling implemented + +**Quality**: +- [ ] Tests written and passing +- [ ] Documentation updated +- [ ] Code reviewed + +**Token Efficiency** (if applicable): +- [ ] Token usage measured +- [ ] Optimization targets met +- [ ] No context pollution + +--- + +## Testing Plan + +1. **Unit tests**: [What to test] +2. **Integration tests**: [What to test] +3. **Manual testing**: [Steps to verify] + +--- + +## Related Tasks + +- [Link to related tasks] + +--- + +## Notes + +- [Additional context, decisions, or considerations] + +--- + +**Task created**: {today} +**Priority**: {priority} +**Effort**: {complexity} +""" + return template + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Format Navigator task markdown") + parser.add_argument("--title", required=True, help="Task title") + parser.add_argument("--id", required=True, help="Task ID (e.g., TASK-10)") + parser.add_argument("--priority", default="Medium", choices=["Low", "Medium", "High", "Critical"]) + parser.add_argument("--complexity", default="Medium", choices=["Low", "Medium", "High"]) + parser.add_argument("--status", default="Planning", choices=["Planning", "In Progress", "Completed"]) + + args = parser.parse_args() + + output = format_task( + title=args.title, + task_id=args.id, + priority=args.priority, + complexity=args.complexity, + status=args.status + ) + + print(output) diff --git a/skills/nav-task/functions/task_id_generator.py b/skills/nav-task/functions/task_id_generator.py new file mode 100755 index 0000000..35fa246 --- /dev/null +++ b/skills/nav-task/functions/task_id_generator.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +""" +Generate next sequential TASK-XX ID by scanning existing task files. +""" + +import os +import re +import sys + +def get_next_task_id(agent_dir=".agent", prefix="TASK"): + """ + Scan tasks/ directory and return next available TASK-XX ID. 
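+
+    Example:
+        With TASK-01-auth.md and TASK-02-api.md present, returns "TASK-03".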
+ + Args: + agent_dir: Path to .agent directory (default: .agent) + prefix: Task ID prefix (default: TASK) + + Returns: + str: Next task ID (e.g., "TASK-10") + """ + tasks_dir = os.path.join(agent_dir, "tasks") + + if not os.path.exists(tasks_dir): + return f"{prefix}-01" + + # Find all task files matching pattern TASK-XX-*.md + task_pattern = re.compile(rf"{prefix}-(\d+)-.*\.md") + task_numbers = [] + + for filename in os.listdir(tasks_dir): + if filename == "archive": # Skip archive directory + continue + + match = task_pattern.match(filename) + if match: + task_numbers.append(int(match.group(1))) + + if not task_numbers: + return f"{prefix}-01" + + # Get next sequential number + next_num = max(task_numbers) + 1 + return f"{prefix}-{next_num:02d}" + +if __name__ == "__main__": + # Support optional arguments + agent_dir = sys.argv[1] if len(sys.argv) > 1 else ".agent" + prefix = sys.argv[2] if len(sys.argv) > 2 else "TASK" + + next_id = get_next_task_id(agent_dir, prefix) + print(next_id) diff --git a/skills/nav-update-claude/functions/claude_updater.py b/skills/nav-update-claude/functions/claude_updater.py new file mode 100755 index 0000000..79cba50 --- /dev/null +++ b/skills/nav-update-claude/functions/claude_updater.py @@ -0,0 +1,448 @@ +#!/usr/bin/env python3 +""" +Navigator CLAUDE.md Updater +Extracts customizations and generates updated CLAUDE.md with v3.1 template +""" + +import sys +import json +import re +from pathlib import Path +from typing import Dict, List, Optional +from urllib import request +from urllib.error import URLError, HTTPError + +def get_plugin_version() -> Optional[str]: + """ + Get installed Navigator plugin version from plugin.json. + + Returns: + Version string (e.g., "4.3.0") or None if not found + """ + possible_paths = [ + Path.home() / '.claude' / 'plugins' / 'marketplaces' / 'navigator-marketplace' / '.claude-plugin' / 'plugin.json', + Path.home() / '.config' / 'claude' / 'plugins' / 'navigator' / '.claude-plugin' / 'plugin.json', + Path.home() / '.claude' / 'plugins' / 'navigator' / '.claude-plugin' / 'plugin.json', + ] + + for path in possible_paths: + if path.exists(): + try: + with open(path, 'r') as f: + data = json.load(f) + return data.get('version') + except (json.JSONDecodeError, FileNotFoundError, PermissionError): + continue + + return None + +def fetch_template_from_github(version: Optional[str] = None) -> Optional[str]: + """ + Fetch CLAUDE.md template from GitHub releases. + + Priority: + 1. Specified version (e.g., 'v4.3.0' or '4.3.0') + 2. Detected plugin version + 3. 
Returns None (caller should use bundled fallback) + + Args: + version: Specific version to fetch (optional) + + Returns: + Template content as string, or None if fetch fails + """ + if not version: + version = get_plugin_version() + + if not version: + return None + + # Ensure version has 'v' prefix for GitHub URL + if not version.startswith('v'): + version = f'v{version}' + + github_url = f"https://raw.githubusercontent.com/alekspetrov/navigator/{version}/templates/CLAUDE.md" + + try: + req = request.Request(github_url) + req.add_header('User-Agent', 'Navigator-CLAUDE-Updater') + + with request.urlopen(req, timeout=10) as response: + if response.status == 200: + content = response.read().decode('utf-8') + return content + except (URLError, HTTPError, TimeoutError) as e: + # Silent fail - caller will use bundled template + print(f"⚠️ Could not fetch template from GitHub ({version}): {e}", file=sys.stderr) + print(f" Falling back to bundled template", file=sys.stderr) + return None + + return None + +def get_template_path(bundled_template_dir: str, version: Optional[str] = None) -> tuple[str, bool]: + """ + Get template path, preferring GitHub source over bundled. + + Args: + bundled_template_dir: Path to bundled templates directory + version: Optional specific version to fetch + + Returns: + Tuple of (template_path_or_content, is_from_github) + """ + # Try GitHub first + github_template = fetch_template_from_github(version) + + if github_template: + # Write to temporary file + import tempfile + temp_file = tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) + temp_file.write(github_template) + temp_file.close() + + detected_version = version or get_plugin_version() + print(f"✓ Using template from GitHub ({detected_version})", file=sys.stderr) + return (temp_file.name, True) + + # Fallback to bundled + bundled_path = Path(bundled_template_dir) / 'CLAUDE.md' + if bundled_path.exists(): + bundled_version = get_plugin_version() or "unknown" + print(f"✓ Using bundled template (v{bundled_version})", file=sys.stderr) + return (str(bundled_path), False) + + raise FileNotFoundError(f"No template found (GitHub failed, bundled not at {bundled_path})") + +def extract_section(content: str, header: str, next_headers: List[str]) -> Optional[str]: + """Extract content between header and next section header""" + # Find header (supports ## or # with various markdown formats) + header_pattern = r'^#{1,2}\s+' + re.escape(header) + r'.*?$' + match = re.search(header_pattern, content, re.MULTILINE | re.IGNORECASE) + + if not match: + return None + + start = match.end() + + # Find next header + next_pattern = r'^#{1,2}\s+(' + '|'.join(re.escape(h) for h in next_headers) + r').*?$' + next_match = re.search(next_pattern, content[start:], re.MULTILINE | re.IGNORECASE) + + if next_match: + end = start + next_match.start() + else: + end = len(content) + + section = content[start:end].strip() + return section if section else None + +def extract_customizations(claude_md_path: str) -> Dict: + """Extract project-specific customizations from CLAUDE.md""" + + with open(claude_md_path, 'r', encoding='utf-8') as f: + content = f.read() + + customizations = { + "project_name": "", + "description": "", + "tech_stack": [], + "code_standards": [], + "forbidden_actions": [], + "pm_tool": "none", + "custom_sections": {} + } + + # Extract project name (first # header) + title_match = re.search(r'^#\s+(.+?)\s*-\s*Claude Code Configuration', content, re.MULTILINE) + if title_match: + customizations["project_name"] = 
title_match.group(1).strip() + + # Extract description from Context section + context = extract_section(content, "Context", [ + "Navigator Quick Start", "Quick Start", "Project-Specific", "Code Standards", + "Forbidden Actions", "Documentation", "Project Management" + ]) + + if context: + # Extract brief description (text before tech stack, excluding brackets) + lines = context.split('\n') + desc_lines = [] + for line in lines: + line = line.strip() + if line and not line.startswith('**Tech Stack') and not line.startswith('['): + desc_lines.append(line) + if line.startswith('**Tech Stack'): + break + if desc_lines: + customizations["description"] = ' '.join(desc_lines) + + # Extract tech stack + tech_match = re.search(r'\*\*Tech Stack\*\*:\s*(.+?)(?:\n|$)', context) + if tech_match: + tech_text = tech_match.group(1).strip() + # Remove brackets and split by comma + tech_text = re.sub(r'\[|\]', '', tech_text) + customizations["tech_stack"] = [t.strip() for t in tech_text.split(',')] + + # Extract code standards + standards_section = extract_section(content, "Project-Specific Code Standards", [ + "Forbidden", "Documentation", "Project Management", "Configuration", + "Commit Guidelines", "Success Metrics" + ]) + + if not standards_section: + standards_section = extract_section(content, "Code Standards", [ + "Forbidden", "Documentation", "Project Management", "Configuration" + ]) + + if standards_section: + # Extract custom rules (lines that aren't in default template) + default_rules = [ + "KISS, DRY, SOLID", + "TypeScript", + "Strict mode", + "Line Length", + "Max 100", + "Testing", + "Framework-Specific", + "General Standards", + "Architecture" + ] + + lines = standards_section.split('\n') + for line in lines: + line = line.strip() + # Skip empty lines, headers, and default rules + if not line or line.startswith('#') or line.startswith('**'): + continue + # Check if it's a custom rule + is_default = any(rule in line for rule in default_rules) + if not is_default: + if line.startswith('-') or line.startswith('*'): + customizations["code_standards"].append(line.lstrip('-*').strip()) + elif ':' in line: # Format like "Custom rule: Always use hooks" + customizations["code_standards"].append(line) + + # Extract forbidden actions + forbidden_section = extract_section(content, "Forbidden Actions", [ + "Documentation", "Project Management", "Configuration", + "Commit Guidelines", "Success Metrics" + ]) + + if forbidden_section: + # Extract custom forbidden actions (not in default template) + default_forbidden = [ + "NEVER wait for explicit commit", + "NEVER leave tickets open", + "NEVER skip documentation", + "NEVER load all `.agent/`", + "NEVER load all .agent", + "NEVER skip reading DEVELOPMENT-README", + "No Claude Code mentions", + "No package.json modifications", + "Never commit secrets", + "Don't delete tests", + "NEVER skip tests" + ] + + lines = forbidden_section.split('\n') + for line in lines: + line = line.strip() + # Skip empty lines and headers + if not line or line.startswith('#') or line.startswith('**'): + continue + if line.startswith('❌') or line.startswith('-'): + action = line.lstrip('❌- ').strip() + # Check if it's truly custom + is_default = any(df in action for df in default_forbidden) + if action and not is_default: + # Remove any leading emoji that might remain + action = action.lstrip('❌ ') + customizations["forbidden_actions"].append(action) + + # Extract PM tool configuration + pm_section = extract_section(content, "Project Management", [ + "Configuration", "Commit 
Guidelines", "Success Metrics" + ]) + + if pm_section: + # Look for configured tool + tool_match = re.search(r'\*\*Configured Tool\*\*:\s*(\w+)', pm_section, re.IGNORECASE) + if tool_match: + tool = tool_match.group(1).lower() + if tool in ['linear', 'github', 'jira', 'gitlab']: + customizations["pm_tool"] = tool + + # Extract custom sections (not in standard template) + standard_sections = [ + "Context", "Navigator", "Quick Start", "Code Standards", + "Project-Specific Code Standards", "Forbidden Actions", + "Documentation Structure", "Project Management", + "Configuration", "Commit Guidelines", "Success Metrics" + ] + + # Find all ## headers + headers = re.findall(r'^##\s+(.+?)$', content, re.MULTILINE) + for header in headers: + if header.strip() not in standard_sections: + section_content = extract_section(content, header, standard_sections + headers) + if section_content: + customizations["custom_sections"][header.strip()] = section_content + + return customizations + +def generate_updated_claude_md(customizations: Dict, template_path: str, output_path: str): + """Generate updated CLAUDE.md using v3.1 template and customizations""" + + with open(template_path, 'r', encoding='utf-8') as f: + template = f.read() + + # Replace project name + if customizations["project_name"]: + template = template.replace('[Project Name]', customizations["project_name"]) + + # Replace description + if customizations["description"]: + template = template.replace( + '[Brief project description - explain what this project does]', + customizations["description"] + ) + + # Replace tech stack + if customizations["tech_stack"]: + tech_stack = ', '.join(customizations["tech_stack"]) + template = template.replace( + '[List your technologies, e.g., Next.js, TypeScript, PostgreSQL]', + tech_stack + ) + + # Append custom code standards + if customizations["code_standards"]: + standards_marker = "[Add project-specific violations here]" + if standards_marker in template: + custom_standards = "\n\n### Additional Project Standards\n\n" + for standard in customizations["code_standards"]: + custom_standards += f"- {standard}\n" + template = template.replace(standards_marker, custom_standards + "\n" + standards_marker) + + # Append custom forbidden actions + if customizations["forbidden_actions"]: + forbidden_marker = "[Add project-specific violations here]" + if forbidden_marker in template: + custom_forbidden = "\n### Additional Forbidden Actions\n\n" + for action in customizations["forbidden_actions"]: + custom_forbidden += f"- ❌ {action}\n" + # Find the marker and append after it + template = template.replace(forbidden_marker, custom_forbidden) + + # Update PM tool + if customizations["pm_tool"] != "none": + template = template.replace( + '**Configured Tool**: [Linear / GitHub Issues / Jira / GitLab / None]', + f'**Configured Tool**: {customizations["pm_tool"].title()}' + ) + # Update config JSON + template = template.replace( + '"project_management": "none"', + f'"project_management": "{customizations["pm_tool"]}"' + ) + + # Append custom sections at the end + if customizations["custom_sections"]: + template += "\n\n---\n\n## Custom Project Sections\n\n" + for section_name, section_content in customizations["custom_sections"].items(): + template += f"### {section_name}\n\n{section_content}\n\n" + + # Write updated file + with open(output_path, 'w', encoding='utf-8') as f: + f.write(template) + +def main(): + if len(sys.argv) < 3: + print("Usage:", file=sys.stderr) + print(" Extract: python3 claude_updater.py extract 
CLAUDE.md > customizations.json", file=sys.stderr) + print(" Generate: python3 claude_updater.py generate --customizations file.json --template template.md --output CLAUDE.md", file=sys.stderr) + sys.exit(1) + + command = sys.argv[1] + + if command == "extract": + claude_md_path = sys.argv[2] + + if not Path(claude_md_path).exists(): + print(f"Error: File not found: {claude_md_path}", file=sys.stderr) + sys.exit(1) + + try: + customizations = extract_customizations(claude_md_path) + print(json.dumps(customizations, indent=2)) + except Exception as e: + print(f"Error extracting customizations: {e}", file=sys.stderr) + sys.exit(2) + + elif command == "generate": + # Parse arguments + args = { + 'customizations': None, + 'template': None, + 'output': None + } + + i = 2 + while i < len(sys.argv): + if sys.argv[i] == '--customizations' and i + 1 < len(sys.argv): + args['customizations'] = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '--template' and i + 1 < len(sys.argv): + args['template'] = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '--output' and i + 1 < len(sys.argv): + args['output'] = sys.argv[i + 1] + i += 2 + else: + i += 1 + + if not all(args.values()): + print("Error: Missing required arguments", file=sys.stderr) + print("Required: --customizations, --template, --output", file=sys.stderr) + sys.exit(1) + + try: + with open(args['customizations'], 'r') as f: + customizations = json.load(f) + + # Use get_template_path for GitHub fetch with bundled fallback + # If --template is a directory, treat it as bundled_template_dir + # Otherwise, use it directly as a file path + template_arg = args['template'] + + if Path(template_arg).is_dir(): + # Directory provided - use get_template_path for smart fetching + template_path, is_github = get_template_path(template_arg) + elif Path(template_arg).is_file(): + # File provided directly - use as-is (backward compatibility) + template_path = template_arg + is_github = False + else: + # Try parent directory for get_template_path + template_dir = str(Path(template_arg).parent) + template_path, is_github = get_template_path(template_dir) + + generate_updated_claude_md(customizations, template_path, args['output']) + print(f"✓ Generated {args['output']}", file=sys.stderr) + + # Cleanup temp file if from GitHub + if is_github and Path(template_path).exists(): + Path(template_path).unlink() + + except Exception as e: + print(f"Error generating CLAUDE.md: {e}", file=sys.stderr) + sys.exit(2) + + else: + print(f"Error: Unknown command: {command}", file=sys.stderr) + print("Valid commands: extract, generate", file=sys.stderr) + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/skills/nav-update-claude/functions/version_detector.py b/skills/nav-update-claude/functions/version_detector.py new file mode 100755 index 0000000..456c069 --- /dev/null +++ b/skills/nav-update-claude/functions/version_detector.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 +""" +Navigator CLAUDE.md Version Detector +Detects if CLAUDE.md is outdated, current (v3.1), or unknown +""" + +import sys +import re +from pathlib import Path +from typing import Literal + +VersionStatus = Literal["outdated", "current", "unknown"] + +def detect_version(claude_md_path: str) -> VersionStatus: + """Detect CLAUDE.md version status""" + + if not Path(claude_md_path).exists(): + print(f"Error: File not found: {claude_md_path}", file=sys.stderr) + sys.exit(1) + + with open(claude_md_path, 'r', encoding='utf-8') as f: + content = f.read() + + # Check for version marker + version_match = 
re.search(r'Navigator Version[:\s]+(\d+\.\d+\.\d+)', content, re.IGNORECASE) + + if version_match: + version_str = version_match.group(1) + major, minor, patch = map(int, version_str.split('.')) + + # Version 3.1+ is current + if major > 3 or (major == 3 and minor >= 1): + # Double-check for natural language (should have it in v3+) + if has_natural_language_examples(content): + return "current" + else: + # Has v3.1 marker but no natural language - partial migration + return "outdated" + + # Version 3.0 - check for natural language + elif major == 3 and minor == 0: + if has_natural_language_examples(content) and not has_slash_commands(content): + return "current" + else: + return "outdated" + + # Version < 3.0 is definitely outdated + else: + return "outdated" + + # No version marker - use heuristics + return detect_by_heuristics(content) + +def has_slash_commands(content: str) -> bool: + """Check if content has slash command references""" + slash_patterns = [ + r'/nav:start', + r'/nav:init', + r'/nav:doc', + r'/nav:marker', + r'/nav:markers', + r'/nav:compact', + r'/jitd:', + ] + + for pattern in slash_patterns: + if re.search(pattern, content): + return True + + return False + +def has_natural_language_examples(content: str) -> bool: + """Check if content has natural language command examples""" + natural_language_patterns = [ + r'"Start my Navigator session"', + r'"Initialize Navigator in this project"', + r'"Archive TASK-\w+ documentation"', + r'"Create an SOP for', + r'"Clear context and preserve markers"', + r'"Start my session"', + r'"Load the navigator"', + ] + + matches = 0 + for pattern in natural_language_patterns: + if re.search(pattern, content, re.IGNORECASE): + matches += 1 + + # Need at least 2 natural language examples to be considered current + return matches >= 2 + +def has_skills_explanation(content: str) -> bool: + """Check if content explains skills architecture""" + skills_markers = [ + r'skills-only architecture', + r'skills that auto-invoke', + r'How Claude Discovers.*Skills', + r'Progressive disclosure.*skills', + ] + + for pattern in skills_markers: + if re.search(pattern, content, re.IGNORECASE | re.DOTALL): + return True + + return False + +def has_navigator_markers(content: str) -> bool: + """Check if content has any Navigator-specific markers""" + navigator_markers = [ + r'Navigator', + r'\.agent/', + r'DEVELOPMENT-README\.md', + r'nav-start', + r'nav-task', + r'nav-compact', + r'context markers', + r'token optimization', + ] + + matches = 0 + for pattern in navigator_markers: + if re.search(pattern, content, re.IGNORECASE): + matches += 1 + + # Need at least 3 Navigator markers to be considered Navigator-related + return matches >= 3 + +def detect_by_heuristics(content: str) -> VersionStatus: + """Detect version using heuristics when no version marker present""" + + # Check if it's Navigator-related at all + if not has_navigator_markers(content): + return "unknown" + + # Has slash commands → definitely outdated + if has_slash_commands(content): + return "outdated" + + # Has natural language + skills explanation → current + if has_natural_language_examples(content) and has_skills_explanation(content): + return "current" + + # Has natural language but no skills explanation → partial migration + if has_natural_language_examples(content): + return "outdated" + + # Has Navigator markers but no natural language → old version + if has_navigator_markers(content): + return "outdated" + + # Can't determine + return "unknown" + +def main(): + if len(sys.argv) < 2: + 
print("Usage: python3 version_detector.py CLAUDE.md", file=sys.stderr) + sys.exit(1) + + claude_md_path = sys.argv[1] + + try: + status = detect_version(claude_md_path) + print(status) + except Exception as e: + print(f"Error detecting version: {e}", file=sys.stderr) + sys.exit(2) + +if __name__ == "__main__": + main() diff --git a/skills/nav-update-claude/skill.md b/skills/nav-update-claude/skill.md new file mode 100644 index 0000000..ef34007 --- /dev/null +++ b/skills/nav-update-claude/skill.md @@ -0,0 +1,362 @@ +--- +name: nav-update-claude +description: Update project CLAUDE.md to latest Navigator version, preserving customizations. Use when user says "update CLAUDE.md", "migrate to v3", or when detecting outdated Navigator configuration. +allowed-tools: Read, Write, Edit, Bash +version: 1.0.0 +--- + +# Navigator CLAUDE.md Updater Skill + +Update project's CLAUDE.md to latest Navigator version (v3.1) while preserving project-specific customizations. + +## When to Invoke + +Invoke this skill when the user: +- Says "update my CLAUDE.md", "migrate CLAUDE.md to v3" +- Says "update Navigator configuration", "fix my CLAUDE.md" +- Mentions outdated commands like "/nav:start" and wants to upgrade +- Complains that Claude doesn't understand Navigator workflow + +**DO NOT invoke** if: +- CLAUDE.md already references v3.1 and natural language commands +- User is editing CLAUDE.md for project-specific reasons (not Navigator updates) +- Working on plugin's root CLAUDE.md (not user projects) + +## Execution Steps + +### Step 1: Detect Current CLAUDE.md Version + +Check if CLAUDE.md exists and detect version: + +```bash +if [ ! -f "CLAUDE.md" ]; then + echo "❌ No CLAUDE.md found in current directory" + echo "" + echo "Run 'Initialize Navigator in this project' first." + exit 1 +fi +``` + +Use `version_detector.py` to analyze CLAUDE.md: + +```bash +python3 "$SKILL_BASE_DIR/functions/version_detector.py" CLAUDE.md +``` + +This script checks for: +- Version markers (e.g., "Navigator Version: 3.1.0") +- Slash command references (`/nav:start`, `/nav:doc`, etc.) +- Skills vs commands language +- Natural language examples + +**Outputs**: +- `outdated` - Has `/nav:` commands or v1/v2 markers +- `current` - Already v3.1 with natural language +- `unknown` - Can't determine (custom/non-Navigator file) + +**If `current`**: +``` +✅ CLAUDE.md is already up to date (v3.1) + +No migration needed. +``` +Exit successfully. + +**If `unknown`**: +``` +⚠️ CLAUDE.md doesn't appear to be a Navigator file + +This might be a custom configuration. Manual review recommended. +Proceed with migration anyway? [y/N] +``` + +If user declines, exit. If accepts, continue. + +### Step 2: Backup Current CLAUDE.md + +Always create backup before modifying: + +```bash +cp CLAUDE.md CLAUDE.md.backup +echo "📦 Backup created: CLAUDE.md.backup" +``` + +### Step 3: Extract Project-Specific Customizations + +Use `claude_updater.py` to parse current CLAUDE.md: + +```bash +python3 "$SKILL_BASE_DIR/functions/claude_updater.py" extract CLAUDE.md > /tmp/nav-customizations.json +``` + +This extracts: +- **Project name** (from title) +- **Project description** (from Context section) +- **Tech stack** (languages, frameworks) +- **Code standards** (custom rules beyond Navigator defaults) +- **Forbidden actions** (project-specific restrictions) +- **PM tool configuration** (Linear, GitHub, Jira, etc.) 
+- **Custom sections** (anything not in Navigator template) + +### Step 4: Generate Updated CLAUDE.md + +Apply latest template with extracted customizations: + +```bash +# Template fetching now automatic via get_template_path(): +# 1. Tries GitHub (version-matched) +# 2. Falls back to bundled if offline +python3 "$SKILL_BASE_DIR/functions/claude_updater.py" generate \ + --customizations /tmp/nav-customizations.json \ + --template "$SKILL_BASE_DIR/../../templates/CLAUDE.md" \ + --output CLAUDE.md +``` + +**Template Source Priority**: +1. **GitHub** (version-matched): Fetches from `https://raw.githubusercontent.com/alekspetrov/navigator/v{version}/templates/CLAUDE.md` + - Matches installed plugin version (e.g., v4.3.0) + - Always up-to-date with release + - Works with pre-releases +2. **Bundled** (fallback): Uses `templates/CLAUDE.md` from installed plugin + - Offline fallback + - Guaranteed availability + +**What this does**: +1. Loads template (GitHub or bundled) +2. Replaces placeholders with extracted data +3. Preserves custom sections +4. Updates Navigator workflow to natural language +5. Removes slash command references +6. Adds skills explanation + +### Step 5: Show Diff and Confirm + +Display changes for user review: + +```bash +echo "" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "📝 CHANGES TO CLAUDE.MD" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" + +# Show unified diff +diff -u CLAUDE.md.backup CLAUDE.md || true + +echo "" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +``` + +### Step 6: Verify and Commit + +Show summary of changes: + +``` +✅ CLAUDE.md Updated to v3.1 + +Key changes: + ✓ Removed slash command references (e.g., /nav:start) + ✓ Added natural language examples ("Start my Navigator session") + ✓ Added skills architecture explanation + ✓ Updated Navigator workflow section + ✓ Preserved your project-specific customizations: + - Tech stack: [list] + - Code standards: [count] custom rules + - Forbidden actions: [count] custom rules + +Backup saved: CLAUDE.md.backup + +Next steps: + 1. Review changes: git diff CLAUDE.md + 2. Test: "Start my Navigator session" should work + 3. Commit: git add CLAUDE.md && git commit -m "chore: update CLAUDE.md to Navigator v3.1" + 4. Remove backup: rm CLAUDE.md.backup + +Rollback if needed: mv CLAUDE.md.backup CLAUDE.md +``` + +### Step 7: Optional - Update .nav-config.json + +If config exists, check version: + +```bash +if [ -f ".agent/.nav-config.json" ]; then + version=$(jq -r '.version' .agent/.nav-config.json) + if [ "$version" != "4.5.0" ]; then + echo "" + echo "💡 .nav-config.json is version $version" + echo " Update to 4.5.0? [Y/n]" + read -r response + + if [[ "$response" =~ ^([yY][eE][sS]|[yY]|)$ ]]; then + jq '.version = "4.5.0"' .agent/.nav-config.json > /tmp/nav-config.tmp + mv /tmp/nav-config.tmp .agent/.nav-config.json + echo " ✓ Updated config to v4.5.0" + fi + fi +fi +``` + +## Predefined Functions + +### functions/version_detector.py + +**Purpose**: Detect CLAUDE.md version (outdated, current, unknown) + +**Usage**: +```bash +python3 version_detector.py CLAUDE.md +``` + +**Output**: Prints one of: `outdated`, `current`, `unknown` + +**Exit codes**: +- 0: Success (version detected) +- 1: File not found +- 2: Parse error + +**Detection logic**: +1. Check for version marker: `Navigator Version: X.X.X` +2. Check for slash commands: `/nav:start`, `/jitd:`, etc. +3. Check for natural language examples: `"Start my Navigator session"` +4. 
Check for skills section + +**Heuristics**: +- Has `/nav:` → outdated +- Version < 3.0 → outdated +- Version >= 3.0 + natural language → current +- No version + no Navigator markers → unknown + +### functions/claude_updater.py + +**Purpose**: Extract customizations and generate updated CLAUDE.md + +**Usage**: +```bash +# Extract customizations +python3 claude_updater.py extract CLAUDE.md > customizations.json + +# Generate updated file +python3 claude_updater.py generate \ + --customizations customizations.json \ + --template ../../templates/CLAUDE.md \ + --output CLAUDE.md +``` + +**Extract mode** outputs JSON: +```json +{ + "project_name": "MyApp", + "description": "Brief project description", + "tech_stack": ["Next.js", "TypeScript", "PostgreSQL"], + "code_standards": ["Custom rule 1", "Custom rule 2"], + "forbidden_actions": ["Custom restriction 1"], + "pm_tool": "github", + "custom_sections": { + "Deployment": "Custom deployment instructions..." + } +} +``` + +**Generate mode**: +1. Loads template +2. Replaces `[Project Name]` with `project_name` +3. Replaces `[Brief project description]` with `description` +4. Replaces `[List your technologies...]` with `tech_stack` +5. Appends custom code standards +6. Appends custom forbidden actions +7. Inserts custom sections at end + +## Error Handling + +**No CLAUDE.md found**: +``` +❌ No CLAUDE.md found in current directory + +This project doesn't appear to have Navigator initialized. +Run "Initialize Navigator in this project" first. +``` + +**Backup failed**: +``` +❌ Failed to create backup: CLAUDE.md.backup + +Check file permissions and disk space. +``` + +**Parse error**: +``` +❌ Failed to parse CLAUDE.md + +The file might be corrupted or have unusual formatting. +Manual review required. + +Backup saved at: CLAUDE.md.backup +``` + +**Template not found**: +``` +❌ Navigator template not found + +This might be a plugin installation issue. 
+Try reinstalling Navigator plugin: /plugin update navigator
+```
+
+## Success Criteria
+
+Migration is successful when:
+- [ ] CLAUDE.md backed up successfully
+- [ ] Version detected correctly
+- [ ] Customizations extracted
+- [ ] New file generated with v3.1 template
+- [ ] Project-specific content preserved
+- [ ] Diff shown to user for review
+- [ ] Commit instructions provided
+
+## Rollback Procedure
+
+If migration fails or the user is unhappy:
+
+```bash
+# Restore backup
+mv CLAUDE.md.backup CLAUDE.md
+
+# Or compare and manually fix
+diff CLAUDE.md.backup CLAUDE.md
+```
+
+## Notes
+
+This skill:
+- **Preserves all customizations** (tech stack, standards, restrictions)
+- **Non-destructive** (always creates backup)
+- **Idempotent** (running multiple times is safe)
+- **Transparent** (shows diff before finalizing)
+
+**What gets updated**:
+- Navigator version marker
+- Slash commands → natural language
+- Workflow examples
+- Skills vs commands explanation
+- Token optimization strategy
+
+**What gets preserved**:
+- Project name and description
+- Tech stack
+- Code standards
+- Forbidden actions
+- PM tool configuration
+- Custom sections
+
+## Related Skills
+
+- **nav-init**: Initialize Navigator in new project (creates CLAUDE.md from scratch)
+- **nav-start**: Start session (uses updated CLAUDE.md)
+- **nav-task**: Task documentation (benefits from updated workflow)
+
+## Examples
+
+### Example 1: Simple Update
+
+```
+User: "Update my CLAUDE.md to v3.1"
\ No newline at end of file
diff --git a/skills/nav-upgrade/SKILL.md b/skills/nav-upgrade/SKILL.md
new file mode 100644
index 0000000..b5be78e
--- /dev/null
+++ b/skills/nav-upgrade/SKILL.md
@@ -0,0 +1,667 @@
+---
+name: nav-upgrade
+description: Automates Navigator plugin updates. Detects current version, updates plugin, verifies installation, updates project CLAUDE.md, and validates new features. Auto-invoke when user mentions upgrading Navigator or getting new features.
+allowed-tools: Bash, Read, Write, Edit, TodoWrite
+version: 1.0.0
+---
+
+# Navigator Upgrade Skill
+
+Automate Navigator plugin updates with version detection, conflict resolution, and post-update validation.
+
+## When to Invoke
+
+Auto-invoke when user says:
+- "Update Navigator"
+- "Upgrade Navigator plugin"
+- "Get latest Navigator version"
+- "Update to Navigator v3.3.0"
+- "Install new Navigator features"
+- "Check for Navigator updates"
+
+## What This Does
+
+**6-Step Workflow**:
+1. **Version Detection**: Check current Navigator version vs latest
+2. **Plugin Update**: Execute `/plugin update navigator`
+3. **Verification**: Confirm update succeeded
+4. **CLAUDE.md Update**: Update project configuration (via nav-update-claude)
+5. **Post-Upgrade Setup Check**: Flag new features that need one-time setup
+6. 
**Feature Discovery**: Show new features available + +**Time Savings**: Manual update (10-15 min) → Automated (2 min) + +--- + +## Prerequisites + +- Navigator plugin installed +- Project initialized with Navigator +- Internet connection for plugin update + +--- + +## Workflow Protocol + +### Step 1: Version Detection + +**Execute**: `version_detector.py` + +**Check both stable and pre-release versions**: +```bash +# Current installed version +grep '"version"' .claude-plugin/plugin.json + +# Get all releases (including pre-releases) +curl -s https://api.github.com/repos/alekspetrov/navigator/releases + +# Parse: +# - Latest stable (prerelease: false) +# - Latest pre-release (prerelease: true) +# - Compare with current version +``` + +**Output scenarios**: + +**Scenario 1: Stable update available** +```json +{ + "current_version": "4.0.0", + "latest_stable": "4.2.0", + "latest_prerelease": null, + "recommendation": "update_to_stable" +} +``` + +**Scenario 2: Pre-release available (user on stable)** +```json +{ + "current_version": "4.0.0", + "latest_stable": "4.0.0", + "latest_prerelease": "4.3.0", + "recommendation": "offer_prerelease_option" +} +``` + +**Present choice**: +``` +✅ You're on the latest stable version (v4.0.0) + +⚡ Experimental version available: v4.3.0 + +New in v4.3.0 (Experimental): +• Multi-Claude agentic workflows +• 30% success rate (use for simple features) +• PM integration with ticket closing + +Options: +[1] Stay on stable v4.0.0 (recommended) +[2] Try experimental v4.3.0 (early adopter) + +Your choice [1-2]: +``` + +**Scenario 3: Already on latest (stable or pre-release)** +``` +✅ You're on v4.3.0 (latest experimental) + +Latest stable: v4.0.0 +Status: You're ahead of stable (testing experimental features) + +New features in your version: +- Multi-Claude workflows +- Task agents in sub-Claude phases +``` + +Skip to Step 5 (Feature Discovery). + +**Scenario 4: On pre-release, newer stable available** +``` +⚠️ You're on v4.3.0 (experimental) +Latest stable: v4.5.0 + +Recommendation: Update to stable v4.5.0 +Experimental features from v4.3.0 are now stable. +``` + +--- + +### Step 2: Plugin Update + +**Scenario-based update strategy**: + +#### Scenario 2: Pre-release Available (User on Stable) + +When pre-release detected, present choice using AskUserQuestion tool: + +```markdown +✅ You're on latest stable version (v4.0.0) + +⚡ Experimental version available: v4.3.0 + +New in v4.3.0 (Experimental): +• Multi-Claude agentic workflows +• 30% success rate (use for simple features) +• PM integration with ticket closing + +**Question**: Which version would you like? + +**Options**: +[1] **Stay on stable v4.0.0** (recommended) + - Production-ready + - No experimental features + - Most reliable + +[2] **Try experimental v4.3.0** (early adopter) + - Multi-Claude workflows + - Latest features + - 30% completion rate + - Help test new functionality + +Your choice? +``` + +**If user chooses [1] (Stay stable)**: +``` +✓ Staying on v4.0.0 (latest stable) + +No action needed. Run nav-upgrade again when you're ready to try experimental features. 
+``` + +**If user chooses [2] (Try experimental)**: +```bash +# Uninstall current version +/plugin uninstall navigator + +# Add marketplace (if not already added) +/plugin marketplace add alekspetrov/navigator + +# Install specific pre-release version +# Note: /plugin update only fetches stable, must install specific version +git clone https://github.com/alekspetrov/navigator.git /tmp/navigator-v4.3.0 +cd /tmp/navigator-v4.3.0 +git checkout v4.3.0 + +# Install from local checkout +/plugin install /tmp/navigator-v4.3.0 +``` + +**Then verify installation**: +```bash +/plugin list | grep navigator +# Should show: navigator (v4.3.0) +``` + +#### Scenario 1: Stable Update Available + +**Execute**: `/plugin update navigator` + +**Monitor output**: +``` +Updating navigator... +✅ Navigator updated to v4.2.0 +``` + +**If update fails**: +``` +❌ Update failed: [error message] + +Troubleshooting: +1. Restart Claude Code +2. Try: /plugin uninstall navigator && /plugin install navigator +3. Check internet connection +4. Report issue: https://github.com/alekspetrov/navigator/issues +``` + +**Automatic retry** (once): +If update fails, try uninstall/reinstall automatically: +```bash +/plugin uninstall navigator +/plugin marketplace add alekspetrov/navigator +/plugin install navigator +``` + +--- + +### Step 3: Verification + +**Execute**: `plugin_verifier.py` + +**Verify**: +1. Plugin version matches latest +2. New skills registered in plugin.json +3. Skills are invokable + +**Test new skills** (v3.3.0 example): +```bash +# Test that visual-regression skill exists +ls ~/.config/claude/plugins/navigator/skills/visual-regression/SKILL.md 2>/dev/null || echo "Skill not found" +``` + +**Output**: +``` +✅ Update Verification + +Version: v3.3.0 ✅ +New Skills Registered: visual-regression ✅ +Skills Invokable: ✅ + +Update successful! +``` + +**If verification fails**: +``` +⚠️ Update completed but verification failed + +Issue: visual-regression skill not found +Fix: Restart Claude Code to reload skills + +After restarting, verify: +"Set up visual regression for Button" +``` + +Prompt user to restart Claude Code. + +--- + +### Step 4: Update Project CLAUDE.md (Automatic) + +**After plugin update, automatically invoke**: `nav-update-claude` skill + +``` +🔄 Syncing project CLAUDE.md with updated plugin... + +✓ Using template from GitHub (v4.3.0) +✓ Extracted customizations +✓ Generated updated CLAUDE.md +``` + +**What happens automatically**: +1. Detects new plugin version (e.g., v4.3.0) +2. Fetches matching template from GitHub +3. Preserves project customizations +4. Updates CLAUDE.md in current project +5. Shows diff for review + +**Template sync benefits**: +- ✅ CLAUDE.md always matches installed plugin version +- ✅ No template drift (v4.0 templates with v4.3 plugin) +- ✅ Pre-release templates accessible +- ✅ Offline fallback to bundled templates + +**User action required**: +``` +Review changes and commit: + +git add CLAUDE.md +git commit -m "chore: update CLAUDE.md to Navigator v4.3.0" +``` + +**See**: `nav-update-claude` skill for details. + +--- + +### Step 5: Post-Upgrade Setup Check + +**Check if new features require setup**: + +```bash +# Check for skills with setup requirements +if [ -f "$NAVIGATOR_PATH/skills/product-design/setup.sh" ]; then + # Check if venv exists + if [ ! 
-d "$NAVIGATOR_PATH/skills/product-design/venv" ]; then + echo "⚠️ product-design skill requires setup" + NEEDS_SETUP=true + fi +fi +``` + +**If setup needed, show instructions**: + +```markdown +⚠️ New Feature Requires Setup + +The product-design skill (v3.4.0+) requires Python dependencies: + +**One-time setup** (30 seconds): +```bash +cd ~/.claude/plugins/marketplaces/jitd-marketplace/skills/product-design +./setup.sh +``` + +**What this installs**: +- Python MCP SDK for direct Figma connection +- 95% orchestration reduction +- 92% token savings + +**After setup, use**: +"Review this Figma design: [URL]" +``` + +**Record setup needed in TodoWrite** for tracking. + +--- + +### Step 6: Feature Discovery + +**Show new features** available in updated version. + +**For v3.3.0 update**: +````markdown +🎉 Navigator v3.3.0 Update Complete! + +## New Features Available + +### visual-regression Skill (NEW) +Set up Storybook + Chromatic in 5 minutes instead of 2-3 hours. + +**Usage**: +``` +"Set up visual regression for ProfileCard" +"Add Chromatic to Button component" +"Configure visual tests for Input, Card, Modal" +``` + +**What it does**: +✅ Generates Storybook stories with all variants +✅ Configures Chromatic/Percy/BackstopJS +✅ Creates CI workflows (GitHub Actions, GitLab CI) +✅ Adds accessibility tests + +**Complete Design Pipeline** (v3.2 + v3.3): +1. "Review this design from Figma" (v3.2) +2. Implement components +3. "Set up visual regression" (v3.3 NEW) +4. Automated visual testing in CI + +### Updated Skills Count +- **17 total skills** (was 16) +- 10 core Navigator skills +- 7 development skills + +### Integration +visual-regression integrates with product-design skill for complete design→code→testing workflow. + +## Try It Now + +If you have Storybook in this project: +``` +"Set up visual regression for [ComponentName]" +``` + +If you don't have Storybook: +```bash +npx storybook init +``` + +Then: +``` +"Set up visual regression for [ComponentName]" +``` + +## Documentation + +- Release Notes: https://github.com/alekspetrov/navigator/releases/tag/v3.3.0 +- Skill Docs: skills/visual-regression/SKILL.md +- Examples: skills/visual-regression/examples/ +- SOP: .agent/sops/testing/visual-regression-setup.md (created in your project) +```` + +--- + +## Predefined Functions + +### functions/version_detector.py + +**Purpose**: Detect current and latest Navigator versions + +**Usage**: +```bash +python3 functions/version_detector.py +``` + +**Output**: +```json +{ + "current_version": "3.2.0", + "latest_version": "3.3.0", + "update_available": true, + "release_url": "https://github.com/alekspetrov/navigator/releases/tag/v3.3.0", + "changes": { + "new_skills": ["visual-regression"], + "updated_skills": ["product-design"], + "new_features": ["Multi-tool VR support", "CI workflows"], + "breaking_changes": [] + } +} +``` + +### functions/plugin_updater.py + +**Purpose**: Execute plugin update with retry logic + +**Usage**: +```bash +python3 functions/plugin_updater.py --target-version 3.3.0 +``` + +**Actions**: +1. Execute `/plugin update navigator` +2. If fails, retry with uninstall/reinstall +3. Verify update succeeded +4. 
Return status + +### functions/plugin_verifier.py + +**Purpose**: Verify update completed successfully + +**Usage**: +```bash +python3 functions/plugin_verifier.py --expected-version 3.3.0 +``` + +**Checks**: +- Plugin version matches expected +- New skills exist in filesystem +- Skills registered in plugin.json +- Skills are invokable (test invocation) + +--- + +## Error Handling + +### Update Failed: Network Error + +``` +❌ Update failed: Could not connect to plugin marketplace + +Fix: +1. Check internet connection +2. Try again in a few minutes +3. Manual update: /plugin uninstall navigator && /plugin install navigator +``` + +### Update Failed: Permission Denied + +``` +❌ Update failed: Permission denied + +Fix: +1. Close Claude Code +2. Check ~/.config/claude/plugins/ permissions +3. Restart Claude Code +4. Try update again +``` + +### Verification Failed: Skills Not Found + +``` +⚠️ Update completed but new skills not found + +Fix: +1. Restart Claude Code (required for skill reload) +2. Verify: /plugin list +3. Test: "Set up visual regression for Button" +``` + +Automatically prompt user to restart. + +### CLAUDE.md Update Conflicts + +``` +⚠️ CLAUDE.md update has conflicts with your customizations + +Options: +[1] Keep my customizations (merge new features) +[2] Use new template (lose customizations) +[3] Show me the diff first + +Reply with choice +``` + +Let user decide how to handle conflicts. + +--- + +## Upgrade Paths + +### From v3.0.x to v3.3.0 + +**Changes**: +- +2 skills (nav-markers in v3.1, visual-regression in v3.3) +- OpenTelemetry integration (v3.1) +- Product design skill (v3.2) +- Visual regression skill (v3.3) + +**Breaking changes**: None (fully backward compatible) + +### From v3.1.x to v3.3.0 + +**Changes**: +- Product design skill (v3.2) +- Visual regression skill (v3.3) +- Updated skills count (17 total) + +**Breaking changes**: None + +### From v3.2.x to v3.3.0 + +**Changes**: +- Visual regression skill +- Integration with product-design workflow +- Updated skills count (17 total) + +**Breaking changes**: None + +--- + +## Post-Update Checklist + +After upgrade, verify: + +- ✅ `/plugin list` shows new version +- ✅ CLAUDE.md updated with new patterns +- ✅ New skills auto-invoke on natural language +- ✅ Existing skills still work +- ✅ No conflicts in project configuration + +**If all checked**: Update successful! + +--- + +## Rollback + +If update causes issues: + +``` +"Rollback Navigator to v3.2.0" +``` + +This will: +1. Uninstall current version +2. Install specific version from marketplace +3. Update CLAUDE.md to match +4. Verify rollback succeeded + +--- + +## Integration Points + +### With nav-update-claude + +After plugin update, automatically invokes `nav-update-claude` to sync project configuration. + +### With nav-start + +After update, `nav-start` shows new features available in session statistics. + +### With nav-init + +If upgrading before project initialization, suggests running `nav-init` with latest features. 
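+
+### Chaining the Helper Functions
+
+The three `functions/` scripts are designed to feed into each other through their documented exit codes (version_detector: 1 = update available; plugin_verifier: 2 = restart needed). A minimal sketch of that chain, assuming `jq` is installed and using `$SKILL_BASE_DIR` as an illustrative path to this skill's directory:
+
+```bash
+#!/usr/bin/env bash
+cd "$SKILL_BASE_DIR/functions" || exit 1
+
+python3 version_detector.py > /tmp/nav-version.json
+if [ $? -eq 1 ]; then                                    # exit 1 = update available
+  target=$(jq -r '.latest_version' /tmp/nav-version.json)
+  python3 plugin_updater.py --target-version "$target" || exit 1
+  python3 plugin_verifier.py --expected-version "$target"
+  if [ $? -eq 2 ]; then                                  # exit 2 = restart needed
+    echo "Restart Claude Code to finish reloading skills."
+  fi
+fi
+```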
+ +--- + +## Examples + +### Example 1: Simple Update + +``` +User: "Update Navigator" + +→ Detects: v3.2.0 → v3.3.0 available +→ Updates plugin +→ Updates CLAUDE.md +→ Shows: "visual-regression skill now available" +→ Suggests: "Set up visual regression for [Component]" +``` + +### Example 2: Already on Latest + +``` +User: "Update Navigator" + +→ Detects: Already on v3.3.0 +→ Shows new features available +→ Suggests trying visual-regression if not used yet +``` + +### Example 3: Update with Restart Required + +``` +User: "Update Navigator" + +→ Updates plugin +→ Verification: Skills not found (needs restart) +→ Prompts: "Please restart Claude Code to complete update" +→ After restart: Verification succeeds +``` + +--- + +## Best Practices + +1. **Update regularly**: Check for updates monthly +2. **Read release notes**: Understand new features before using +3. **Test new skills**: Try new features in test project first +4. **Report issues**: File GitHub issues for update problems +5. **Backup CLAUDE.md**: Keep backup before update (auto-created) + +--- + +## Version History + +- **v1.0.0**: Initial nav-upgrade skill (Navigator v3.3.1) + +--- + +## Future Enhancements + +- Auto-update check on `nav-start` (opt-in) +- Changelog display in CLI +- Update notifications for major versions +- Automated migration scripts for breaking changes + +--- + +**Last Updated**: 2025-10-21 +**Skill Type**: Core Navigator +**Auto-Invocation**: Yes diff --git a/skills/nav-upgrade/functions/plugin_updater.py b/skills/nav-upgrade/functions/plugin_updater.py new file mode 100644 index 0000000..4706c59 --- /dev/null +++ b/skills/nav-upgrade/functions/plugin_updater.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 +""" +Navigator Plugin Updater + +Executes plugin update with retry logic and verification. + +Usage: + python plugin_updater.py [--target-version VERSION] +""" + +import argparse +import json +import subprocess +import sys +import time +from typing import Dict + + +def update_plugin_via_claude() -> Dict: + """ + Execute /plugin update navigator command. + + Returns: + Dict with success status and output + """ + try: + # Execute update command + result = subprocess.run( + ['claude', 'plugin', 'update', 'navigator'], + capture_output=True, + text=True, + timeout=60 + ) + + success = result.returncode == 0 + + return { + 'success': success, + 'output': result.stdout, + 'error': result.stderr, + 'method': 'update' + } + except subprocess.TimeoutExpired: + return { + 'success': False, + 'error': 'Update timed out after 60 seconds', + 'method': 'update' + } + except FileNotFoundError: + return { + 'success': False, + 'error': 'claude command not found. Is Claude Code installed?', + 'method': 'update' + } + except Exception as e: + return { + 'success': False, + 'error': str(e), + 'method': 'update' + } + + +def reinstall_plugin() -> Dict: + """ + Uninstall and reinstall Navigator plugin. 
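+
+    Sequence: uninstall, short pause, marketplace add, short pause,
+    install. Each step has its own timeout, and any failing step
+    aborts the chain with an error naming that phase.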
+ + Returns: + Dict with success status + """ + try: + # Uninstall + uninstall_result = subprocess.run( + ['claude', 'plugin', 'uninstall', 'navigator'], + capture_output=True, + text=True, + timeout=30 + ) + + if uninstall_result.returncode != 0: + return { + 'success': False, + 'error': f'Uninstall failed: {uninstall_result.stderr}', + 'method': 'reinstall' + } + + # Wait a moment + time.sleep(2) + + # Add from marketplace + add_result = subprocess.run( + ['claude', 'plugin', 'marketplace', 'add', 'alekspetrov/navigator'], + capture_output=True, + text=True, + timeout=30 + ) + + if add_result.returncode != 0: + return { + 'success': False, + 'error': f'Marketplace add failed: {add_result.stderr}', + 'method': 'reinstall' + } + + # Wait a moment + time.sleep(2) + + # Install + install_result = subprocess.run( + ['claude', 'plugin', 'install', 'navigator'], + capture_output=True, + text=True, + timeout=60 + ) + + success = install_result.returncode == 0 + + return { + 'success': success, + 'output': install_result.stdout, + 'error': install_result.stderr if not success else None, + 'method': 'reinstall' + } + except subprocess.TimeoutExpired: + return { + 'success': False, + 'error': 'Reinstall timed out', + 'method': 'reinstall' + } + except Exception as e: + return { + 'success': False, + 'error': str(e), + 'method': 'reinstall' + } + + +def update_with_retry(target_version: str = None) -> Dict: + """ + Update Navigator plugin with automatic retry on failure. + + Args: + target_version: Optional specific version to install + + Returns: + Dict with final update status + """ + report = { + 'attempts': [], + 'final_success': False, + 'target_version': target_version + } + + # Attempt 1: Normal update + print("Attempting plugin update...", file=sys.stderr) + attempt1 = update_plugin_via_claude() + report['attempts'].append(attempt1) + + if attempt1['success']: + report['final_success'] = True + return report + + # Attempt 2: Reinstall + print("Update failed. Attempting reinstall...", file=sys.stderr) + time.sleep(2) + + attempt2 = reinstall_plugin() + report['attempts'].append(attempt2) + + if attempt2['success']: + report['final_success'] = True + return report + + # Both failed + return report + + +def get_post_update_instructions(success: bool, method: str) -> str: + """Generate post-update instructions.""" + if success: + return """ +✅ Update Successful + +Next steps: +1. Restart Claude Code to reload skills +2. Verify version: /plugin list +3. Update project CLAUDE.md: "Update my CLAUDE.md to latest Navigator version" +4. Try new features (if any) +""" + else: + return f""" +❌ Update Failed (method: {method}) + +Troubleshooting: +1. Restart Claude Code +2. Try manual update: + /plugin uninstall navigator + /plugin marketplace add alekspetrov/navigator + /plugin install navigator + +3. Check internet connection +4. 
Report issue: https://github.com/alekspetrov/navigator/issues +""" + + +def main(): + """CLI entry point.""" + parser = argparse.ArgumentParser(description='Update Navigator plugin') + parser.add_argument('--target-version', help='Target version to install', default=None) + args = parser.parse_args() + + # Run update with retry + report = update_with_retry(args.target_version) + + # Add instructions + final_attempt = report['attempts'][-1] if report['attempts'] else {} + method = final_attempt.get('method', 'unknown') + report['instructions'] = get_post_update_instructions(report['final_success'], method) + + # Output as JSON + print(json.dumps(report, indent=2)) + + # Exit code + sys.exit(0 if report['final_success'] else 1) + + +if __name__ == '__main__': + main() diff --git a/skills/nav-upgrade/functions/plugin_verifier.py b/skills/nav-upgrade/functions/plugin_verifier.py new file mode 100644 index 0000000..c585cd8 --- /dev/null +++ b/skills/nav-upgrade/functions/plugin_verifier.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python3 +""" +Navigator Plugin Verifier + +Verifies that Navigator plugin update completed successfully. + +Usage: + python plugin_verifier.py --expected-version 3.3.0 +""" + +import argparse +import json +import os +import re +import subprocess +import sys +from pathlib import Path +from typing import Dict, List + + +def get_installed_version() -> str: + """ + Get installed Navigator version from /plugin list. + + Returns: + Version string or None + """ + try: + result = subprocess.run( + ['claude', 'plugin', 'list'], + capture_output=True, + text=True, + timeout=10 + ) + + for line in result.stdout.split('\n'): + if 'navigator' in line.lower(): + match = re.search(r'v?(\d+\.\d+\.\d+)', line) + if match: + return match.group(1) + + return None + except Exception: + return None + + +def find_plugin_directory() -> Path: + """ + Find Navigator plugin installation directory. + + Returns: + Path to plugin directory or None + """ + possible_paths = [ + Path.home() / '.config' / 'claude' / 'plugins' / 'navigator', + Path.home() / '.claude' / 'plugins' / 'navigator', + Path.home() / 'Library' / 'Application Support' / 'Claude' / 'plugins' / 'navigator', + ] + + for path in possible_paths: + if path.exists() and path.is_dir(): + return path + + return None + + +def verify_skills_exist(plugin_dir: Path, expected_skills: List[str]) -> Dict: + """ + Verify that expected skills exist in plugin directory. + + Args: + plugin_dir: Path to plugin directory + expected_skills: List of skill names to check + + Returns: + Dict with verification results + """ + skills_dir = plugin_dir / 'skills' + + if not skills_dir.exists(): + return { + 'success': False, + 'error': 'Skills directory not found' + } + + results = {} + for skill_name in expected_skills: + skill_path = skills_dir / skill_name / 'SKILL.md' + results[skill_name] = skill_path.exists() + + all_exist = all(results.values()) + + return { + 'success': all_exist, + 'skills_checked': results, + 'missing_skills': [name for name, exists in results.items() if not exists] + } + + +def verify_plugin_json(plugin_dir: Path, expected_skills: List[str]) -> Dict: + """ + Verify that skills are registered in plugin.json. 
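+
+    A skill can be present on disk yet missing from the manifest's
+    "skills" array (entries look like "./skills/<name>"); this check
+    catches that case separately from verify_skills_exist().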
+ + Args: + plugin_dir: Path to plugin directory + expected_skills: List of skill names to check + + Returns: + Dict with verification results + """ + plugin_json_path = plugin_dir / '.claude-plugin' / 'plugin.json' + + if not plugin_json_path.exists(): + return { + 'success': False, + 'error': 'plugin.json not found' + } + + try: + with open(plugin_json_path, 'r') as f: + data = json.load(f) + + registered_skills = data.get('skills', []) + + # Check each expected skill + results = {} + for skill_name in expected_skills: + skill_path = f'./skills/{skill_name}' + results[skill_name] = skill_path in registered_skills + + all_registered = all(results.values()) + + return { + 'success': all_registered, + 'skills_checked': results, + 'unregistered_skills': [name for name, registered in results.items() if not registered] + } + except (json.JSONDecodeError, FileNotFoundError) as e: + return { + 'success': False, + 'error': str(e) + } + + +def verify_update(expected_version: str, expected_new_skills: List[str] = None) -> Dict: + """ + Comprehensive verification of Navigator plugin update. + + Args: + expected_version: Expected version after update (e.g., "3.3.0") + expected_new_skills: List of new skills expected in this version + + Returns: + Complete verification report + """ + report = { + 'expected_version': expected_version, + 'checks': {}, + 'overall_success': False, + 'needs_restart': False + } + + # Check 1: Version matches + installed_version = get_installed_version() + report['checks']['version'] = { + 'expected': expected_version, + 'actual': installed_version, + 'success': installed_version == expected_version + } + + # Check 2: Plugin directory exists + plugin_dir = find_plugin_directory() + report['checks']['plugin_directory'] = { + 'success': plugin_dir is not None, + 'path': str(plugin_dir) if plugin_dir else None + } + + if not plugin_dir: + report['recommendation'] = 'Plugin directory not found. Reinstall Navigator.' + return report + + # Check 3: New skills exist (if specified) + if expected_new_skills: + skills_check = verify_skills_exist(plugin_dir, expected_new_skills) + report['checks']['skills_exist'] = skills_check + + # Check 4: Skills registered in plugin.json + registration_check = verify_plugin_json(plugin_dir, expected_new_skills) + report['checks']['skills_registered'] = registration_check + + # If skills exist but verification shows they're not accessible, needs restart + if skills_check['success'] and not registration_check['success']: + report['needs_restart'] = True + + # Overall success + all_checks_passed = all( + check.get('success', False) + for check in report['checks'].values() + ) + + report['overall_success'] = all_checks_passed + + # Generate recommendation + if all_checks_passed: + report['recommendation'] = 'Update verified successfully!' + elif report['needs_restart']: + report['recommendation'] = 'Update completed. Restart Claude Code to reload skills.' 
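+    # Neither full success nor a restart-only gap: name the checks that failed.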
+ else: + failed_checks = [name for name, check in report['checks'].items() if not check.get('success')] + report['recommendation'] = f"Verification failed: {', '.join(failed_checks)}" + + return report + + +def main(): + """CLI entry point.""" + parser = argparse.ArgumentParser(description='Verify Navigator plugin update') + parser.add_argument('--expected-version', required=True, help='Expected version (e.g., 3.3.0)') + parser.add_argument('--new-skills', nargs='*', help='New skills to verify', default=[]) + args = parser.parse_args() + + # Run verification + report = verify_update(args.expected_version, args.new_skills or None) + + # Output as JSON + print(json.dumps(report, indent=2)) + + # Exit code + if report['overall_success']: + sys.exit(0) + elif report['needs_restart']: + sys.exit(2) # Special exit code for restart needed + else: + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/skills/nav-upgrade/functions/version_detector.py b/skills/nav-upgrade/functions/version_detector.py new file mode 100644 index 0000000..0acf454 --- /dev/null +++ b/skills/nav-upgrade/functions/version_detector.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 +""" +Navigator Version Detector + +Detects current Navigator version and checks for updates from GitHub releases. + +Usage: + python version_detector.py +""" + +import json +import os +import re +import subprocess +import sys +from pathlib import Path +from typing import Dict, Optional +from urllib import request + + +def get_current_version() -> Optional[str]: + """ + Get currently installed Navigator version from /plugin list. + + Returns: + Version string (e.g., "3.3.0") or None if not found + """ + try: + # Try to run claude plugin list command + result = subprocess.run( + ['claude', 'plugin', 'list'], + capture_output=True, + text=True, + timeout=10 + ) + + # Parse output for navigator version + for line in result.stdout.split('\n'): + if 'navigator' in line.lower(): + # Extract version (e.g., "navigator (v3.3.0)" or "navigator (3.3.0)") + match = re.search(r'v?(\d+\.\d+\.\d+)', line) + if match: + return match.group(1) + + return None + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError): + return None + + +def get_plugin_json_version() -> Optional[str]: + """ + Fallback: Get version from plugin.json in Navigator plugin directory. + + Returns: + Version string or None + """ + # Common plugin installation paths + possible_paths = [ + Path.home() / '.config' / 'claude' / 'plugins' / 'navigator' / '.claude-plugin' / 'plugin.json', + Path.home() / '.claude' / 'plugins' / 'navigator' / '.claude-plugin' / 'plugin.json', + Path.home() / 'Library' / 'Application Support' / 'Claude' / 'plugins' / 'navigator' / '.claude-plugin' / 'plugin.json', + ] + + for path in possible_paths: + if path.exists(): + try: + with open(path, 'r') as f: + data = json.load(f) + return data.get('version') + except (json.JSONDecodeError, FileNotFoundError, PermissionError): + continue + + return None + + +def get_latest_version_from_github() -> Dict: + """ + Get latest Navigator version from GitHub releases API. 
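+
+    Note: GitHub's /releases/latest endpoint returns only the newest
+    stable release; pre-releases and drafts are excluded, so callers
+    interested in experimental builds must list /releases instead.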
+ + Returns: + Dict with version, release_url, and changes + """ + try: + url = 'https://api.github.com/repos/alekspetrov/navigator/releases/latest' + + req = request.Request(url) + req.add_header('User-Agent', 'Navigator-Version-Detector') + + with request.urlopen(req, timeout=10) as response: + data = json.load(response) + + # Extract version from tag_name (e.g., "v3.3.0" → "3.3.0") + tag_name = data.get('tag_name', '') + version = tag_name.lstrip('v') + + # Parse release notes for key changes + body = data.get('body', '') + changes = parse_release_notes(body) + + return { + 'version': version, + 'release_url': data.get('html_url', ''), + 'release_date': data.get('published_at', '').split('T')[0], + 'changes': changes + } + except Exception as e: + return { + 'version': None, + 'error': str(e) + } + + +def parse_release_notes(body: str) -> Dict: + """ + Parse release notes to extract key changes. + + Args: + body: Release notes markdown + + Returns: + Dict with new_skills, updated_skills, new_features, breaking_changes + """ + changes = { + 'new_skills': [], + 'updated_skills': [], + 'new_features': [], + 'breaking_changes': [] + } + + # Extract new skills + skill_pattern = r'-\s+\*\*(\w+-[\w-]+)\*\*:.*\(NEW\)' + for match in re.finditer(skill_pattern, body): + changes['new_skills'].append(match.group(1)) + + # Extract features from "What's New" section + features_section = re.search(r'##\s+.*What.*s New(.*?)(?=##|\Z)', body, re.DOTALL | re.IGNORECASE) + if features_section: + # Find bullet points + for line in features_section.group(1).split('\n'): + if line.strip().startswith('-') or line.strip().startswith('*'): + feature = line.strip().lstrip('-*').strip() + if feature and len(feature) < 100: # Reasonable feature description + changes['new_features'].append(feature) + + # Check for breaking changes + if 'breaking change' in body.lower() or '⚠️' in body: + breaking_section = re.search(r'##\s+.*Breaking.*Changes(.*?)(?=##|\Z)', body, re.DOTALL | re.IGNORECASE) + if breaking_section: + for line in breaking_section.group(1).split('\n'): + if line.strip().startswith('-') or line.strip().startswith('*'): + change = line.strip().lstrip('-*').strip() + if change: + changes['breaking_changes'].append(change) + + return changes + + +def compare_versions(current: str, latest: str) -> int: + """ + Compare two semantic versions. + + Args: + current: Current version (e.g., "3.2.0") + latest: Latest version (e.g., "3.3.0") + + Returns: + -1 if current < latest (update available) + 0 if current == latest (up to date) + 1 if current > latest (ahead of latest, e.g., dev version) + """ + try: + current_parts = [int(x) for x in current.split('.')] + latest_parts = [int(x) for x in latest.split('.')] + + # Pad to same length + while len(current_parts) < len(latest_parts): + current_parts.append(0) + while len(latest_parts) < len(current_parts): + latest_parts.append(0) + + # Compare + for c, l in zip(current_parts, latest_parts): + if c < l: + return -1 + elif c > l: + return 1 + + return 0 + except (ValueError, AttributeError): + return 0 # Can't compare, assume equal + + +def detect_version() -> Dict: + """ + Detect current and latest Navigator versions. 
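+
+    The current version comes from `claude plugin list`, falling back
+    to the bundled plugin.json; the latest version comes from the
+    GitHub releases API.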
+ + Returns: + Complete version detection report + """ + # Get current version + current_version = get_current_version() + + if not current_version: + # Fallback to plugin.json + current_version = get_plugin_json_version() + + # Get latest version from GitHub + latest_info = get_latest_version_from_github() + latest_version = latest_info.get('version') + + # Determine if update available + update_available = False + if current_version and latest_version: + comparison = compare_versions(current_version, latest_version) + update_available = (comparison == -1) + + # Build report + report = { + 'current_version': current_version, + 'latest_version': latest_version, + 'update_available': update_available, + 'release_url': latest_info.get('release_url', ''), + 'release_date': latest_info.get('release_date', ''), + 'changes': latest_info.get('changes', {}), + 'error': latest_info.get('error'), + 'recommendation': get_recommendation(current_version, latest_version, update_available) + } + + return report + + +def get_recommendation(current: Optional[str], latest: Optional[str], update_available: bool) -> str: + """Generate recommendation based on version status.""" + if not current: + return "Navigator not detected. Install: /plugin marketplace add alekspetrov/navigator && /plugin install navigator" + + if not latest: + return "Could not check for updates. Try again later or check GitHub releases manually." + + if update_available: + return f"Update recommended: v{current} → v{latest}. Run: /plugin update navigator" + + return f"You're on the latest version (v{current}). No update needed." + + +def main(): + """CLI entry point.""" + report = detect_version() + + # Output as JSON + print(json.dumps(report, indent=2)) + + # Exit with code + # 0 = up to date + # 1 = update available + # 2 = error + if report.get('error'): + sys.exit(2) + elif report.get('update_available'): + sys.exit(1) + else: + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/skills/plugin-slash-command/SKILL.md b/skills/plugin-slash-command/SKILL.md new file mode 100644 index 0000000..5742408 --- /dev/null +++ b/skills/plugin-slash-command/SKILL.md @@ -0,0 +1,484 @@ +--- +name: plugin-slash-command +description: Generate new Navigator slash commands following project conventions. Use when user says "add slash command", "create command", "new /nav command", or "add /nav:[name] command". +allowed-tools: Read, Write, Edit, Grep, Glob, Bash +version: 1.0.0 +--- + +# Navigator Slash Command Generator + +Generate new slash commands for the Navigator plugin following established conventions and patterns. + +## When to Invoke + +Auto-invoke when user says: +- "Add a slash command for..." +- "Create a new /nav:[name] command" +- "Generate slash command..." +- "Add /nav:[feature] command" +- "New Navigator command for..." + +## What This Does + +1. Asks for command details (name, purpose, complexity) +2. Analyzes existing commands for pattern matching +3. Generates command markdown file with proper structure +4. Validates YAML frontmatter and formatting +5. 
Shows usage example + +## Execution Steps + +### Step 1: Gather Command Requirements + +Ask user: +- **Command name** (kebab-case, without /nav: prefix) + - Example: "marker", "compact", "update-doc" +- **Command purpose** (one sentence description) + - Example: "Create context markers to save conversation state" +- **Command complexity**: + - Simple: Single action, minimal steps (e.g., marker) + - Medium: Multiple steps, some logic (e.g., compact) + - Complex: Multi-phase execution, integrations (e.g., init, start) +- **User-facing or internal**? + - User-facing: Part of standard Navigator workflow + - Internal: For plugin development/maintenance + +### Step 2: Analyze Similar Commands + +Use Task agent to find similar commands: +``` +"Find existing Navigator commands similar to [purpose]: + - Commands in commands/*.md + - Similar complexity level + - Common structure patterns + - Return 2-3 best examples" +``` + +**What to extract from examples**: +- Section structure (What This Does, When to Use, etc.) +- Tone and style (conversational, 2nd person) +- Emoji usage patterns +- Example format +- Troubleshooting patterns + +### Step 3: Design Command Structure + +Based on complexity level: + +**Simple commands**: +``` +- YAML frontmatter (description) +- Title +- What This Does (2-3 sentences) +- Usage (basic syntax + examples) +- When to Use (2-3 scenarios) +- Expected Output +- Troubleshooting (2-3 common issues) +- Closing statement +``` + +**Medium commands**: +``` +- YAML frontmatter +- Title + overview +- What This Does (detailed explanation) +- When to Use (5-6 scenarios with examples) +- Usage / Execution Steps +- Output Format +- Integration notes (if applicable) +- Troubleshooting (4-5 issues) +- Best Practices +- Closing statement +``` + +**Complex commands**: +``` +- YAML frontmatter +- Title + comprehensive overview +- What This Does (with comparisons) +- Execution Plan (multi-step) +- Pre-flight checks +- Step-by-step implementation +- Validation steps +- Integration with PM tools (if applicable) +- Success criteria +- Troubleshooting (comprehensive) +- Edge cases +- Performance notes +- Closing statement +``` + +### Step 4: Generate Command File + +**Use predefined function**: `functions/command_generator.py` + +```python +# Generates command markdown following Navigator conventions +generate_command( + name="[command-name]", + description="[one-line purpose]", + complexity="simple|medium|complex", + sections={ + "what_this_does": "...", + "when_to_use": [...], + "usage": "...", + "execution_steps": [...], + "troubleshooting": [...] + } +) +``` + +**File location**: `commands/[command-name].md` + +**Structure**: +1. YAML frontmatter with description +2. Title (# Command Name - Navigator Context) +3. Overview paragraph (what user is doing) +4. Content sections (based on complexity) +5. 
Closing statement (emoji + key takeaway) + +### Step 5: Validate Generated Command + +**Use predefined function**: `functions/command_validator.py` + +Checks: +- [ ] YAML frontmatter is valid +- [ ] Description field exists and is clear +- [ ] Markdown structure is correct +- [ ] All sections have content +- [ ] Code blocks are properly formatted +- [ ] Examples are realistic +- [ ] Troubleshooting addresses real issues +- [ ] Tone matches Navigator style (conversational, helpful) +- [ ] Emoji usage is consistent with other commands +- [ ] File name matches command name (kebab-case) + +### Step 6: Show Usage Example + +Display: +``` +✅ Slash Command Created: /nav:[command-name] + +File: commands/[command-name].md +Lines: [count] +Complexity: [simple/medium/complex] + +Try it: +/nav:[command-name] + +What it does: +[brief description] + +Next steps: +1. Test the command manually +2. Update plugin documentation if public-facing +3. Add to CLAUDE.md command list if needed +``` + +--- + +## Example Workflows + +### Example 1: Simple Command + +**User**: "Add a slash command for showing Navigator version" + +**Execution**: + +1. **Gather requirements**: + ``` + Name: version + Purpose: Display current Navigator plugin version + Complexity: Simple + Type: User-facing + ``` + +2. **Analyze similar** (via Task agent): + ``` + Found similar: marker.md (simple, informational) + Pattern: Short description → Usage → When to use → Output + ``` + +3. **Design structure**: + ``` + - Frontmatter + - Title: "Navigator Version Info" + - What This Does: 1 paragraph + - Usage: Basic syntax + - When to Use: 2-3 scenarios + - Output Format: Example + - Closing statement + ``` + +4. **Generate**: `commands/version.md` + ```markdown + --- + description: Display Navigator plugin version and system info + --- + + # Navigator Version Info + + Check which version of Navigator you're running. + + ## What This Does + + Displays the current Navigator plugin version, Claude Code version, and system configuration. + + ## Usage + + ```bash + /nav:version + ``` + + ## When to Use + + **Reporting issues**: + - Include version info in bug reports + - Help maintainers reproduce issues + + **Checking updates**: + - See if you're on latest version + - Compare with marketplace version + + **System diagnostics**: + - Verify plugin installed correctly + - Check configuration loaded + + ## Output Format + + ``` + Navigator Plugin + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Version: 2.2.0 + Installed: /Users/you/.claude/plugins/jitd + Config: .agent/.nav-config.json + + System: + - Claude Code: 1.5.0 + - Project: /Users/you/project + - Platform: darwin + ``` + + **Keep your Navigator up to date for latest features** 🚀 + ``` + +5. **Validate**: ✅ All checks pass + +6. **Show usage**: + ``` + ✅ Slash Command Created: /nav:version + + File: commands/version.md + Lines: 45 + Complexity: Simple + + Try it: /nav:version + ``` + +### Example 2: Medium Command + +**User**: "Create a command for archiving old task documentation" + +**Execution**: + +1. **Gather requirements**: + ``` + Name: archive-tasks + Purpose: Move completed tasks to archive directory + Complexity: Medium (file operations, validation) + Type: User-facing + ``` + +2. **Analyze similar**: + ``` + Found: markers.md (file management, user selection) + Found: compact.md (multi-step process) + Pattern: Overview → Execution Plan → Steps → Validation + ``` + +3. 
**Design structure**: + ``` + - Frontmatter + - Title + overview + - What This Does (comparison with manual approach) + - When to Use (5 scenarios) + - Execution Plan (Step 1-4) + - Output Format + - Troubleshooting (4 issues) + - Best Practices + - Closing + ``` + +4. **Generate**: `commands/archive-tasks.md` (full content) + +5. **Validate**: ✅ All checks pass + +6. **Show usage**: Command ready to use + +--- + +## Output Format + +**After generating command, show**: + +``` +✅ Slash Command Created: /nav:[name] + +Structure: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📄 commands/[name].md + +Sections: +✅ YAML frontmatter +✅ Title and overview +✅ What This Does +✅ When to Use ([N] scenarios) +✅ Usage / Execution Plan +✅ [Additional sections based on complexity] +✅ Troubleshooting +✅ Closing statement +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Command: /nav:[name] +Purpose: [brief description] +Lines: [count] + +Test it: +/nav:[name] + +Documentation: +- Add to README.md if user-facing +- Update CLAUDE.md command list +- Add to plugin.json if needed +``` + +--- + +## Best Practices + +### Command Naming +- Use kebab-case (marker, update-doc, archive-tasks) +- Be specific but concise (not too generic, not too verbose) +- Match feature purpose (nav:compact for compacting) +- Avoid abbreviations unless very common + +### Description Writing +- One sentence, clear purpose +- Action-oriented ("Create", "Display", "Update") +- Mention key benefit or what it does +- Under 100 characters + +### Content Structure +- Start with user perspective ("You are...") +- Use 2nd person ("your task", "you can") +- Include realistic examples +- Show expected output +- Address common issues in troubleshooting + +### Tone and Style +- Conversational and helpful +- Use emojis for visual markers (✅❌📖🚀) +- Bold key terms and actions +- Code blocks for all commands/output +- Bullet lists for readability + +### Examples Quality +- Real-world scenarios (not toy examples) +- Show before/after when relevant +- Include expected output +- Cover common use cases +- Demonstrate Navigator benefits + +### Troubleshooting Section +- Address real issues users might encounter +- Provide specific solutions (not generic advice) +- Include verification commands +- Link to related docs if helpful + +--- + +## Common Command Patterns + +### Informational Commands +**Pattern**: Simple structure, quick output +**Examples**: version, status, list +**Sections**: Description → Usage → Output + +### Action Commands +**Pattern**: Execute something, show result +**Examples**: marker, compact, archive +**Sections**: Description → Execution → Validation → Result + +### Setup/Configuration Commands +**Pattern**: Multi-step process, checks +**Examples**: init, migrate, setup +**Sections**: Pre-flight → Steps → Validation → Troubleshooting + +### Management Commands +**Pattern**: User selection, operations, feedback +**Examples**: markers (list/load), tasks (list/select) +**Sections**: Overview → Modes → Operations → Results + +--- + +## Troubleshooting + +### Generated Command Too Short + +**Problem**: Command content is sparse, missing sections + +**Solutions**: +1. Increase complexity level (simple → medium) +2. Add more scenarios to "When to Use" +3. Expand troubleshooting section (more common issues) +4. Add Best Practices section +5. Include more examples + +### Command Doesn't Match Navigator Style + +**Problem**: Tone or structure feels off + +**Solutions**: +1. Re-analyze example commands (marker.md, compact.md) +2. 
Check emoji usage (should match existing patterns) +3. Verify 2nd person perspective ("You are...") +4. Ensure conversational tone (not technical manual) +5. Add personality to closing statement + +### YAML Validation Fails + +**Problem**: Invalid frontmatter + +**Solutions**: +1. Check YAML syntax (proper indentation) +2. Ensure `description` field exists +3. Verify no special characters break parsing +4. Test with: `python -c "import yaml; yaml.safe_load(open('commands/[name].md').read().split('---')[1])"` + +### Examples Are Too Generic + +**Problem**: Examples don't feel realistic + +**Solutions**: +1. Base examples on actual Navigator usage +2. Use real file paths (not /path/to/file) +3. Show actual output format (not [output here]) +4. Include context (why user would run this) + +--- + +## Success Criteria + +**This skill succeeds when**: +- [ ] Generated command file is syntactically valid +- [ ] YAML frontmatter passes validation +- [ ] All required sections present +- [ ] Tone matches Navigator style (conversational, helpful) +- [ ] Examples are realistic and useful +- [ ] Troubleshooting addresses real issues +- [ ] Command can be invoked in Claude Code +- [ ] Minimal manual editing needed (<10% of content) + +--- + +**The plugin-slash-command skill automates Navigator command creation, ensuring consistency and saving development time** 🔧 diff --git a/skills/plugin-slash-command/examples/medium-command-example.md b/skills/plugin-slash-command/examples/medium-command-example.md new file mode 100644 index 0000000..657c9d6 --- /dev/null +++ b/skills/plugin-slash-command/examples/medium-command-example.md @@ -0,0 +1,456 @@ +--- +description: Smart context compact - preserve essential Navigator markers and documentation context +--- + +# Navigator Smart Compact + +You are performing a context-optimized compact operation that preserves essential Navigator documentation markers. + +## What This Does + +**Regular `/compact`**: Clears all conversation history, loses context + +**Navigator `/nav:compact`**: +- Generates a **context marker** (snapshot of where you are) +- Saves marker to `.agent/.context-markers/` +- Shows you exactly how to resume +- Clears conversation history +- You restore context in your next session by reading the marker + +**The Magic**: Context markers compress your entire session (50+ messages, 130k tokens) into a focused summary (3k tokens) that captures only what matters: current task, decisions made, next steps. + +## How Context Markers Work + +Think of it like save points in a video game: + +``` +Before Compact: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +You: "Help me implement auth" +Claude: [50 messages of implementation] +You: "Now add OAuth" +Claude: [20 messages of OAuth work] +Total: 130k tokens, approaching limit + +After /nav:compact: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +✅ Marker saved: .agent/.context-markers/2025-10-12.md + +Contains: +- Task: TASK-45 (auth + OAuth) +- Status: OAuth integrated, needs testing +- Decisions: Using passport.js, JWT tokens +- Next: Write tests for OAuth flow +- 3k tokens + +Next session: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +You: Read @.agent/.context-markers/2025-10-12.md +Claude: *knows exactly where you left off* +You: "Write the OAuth tests" +Claude: *continues seamlessly* +``` + +**You never lose progress. 
The knowledge is preserved, just compressed.** + +## When to Use + +### ✅ Good Times to Compact + +**After isolated sub-tasks**: +- Just finished documentation update +- Created SOP for solved issue +- Archived feature implementation plan +- Completed debugging session + +**Before context switches**: +- Switching from feature A to feature B +- Moving from debugging to new feature +- Starting new sprint/milestone +- After research phase, before implementation + +**Token optimization**: +- Approaching 70% token usage +- Long conversation with repeated info +- After multiple /nav:update-doc operations + +### ❌ Bad Times to Compact + +**In middle of work**: +- Feature half-implemented +- Debugging complex issue +- Multiple related sub-tasks pending + +**Context still needed**: +- Next sub-task depends on current conversation +- Need to reference recent decisions +- Team discussion ongoing + +## Compact Process + +### Step 1: Identify Essential Context + +Scan conversation for: + +**Must preserve**: +- Current task ID (TASK-XX) +- Active feature/epic name +- Key technical decisions made +- Unresolved blockers/questions +- Next steps planned + +**Can clear**: +- Completed sub-tasks details +- Resolved debugging sessions +- Documentation already written +- Exploratory research (if documented) + +### Step 2: Generate Context Marker + +Create compact marker to preserve essentials: + +```markdown +# Navigator Context Marker (Post-Compact) + +**Session**: [Date/Time] +**Navigator**: .agent/DEVELOPMENT-README.md + +## Active Work +- **Task**: TASK-XX - [Feature Name] +- **Status**: [Phase/Progress] +- **Location**: [File/component being worked on] + +## Recent Decisions +- [Decision 1] +- [Decision 2] + +## Documentation State +- **Task docs**: [List updated docs] +- **System docs**: [List updated docs] +- **SOPs**: [List created SOPs] + +## Next Steps +1. [Next action] +2. [Following action] + +## Blockers +- [Blocker 1 if any] + +## Don't Load Again (Already Documented) +- [Doc 1] - Already in .agent/ +- [Doc 2] - Already in .agent/ + +--- +Load this context marker after compacting to resume efficiently. +``` + +### Step 3: Save Context Marker + +**IMPORTANT**: You MUST save the marker where the user can access it after compact. + +**Recommended**: Save to `.agent/.context-markers/` directory + +```bash +# Create directory if doesn't exist +mkdir -p .agent/.context-markers + +# Save with timestamp +Write( + file_path: ".agent/.context-markers/2025-10-12-143022-compact.md" + content: [context marker from Step 2] +) +``` + +### Step 3.5: Mark as Active Marker + +**NEW**: Create `.active` file to enable automatic resume + +```bash +# Create .active file pointing to this marker +echo "2025-10-12-143022-compact.md" > .agent/.context-markers/.active +``` + +**This enables**: `/nav:start` will auto-detect and load this marker + +**Show user the saved location**: +``` +✅ Context marker saved and marked as active: + .agent/.context-markers/2025-10-12-143022-compact.md + +This marker will be auto-loaded on next session start. +``` + +**Alternative locations**: + +**Option 2**: Append to current task doc (if task exists) +``` +Append to: .agent/tasks/TASK-XX-feature.md + +## Session Notes +### Compact Point - [Date] +[Context marker content] + +After compact: Read @.agent/tasks/TASK-XX-feature.md +``` + +**Option 3**: User clipboard (if no task doc yet) +``` +⚠️ No task doc exists yet. + +Copy this marker and paste it in your next session: + +[Show marker content] + +Or save it manually before compacting. 
+``` + +### Step 4: Show Resume Instructions + +**CRITICAL**: Tell the user exactly how to resume. + +``` +╔══════════════════════════════════════════════════════╗ +║ ║ +║ 🔄 Ready to Compact ║ +║ ║ +╚══════════════════════════════════════════════════════╝ + +✅ Context marker created and marked as active: + .agent/.context-markers/2025-10-12-143022-compact.md + +TO RESUME AFTER COMPACT: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Simply run: /nav:start + +This will automatically: +• Load navigator (.agent/DEVELOPMENT-README.md) +• Detect active marker +• Restore your context (~3k tokens) +• Load current task (if applicable) + +All in one command. No manual steps needed. + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Proceed with compact? Type 'yes' to continue. +``` + +**Wait for confirmation before compacting**. + +### Step 5: Perform Compact + +**Only after user confirms**, execute Claude Code's `/compact` command. + +### Step 6: Post-Compact Resume (For User's Next Session) + +**Immediately after compact**: + +1. **Load navigator** (always): + ``` + Read .agent/DEVELOPMENT-README.md (~2k tokens) + ``` + +2. **Load context marker**: + ``` + Read context marker from Step 2 + ``` + +3. **Load active task doc** (if exists): + ``` + Read .agent/tasks/TASK-XX-feature.md (~3k tokens) + ``` + +4. **Resume work**: Continue where left off + +**Total tokens loaded**: ~7k (vs 60k+ if keeping full conversation) + +## Compact Strategies + +### Aggressive (Compact Often) + +**When**: Token-constrained, switching tasks frequently + +**Trigger**: +- After every sub-task +- Before every new task +- Every 50% token usage + +**Trade-off**: More compacts, less context continuity + +**Best for**: Multiple short tasks, exploratory work + +### Conservative (Compact Rarely) + +**When**: Deep work on single feature, need context continuity + +**Trigger**: +- After major milestones only +- When reaching 70%+ tokens +- Between unrelated epics + +**Trade-off**: Fewer compacts, more token usage + +**Best for**: Complex features, long debugging sessions + +### Manual (User Decides) + +**When**: User knows when to compact + +**Trigger**: User runs `/nav:compact` explicitly + +**Trade-off**: Full control, requires judgment + +**Best for**: Experienced users, custom workflows + +## Configuration + +Set in `.agent/.nav-config.json`: + +```json +{ + "compact_strategy": "conservative", + "compact_trigger_percent": 70, + "save_context_markers": true, + "context_marker_location": ".agent/.context-markers/" +} +``` + +## Example Compact Scenarios + +### Scenario 1: Feature Complete + +``` +Before Compact: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Tokens: 65% (130k used) +Conversation: 50+ messages +Feature TASK-123 complete +Docs updated +Tests passing + +Action: /nav:compact +Reason: Feature done, docs archived, ready for next task + +After Compact: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Tokens: 5% (10k used) +- Navigator loaded (2k) +- Context marker (3k) +- Ready for TASK-124 + +Savings: 120k tokens freed (60% of budget) +``` + +### Scenario 2: Research → Implementation + +``` +Before Compact: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Tokens: 45% (90k used) +Research: Explored 5 different approaches +Decision: Chose approach #3 +Key findings: Documented in SOP + +Action: /nav:compact +Reason: Research done, documented, time to implement + +After Compact: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Tokens: 7% (14k used) +- Navigator (2k) +- Task doc with decision (3k) +- Relevant SOP (2k) +- Implementation ready + 
+Savings: 76k tokens freed +``` + +### Scenario 3: Multi-Task Day + +``` +Morning: +- TASK-101: Bug fix (15k tokens) +- /nav:compact +- TASK-102: New feature (25k tokens) +- /nav:compact + +Afternoon: +- TASK-103: Integration (20k tokens) +- /nav:compact +- TASK-104: Documentation (10k tokens) + +Total work: 4 tasks +Peak usage: 25k tokens (12.5%) +Without compact: Would hit 70k+ (35%), slower responses + +Benefit: Maintained fast responses all day +``` + +## Compact Checklist + +Before running `/nav:compact`: + +- [ ] Current task completed or at good stopping point +- [ ] Important decisions documented (task doc or SOP) +- [ ] No unresolved blockers requiring conversation context +- [ ] Ready to switch tasks or take break +- [ ] Context marker generated (if needed) + +After running `/nav:compact`: + +- [ ] Load navigator (.agent/DEVELOPMENT-README.md) +- [ ] Load context marker (if saved) +- [ ] Load active task doc (if continuing work) +- [ ] Verify ready to continue + +## Advanced: Auto-Compact + +**Future enhancement**: Automatically compact based on triggers + +```json +{ + "auto_compact": { + "enabled": false, + "triggers": { + "token_percent": 70, + "after_update_doc": true, + "between_tasks": true + }, + "require_confirmation": true + } +} +``` + +When trigger hit: +``` +⚠️ Navigator Auto-Compact Suggested + +Reason: Token usage at 71% +Action: Run /nav:compact to free 60k+ tokens + +Compact now? [Y/n]: +``` + +## Metrics + +Track compact efficiency: + +**Before Compact**: +- Tokens used: 130k (65%) +- Message count: 50+ +- Time: 2 hours + +**After Compact**: +- Tokens used: 10k (5%) +- Context preserved: Task doc + decision markers +- Ready for: Next task immediately + +**Savings**: +- 120k tokens freed +- 60% of budget reclaimed +- Fast responses restored + +--- + +**Remember**: Navigator compact preserves what matters (documented knowledge) and clears what doesn't (conversation history). This keeps your context lean and your sessions productive. diff --git a/skills/plugin-slash-command/examples/simple-command-example.md b/skills/plugin-slash-command/examples/simple-command-example.md new file mode 100644 index 0000000..17c7029 --- /dev/null +++ b/skills/plugin-slash-command/examples/simple-command-example.md @@ -0,0 +1,642 @@ +--- +description: Create context markers on-demand - save your progress anytime +--- + +# Navigator Marker - Save Points for Your Conversation + +Create context markers during work to capture your current state. Think of it as **git commits for your AI conversation**. + +--- + +## What This Does + +Creates a snapshot of your current work state that you can restore later. 
+ +**Traditional approach**: Work until compact, lose intermediate context + +**With markers**: +- Save progress anytime +- Multiple markers per session +- Resume from any point +- Safety nets before risky changes + +--- + +## When to Use + +### ✅ Perfect Times for Markers + +**Before taking breaks**: +``` +You: "Implemented auth flow, going to lunch" +You: /nav:marker lunch-break +Result: Resume perfectly after lunch +``` + +**Before exploring approaches**: +``` +You: /nav:marker before-refactor +You: "Let's try refactoring X" +*doesn't work* +You: Read @.agent/.context-markers/before-refactor.md +Result: Back to known good state +``` + +**During long features**: +``` +Day 1: Core implementation → /nav:marker day1-core +Day 2: Add integrations → /nav:marker day2-integrations +Day 3: Tests & polish → /nav:marker day3-complete +Result: Checkpoints throughout multi-day work +``` + +**Before risky changes**: +``` +You: "About to refactor entire routing system" +You: /nav:marker pre-routing-refactor +Result: Safety net if things go wrong +``` + +**End of day**: +``` +You: /nav:marker eod-2025-10-12 +Result: Tomorrow starts with perfect context +``` + +**After important decisions**: +``` +You: "We decided to use PostgreSQL instead of MongoDB" +You: /nav:marker architecture-decision +Result: Decision captured with full context +``` + +--- + +## Usage + +### Basic Usage + +```bash +/nav:marker +``` + +Creates marker with auto-generated name: `marker-2025-10-12-143022.md` + +### Named Markers + +```bash +/nav:marker before-refactor +/nav:marker lunch-break +/nav:marker pre-deployment +/nav:marker day1-complete +``` + +Creates marker with your name: `before-refactor-2025-10-12-143022.md` + +### With Description + +```bash +/nav:marker oauth-working "OAuth flow implemented and tested" +``` + +Adds description to marker content. 
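+
+Putting it together — a rough sketch of how the name and description map onto the saved file (the timestamp here is hypothetical; the real one comes from the creation time):
+
+```
+/nav:marker oauth-working "OAuth flow implemented and tested"
+  → saves .agent/.context-markers/oauth-working-2025-10-12-143022.md
+  → the description is embedded in the marker content
+```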
+ +--- + +## Marker Creation Process + +### Step 1: Analyze Current State + +Scan conversation for: + +**Active work**: +- Current task/feature +- Files being modified +- What's implemented +- What's remaining + +**Recent context**: +- Technical decisions made +- Approaches tried (successful and failed) +- Dependencies added +- Blockers encountered + +**Next steps**: +- What you planned to do next +- Open questions +- Ideas to explore + +### Step 2: Generate Marker Content + +Create comprehensive marker: + +```markdown +# Navigator Context Marker: [Name] + +**Created**: 2025-10-12 14:30:22 +**Type**: On-demand marker +**Navigator**: .agent/DEVELOPMENT-README.md + +--- + +## 📍 Current Location + +**Task**: TASK-123 - Implement OAuth authentication +**Phase**: Integration complete, testing pending +**Files**: +- src/auth/oauth.ts (implemented) +- src/routes/auth.ts (updated) +- tests/auth.test.ts (needs work) + +**Progress**: 70% complete + +--- + +## 🎯 What's Done + +- ✅ OAuth flow implemented with passport.js +- ✅ JWT token generation working +- ✅ Login/logout endpoints created +- ✅ Session management configured +- ✅ Google OAuth provider integrated + +--- + +## 🔧 Technical Decisions + +**OAuth Library**: Chose passport.js over next-auth +- Reason: More control over flow, simpler for our use case +- Trade-off: More manual config, but cleaner integration + +**Token Strategy**: JWT in httpOnly cookies +- Reason: XSS protection, no localStorage needed +- Expiration: 7 days, refresh token pattern + +**Session Store**: Redis +- Reason: Fast, scalable, easy invalidation +- Config: TTL matches JWT expiration + +--- + +## ⚠️ Challenges & Solutions + +**Challenge**: CORS issues with OAuth callback +**Solution**: Added credentials: 'include' and proper CORS headers +**File**: src/middleware/cors.ts + +**Challenge**: Token not persisting across requests +**Solution**: Missing httpOnly flag in cookie options +**File**: src/auth/tokens.ts:45 + +--- + +## 📝 Next Steps + +1. Write comprehensive tests for OAuth flow + - Happy path: successful login + - Error cases: invalid tokens, expired sessions + - Edge cases: concurrent logins, token refresh + +2. Add error handling for failed OAuth + - Network errors + - Provider downtime + - Invalid credentials + +3. Document OAuth setup in README + - Environment variables needed + - Provider setup instructions + - Local development flow + +--- + +## 🔗 Related Documentation + +**Already documented**: +- .agent/system/auth-architecture.md - Auth system design +- .agent/sops/integrations/oauth-setup.md - OAuth provider config + +**Needs documentation**: +- Testing strategy for OAuth +- Production deployment checklist + +--- + +## 💡 Ideas to Explore + +- Add more OAuth providers (GitHub, Twitter) +- Implement OAuth scope management +- Add "Sign in with" UI component library +- Consider magic link as alternative auth method + +--- + +## 🚫 Don't Load Again + +These are already documented, don't reload: +- .agent/tasks/TASK-123-oauth-auth.md +- .agent/system/auth-architecture.md +- Previous markers (if any) + +--- + +## 🔄 Restore Instructions + +To resume from this marker: + +1. Read this marker: + ``` + Read @.agent/.context-markers/oauth-working-2025-10-12-143022.md + ``` + +2. 
Continue with: "Write tests for OAuth flow" + +**Context restored in ~3k tokens instead of replaying 130k token conversation.** + +--- + +Generated by: /nav:marker oauth-working +``` + +### Step 3: Save Marker + +**Create directory if needed**: +```bash +mkdir -p .agent/.context-markers +``` + +**Save with naming convention**: +``` +Format: [user-name]-YYYY-MM-DD-HHMMSS.md +Example: oauth-working-2025-10-12-143022.md +``` + +**Write marker**: +``` +Write( + file_path: ".agent/.context-markers/oauth-working-2025-10-12-143022.md" + content: [marker content from Step 2] +) +``` + +### Step 4: Confirm to User + +Show clear confirmation: + +``` +✅ Marker saved! + +📍 Location: .agent/.context-markers/oauth-working-2025-10-12-143022.md + +🔄 To restore this state later: + Read @.agent/.context-markers/oauth-working-2025-10-12-143022.md + +💾 Marker size: ~3k tokens +📊 Current session: ~85k tokens + +Tip: You can continue working or use /nav:compact to free up space. +``` + +--- + +## Advanced Features + +### List All Markers + +```bash +/nav:marker list +``` + +Shows all available markers: + +``` +📍 Available Context Markers + +Recent markers (last 7 days): +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +1. oauth-working-2025-10-12-143022.md + Created: 2 hours ago + Task: TASK-123 - OAuth authentication + Size: 3.2k tokens + +2. before-refactor-2025-10-12-091500.md + Created: 7 hours ago + Task: TASK-122 - Routing refactor + Size: 2.8k tokens + +3. day1-complete-2025-10-11-170000.md + Created: yesterday + Task: TASK-121 - User dashboard + Size: 3.5k tokens + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Total: 3 markers | Combined size: 9.5k tokens + +To restore: Read @.agent/.context-markers/[filename] +To clean up: /nav:marker clean +``` + +### Clean Old Markers + +```bash +/nav:marker clean +``` + +Interactive cleanup: + +``` +🧹 Marker Cleanup + +Found 15 markers older than 7 days: +- [list with dates and sizes] + +Keep only: +1. Last 7 days (recommended) +2. Last 30 days +3. Keep all, just show me +4. Custom selection + +Choice [1-4]: +``` + +### Compare Markers + +```bash +/nav:marker diff oauth-working before-refactor +``` + +Shows what changed between two markers: + +``` +📊 Marker Comparison + +From: before-refactor (7 hours ago) +To: oauth-working (2 hours ago) + +Changes: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Tasks: + - Completed: TASK-122 (routing refactor) + + Started: TASK-123 (OAuth auth) + +Files Modified: + + src/auth/oauth.ts (new) + + src/auth/tokens.ts (new) + ~ src/routes/auth.ts (modified) + +Decisions Made: + + Using passport.js for OAuth + + JWT in httpOnly cookies + + Redis for session storage + +Progress: 30% → 70% (task) +``` + +--- + +## Marker Strategies + +### Checkpoint Strategy + +Create markers at natural checkpoints: + +``` +Feature Planning: +/nav:marker planning-complete + +Core Implementation: +/nav:marker core-working + +Integration: +/nav:marker integration-done + +Testing: +/nav:marker tests-passing + +Ready for Review: +/nav:marker ready-for-pr +``` + +**Benefit**: Clear progression, easy to resume at any stage + +### Daily Markers + +End each day with a marker: + +```bash +/nav:marker eod-2025-10-12 "Finished OAuth, need tests tomorrow" +``` + +**Benefit**: Perfect context on Monday for Friday's work + +### Experiment Markers + +Before trying new approaches: + +```bash +/nav:marker before-experiment +# Try risky refactor +# Doesn't work? 
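+# (e.g., tests fail or behavior regresses)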
+# Restore from marker, try different approach +``` + +**Benefit**: Safe exploration, easy rollback + +### Decision Markers + +After important decisions: + +```bash +/nav:marker architecture-decision "Chose PostgreSQL over MongoDB" +``` + +**Benefit**: Capture why decisions were made with full context + +--- + +## Marker Best Practices + +### ✅ Do + +- Create markers before breaks (lunch, end of day) +- Name markers descriptively (`oauth-working` not `marker-1`) +- Add descriptions for important markers +- Clean up old markers monthly +- Use markers as conversation save points + +### ❌ Don't + +- Don't create markers every 5 minutes (too granular) +- Don't use generic names (`test`, `stuff`, `work`) +- Don't forget to clean up (markers accumulate) +- Don't rely solely on markers (still commit code!) + +--- + +## Integration with Navigator Workflow + +### Markers + Compact + +``` +Work on feature → /nav:marker feature-done +Continue to polish → Token usage high +/nav:compact +Result: Marker preserved, conversation cleared +``` + +**Benefit**: Markers survive compacts + +### Markers + Tasks + +``` +Start task → Load task doc +Make progress → /nav:marker progress-update +Complete → /nav:update-doc feature TASK-XX +``` + +**Benefit**: Markers complement task documentation + +### Markers + SOPs + +``` +Hit bug → Debug → Solve +/nav:marker bug-solved "Fixed CORS issue with OAuth" +/nav:update-doc sop debugging cors-oauth-fix +``` + +**Benefit**: Markers capture point-in-time, SOPs capture solution + +--- + +## Technical Implementation + +### Marker Storage + +``` +.agent/.context-markers/ +├── oauth-working-2025-10-12-143022.md +├── before-refactor-2025-10-12-091500.md +├── day1-complete-2025-10-11-170000.md +└── .gitkeep +``` + +**Naming**: `[name]-YYYY-MM-DD-HHMMSS.md` + +**Size**: ~3k tokens each + +**Git**: Ignored by default (in .gitignore) + +### Marker Format + +**Required sections**: +- Current Location (task, files, progress) +- What's Done (achievements) +- Technical Decisions (with rationale) +- Next Steps (what's remaining) +- Restore Instructions (how to resume) + +**Optional sections**: +- Challenges & Solutions +- Ideas to Explore +- Related Documentation + +--- + +## Examples + +### Example 1: End of Day Marker + +```bash +You: "Finished implementing user settings page, need to add tests tomorrow" +You: /nav:marker eod-settings-done + +Result: +✅ Marker saved: .agent/.context-markers/eod-settings-done-2025-10-12-170000.md + +Tomorrow: Read @.agent/.context-markers/eod-settings-done-2025-10-12-170000.md +``` + +### Example 2: Before Risky Change + +```bash +You: "Current routing works. About to refactor to use new router" +You: /nav:marker before-routing-refactor +You: "Refactor the routing system to use express-router" + +*After testing...* + +You: "The refactor broke auth. 
Let me restore" +You: Read @.agent/.context-markers/before-routing-refactor.md +You: "Take different approach - migrate gradually" +``` + +### Example 3: Multi-Day Feature + +```bash +Monday: +You: /nav:marker day1-foundation "Built database models and API structure" + +Tuesday: +You: Read @.agent/.context-markers/day1-foundation.md +You: *continues work* +You: /nav:marker day2-integration "Integrated with frontend, working on auth" + +Wednesday: +You: Read @.agent/.context-markers/day2-integration.md +You: *continues work* +You: /nav:marker day3-complete "Feature complete, tests passing" +``` + +--- + +## Success Metrics + +**Without markers**: +- Resume after break: 5-10 min re-explaining context +- Session restart: Lose all context +- Risky changes: No safety net +- Multi-day work: Fragmented understanding + +**With markers**: +- Resume after break: 30 seconds (read marker) +- Session restart: Full context restored +- Risky changes: Rollback point available +- Multi-day work: Continuous context thread + +**Token efficiency**: +- Marker: 3k tokens to restore full context +- Re-explaining: 20-30k tokens of back-and-forth +- **Savings**: 85-90% fewer tokens to resume + +--- + +## Future Enhancements + +**Auto-markers**: +```json +{ + "auto_marker": { + "on_task_complete": true, + "on_break_detected": true, + "every_n_hours": 2 + } +} +``` + +**Marker search**: +```bash +/nav:marker search "OAuth" +# Returns all markers mentioning OAuth +``` + +**Marker merge**: +```bash +/nav:marker merge day1 day2 day3 → feature-complete +# Combines multiple markers into one +``` + +--- + +**Markers transform your AI workflow from stateless to stateful. Never lose context again.** 🎯 diff --git a/skills/plugin-slash-command/functions/command_generator.py b/skills/plugin-slash-command/functions/command_generator.py new file mode 100644 index 0000000..38533bb --- /dev/null +++ b/skills/plugin-slash-command/functions/command_generator.py @@ -0,0 +1,430 @@ +#!/usr/bin/env python3 +""" +Command Generator - Generate Navigator slash command markdown files + +Generates properly structured command files following Navigator conventions. +""" + +from typing import Dict, List, Optional +from datetime import datetime + + +def generate_command( + name: str, + description: str, + complexity: str = "medium", + sections: Optional[Dict] = None +) -> str: + """ + Generate complete Navigator command markdown file. + + Args: + name: Command name (kebab-case, without /nav: prefix) + description: One-line purpose description + complexity: Command complexity level ("simple", "medium", "complex") + sections: Dictionary of section content (optional, uses templates if not provided) + + Returns: + Complete markdown content for the command file + + Example: + >>> content = generate_command( + ... name="example", + ... description="Example command for testing", + ... complexity="simple" + ... 
) + >>> "---" in content and "description:" in content + True + """ + if sections is None: + sections = {} + + # Validate inputs + valid, error = validate_command_name(name) + if not valid: + raise ValueError(f"Invalid command name: {error}") + + if complexity not in ["simple", "medium", "complex"]: + raise ValueError(f"Complexity must be 'simple', 'medium', or 'complex', got: {complexity}") + + # Generate frontmatter + frontmatter = f"""--- +description: {description} +---""" + + # Generate title + title = f"# {format_title(name)}" + + # Generate content based on complexity + if complexity == "simple": + content = generate_simple_command(name, description, sections) + elif complexity == "medium": + content = generate_medium_command(name, description, sections) + else: # complex + content = generate_complex_command(name, description, sections) + + # Combine all parts + return f"{frontmatter}\n\n{title}\n\n{content}" + + +def generate_simple_command(name: str, description: str, sections: Dict) -> str: + """Generate content for a simple command.""" + what_this_does = sections.get("what_this_does", f"[Explain what /nav:{name} does in 2-3 sentences]") + usage = sections.get("usage", f"/nav:{name}") + when_to_use = sections.get("when_to_use", [ + "Scenario 1", + "Scenario 2", + "Scenario 3" + ]) + output_format = sections.get("output_format", "[Example output]") + troubleshooting = sections.get("troubleshooting", { + "Issue 1": "Solution 1", + "Issue 2": "Solution 2" + }) + + # Build when_to_use section + when_to_use_content = "\n\n".join([ + f"**{scenario}**:\n```\n[Example]\n```" for scenario in when_to_use + ]) + + # Build troubleshooting section + troubleshooting_content = "\n\n".join([ + f"### {issue}\n\n**Problem**: [Description]\n\n**Solution**:\n{solution}" + for issue, solution in troubleshooting.items() + ]) + + return f"""## What This Does + +{what_this_does} + +--- + +## Usage + +```bash +{usage} +``` + +--- + +## When to Use + +{when_to_use_content} + +--- + +## Output Format + +``` +{output_format} +``` + +--- + +## Troubleshooting + +{troubleshooting_content} + +--- + +**[Closing statement about the command]** 🚀""" + + +def generate_medium_command(name: str, description: str, sections: Dict) -> str: + """Generate content for a medium complexity command.""" + overview = sections.get("overview", f"You are using Navigator's `/nav:{name}` command.\n\n[Explain context and purpose]") + what_this_does = sections.get("what_this_does", "[Detailed explanation with comparisons]") + when_to_use = sections.get("when_to_use", [f"Scenario {i+1}" for i in range(5)]) + execution_steps = sections.get("execution_steps", [f"Step {i+1}" for i in range(3)]) + troubleshooting = sections.get("troubleshooting", {f"Issue {i+1}": f"Solution {i+1}" for i in range(4)}) + + # Build when_to_use section + when_to_use_content = "\n\n".join([ + f"**{scenario}**:\n```\n[Example]\n```" for scenario in when_to_use + ]) + + # Build execution steps + execution_content = "\n\n".join([ + f"### {step}\n\n[Instructions for this step]\n\n**Expected outcome**: [What happens]" + for step in execution_steps + ]) + + # Build troubleshooting + troubleshooting_content = "\n\n".join([ + f"### {issue}\n\n**Problem**: [Description]\n\n**Solutions**:\n1. {solution}\n2. [Additional solution]\n3. 
[Additional solution]" + for issue, solution in troubleshooting.items() + ]) + + return f"""{overview} + +--- + +## What This Does + +{what_this_does} + +--- + +## When to Use + +{when_to_use_content} + +--- + +## Execution Steps + +{execution_content} + +--- + +## Output Format + +``` +[Expected output format] +``` + +--- + +## Best Practices + +- [Best practice 1] +- [Best practice 2] +- [Best practice 3] + +--- + +## Troubleshooting + +{troubleshooting_content} + +--- + +**[Closing statement emphasizing key benefit]** 🚀""" + + +def generate_complex_command(name: str, description: str, sections: Dict) -> str: + """Generate content for a complex command.""" + return f"""You are executing the `/nav:{name}` command. + +[Comprehensive overview explaining the command's role in Navigator workflow] + +--- + +## What This Does + +[Detailed explanation with comparisons to alternative approaches] + +**Traditional approach**: [Manual process] + +**With `/nav:{name}`**: +- [Benefit 1] +- [Benefit 2] +- [Benefit 3] + +--- + +## EXECUTION PLAN + +You will execute these steps in order. Each step has explicit outcomes. + +--- + +### Step 1: Pre-Flight Checks + +[Validation and preparation steps] + +**Checks**: +- [ ] Check 1 +- [ ] Check 2 +- [ ] Check 3 + +--- + +### Step 2: [Main Operation] + +[Detailed implementation instructions] + +**Process**: +1. [Substep 1] +2. [Substep 2] +3. [Substep 3] + +**Expected outcome**: [What should happen] + +--- + +### Step 3: Validation + +[Verification steps] + +**Verify**: +- [ ] Verification 1 +- [ ] Verification 2 +- [ ] Verification 3 + +--- + +### Step 4: Completion + +[Finalization and user feedback] + +**Show summary**: +``` +✅ [Success message] + +[Summary of what was accomplished] +``` + +--- + +## Integration Notes + +[How this command integrates with other Navigator features or external tools] + +--- + +## Success Criteria + +**This command succeeds when**: +- [ ] Criterion 1 +- [ ] Criterion 2 +- [ ] Criterion 3 +- [ ] Criterion 4 + +--- + +## Troubleshooting + +### Common Issue 1 + +**Error**: [Error message or symptom] + +**Solution**: +[Detailed solution with commands] + +### Common Issue 2 + +**Error**: [Error message or symptom] + +**Solution**: +[Detailed solution] + +### Edge Case 1 + +**Scenario**: [When this happens] + +**Handling**: +[How to handle this case] + +--- + +## Performance Notes + +[Any performance considerations, optimization tips, or scalability notes] + +--- + +**[Comprehensive closing statement]** 🚀""" + + +def validate_command_name(name: str) -> tuple[bool, Optional[str]]: + """ + Validate command name follows Navigator conventions. + + Args: + name: Command name to validate + + Returns: + Tuple of (is_valid, error_message) + + Example: + >>> validate_command_name("my-command") + (True, None) + >>> validate_command_name("MyCommand") + (False, 'Command name must be kebab-case') + """ + import re + + if not name: + return False, "Command name cannot be empty" + + if not re.match(r'^[a-z][a-z0-9]*(-[a-z0-9]+)*$', name): + return False, "Command name must be kebab-case (lowercase, hyphens only)" + + if len(name) > 50: + return False, "Command name too long (max 50 characters)" + + # Reserved names + reserved = ["help", "clear", "reset"] + if name in reserved: + return False, f"Command name '{name}' is reserved" + + return True, None + + +def format_title(name: str) -> str: + """ + Format command name as title. 
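+    Converts kebab-case to Title Case and appends the " - Navigator" branding suffix.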
+ + Args: + name: Command name (kebab-case) + + Returns: + Formatted title string + + Example: + >>> format_title("update-doc") + 'Update Doc - Navigator' + >>> format_title("marker") + 'Marker - Navigator' + """ + # Convert kebab-case to Title Case + title = name.replace('-', ' ').title() + + # Add Navigator branding + return f"{title} - Navigator" + + +def generate_description(name: str, purpose: str) -> str: + """ + Generate command description for YAML frontmatter. + + Args: + name: Command name + purpose: Brief purpose statement + + Returns: + Formatted description (under 100 chars) + + Example: + >>> desc = generate_description("marker", "save conversation state") + >>> len(desc) < 100 + True + """ + # Ensure it starts with a verb and is concise + if len(purpose) > 90: + purpose = purpose[:87] + "..." + + return purpose + + +if __name__ == "__main__": + # Example usage + print("Generating simple command...") + simple = generate_command( + name="example", + description="Example command for demonstration", + complexity="simple" + ) + + print("\n" + "=" * 50) + print(simple[:500] + "...") + print("=" * 50) + + # Validate names + test_names = ["my-command", "MyCommand", "my_command", "valid-name-123"] + print("\nValidation Tests:") + for name in test_names: + valid, error = validate_command_name(name) + status = "✅" if valid else "❌" + print(f"{status} {name}: {error or 'Valid'}") diff --git a/skills/plugin-slash-command/functions/command_validator.py b/skills/plugin-slash-command/functions/command_validator.py new file mode 100644 index 0000000..40c8bdc --- /dev/null +++ b/skills/plugin-slash-command/functions/command_validator.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +""" +Command Validator - Validate Navigator slash command files + +Validates command markdown files follow Navigator conventions and standards. +""" + +import re +from typing import List, Tuple, Optional +from pathlib import Path + + +def validate_command_file(file_path: str) -> Tuple[bool, List[str]]: + """ + Validate complete command markdown file. + + Args: + file_path: Path to command .md file + + Returns: + Tuple of (is_valid, list_of_errors) + + Example: + >>> valid, errors = validate_command_file("commands/marker.md") + >>> valid or len(errors) > 0 + True + """ + errors = [] + + # Check file exists + path = Path(file_path) + if not path.exists(): + return False, [f"File not found: {file_path}"] + + # Read content + try: + content = path.read_text() + except Exception as e: + return False, [f"Cannot read file: {e}"] + + # Validate sections + errors.extend(validate_frontmatter(content)) + errors.extend(validate_structure(content)) + errors.extend(validate_formatting(content)) + errors.extend(validate_style(content)) + + return len(errors) == 0, errors + + +def validate_frontmatter(content: str) -> List[str]: + """ + Validate YAML frontmatter. 
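+    Verifies the leading '---' block exists and that 'description' is present, non-empty, and at most 150 characters, with no unexpected fields.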
+
+    Args:
+        content: File content
+
+    Returns:
+        List of errors (empty if valid)
+    """
+    errors = []
+
+    # Check frontmatter exists
+    if not content.startswith("---"):
+        errors.append("Missing YAML frontmatter (must start with '---')")
+        return errors
+
+    # Extract frontmatter
+    parts = content.split("---", 2)
+    if len(parts) < 3:
+        errors.append("Invalid frontmatter structure (must be surrounded by '---')")
+        return errors
+
+    frontmatter = parts[1].strip()
+
+    # Check description field
+    if "description:" not in frontmatter:
+        errors.append("Missing 'description' field in frontmatter")
+    else:
+        # Extract description value
+        desc_match = re.search(r'description:\s*(.+)', frontmatter)
+        if desc_match:
+            desc = desc_match.group(1).strip()
+            if not desc:
+                errors.append("Description field is empty")
+            elif len(desc) > 150:
+                errors.append(f"Description too long ({len(desc)} chars, max 150)")
+        else:
+            errors.append("Cannot parse description field")
+
+    # Check for invalid fields (Navigator commands use minimal frontmatter)
+    valid_fields = ["description", "author", "version", "deprecated"]
+    for line in frontmatter.split("\n"):
+        if ":" in line:
+            field = line.split(":")[0].strip()
+            if field and field not in valid_fields:
+                errors.append(f"Unexpected frontmatter field: '{field}'")
+
+    return errors
+
+
+def validate_structure(content: str) -> List[str]:
+    """
+    Validate document structure and required sections.
+
+    Args:
+        content: File content
+
+    Returns:
+        List of errors (empty if valid)
+    """
+    errors = []
+
+    # Extract markdown body (after frontmatter)
+    parts = content.split("---", 2)
+    if len(parts) < 3:
+        return ["Cannot extract markdown body"]
+
+    body = parts[2].strip()
+
+    # Check for title (# heading)
+    if not body.startswith("#"):
+        errors.append("Missing main title (must start with # heading)")
+    else:
+        title_match = re.match(r'^#\s+(.+)$', body.split("\n")[0])
+        if not title_match:
+            errors.append("Invalid title format")
+        else:
+            title = title_match.group(1)
+            # Navigator commands typically end with " - Navigator" or context
+            if "navigator" not in title.lower() and "jitd" not in title.lower():
+                errors.append(f"Title should include Navigator branding: '{title}'")
+
+    # Check for required sections (vary by complexity, so we check for minimum)
+    required_keywords = ["what", "usage", "when"]
+    for keyword in required_keywords:
+        if keyword.lower() not in body.lower():
+            errors.append(f"Missing section with '{keyword}' (recommended sections: What This Does, Usage, When to Use)")
+
+    # Check for code blocks (commands should have examples)
+    if "```" not in body:
+        errors.append("No code blocks found (commands should include examples)")
+
+    # Check for closing statement (bold text, optionally followed by a trailing emoji)
+    last_line = body.strip().split("\n")[-1]
+    if not last_line.startswith("**") or "**" not in last_line[2:]:
+        errors.append("Missing closing statement (should be bold text at end)")
+
+    return errors
+
+
+def validate_formatting(content: str) -> List[str]:
+    """
+    Validate markdown formatting and syntax.
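+    Covers heading-level jumps, unbalanced code fences, malformed list bullets, and markdown links with empty targets.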
+
+    Args:
+        content: File content
+
+    Returns:
+        List of errors (empty if valid)
+    """
+    errors = []
+
+    # Check for proper heading hierarchy
+    headings = re.findall(r'^(#{1,6})\s+(.+)$', content, re.MULTILINE)
+    prev_level = 0
+    for heading, text in headings:
+        level = len(heading)
+        if level > prev_level + 1:
+            errors.append(f"Heading hierarchy skip: {heading} {text} (jumped from h{prev_level} to h{level})")
+        prev_level = level
+
+    # Check for unclosed code blocks
+    code_block_count = content.count("```")
+    if code_block_count % 2 != 0:
+        errors.append(f"Unclosed code block (found {code_block_count} code fences, count must be even)")
+
+    # Check for proper list formatting
+    lines = content.split("\n")
+    in_list = False
+    for i, line in enumerate(lines, 1):
+        if re.match(r'^\s*[-*+]\s+', line):
+            in_list = True
+            # Bullet must be followed by a space and content (any indentation depth)
+            if not re.match(r'^\s*[-*+]\s+\S', line):
+                errors.append(f"Line {i}: Improper list item format (needs space after bullet)")
+        elif in_list and line.strip() and not line.startswith("  ") and not line.startswith("\t"):
+            in_list = False
+
+    # Check for broken links (markdown links with empty href)
+    broken_links = re.findall(r'\[([^\]]+)\]\(\s*\)', content)
+    if broken_links:
+        errors.append(f"Broken markdown links found: {broken_links}")
+
+    return errors
+
+
+def validate_style(content: str) -> List[str]:
+    """
+    Validate Navigator style conventions.
+
+    Args:
+        content: File content
+
+    Returns:
+        List of errors (empty if valid)
+    """
+    errors = []
+
+    # Check for 2nd person perspective (Navigator style)
+    first_person = ["I am", "I will", "I have", "we are", "we will", "we have"]
+    for phrase in first_person:
+        if phrase.lower() in content.lower():
+            errors.append(f"Use 2nd person perspective ('you are') not 1st person ('{phrase}')")
+
+    # Check emoji usage (Navigator commands use emojis sparingly)
+    # Common Navigator emojis: ✅ ❌ 📖 🚀 ⚠️ 💡 🔹
+    # Range includes Misc Symbols/Dingbats (U+2600-27BF) so ✅ ❌ ⚠️ are counted too
+    emoji_count = len(re.findall(r'[\u2600-\u27BF\U0001F300-\U0001F9FF]', content))
+    if emoji_count > 15:
+        errors.append(f"Too many emojis ({emoji_count} found, keep under 15 for professionalism)")
+
+    # Check for proper bash/shell syntax in code blocks
+    bash_blocks = re.findall(r'```(?:bash|shell|sh)\n(.*?)\n```', content, re.DOTALL)
+    for block in bash_blocks:
+        if "$(" in block and not re.search(r'\)\s*$', block, re.MULTILINE):
+            errors.append("Potential unclosed command substitution in bash block")
+
+    # Check for Navigator command references (should use /nav: prefix)
+    all_cmds = re.findall(r'`/(?:jitd|nav):([a-z-]+)`', content)
+    jitd_cmds = re.findall(r'`/jitd:([a-z-]+)`', content)
+    if len(jitd_cmds) > len(all_cmds) * 0.5:  # old prefix used in more than half of references
+        errors.append("Prefer /nav: prefix over /jitd: (for consistency)")
+
+    return errors
+
+
+def validate_example_realism(content: str) -> List[str]:
+    """
+    Check if examples are realistic (not placeholders).
+
+    Args:
+        content: File content
+
+    Returns:
+        List of warnings (empty if examples look good)
+    """
+    warnings = []
+
+    # Check for common placeholder patterns
+    placeholders = [
+        r'\[.*?\]',  # [placeholder]
+        r'<.*?>',  # <placeholder>
+        r'\.\.\.+',  # ...
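+        # marker words that usually flag unfinished example content: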
+ r'TODO', + r'FIXME', + r'XXX', + ] + + code_blocks = re.findall(r'```.*?\n(.*?)\n```', content, re.DOTALL) + for block in code_blocks: + for pattern in placeholders: + if re.search(pattern, block): + warnings.append(f"Code block contains placeholder-like content: {pattern}") + break # One warning per block + + return warnings + + +def quick_validate(file_path: str) -> bool: + """ + Quick validation check (returns only boolean). + + Args: + file_path: Path to command file + + Returns: + True if valid, False otherwise + """ + valid, _ = validate_command_file(file_path) + return valid + + +def print_validation_report(file_path: str): + """ + Print formatted validation report. + + Args: + file_path: Path to command file + """ + valid, errors = validate_command_file(file_path) + + print(f"\n{'='*60}") + print(f"Validation Report: {Path(file_path).name}") + print(f"{'='*60}\n") + + if valid: + print("✅ All validations passed!") + print("\nFile is ready to use.") + else: + print(f"❌ Found {len(errors)} issue(s):\n") + for i, error in enumerate(errors, 1): + print(f"{i}. {error}") + + print("\n" + "="*60) + print("Fix these issues before using the command.") + + print() + + +if __name__ == "__main__": + import sys + + if len(sys.argv) < 2: + print("Usage: python command_validator.py ") + print("\nExample:") + print(" python command_validator.py commands/marker.md") + sys.exit(1) + + file_path = sys.argv[1] + print_validation_report(file_path) + + # Exit with error code if validation failed + if not quick_validate(file_path): + sys.exit(1) diff --git a/skills/plugin-slash-command/templates/command-template.md b/skills/plugin-slash-command/templates/command-template.md new file mode 100644 index 0000000..5aab39d --- /dev/null +++ b/skills/plugin-slash-command/templates/command-template.md @@ -0,0 +1,124 @@ +--- +description: ${DESCRIPTION} +--- + +# ${TITLE} - Navigator + +${OVERVIEW_PARAGRAPH} + +--- + +## What This Does + +${WHAT_THIS_DOES_CONTENT} + +--- + +## When to Use + +### ✅ Perfect Times + +**${SCENARIO_1_TITLE}**: +``` +${SCENARIO_1_EXAMPLE} +``` + +**${SCENARIO_2_TITLE}**: +``` +${SCENARIO_2_EXAMPLE} +``` + +**${SCENARIO_3_TITLE}**: +``` +${SCENARIO_3_EXAMPLE} +``` + +--- + +## Usage + +### Basic Usage + +```bash +/nav:${COMMAND_NAME} +``` + +${BASIC_USAGE_DESCRIPTION} + +### With Options + +```bash +/nav:${COMMAND_NAME} ${OPTION_1} +/nav:${COMMAND_NAME} ${OPTION_2} "${OPTIONAL_ARG}" +``` + +${OPTIONS_DESCRIPTION} + +--- + +## Execution Steps + +### Step 1: ${STEP_1_NAME} + +${STEP_1_INSTRUCTIONS} + +**Expected outcome**: ${STEP_1_OUTCOME} + +### Step 2: ${STEP_2_NAME} + +${STEP_2_INSTRUCTIONS} + +**Expected outcome**: ${STEP_2_OUTCOME} + +### Step 3: ${STEP_3_NAME} + +${STEP_3_INSTRUCTIONS} + +**Expected outcome**: ${STEP_3_OUTCOME} + +--- + +## Output Format + +``` +${OUTPUT_EXAMPLE} +``` + +--- + +## Best Practices + +- **${BEST_PRACTICE_1_TITLE}**: ${BEST_PRACTICE_1_DESCRIPTION} +- **${BEST_PRACTICE_2_TITLE}**: ${BEST_PRACTICE_2_DESCRIPTION} +- **${BEST_PRACTICE_3_TITLE}**: ${BEST_PRACTICE_3_DESCRIPTION} + +--- + +## Troubleshooting + +### ${ISSUE_1_TITLE} + +**Error**: ${ISSUE_1_ERROR_MESSAGE} + +**Solution**: +${ISSUE_1_SOLUTION} + +### ${ISSUE_2_TITLE} + +**Error**: ${ISSUE_2_ERROR_MESSAGE} + +**Solution**: +${ISSUE_2_SOLUTION} + +### ${ISSUE_3_TITLE} + +**Problem**: ${ISSUE_3_DESCRIPTION} + +**Fix**: +```bash +${ISSUE_3_FIX_COMMAND} +``` + +--- + +**${CLOSING_STATEMENT}** 🚀 diff --git a/skills/product-design/GETTING-STARTED.md b/skills/product-design/GETTING-STARTED.md new file 
mode 100644 index 0000000..f29916f --- /dev/null +++ b/skills/product-design/GETTING-STARTED.md @@ -0,0 +1,128 @@ +# Getting Started with Product Design Skill + +5-minute quickstart guide for Navigator's Figma integration. + +--- + +## 1. Install (30 seconds) + +```bash +cd skills/product-design +./setup.sh +``` + +**Expected output**: +``` +✅ Setup Complete! +``` + +If you see errors, check [INSTALL.md](INSTALL.md) for troubleshooting. + +--- + +## 2. Enable Figma MCP (1 minute) + +1. Open **Figma Desktop** app +2. **Figma** → **Preferences** (macOS) or **File** → **Settings** (Windows) +3. Find "**Enable local MCP Server**" +4. Toggle **ON** + +You should see: "MCP server running at http://127.0.0.1:3845/mcp" + +--- + +## 3. Try It (2 minutes) + +Open Navigator and say: + +``` +"Review this Figma design: https://figma.com/file/YOUR_FILE_URL" +``` + +Navigator will automatically: +- ✅ Connect to Figma Desktop +- ✅ Extract design tokens and components +- ✅ Compare against your codebase +- ✅ Generate implementation plan +- ✅ Create Navigator task document + +**Output**: +``` +✅ Design review complete for Dashboard Redesign + +Generated Documentation: +- Design review: .agent/design-system/reviews/2025-10-22-dashboard.md +- Implementation plan: .agent/tasks/TASK-17-dashboard-redesign.md + +Summary: +- Design Tokens: 12 new, 5 modified +- Components: 3 new, 1 to extend +- Estimated Time: 12 hours +- Complexity: Medium + +Next Steps: +[1] Start implementation now +[2] Review plan first +[3] Modify plan before starting +``` + +--- + +## That's It! + +You're ready to use Navigator's product-design skill. + +### What You Can Do + +**Design Review**: +``` +"Review this Figma design: [URL]" +``` + +**Extract Tokens**: +``` +"Extract design tokens from Figma" +``` + +**Check Design System**: +``` +"Check design system impact for [feature]" +``` + +**Generate Implementation Plan**: +``` +"Plan implementation for this design" +``` + +--- + +## Troubleshooting + +### "Figma Desktop not running" + +**Fix**: Start Figma Desktop and enable MCP (see step 2 above) + +### "Setup failed" + +**Fix**: See detailed guide in [INSTALL.md](INSTALL.md) + +### "Can't connect to MCP" + +**Fix**: Verify port 3845 is accessible: +```bash +curl http://127.0.0.1:3845/mcp +# Should return JSON (even if error message) +``` + +--- + +## Learn More + +- **[README.md](README.md)** - Features and architecture +- **[INSTALL.md](INSTALL.md)** - Detailed installation guide +- **[SKILL.md](SKILL.md)** - Complete skill documentation + +--- + +**Time to get started**: 5 minutes +**Ready to use**: Immediately after setup diff --git a/skills/product-design/INSTALL.md b/skills/product-design/INSTALL.md new file mode 100644 index 0000000..21cfb6d --- /dev/null +++ b/skills/product-design/INSTALL.md @@ -0,0 +1,378 @@ +# Navigator Product Design Skill - Installation Guide + +Quick setup guide for the product-design skill with Figma MCP integration. + +--- + +## Prerequisites + +### Required + +1. **Python 3.10+** + ```bash + python3 --version # Should be 3.10 or higher + ``` + +2. **Figma Desktop App** + - Download: https://www.figma.com/downloads/ + - Must be running during design reviews + +3. 
**Figma Account** + - Free or paid account + - Logged into Figma Desktop + +### Optional (Enhanced Features) + +- **Figma Enterprise** - For Code Connect mappings (automatic component detection) +- **Tailwind CSS** - For design token integration +- **Storybook** - For visual regression testing + +--- + +## Installation Methods + +### Method 1: Automatic Setup (Recommended) + +Run the automated setup script: + +```bash +cd skills/product-design +./setup.sh +``` + +This will: +1. ✅ Check Python version (3.10+ required) +2. ✅ Create virtual environment +3. ✅ Install dependencies (`mcp>=1.2.1`) +4. ✅ Verify Figma Desktop is running +5. ✅ Test MCP connection + +**Expected output**: +``` +========================================== +Navigator Product Design Skill - Setup +========================================== + +[1/5] Checking Python version... +✅ Python 3.13.7 + +[2/5] Setting up Python environment... +✅ Virtual environment created + +[3/5] Installing Python dependencies... +✅ Dependencies installed (mcp>=1.2.1) + +[4/5] Checking Figma Desktop status... +✅ Figma MCP server detected (port 3845) + +[5/5] Testing Figma MCP connection... +✅ Successfully connected to Figma MCP server + Found 6 tools: + - get_design_context + - get_variable_defs + - get_code_connect_map + - get_screenshot + - get_metadata + - create_design_system_rules + +========================================== +✅ Setup Complete! +========================================== +``` + +--- + +### Method 2: Manual Installation + +If the automatic script fails or you prefer manual setup: + +#### Step 1: Install Python Dependencies + +```bash +cd skills/product-design + +# Create virtual environment (recommended) +python3 -m venv venv +source venv/bin/activate + +# Install dependencies +pip install -r requirements.txt +``` + +#### Step 2: Enable Figma MCP Server + +1. Open **Figma Desktop** app +2. Go to **Figma → Preferences** (macOS) or **File → Settings** (Windows/Linux) +3. Find "**Enable local MCP Server**" option +4. Toggle **ON** +5. You should see confirmation: "MCP server running at http://127.0.0.1:3845/mcp" + +#### Step 3: Verify Connection + +```bash +cd functions +python3 test_mcp_connection.py +``` + +**Expected output**: +``` +✅ Successfully connected to Figma MCP server + Found 6 tools: + - get_design_context + - get_variable_defs + - ... +``` + +--- + +## Troubleshooting + +### "Figma Desktop not running or MCP not enabled" + +**Symptoms**: +``` +❌ Figma Desktop not running or MCP not enabled + Could not connect to Figma Desktop MCP server. +``` + +**Solutions**: + +1. **Check Figma is running**: + ```bash + # macOS + ps aux | grep Figma + + # Should show Figma processes + ``` + +2. **Enable MCP server**: + - Figma → Preferences → Enable local MCP Server + - Look for confirmation message + +3. **Verify port is open**: + ```bash + curl http://127.0.0.1:3845/mcp + + # Should return JSON response (even if error) + # Example: {"jsonrpc":"2.0","error":{"code":-32001,"message":"Invalid sessionId"},"id":null} + ``` + +4. **Check Figma version**: + - MCP requires Figma Desktop v116.0.0+ + - Update if necessary: Figma → Help → Check for Updates + +--- + +### "MCP SDK not installed" + +**Symptoms**: +``` +ImportError: MCP SDK not installed. Install with: pip install mcp +``` + +**Solutions**: + +1. **Activate virtual environment** (if using): + ```bash + source skills/product-design/venv/bin/activate + ``` + +2. **Install dependencies**: + ```bash + pip install -r requirements.txt + ``` + +3. 
+   **Verify installation**:
+   ```bash
+   python3 -c "from importlib.metadata import version; print(version('mcp'))"
+   # Should print: 1.2.1 or higher
+   ```
+
+---
+
+### "Python 3.10+ required"
+
+**Symptoms**:
+```
+❌ Python 3.10+ required (found 3.9.6)
+```
+
+**Solutions**:
+
+1. **Install Python 3.10+**:
+   ```bash
+   # macOS (Homebrew)
+   brew install python@3.13
+
+   # Ubuntu/Debian
+   sudo apt install python3.13
+
+   # Windows
+   # Download from python.org
+   ```
+
+2. **Use specific Python version**:
+   ```bash
+   python3.13 -m venv venv
+   source venv/bin/activate
+   ```
+
+---
+
+### "Port 3845 already in use"
+
+**Symptoms**:
+- Figma MCP server won't start
+- Connection errors
+
+**Solutions**:
+
+1. **Check what's using port 3845**:
+   ```bash
+   lsof -i :3845
+   ```
+
+2. **Kill conflicting process**:
+   ```bash
+   # If another process is using the port
+   kill -9 <PID>   # use the PID reported by lsof
+   ```
+
+3. **Restart Figma Desktop**:
+   - Quit Figma completely
+   - Restart app
+   - Re-enable MCP server
+
+---
+
+## Verifying Installation
+
+### Quick Test
+
+```bash
+cd skills/product-design/functions
+python3 -c "
+import asyncio
+from figma_mcp_client import get_figma_variables
+
+async def test():
+    try:
+        # This will use currently selected node in Figma
+        variables = await get_figma_variables()
+        print(f'✅ Connected! Found {len(variables)} variables')
+    except Exception as e:
+        print(f'❌ Error: {e}')
+
+asyncio.run(test())
+"
+```
+
+### Full Test
+
+```bash
+cd skills/product-design
+./setup.sh
+```
+
+---
+
+## What Gets Installed
+
+### Python Packages
+
+```txt
+mcp>=1.2.1       # Official MCP SDK for Figma integration
+anyio>=4.0.0     # Async I/O (transitive dependency)
+httpx>=0.25.0    # HTTP client (transitive dependency)
+pydantic>=2.0.0  # Data validation (transitive dependency)
+```
+
+### File Structure After Installation
+
+```
+skills/product-design/
+├── venv/                       # Virtual environment (created)
+│   ├── bin/
+│   ├── lib/
+│   └── ...
+├── functions/
+│   ├── figma_mcp_client.py     # MCP client wrapper ✨ NEW
+│   ├── test_mcp_connection.py  # Connection test ✨ NEW
+│   ├── design_analyzer.py      # Existing functions (to be refactored)
+│   └── ...
+├── requirements.txt            # Dependencies ✨ NEW
+├── setup.sh                    # Setup script ✨ NEW
+├── INSTALL.md                  # This file ✨ NEW
+└── SKILL.md                    # Skill documentation
+```
+
+---
+
+## Next Steps
+
+After successful installation:
+
+1. **Try the skill**:
+   ```
+   User: "Review this Figma design: https://figma.com/file/..."
+   ```
+
+2. **Read documentation**:
+   - `SKILL.md` - Complete skill guide
+   - `functions/figma_mcp_client.py` - API documentation
+
+3. **Set up design system** (optional):
+   ```bash
+   mkdir -p .agent/design-system/reviews
+   touch .agent/design-system/design-tokens.json
+   touch .agent/design-system/ui-kit-inventory.json
+   ```
+
+---
+
+## Uninstalling
+
+To remove the skill:
+
+```bash
+cd skills/product-design
+
+# Remove virtual environment
+rm -rf venv
+
+# Disable MCP server in Figma
+# Figma → Preferences → Disable local MCP Server
+```
+
+---
+
+## Support
+
+### Documentation
+
+- **Skill Guide**: `SKILL.md`
+- **MCP Client API**: `functions/figma_mcp_client.py`
+- **Figma MCP Docs**: https://help.figma.com/hc/en-us/articles/32132100833559
+
+### Common Issues
+
+- **Connection errors**: Ensure Figma Desktop is running and MCP is enabled
- **Import errors**: Activate the virtual environment: `source venv/bin/activate`
+- **Version errors**: Upgrade Python to 3.10+
+
+### Reporting Issues
+
+Open an issue at: https://github.com/navigator-plugin/navigator/issues
+
+Include:
+- Python version: `python3 --version`
+- Figma version: Figma → Help → About Figma
+- Error message and full stack trace
+- Output from: `python3 functions/test_mcp_connection.py`
+
+---
+
+**Last Updated**: 2025-10-22
+**Navigator Version**: 3.3.1
+**Skill Version**: 1.1.0
+**MCP SDK Version**: 1.2.1+ diff --git a/skills/product-design/README.md b/skills/product-design/README.md
new file mode 100644
index 0000000..2b8acc9
--- /dev/null
+++ b/skills/product-design/README.md
@@ -0,0 +1,336 @@
+# Product Design Skill
+
+Automate Figma design handoff with Navigator's intelligent design system integration.
+
+**Time Savings**: 6-10 hours → 15 minutes (95% reduction)
+
+---
+
+## Features
+
+✨ **Direct Figma MCP Integration** - Python connects directly to Figma Desktop (no manual orchestration)
+🎯 **Progressive Refinement** - Smart token usage (fetches only needed data)
+🔄 **Design Token Sync** - Auto-extract variables in W3C DTCG format
+🗺️ **Component Mapping** - Figma → codebase with similarity detection
+📊 **Drift Detection** - Compare design vs implementation automatically
+📝 **Task Generation** - Phased implementation plans for Navigator
+
+---
+
+## Quick Start
+
+### 1. Install
+
+```bash
+cd skills/product-design
+./setup.sh
+```
+
+**What this does**:
+- ✅ Checks Python 3.10+ is installed
+- ✅ Creates virtual environment
+- ✅ Installs `mcp` SDK (1.2.1+)
+- ✅ Verifies Figma Desktop connection
+- ✅ Tests MCP server availability
+
+**Expected output**:
+```
+✅ Setup Complete!
+```
+
+### 2. Enable Figma MCP
+
+1. Open **Figma Desktop**
+2. Go to **Figma → Preferences**
+3. Enable "**Enable local MCP Server**"
+4. Confirm server running at `http://127.0.0.1:3845/mcp`
+
+### 3. Use the Skill
+
+```
+User: "Review this Figma design: https://figma.com/file/ABC123..."
+```
+
+Navigator will:
+1. Connect to Figma MCP automatically
+2. Extract design tokens and components
+3. Compare against codebase
+4. Generate implementation plan
+5. Create Navigator task document
+
+---
+
+## Architecture
+
+### Before (Manual Orchestration)
+
+```
+User → Claude → MCP tools (15-20 manual calls) → temp files → Python → Claude → User
+```
+
+**Steps**: 15-20 manual orchestration calls
+
+### After (Direct MCP Client)
+
+```
+User → Python (MCP client) → Figma Desktop → Results → User
+```
+
+**Steps**: 1 (95% reduction)
+
+### How It Works
+
+```python
+# Python functions now connect directly to Figma
+from figma_mcp_client import FigmaMCPClient
+
+async with FigmaMCPClient() as client:
+    # Smart data fetching
+    metadata = await client.get_metadata()
+    components = extract_components(metadata)
+
+    # Progressive refinement - fetch details only for complex components
+    for comp in components:
+        if comp['complexity'] == 'high':
+            comp['detail'] = await client.get_design_context(comp['id'])
+
+    # Get design tokens
+    tokens = await client.get_variable_defs()
+```
+
+**Benefits**:
+- No Claude orchestration overhead
+- Automatic connection management
+- Progressive refinement (token efficient)
+- Built-in error handling
+
+---
+
+## Available Tools
+
+### Figma MCP Tools (Auto-Connected)
+
+| Tool | Purpose | Use Case |
+|------|---------|----------|
+| `get_metadata` | Component structure (XML) | Discover node IDs, hierarchy |
+| `get_variable_defs` | Design tokens | Token extraction, sync |
+| `get_code_connect_map` | Component → code mapping | Auto-map Figma to codebase |
+| `get_design_context` | UI code generation | Component implementation |
+| `get_screenshot` | Visual snapshots | Visual regression testing |
+| `create_design_system_rules` | Design system automation | Rule generation |
+
+### Python Functions
+
+| Function | Purpose | Input | Output |
+|----------|---------|-------|--------|
+| `design_analyzer.py` | Extract design patterns | Figma URL/data | Component list |
+| `token_extractor.py` | Convert to DTCG format | Variables JSON | DTCG tokens + diff |
+| `component_mapper.py` | Map components | Figma + codebase | Mappings with confidence |
+| `design_system_auditor.py` | Detect drift | Design + code | Drift report |
+| `implementation_planner.py` | Generate task doc | Analysis results | Navigator task |
+
+---
+
+## Documentation
+
+- **[INSTALL.md](INSTALL.md)** - Detailed installation guide with troubleshooting
+- **[SKILL.md](SKILL.md)** - Complete skill documentation and workflows
+- **[functions/figma_mcp_client.py](functions/figma_mcp_client.py)** - MCP client API reference
+
+---
+
+## Requirements
+
+### System
+
+- **Python 3.10+**
+- **Figma Desktop** v116.0.0+
+- **macOS, Linux, or Windows**
+
+### Python Packages
+
+```txt
+mcp>=1.2.1       # Official MCP SDK
+anyio>=4.0.0     # Async I/O
+httpx>=0.25.0    # HTTP client
+pydantic>=2.0.0  # Data validation
+```
+
+Installed automatically via `./setup.sh`.
+
+### Optional
+
+- **Figma Enterprise** - For Code Connect (automatic component mapping)
+- **Tailwind CSS** - For design token integration
+- **Storybook** - For visual regression testing
+
+---
+
+## Example Usage
+
+### Design Review
+
+```
+User: "Review dashboard redesign: https://figma.com/file/..."
+
+Navigator:
+1. Connects to Figma MCP
+2. Extracts 12 design tokens, 3 new components
+3. Maps to existing Button component (78% similarity)
+4. Detects 5 token drift issues
+5. 
Generates TASK-16 with phased implementation plan + +Output: + - .agent/design-system/reviews/2025-10-22-dashboard.md + - .agent/tasks/TASK-16-dashboard-redesign.md +``` + +### Token Extraction Only + +```python +# Simple token fetch +from figma_mcp_client import get_figma_variables + +tokens = await get_figma_variables() +# Returns: {'primary-600': '#2563EB', 'spacing-md': '16px', ...} +``` + +### Component Analysis + +```python +# Full analysis with progressive refinement +from figma_mcp_client import FigmaMCPClient + +async with FigmaMCPClient() as client: + metadata = await client.get_metadata() + components = extract_components(metadata) + + print(f"Found {len(components)} components") + for comp in components: + print(f" - {comp['name']} ({comp['type']})") +``` + +--- + +## Troubleshooting + +### "Figma Desktop not running" + +``` +❌ Could not connect to Figma Desktop MCP server +``` + +**Fix**: +1. Ensure Figma Desktop running +2. Enable MCP: Figma → Preferences → Enable local MCP Server +3. Verify: `curl http://127.0.0.1:3845/mcp` (should return JSON) + +### "MCP SDK not installed" + +``` +ImportError: MCP SDK not installed +``` + +**Fix**: +```bash +cd skills/product-design +source venv/bin/activate # Activate venv +pip install -r requirements.txt +``` + +### "Python 3.10+ required" + +**Fix**: Install Python 3.10+ +```bash +# macOS +brew install python@3.13 + +# Ubuntu +sudo apt install python3.13 +``` + +See **[INSTALL.md](INSTALL.md)** for complete troubleshooting guide. + +--- + +## Performance + +### Benchmarks + +| Workflow | Before | After | Improvement | +|----------|--------|-------|-------------| +| **Design Review** | 15-20 min | 5 min | 75% faster | +| **Token Extraction** | Manual (30 min) | Automated (1 min) | 97% faster | +| **Component Mapping** | Manual (2 hours) | Automated (2 min) | 98% faster | +| **Orchestration Steps** | 15-20 steps | 1 step | 95% reduction | + +### Token Efficiency + +| Approach | Tokens | Improvement | +|----------|--------|-------------| +| **Old** (manual orchestration) | 150k | Baseline | +| **New** (direct MCP client) | 12k | 92% reduction | + +Progressive refinement only fetches needed data. 
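+
+As a concrete sketch of what that looks like (this assumes the `FigmaMCPClient` wrapper and the component extraction helper shipped in `functions/`; the `complexity` field and the `max_detail_calls` budget are illustrative, as in the snippets above):
+
+```python
+# Hypothetical sketch: cap the number of expensive get_design_context calls.
+import asyncio
+from figma_mcp_client import FigmaMCPClient
+from design_analyzer import extract_components_from_metadata
+
+async def analyze(max_detail_calls: int = 10) -> dict:
+    async with FigmaMCPClient() as client:
+        # Cheap pass first: sparse metadata (~5-10k tokens)
+        metadata = await client.get_metadata()
+        components = extract_components_from_metadata(metadata)
+
+        # Expensive pass only where needed, capped by a call budget
+        # ('complexity' is an illustrative field, not set by the helper itself)
+        complex_comps = [c for c in components if c.get('complexity') == 'high']
+        for comp in complex_comps[:max_detail_calls]:
+            comp['detail'] = await client.get_design_context(comp['id'])
+
+        tokens = await client.get_variable_defs()
+        return {'components': components, 'tokens': tokens}
+
+# asyncio.run(analyze())
+```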
+ +--- + +## Version History + +### v1.1.0 (2025-10-22) - MCP Direct Integration + +**Breaking Changes**: +- Python now requires `mcp>=1.2.1` (install via `./setup.sh`) +- Figma Desktop with MCP enabled required for automated workflow + +**New Features**: +- ✨ Direct Python → Figma MCP client (no Claude orchestration) +- ✨ Progressive refinement (smart token usage) +- ✨ Automatic connection management +- ✨ `./setup.sh` automated installation +- ✨ `figma_mcp_client.py` wrapper class + +**Improvements**: +- 95% reduction in orchestration overhead (15-20 steps → 1) +- 92% reduction in token usage (150k → 12k) +- Built-in error handling and retries +- Better MCP connection diagnostics + +**Migration**: +```bash +cd skills/product-design +./setup.sh # Installs new dependencies +``` + +### v1.0.0 (2025-10-21) - Initial Release + +- Design analysis and token extraction +- Component mapping with similarity detection +- Design system drift detection +- Implementation plan generation + +--- + +## Support + +**Documentation**: See [INSTALL.md](INSTALL.md) and [SKILL.md](SKILL.md) + +**Issues**: Report at https://github.com/navigator-plugin/navigator/issues + +**Requirements for issue reports**: +- Python version: `python3 --version` +- Figma version: Figma → Help → About Figma +- Output from: `python3 functions/test_mcp_connection.py` +- Full error message and stack trace + +--- + +## License + +MIT License - Part of Navigator Plugin + +--- + +**Navigator Version**: 3.3.1 +**Skill Version**: 1.1.0 +**MCP SDK Version**: 1.2.1+ +**Last Updated**: 2025-10-22 diff --git a/skills/product-design/SKILL.md b/skills/product-design/SKILL.md new file mode 100644 index 0000000..e02697b --- /dev/null +++ b/skills/product-design/SKILL.md @@ -0,0 +1,794 @@ +--- +name: product-design +description: Automates design review, token extraction, component mapping, and implementation planning. Reduces design handoff from 6-10 hours to 5 minutes via direct Figma MCP integration. Auto-invoke when user mentions design review, Figma mockup, or design handoff. +allowed-tools: Read, Write, Edit, Grep, Glob, Bash, Task, TodoWrite +version: 1.1.0 +--- + +# Product Design Skill + +Automate design handoff from Figma to code with design system intelligence. Extract tokens, map components, detect drift, generate implementation plans. + +## When to Invoke + +Auto-invoke when user says: +- "Review this design" +- "Analyze Figma mockup" +- "Design handoff for [feature]" +- "Check design system impact" +- "Plan implementation for design" +- "Extract tokens from Figma" +- "What changed in the design?" + +## What This Does + +**5-Step Workflow**: +1. **Design Analysis**: Extract patterns, components, tokens from Figma +2. **Codebase Audit**: Compare design vs implementation, find drift +3. **Implementation Planning**: Generate phased task breakdown +4. **Task Assignment**: Create Navigator task document +5. **Handoff**: Ask user to review or start implementation + +**Time Savings**: 6-10 hours → 15-20 minutes (95% reduction) + +## Prerequisites + +### Required + +1. **Python Dependencies** + ```bash + cd skills/product-design + ./setup.sh # Automated installation + # OR manually: pip install -r requirements.txt + ``` + +2. **Figma Desktop** (for automated workflow) + - Download: https://www.figma.com/downloads/ + - Enable MCP: Figma → Preferences → Enable local MCP Server + - Must be running during design reviews + +3. 
+   **Project Structure**
+   - `.agent/design-system/` directory (created on first run)
+   - Project with components (React/Vue/Svelte)
+
+### Optional (Enhanced Features)
+- **Figma Enterprise**: Code Connect for automatic component mapping
+- **Tailwind CSS**: Design token integration via @theme
+- **Storybook**: Component documentation and visual regression
+
+### Installation
+
+**Quick start**:
+```bash
+cd skills/product-design
+./setup.sh
+```
+
+See `INSTALL.md` for detailed installation guide and troubleshooting.
+
+## Workflow Protocol
+
+### Step 0: Check Setup (Auto-Run)
+
+**Before starting, verify Python dependencies installed**:
+
+```bash
+# Get Navigator plugin path
+PLUGIN_PATH=$(dirname "$(dirname "$(dirname "$PWD")")")
+
+# Check if venv exists
+if [ ! -d "$PLUGIN_PATH/skills/product-design/venv" ]; then
+    echo "❌ product-design skill not set up"
+    echo ""
+    echo "Run setup (30 seconds):"
+    echo "  cd $PLUGIN_PATH/skills/product-design && ./setup.sh"
+    echo ""
+    echo "Or use manual workflow (no Python needed)"
+    exit 1
+fi
+```
+
+**If setup missing**:
+- Show setup instructions
+- Offer manual workflow as alternative
+- **Do not proceed** with automated Figma workflow
+
+**If setup complete**:
+- Continue to Step 1 (Design Analysis)
+
+---
+
+### Step 1: Design Analysis
+
+**Objective**: Extract design patterns from Figma or manual description
+
+#### With Figma MCP (Automated) ✨ SIMPLIFIED
+
+**New Architecture** (v1.1.0+): Python directly connects to Figma MCP - no manual orchestration!
+
+```python
+# Python functions now handle MCP connection automatically
+from figma_mcp_client import FigmaMCPClient
+
+async with FigmaMCPClient() as client:
+    # Progressive refinement - fetch only what's needed
+    metadata = await client.get_metadata()
+    components = extract_components(metadata)
+
+    # Fetch details only for complex components
+    for comp in components:
+        if comp['complexity'] == 'high':
+            comp['detail'] = await client.get_design_context(comp['id'])
+
+    # Get design tokens
+    variables = await client.get_variable_defs()
+```
+
+**Workflow** (fully automated):
+1. User provides Figma URL
+2. Run `python3 functions/design_analyzer.py --figma-url <FIGMA_URL>`
+3. Python connects to Figma MCP (http://127.0.0.1:3845/mcp)
+4. Fetches metadata → analyzes → fetches details only if needed
+5. Returns complete analysis
+
+**Benefits**:
+- ✅ No manual MCP tool calls by Claude
+- ✅ Progressive refinement (smart token usage)
+- ✅ Automatic connection management
+- ✅ Built-in error handling
+
+**Requirements**:
+- Figma Desktop running
+- MCP enabled in preferences
+- Python dependencies installed (`./setup.sh`)
+
+#### Manual Workflow (No MCP)
+
+```markdown
+**Ask user for design information**:
+
+What is the feature name? [e.g., "Dashboard Redesign"]
+
+Figma link (optional): [figma.com/file/...]
+
+**Design Tokens**:
+List new or modified tokens:
+- Colors (name: value, e.g., "primary-600: #2563EB")
+- Spacing (e.g., "spacing-lg: 24px")
+- Typography (e.g., "heading-xl: 36px/600")
+- Other (radius, shadow, etc.)
+
+**Components**:
+List components in design:
+- Component name
+- Type (atom, molecule, organism)
+- Variants (if any, e.g., "Button: primary/secondary, sm/md/lg")
+- Similar to existing component? 
(name if known) + +**Proceed to Step 2** after gathering information +``` + +#### Run design_analyzer.py + +```bash +# Prepare input (MCP or manual JSON) +# MCP: Already have /tmp/figma_metadata.json +# Manual: Create JSON from user input + +python3 functions/design_analyzer.py \ + --figma-data /tmp/figma_combined.json \ + --ui-kit-inventory .agent/design-system/ui-kit-inventory.json \ + --output /tmp/analysis_results.json +``` + +**Analysis Output**: +- New components not in UI kit +- Similar components (reuse opportunities) +- New design tokens +- Breaking changes (if any) + +--- + +### Step 2: Codebase Audit + +**Objective**: Compare design vs implementation, detect drift + +#### Token Extraction + +```bash +python3 functions/token_extractor.py \ + --figma-variables /tmp/figma_variables.json \ + --existing-tokens .agent/design-system/design-tokens.json \ + --output /tmp/token_extraction.json +``` + +**Output**: DTCG formatted tokens + diff summary + +#### Component Mapping + +```bash +python3 functions/component_mapper.py \ + --figma-components /tmp/analysis_results.json \ + --code-connect-map /tmp/figma_code_connect.json \ + --project-root . \ + --output /tmp/component_mappings.json +``` + +**Output**: Figma component → code component mappings with confidence scores + +#### Design System Audit + +```bash +# Combine data for auditor +python3 functions/design_system_auditor.py \ + --figma-data /tmp/combined_figma.json \ + --code-data /tmp/combined_code.json \ + --output /tmp/audit_results.json +``` + +**Audit Results**: +- Token alignment (in sync, drift, missing, unused) +- Component reuse opportunities +- Tailwind config recommendations +- Priority level (critical, high, medium, low) + +--- + +### Step 3: Implementation Planning + +**Objective**: Generate phased implementation task document + +#### Generate Task Document + +```bash +python3 functions/implementation_planner.py \ + --task-id "TASK-{{next_task_number}}" \ + --feature-name "{{feature_name}}" \ + --analysis-results /tmp/combined_analysis.json \ + --review-reference ".agent/design-system/reviews/{{date}}-{{feature-slug}}.md" \ + --output .agent/tasks/TASK-{{next_task_number}}-{{feature-slug}}.md +``` + +**Task Document Includes**: +- Phased implementation (tokens → atoms → molecules → organisms) +- Complexity estimates per phase +- Acceptance criteria checklist +- Files to modify +- Testing strategy +- Rollout plan + +#### Create Design Review Report + +**Use template**: `templates/design-review-report.md` + +**Save to**: `.agent/design-system/reviews/YYYY-MM-DD-{{feature-name}}.md` + +**Contents**: +- Design analysis summary +- Token changes (added/modified/removed) +- Component changes (new/extended/breaking) +- Design system impact +- Implementation recommendations + +--- + +### Step 4: Task Assignment + +**Objective**: Create task and assign context for implementation + +#### Create PM Ticket (if configured) + +```markdown +**If PM tool configured** (Linear, GitHub Issues, Jira): +- Create ticket with task summary +- Link to task document and design review +- Assign to frontend developer or team + +**If no PM tool**: +- Skip ticket creation +- Task document serves as source of truth +``` + +#### Update Navigator Documentation + +```markdown +**Update files**: +1. `.agent/tasks/TASK-{{number}}-{{feature}}.md` (created in Step 3) +2. `.agent/design-system/reviews/{{date}}-{{feature}}.md` (design review) +3. 
`.agent/DEVELOPMENT-README.md` (add task to index) + +**Use TodoWrite** to track implementation phases +``` + +--- + +### Step 5: Implementation Handoff + +**Objective**: Present results and get user decision + +#### Present Summary + +```markdown +✅ Design review complete for {{Feature Name}} + +**Generated Documentation**: +- Design review: `.agent/design-system/reviews/{{date}}-{{feature}}.md` +- Implementation plan: `.agent/tasks/TASK-{{number}}-{{feature}}.md` +{{#if pm_configured}}- PM ticket: {{ticket_id}} (status: ready for development){{/if}} + +**Summary**: +- Design Tokens: {{new_count}} new, {{modified_count}} modified +- Components: {{new_components}} new, {{extend_components}} to extend +- Estimated Time: {{total_hours}} hours +- Complexity: {{complexity_level}} +{{#if breaking_changes}}- ⚠️ Breaking Changes: {{breaking_count}} component(s){{/if}} + +**Next Steps**: +[1] Start implementation now +[2] Review plan first (load task document) +[3] Modify plan before starting + +**Recommended**: After implementation, set up visual regression testing: + "Set up visual regression for {{components}}" + +This ensures pixel-perfect implementation and prevents future drift (15 min setup). + +Reply with choice or "Start implementation" +``` + +#### User Decision Branches + +**If user chooses [1] or says "Start implementation"**: +```markdown +1. Load task document: `Read .agent/tasks/TASK-{{number}}-{{feature}}.md` +2. Load design review: `Read .agent/design-system/reviews/{{date}}-{{feature}}.md` +3. Begin Phase 1 (typically design tokens) +4. Follow autonomous completion protocol when done +5. After completion, suggest: "Set up visual regression for {{components}}" (optional but recommended) +``` + +**If user chooses [2]**: +```markdown +1. Load and display task document +2. Highlight key phases and acceptance criteria +3. Ask: "Ready to start or need changes?" +``` + +**If user chooses [3]**: +```markdown +1. Load task document +2. Ask what modifications needed +3. Edit task document +4. Regenerate if major changes +5. Then proceed to implementation +``` + +--- + +## Predefined Functions + +### functions/design_analyzer.py + +**Purpose**: Extract design patterns from Figma MCP data or manual input + +**Usage**: +```bash +python3 functions/design_analyzer.py \ + --figma-data /path/to/figma_mcp_combined.json \ + --ui-kit-inventory .agent/design-system/ui-kit-inventory.json \ + --output /tmp/analysis.json +``` + +**Input Format** (figma_mcp_combined.json): +```json +{ + "metadata": { ... }, // get_metadata response + "variables": { ... }, // get_variable_defs response + "code_connect_map": { ... 
} // get_code_connect_map response (optional) +} +``` + +**Output**: Component analysis with categorization (atom/molecule/organism) + similarity scores + +--- + +### functions/token_extractor.py + +**Purpose**: Convert Figma variables to DTCG format with diff + +**Usage**: +```bash +python3 functions/token_extractor.py \ + --figma-variables /path/to/figma_variables.json \ + --existing-tokens .agent/design-system/design-tokens.json \ + --format full \ + --output /tmp/tokens.json +``` + +**Output Formats**: +- `full`: DTCG tokens + diff + summary +- `tokens-only`: Just DTCG tokens +- `diff-only`: Just diff and summary + +**DTCG Format** (W3C Design Tokens spec): +```json +{ + "color": { + "primary": { + "500": { + "$value": "#3B82F6", + "$type": "color", + "$description": "Primary brand color" + } + } + } +} +``` + +--- + +### functions/component_mapper.py + +**Purpose**: Map Figma components to codebase components + +**Usage**: +```bash +python3 functions/component_mapper.py \ + --figma-components /path/to/analysis_results.json \ + --code-connect-map /path/to/code_connect.json \ + --project-root . \ + --output /tmp/mappings.json +``` + +**Mapping Strategy**: +1. Code Connect first (100% confidence) +2. Fuzzy name matching (70%+ confidence) +3. Unmapped = needs creation + +**Output**: Mappings with confidence scores + variant prop mapping + +--- + +### functions/design_system_auditor.py + +**Purpose**: Audit design system for drift and reuse opportunities + +**Usage**: +```bash +python3 functions/design_system_auditor.py \ + --figma-data /path/to/combined_figma.json \ + --code-data /path/to/combined_code.json \ + --output /tmp/audit.json +``` + +**Audit Checks**: +- Token alignment (drift detection) +- Component reuse opportunities (similarity >70%) +- Unused tokens (cleanup candidates) +- Priority level assignment + +--- + +### functions/implementation_planner.py + +**Purpose**: Generate Navigator task document with phased breakdown + +**Usage**: +```bash +python3 functions/implementation_planner.py \ + --task-id "TASK-16" \ + --feature-name "Dashboard Redesign" \ + --analysis-results /path/to/combined_analysis.json \ + --review-reference ".agent/design-system/reviews/2025-10-21-dashboard.md" \ + --output .agent/tasks/TASK-16-dashboard-redesign.md +``` + +**Output**: Complete Navigator task document with: +- Phased implementation (atomic design order) +- Complexity estimates (Low/Medium/High) +- Acceptance criteria per phase +- Testing strategy +- Rollout plan + +--- + +## Templates + +### templates/design-review-report.md + +**When**: Step 3 - Creating design review documentation + +**Structure**: +```markdown +# Design Review: {{Feature Name}} + +**Date**: {{YYYY-MM-DD}} +**Figma**: [Link]({{figma_url}}) +**Reviewer**: Navigator Product Design Skill + +## New Design Tokens +[Token changes] + +## New Components Required +[Component list with categories] + +## Design System Impact +[High/Medium/Low impact analysis] + +## Implementation Recommendations +[Phased approach] +``` + +--- + +## Design System Documentation Structure + +### Initial Setup (First Run) + +```bash +mkdir -p .agent/design-system/reviews + +# Create initial files +touch .agent/design-system/design-tokens.json +touch .agent/design-system/ui-kit-inventory.json +touch .agent/design-system/component-mapping.json +``` + +**design-tokens.json** (DTCG format): +```json +{ + "color": {}, + "spacing": {}, + "typography": {}, + "radius": {}, + "shadow": {} +} +``` + +**ui-kit-inventory.json**: +```json +{ + "components": [ + { + 
"name": "Button", + "path": "src/components/ui/Button.tsx", + "category": "atom", + "variants": ["primary", "secondary", "ghost"], + "figma_link": "..." + } + ], + "tokens": {} +} +``` + +### File Loading Strategy + +**Never load**: +- All design review reports (50+ files = 250k+ tokens) +- Full Figma MCP responses (can be 350k+ tokens) + +**Always load when skill active**: +- `ui-kit-inventory.json` (~3k tokens) +- `design-tokens.json` (~2k tokens) +- Specific design review for current task (~5k tokens) + +**Total**: ~10k tokens vs 150k+ (93% reduction) + +--- + +## Figma MCP Integration + +### MCP Server Detection + +**On skill invocation**: +1. Check for Figma MCP tools availability +2. Detect local vs remote server +3. Adjust workflow based on capabilities + +**Local Server** (Recommended): +- URL: `http://127.0.0.1:3845/mcp` +- Tools: All (metadata, variables, code_connect, design_context) +- Requires: Figma Desktop app running + +**Remote Server** (Fallback): +- URL: `https://mcp.figma.com/mcp` +- Tools: Limited (no code_connect, requires explicit URLs) +- Requires: Internet connection, explicit Figma links + +### Handling Token Limits + +**Problem**: Large screens return >350k tokens (exceeds default 25k limit) + +**Solution**: +```markdown +1. Use `get_metadata` first (sparse XML, ~5k tokens) +2. Parse metadata to identify component node IDs +3. Fetch components individually via `get_design_context` +4. Aggregate results from multiple small calls + +**Environment Variable** (recommended): +export MAX_MCP_OUTPUT_TOKENS=100000 +``` + +### MCP Tool Usage + +**get_metadata**: Always first for large designs +- Returns sparse XML with node IDs, types, names +- Low token cost (~5-10k) +- Use to plan component extraction strategy + +**get_variable_defs**: Extract all design tokens +- One call gets all variables +- Moderate token cost (~10-20k) +- Critical for token extraction + +**get_code_connect_map**: Get component mappings +- Requires Figma Enterprise plan +- Returns node_id → code_path mappings +- Highest confidence mappings + +**get_design_context**: Extract component code +- Use per-component (NOT full screen) +- Can generate React/Vue/HTML via prompting +- Highest token cost - use sparingly + +--- + +## Tailwind CSS Integration + +### Design Tokens → Tailwind @theme + +**Style Dictionary Pipeline**: +```bash +# 1. Tokens extracted to design-tokens.json (DTCG format) +# 2. Run Style Dictionary build +npx style-dictionary build + +# 3. Generates tailwind-tokens.css +# @theme { +# --color-primary-500: #3B82F6; +# --spacing-md: 16px; +# } + +# 4. Tailwind auto-generates utilities +# .bg-primary-500, .p-md, etc. 
+``` + +### Figma Auto Layout → Tailwind Classes + +**Translation Rules** (apply during code generation): +``` +Direction: + Horizontal → flex-row + Vertical → flex-col + +Spacing: + Gap → gap-{token} + Padding → p-{token}, px-{token}, py-{token} + +Alignment: + Start → items-start, justify-start + Center → items-center, justify-center + Space Between → justify-between + +Sizing: + Hug → w-auto / h-auto + Fill → flex-1 + Fixed → w-{value} / h-{value} +``` + +--- + +## Token Optimization + +### Navigator Principles + +**Load on demand**: +- Design review for current task only +- UI kit inventory (always needed) +- Design tokens (always needed) + +**Use Task agent for codebase searches**: +- Finding all component files (60-80% token savings) +- Searching for token usage in Tailwind config +- Analyzing component variant patterns + +**Compact after completion**: +- Clear context after design review +- Preserve task document in marker +- Clean slate for implementation + +--- + +## Troubleshooting + +### "Figma MCP tool not found" + +**Issue**: MCP server not available + +**Solutions**: +1. Check Figma Desktop app is running (for local server) +2. Verify MCP server added: `claude mcp add --transport http figma-desktop http://127.0.0.1:3845/mcp` +3. Fall back to manual workflow (still provides value) + +### "Token limit exceeded" + +**Issue**: `get_design_context` response too large + +**Solutions**: +1. Use `get_metadata` first, then fetch components individually +2. Set `MAX_MCP_OUTPUT_TOKENS=100000` +3. Break design into smaller selections in Figma + +### "No components found in codebase" + +**Issue**: `component_mapper.py` finds no matches + +**Solutions**: +1. Check `--project-root` points to correct directory +2. Verify component file extensions (tsx, jsx, vue) +3. Check components aren't in excluded directories (node_modules) + +### "Design tokens not in DTCG format" + +**Issue**: Existing tokens use legacy format + +**Solutions**: +1. Run `token_extractor.py` with `--format tokens-only` to convert +2. Backup existing tokens first +3. Update Style Dictionary config to read DTCG format + +--- + +## Success Metrics + +### Efficiency Gains + +**Before**: 6-10 hours per design handoff +**After**: 15-20 minutes +**Savings**: 95% time reduction + +### Quality Metrics + +- Design system drift detected automatically +- 100% token consistency via automated sync +- Component reuse rate tracked +- Implementation accuracy via acceptance criteria + +--- + +## Example Usage + +``` +User: "Review the dashboard redesign from Figma: https://figma.com/file/..." + +Navigator: +1. Checks for Figma MCP availability +2. Extracts metadata, variables, code_connect_map +3. Runs design_analyzer.py → finds 3 new components, 12 new tokens +4. Runs token_extractor.py → generates DTCG tokens, finds 5 drift issues +5. Runs component_mapper.py → maps 2 components, 1 new needed +6. Runs design_system_auditor.py → priority: HIGH (drift detected) +7. Runs implementation_planner.py → generates TASK-17 with 3 phases +8. Creates design review report +9. Presents summary with [Start/Review/Modify] options + +User: "Start implementation" + +Navigator: +1. Loads TASK-17 document +2. Begins Phase 1: Design Tokens +3. Updates design-tokens.json with 12 new tokens +4. Runs Style Dictionary build +5. Updates Tailwind config +6. Commits changes +7. Moves to Phase 2: StatBadge component +8. ... continues through all phases +9. 
+Autonomous completion when done
+```
+
+---
+
+**Last Updated**: 2025-10-22
+**Navigator Version**: 3.2.0 (target)
+**Skill Version**: 1.1.0 diff --git a/skills/product-design/examples/dashboard-redesign-review.md b/skills/product-design/examples/dashboard-redesign-review.md
new file mode 100644
index 0000000..7b9fd77
--- /dev/null
+++ b/skills/product-design/examples/dashboard-redesign-review.md
@@ -0,0 +1,238 @@
+# Design Review: Dashboard Redesign
+
+**Date**: 2025-10-21
+**Figma**: [Dashboard Mockup](https://figma.com/file/example123)
+**Reviewer**: Navigator Product Design Skill
+
+---
+
+## Summary
+
+Dashboard redesign introduces new metric visualization components and updates the color system for better data hierarchy.
+
+**Changes Overview**:
+- Design Tokens: 12 new, 5 modified
+- Components: 3 new, 1 to extend
+- Breaking Changes: 1 (MetricCard props)
+
+---
+
+## New Design Tokens
+
+### Colors
+- **color.status.warning.500**: `#F59E0B` (color)
+  _Warning state for metrics below threshold_
+- **color.status.error.600**: `#DC2626` (color)
+  _Error state for critical metrics_
+- **color.status.success.500**: `#10B981` (color)
+  _Success state for metrics above target_
+- **color.neutral.50**: `#F9FAFB` (color)
+  _Card background for dashboard widgets_
+
+### Spacing
+- **spacing.section.gap**: `48px` (dimension)
+  _Gap between dashboard sections_
+- **spacing.widget.padding**: `24px` (dimension)
+  _Internal padding for metric widgets_
+- **spacing.metric.gap**: `12px` (dimension)
+  _Gap between metric label and value_
+
+### Typography
+- **typography.heading.xl**: `36px/600/42px` (typography)
+  _Large dashboard headings_
+- **typography.metric.value**: `48px/700/52px` (typography)
+  _Metric display values_
+- **typography.metric.label**: `14px/500/20px` (typography)
+  _Metric labels_
+
+### Other Tokens
+- **radius.widget**: `12px` (dimension)
+  _Border radius for dashboard widgets_
+- **shadow.widget**: `0 1px 3px rgba(0,0,0,0.1)` (shadow)
+  _Subtle shadow for elevated widgets_
+
+---
+
+## Modified Design Tokens
+
+### color.primary.600
+- **Old Value**: `#1D4ED8`
+- **New Value**: `#2563EB`
+- **Impact**: Affects primary buttons and links throughout dashboard
+
+### spacing.md
+- **Old Value**: `16px`
+- **New Value**: `20px`
+- **Impact**: Increases default spacing in grid layouts
+
+### typography.body.medium
+- **Old Value**: `16px/400/24px`
+- **New Value**: `16px/500/24px`
+- **Impact**: Slightly bolder body text for better readability
+
+---
+
+## New Components Required
+
+### Atoms (Basic Elements)
+
+#### StatBadge
+
+**Purpose**: Small metric indicator with icon and optional pulse animation
+**Variants**: success, warning, error, info
+**States**: default, pulse (animated)
+**Similar to**: Badge (78% match)
+
+**Recommendation**: Extend existing Badge component with `variant="stat"` prop instead of creating new component. Add icon prop and pulse animation state.
+
+### Molecules (Simple Combinations)
+
+#### TrendIndicator
+
+**Purpose**: Show metric trend with arrow and percentage change
+**Composition**: Icon (arrow up/down) + Text (percentage) + StatBadge
+**Variants**: up (green), down (red), neutral (gray)
+**Similar to**: None (0% match)
+
+**Recommendation**: Create new molecule component. Reuse StatBadge internally. 
+ +### Organisms (Complex Components) + +#### DashboardGrid + +**Purpose**: Responsive grid layout for dashboard widgets +**Composition**: Grid container + flexible widget slots +**Responsive**: 1 col (mobile), 2 col (tablet), 3 col (desktop) +**Similar to**: None (0% match) + +**Recommendation**: Create new organism component with responsive grid behavior. Use CSS Grid for layout. + +--- + +## Component Reuse Opportunities + +### StatBadge → Extend Badge + +**Similarity**: 78% +**Recommendation**: Extend existing Badge component with new variant instead of creating duplicate component +**Time Saved**: 2-3 hours + +**Approach**: Add `variant="stat"` option to Badge props. Add `icon` prop for optional icon display. Add `pulse` boolean prop for animation state. Maintains existing Badge API while adding new functionality. + +### MetricCard → Enhance Existing + +**Similarity**: 85% +**Recommendation**: Add trend and comparison props to existing MetricCard component +**Time Saved**: 2 hours + +**Approach**: Add `trend` prop (up/down/neutral). Add `comparisonPeriod` prop (string). Both optional initially for backward compatibility. Mark as required in v3.0.0. + +--- + +## Design System Impact + +### Token Health + +- **In Sync**: 87 tokens +- **Drift Detected**: 5 tokens +- **Missing in Code**: 12 tokens +- **Unused in Design**: 3 tokens + +**Sync Status**: Drift Detected +**Priority Level**: High + +### High Impact Changes + +- **Color primary.600 modification** + - **Impact**: Breaking change for custom theme consumers + - **Action Required**: Update documentation, notify users in changelog + +- **Spacing.md increase** (16px → 20px) + - **Impact**: Layout shifts in existing grid components + - **Action Required**: Visual regression testing on all layouts + +### Low Impact Changes + +- Typography weight increase (400 → 500) - minimal visual change +- New status colors - additive only, no conflicts +- New widget tokens - isolated to dashboard feature + +--- + +## Implementation Recommendations + +### Phased Approach + +**Phase 1: Design Tokens** (2 hours) +- Priority: High +- Add 12 new tokens to design-tokens.json +- Update 5 existing tokens +- Run Style Dictionary build +- Update Tailwind @theme + +**Phase 2: Atomic Components** (3 hours) +- Priority: High +- Extend Badge component with stat variant (2h) +- Add pulse animation to Badge (1h) + +**Phase 3: Molecule Components** (2 hours) +- Priority: Medium +- Create TrendIndicator component (2h) + +**Phase 4: Organism Components** (5 hours) +- Priority: Medium +- Create DashboardGrid component (3h) +- Enhance MetricCard with trend props (2h) + +### Total Estimated Time + +**12 hours** (Medium complexity) + +--- + +## Breaking Changes + +### MetricCard + +**Issue**: Adding required `trend` and `comparisonPeriod` props breaks existing usage +**Previous Mapping**: `src/components/molecules/MetricCard.tsx` (8 existing usages) +**Recommendation**: Add props as optional first, then require in major version + +**Migration Steps**: +- Add props as optional in v2.4.0 +- Add deprecation warning when props not provided +- Update all 8 existing usages in codebase +- Document migration in CHANGELOG.md +- Make props required in v3.0.0 (breaking change) +- Provide codemod script for automated migration + +--- + +## Next Steps + +1. **Review Implementation Plan**: `.agent/tasks/TASK-16-dashboard-redesign.md` +2. **Update Design Tokens**: Phase 1 implementation +3. **Implement Components**: Follow atomic design hierarchy +4. 
**Test & Verify**: Visual regression, accessibility, unit tests +5. **Update UI Kit Inventory**: After each component completion + +--- + +## Design Fidelity Checklist + +- [ ] All 12 new design tokens extracted and added to design system +- [ ] StatBadge extends Badge component correctly +- [ ] TrendIndicator composition matches Figma +- [ ] DashboardGrid responsive behavior (1/2/3 cols) +- [ ] MetricCard shows trend indicator +- [ ] Spacing matches Figma exactly (48px section gap, 24px widget padding) +- [ ] Typography scales applied (XL heading 36px, metric value 48px) +- [ ] Status colors used correctly (success/warning/error) +- [ ] Widget shadows and radius applied +- [ ] Interactive states (hover, active) match design + +--- + +**Generated**: 2025-10-21 17:30:00 +**Navigator Version**: 3.2.0 +**Next Review**: After Phase 4 completion diff --git a/skills/product-design/functions/component_mapper.py b/skills/product-design/functions/component_mapper.py new file mode 100755 index 0000000..858ca3a --- /dev/null +++ b/skills/product-design/functions/component_mapper.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python3 +""" +Map Figma components to codebase components using Code Connect data and fuzzy matching. +""" + +import json +import argparse +import os +from typing import Dict, List, Any +from difflib import SequenceMatcher + + +def calculate_similarity(str1: str, str2: str) -> float: + """Calculate similarity ratio between two strings.""" + return SequenceMatcher(None, str1.lower(), str2.lower()).ratio() + + +def find_component_files(project_root: str, extensions: List[str] = None) -> List[Dict[str, str]]: + """ + Find all component files in project. + + Args: + project_root: Project root directory + extensions: File extensions to search (default: ['tsx', 'jsx', 'vue']) + + Returns: + List of component file info (path, name) + """ + if extensions is None: + extensions = ['tsx', 'jsx', 'vue', 'svelte'] + + components = [] + + for root, dirs, files in os.walk(project_root): + # Skip node_modules, dist, build directories + dirs[:] = [d for d in dirs if d not in ['node_modules', 'dist', 'build', '.git', '.next']] + + for file in files: + if any(file.endswith(f'.{ext}') for ext in extensions): + full_path = os.path.join(root, file) + rel_path = os.path.relpath(full_path, project_root) + + # Extract component name (filename without extension) + comp_name = os.path.splitext(file)[0] + + # Skip test files, stories, etc. + if any(suffix in comp_name.lower() for suffix in ['.test', '.spec', '.stories', '.story']): + continue + + components.append({ + 'name': comp_name, + 'path': rel_path, + 'full_path': full_path + }) + + return components + + +def fuzzy_match_component(figma_name: str, codebase_components: List[Dict[str, str]], + threshold: float = 0.6) -> List[Dict[str, Any]]: + """ + Fuzzy match Figma component name to codebase components. 
+ + Args: + figma_name: Figma component name + codebase_components: List of codebase component info + threshold: Minimum similarity threshold + + Returns: + List of matches with confidence scores + """ + matches = [] + + # Clean Figma name (remove variant info) + # "Button/Primary/Large" → "Button" + base_name = figma_name.split('/')[0].strip() + + for comp in codebase_components: + comp_name = comp['name'] + similarity = calculate_similarity(base_name, comp_name) + + if similarity >= threshold: + matches.append({ + 'figma_name': figma_name, + 'code_component': comp_name, + 'code_path': comp['path'], + 'confidence': round(similarity, 3), + 'match_type': 'fuzzy' + }) + + # Sort by confidence + matches.sort(key=lambda x: x['confidence'], reverse=True) + + return matches + + +def extract_variant_mapping(figma_name: str) -> Dict[str, str]: + """ + Extract variant information from Figma component name. + + Examples: + "Button/Primary/Large" → {"variant": "primary", "size": "lg"} + "Card/Elevated" → {"variant": "elevated"} + + Args: + figma_name: Figma component name with variants + + Returns: + Dictionary of variant properties + """ + parts = [p.strip() for p in figma_name.split('/')] + + if len(parts) == 1: + return {} + + # Base component is first part + variants = parts[1:] + + # Map common variant patterns + mapping = {} + + for variant in variants: + variant_lower = variant.lower() + + # Size variants + if variant_lower in ['small', 'sm', 'xs', 'tiny']: + mapping['size'] = 'sm' + elif variant_lower in ['medium', 'md', 'base']: + mapping['size'] = 'md' + elif variant_lower in ['large', 'lg']: + mapping['size'] = 'lg' + elif variant_lower in ['xl', 'xlarge', 'extra-large']: + mapping['size'] = 'xl' + + # Style variants + elif variant_lower in ['primary', 'main']: + mapping['variant'] = 'primary' + elif variant_lower in ['secondary', 'outline', 'outlined']: + mapping['variant'] = 'secondary' + elif variant_lower in ['tertiary', 'ghost', 'link', 'text']: + mapping['variant'] = 'ghost' + + # State variants + elif variant_lower in ['disabled', 'inactive']: + mapping['state'] = 'disabled' + elif variant_lower in ['loading', 'busy']: + mapping['state'] = 'loading' + + # Type variants + elif variant_lower in ['solid', 'filled']: + mapping['type'] = 'solid' + elif variant_lower in ['elevated', 'raised']: + mapping['type'] = 'elevated' + elif variant_lower in ['flat', 'plain']: + mapping['type'] = 'flat' + + # If no pattern matches, use as generic variant + else: + if 'variant' not in mapping: + mapping['variant'] = variant_lower + + return mapping + + +def map_components(figma_components: List[Dict[str, Any]], + code_connect_map: Dict[str, Any], + project_root: str) -> Dict[str, Any]: + """ + Main mapping function: map Figma components to codebase components. 
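+
+    Code Connect mappings are taken first at confidence 1.0; otherwise fuzzy
+    name matching buckets each component as mapped (score >= 0.8),
+    low_confidence (0.6-0.8, top 3 candidates kept), or unmapped.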
+ + Args: + figma_components: List of Figma components from design_analyzer + code_connect_map: Figma Code Connect mappings + project_root: Project root directory for component search + + Returns: + Component mappings with confidence scores + """ + # Find all component files in codebase + codebase_components = find_component_files(project_root) + + mappings = { + 'mapped': [], + 'unmapped': [], + 'low_confidence': [], + 'summary': {} + } + + for figma_comp in figma_components: + comp_id = figma_comp.get('id') + comp_name = figma_comp.get('name') + + # Check Code Connect first (highest confidence) + if comp_id and comp_id in code_connect_map: + code_connect_data = code_connect_map[comp_id] + mappings['mapped'].append({ + 'figma_id': comp_id, + 'figma_name': comp_name, + 'code_component': code_connect_data.get('codeConnectName'), + 'code_path': code_connect_data.get('codeConnectSrc'), + 'confidence': 1.0, + 'match_type': 'code_connect', + 'props_mapping': extract_variant_mapping(comp_name) + }) + else: + # Fallback to fuzzy matching + matches = fuzzy_match_component(comp_name, codebase_components, threshold=0.6) + + if matches and matches[0]['confidence'] >= 0.8: + # High confidence match + best_match = matches[0] + best_match['figma_id'] = comp_id + best_match['props_mapping'] = extract_variant_mapping(comp_name) + mappings['mapped'].append(best_match) + + elif matches: + # Low confidence match (manual review needed) + for match in matches[:3]: # Top 3 matches + match['figma_id'] = comp_id + match['props_mapping'] = extract_variant_mapping(comp_name) + mappings['low_confidence'].append(match) + + else: + # No match found + mappings['unmapped'].append({ + 'figma_id': comp_id, + 'figma_name': comp_name, + 'recommendation': 'Create new component', + 'props_mapping': extract_variant_mapping(comp_name) + }) + + # Generate summary + total = len(figma_components) + mappings['summary'] = { + 'total_figma_components': total, + 'mapped_count': len(mappings['mapped']), + 'low_confidence_count': len(mappings['low_confidence']), + 'unmapped_count': len(mappings['unmapped']), + 'mapping_coverage': f"{(len(mappings['mapped']) / max(total, 1)) * 100:.1f}%" + } + + return mappings + + +def main(): + parser = argparse.ArgumentParser( + description='Map Figma components to codebase components' + ) + parser.add_argument( + '--figma-components', + required=True, + help='Path to JSON file with Figma components (from design_analyzer)' + ) + parser.add_argument( + '--code-connect-map', + help='Path to Code Connect map JSON (optional)' + ) + parser.add_argument( + '--project-root', + required=True, + help='Project root directory' + ) + parser.add_argument( + '--output', + help='Output file path (default: stdout)' + ) + + args = parser.parse_args() + + # Load Figma components + with open(args.figma_components, 'r') as f: + figma_components = json.load(f) + + # Load Code Connect map if provided + code_connect_map = {} + if args.code_connect_map: + with open(args.code_connect_map, 'r') as f: + code_connect_map = json.load(f) + + # Run mapping + mappings = map_components(figma_components, code_connect_map, args.project_root) + + # Output results + output_json = json.dumps(mappings, indent=2) + + if args.output: + with open(args.output, 'w') as f: + f.write(output_json) + else: + print(output_json) + + +if __name__ == '__main__': + main() diff --git a/skills/product-design/functions/design_analyzer.py b/skills/product-design/functions/design_analyzer.py new file mode 100755 index 0000000..3f9d020 --- /dev/null +++ 
b/skills/product-design/functions/design_analyzer.py @@ -0,0 +1,445 @@ +#!/usr/bin/env python3 +""" +Analyze Figma design data and extract patterns, components, and tokens. +Compares against existing UI kit to identify new components and potential reuse opportunities. +""" + +import json +import sys +import argparse +from typing import Dict, List, Any +from difflib import SequenceMatcher + + +def calculate_similarity(str1: str, str2: str) -> float: + """ + Calculate similarity ratio between two strings. + + Args: + str1: First string + str2: Second string + + Returns: + float: Similarity ratio (0.0 to 1.0) + """ + return SequenceMatcher(None, str1.lower(), str2.lower()).ratio() + + +def extract_components_from_metadata(metadata: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Extract component information from Figma metadata. + + Args: + metadata: Figma MCP get_metadata response or manual structure + + Returns: + List of components with their properties + """ + components = [] + + def traverse_nodes(node, depth=0): + """Recursively traverse Figma node tree.""" + if not isinstance(node, dict): + return + + node_type = node.get('type', '') + node_name = node.get('name', 'Unnamed') + node_id = node.get('id', '') + + # Identify components (COMPONENT, COMPONENT_SET, or instances) + if node_type in ['COMPONENT', 'COMPONENT_SET', 'INSTANCE']: + components.append({ + 'id': node_id, + 'name': node_name, + 'type': node_type, + 'depth': depth, + 'properties': extract_node_properties(node) + }) + + # Traverse children + children = node.get('children', []) + for child in children: + traverse_nodes(child, depth + 1) + + # Handle both MCP format and manual format + if 'document' in metadata: + traverse_nodes(metadata['document']) + elif 'nodes' in metadata: + for node in metadata['nodes']: + traverse_nodes(node) + elif isinstance(metadata, dict): + traverse_nodes(metadata) + + return components + + +def extract_node_properties(node: Dict[str, Any]) -> Dict[str, Any]: + """ + Extract relevant properties from Figma node. + + Args: + node: Figma node data + + Returns: + Dictionary of extracted properties + """ + properties = {} + + # Extract layout properties + if 'layoutMode' in node: + properties['layout'] = { + 'mode': node.get('layoutMode'), + 'direction': node.get('layoutDirection'), + 'gap': node.get('itemSpacing'), + 'padding': { + 'top': node.get('paddingTop'), + 'right': node.get('paddingRight'), + 'bottom': node.get('paddingBottom'), + 'left': node.get('paddingLeft') + } + } + + # Extract sizing + if 'absoluteBoundingBox' in node: + bbox = node['absoluteBoundingBox'] + properties['size'] = { + 'width': bbox.get('width'), + 'height': bbox.get('height') + } + + # Extract variant properties + if 'componentProperties' in node: + properties['variants'] = node['componentProperties'] + + return properties + + +def categorize_component_by_name(component_name: str) -> str: + """ + Categorize component by atomic design level based on name patterns. 
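+
+    Matching is substring-based against keyword lists (e.g. any name containing
+    "button" is treated as an atom); names matching nothing default to "molecule".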
+ + Args: + component_name: Component name from Figma + + Returns: + 'atom', 'molecule', 'organism', or 'template' + """ + name_lower = component_name.lower() + + # Atoms: Basic elements + atoms = ['button', 'input', 'icon', 'text', 'badge', 'avatar', 'checkbox', + 'radio', 'switch', 'label', 'link', 'image'] + + # Molecules: Simple combinations + molecules = ['field', 'card', 'list-item', 'menu-item', 'tab', 'breadcrumb', + 'tooltip', 'dropdown', 'search', 'pagination'] + + # Organisms: Complex components + organisms = ['header', 'footer', 'sidebar', 'navigation', 'modal', 'form', + 'table', 'dashboard', 'profile', 'chart', 'grid'] + + for atom in atoms: + if atom in name_lower: + return 'atom' + + for molecule in molecules: + if molecule in name_lower: + return 'molecule' + + for organism in organisms: + if organism in name_lower: + return 'organism' + + # Default to molecule if unclear + return 'molecule' + + +def find_similar_components(new_component: Dict[str, Any], + ui_kit_inventory: List[Dict[str, Any]], + threshold: float = 0.7) -> List[Dict[str, Any]]: + """ + Find similar components in existing UI kit. + + Args: + new_component: Component from Figma design + ui_kit_inventory: List of existing UI kit components + threshold: Similarity threshold (0.0 to 1.0) + + Returns: + List of similar components with similarity scores + """ + similar = [] + new_name = new_component.get('name', '') + + for existing in ui_kit_inventory: + existing_name = existing.get('name', '') + similarity = calculate_similarity(new_name, existing_name) + + if similarity >= threshold: + similar.append({ + 'name': existing_name, + 'path': existing.get('path', ''), + 'similarity': similarity, + 'recommendation': generate_recommendation(similarity, new_name, existing_name) + }) + + # Sort by similarity descending + similar.sort(key=lambda x: x['similarity'], reverse=True) + + return similar + + +def generate_recommendation(similarity: float, new_name: str, existing_name: str) -> str: + """ + Generate recommendation based on similarity score. + + Args: + similarity: Similarity ratio + new_name: New component name + existing_name: Existing component name + + Returns: + Recommendation string + """ + if similarity >= 0.9: + return f"Very similar to {existing_name}. Consider reusing existing component." + elif similarity >= 0.7: + return f"Similar to {existing_name}. Consider extending with new variant/prop." + else: + return f"Some similarity to {existing_name}. Review for potential shared patterns." + + +def analyze_design(figma_data: Dict[str, Any], + ui_kit_inventory: Dict[str, Any]) -> Dict[str, Any]: + """ + Main analysis function: extract patterns from Figma and compare with UI kit. + + Args: + figma_data: Combined Figma MCP data (metadata, variables, code_connect_map) + ui_kit_inventory: Current UI kit inventory + + Returns: + Analysis results with new tokens, components, similarities, breaking changes + """ + results = { + 'new_tokens': [], + 'new_components': [], + 'similar_components': [], + 'breaking_changes': [], + 'summary': {} + } + + # Extract components from Figma metadata + metadata = figma_data.get('metadata', {}) + figma_components = extract_components_from_metadata(metadata) + + # Extract existing UI kit components + existing_components = ui_kit_inventory.get('components', []) + + # Analyze each Figma component + for figma_comp in figma_components: + comp_name = figma_comp.get('name', '') + + # Skip system components (starting with _, . 
or #) + if comp_name.startswith(('_', '.', '#')): + continue + + # Find similar components + similar = find_similar_components(figma_comp, existing_components, threshold=0.7) + + if similar: + # Component has similarities - potential reuse + results['similar_components'].append({ + 'figma_component': comp_name, + 'figma_id': figma_comp.get('id'), + 'category': categorize_component_by_name(comp_name), + 'similar_to': similar, + 'properties': figma_comp.get('properties', {}) + }) + else: + # New component - needs creation + results['new_components'].append({ + 'name': comp_name, + 'id': figma_comp.get('id'), + 'category': categorize_component_by_name(comp_name), + 'properties': figma_comp.get('properties', {}), + 'depth': figma_comp.get('depth', 0) + }) + + # Analyze design tokens from variables + variables = figma_data.get('variables', {}) + if variables: + results['new_tokens'] = analyze_tokens(variables, ui_kit_inventory) + + # Analyze breaking changes + code_connect_map = figma_data.get('code_connect_map', {}) + if code_connect_map: + results['breaking_changes'] = detect_breaking_changes( + figma_components, + code_connect_map, + existing_components + ) + + # Generate summary + results['summary'] = { + 'total_figma_components': len(figma_components), + 'new_components_count': len(results['new_components']), + 'similar_components_count': len(results['similar_components']), + 'new_tokens_count': len(results['new_tokens']), + 'breaking_changes_count': len(results['breaking_changes']), + 'reuse_potential': f"{(len(results['similar_components']) / max(len(figma_components), 1)) * 100:.1f}%" + } + + return results + + +def analyze_tokens(variables: Dict[str, Any], + ui_kit_inventory: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Analyze design tokens from Figma variables. + + Args: + variables: Figma variables data + ui_kit_inventory: Current UI kit inventory with existing tokens + + Returns: + List of new tokens not in current inventory + """ + new_tokens = [] + existing_tokens = ui_kit_inventory.get('tokens', {}) + + # Handle different variable formats + for var_name, var_data in variables.items(): + if isinstance(var_data, dict): + value = var_data.get('$value') or var_data.get('value') + var_type = var_data.get('$type') or var_data.get('type') + else: + value = var_data + var_type = infer_token_type(var_name, value) + + # Check if token exists + if var_name not in existing_tokens: + new_tokens.append({ + 'name': var_name, + 'value': value, + 'type': var_type, + 'status': 'new' + }) + + return new_tokens + + +def infer_token_type(name: str, value: Any) -> str: + """ + Infer token type from name and value. + + Args: + name: Token name + value: Token value + + Returns: + Token type string + """ + name_lower = name.lower() + + if 'color' in name_lower or (isinstance(value, str) and value.startswith('#')): + return 'color' + elif 'spacing' in name_lower or 'gap' in name_lower or 'padding' in name_lower: + return 'dimension' + elif 'font' in name_lower or 'typography' in name_lower: + return 'typography' + elif 'radius' in name_lower or 'border' in name_lower: + return 'dimension' + elif 'shadow' in name_lower: + return 'shadow' + else: + return 'unknown' + + +def detect_breaking_changes(figma_components: List[Dict[str, Any]], + code_connect_map: Dict[str, Any], + existing_components: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Detect breaking changes in component mappings. 
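+
+    A change is flagged as breaking when an existing Code Connect mapping points
+    at a code path that no longer appears in the UI kit inventory.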
+ + Args: + figma_components: Components from Figma + code_connect_map: Figma Code Connect mappings + existing_components: Existing UI kit components + + Returns: + List of breaking changes detected + """ + breaking_changes = [] + + for figma_comp in figma_components: + comp_id = figma_comp.get('id') + comp_name = figma_comp.get('name') + + # Check if component was previously mapped + if comp_id in code_connect_map: + mapping = code_connect_map[comp_id] + mapped_path = mapping.get('codeConnectSrc') + + # Check if mapped component still exists + exists = any( + existing.get('path') == mapped_path + for existing in existing_components + ) + + if not exists: + breaking_changes.append({ + 'figma_component': comp_name, + 'figma_id': comp_id, + 'previous_mapping': mapped_path, + 'issue': 'Mapped component no longer exists in codebase', + 'recommendation': 'Re-map to new component or create new implementation' + }) + + return breaking_changes + + +def main(): + parser = argparse.ArgumentParser( + description='Analyze Figma design data and compare with UI kit' + ) + parser.add_argument( + '--figma-data', + required=True, + help='Path to JSON file with Figma MCP data' + ) + parser.add_argument( + '--ui-kit-inventory', + required=True, + help='Path to UI kit inventory JSON file' + ) + parser.add_argument( + '--output', + help='Output file path (default: stdout)' + ) + + args = parser.parse_args() + + # Load Figma data + with open(args.figma_data, 'r') as f: + figma_data = json.load(f) + + # Load UI kit inventory + with open(args.ui_kit_inventory, 'r') as f: + ui_kit_inventory = json.load(f) + + # Run analysis + results = analyze_design(figma_data, ui_kit_inventory) + + # Output results + output_json = json.dumps(results, indent=2) + + if args.output: + with open(args.output, 'w') as f: + f.write(output_json) + else: + print(output_json) + + +if __name__ == '__main__': + main() diff --git a/skills/product-design/functions/design_system_auditor.py b/skills/product-design/functions/design_system_auditor.py new file mode 100755 index 0000000..dbc414e --- /dev/null +++ b/skills/product-design/functions/design_system_auditor.py @@ -0,0 +1,359 @@ +#!/usr/bin/env python3 +""" +Audit design system for drift between Figma design and code implementation. +Compares tokens, components, and generates recommendations. +""" + +import json +import argparse +from typing import Dict, List, Any + + +def audit_token_alignment(figma_tokens: Dict[str, Any], + code_tokens: Dict[str, Any]) -> Dict[str, Any]: + """ + Audit token alignment between Figma and code. 
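+    Both token trees are flattened to dot-notation paths before comparison,
+    so differences in nesting alone are never reported as drift.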
+ + Args: + figma_tokens: Tokens from Figma (DTCG format) + code_tokens: Tokens from code (design-tokens.json) + + Returns: + Alignment report with drift analysis + """ + def flatten_tokens(tokens, prefix=''): + """Flatten nested tokens to dot notation.""" + flat = {} + for key, value in tokens.items(): + path = f"{prefix}.{key}" if prefix else key + if isinstance(value, dict) and '$value' in value: + flat[path] = value + elif isinstance(value, dict): + flat.update(flatten_tokens(value, path)) + return flat + + figma_flat = flatten_tokens(figma_tokens) + code_flat = flatten_tokens(code_tokens) + + alignment = { + 'in_sync': [], + 'drift_detected': [], + 'missing_in_code': [], + 'unused_in_design': [] + } + + # Compare Figma tokens with code + for token_path, figma_data in figma_flat.items(): + figma_value = figma_data.get('$value') + + if token_path in code_flat: + code_value = code_flat[token_path].get('$value') + + if figma_value == code_value: + alignment['in_sync'].append({ + 'path': token_path, + 'value': figma_value + }) + else: + alignment['drift_detected'].append({ + 'path': token_path, + 'figma_value': figma_value, + 'code_value': code_value, + 'type': figma_data.get('$type') + }) + else: + alignment['missing_in_code'].append({ + 'path': token_path, + 'value': figma_value, + 'type': figma_data.get('$type') + }) + + # Find tokens in code but not in Figma + for token_path in code_flat.keys(): + if token_path not in figma_flat: + alignment['unused_in_design'].append({ + 'path': token_path, + 'value': code_flat[token_path].get('$value'), + 'type': code_flat[token_path].get('$type') + }) + + return alignment + + +def analyze_component_reuse(figma_components: List[Dict[str, Any]], + component_mappings: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Analyze component reuse opportunities. + + Args: + figma_components: Components from design_analyzer + component_mappings: Mappings from component_mapper + + Returns: + List of reuse opportunities + """ + opportunities = [] + + # Get similar components from mappings + similar_components = component_mappings.get('low_confidence', []) + + for similar in similar_components: + confidence = similar.get('confidence', 0) + figma_name = similar.get('figma_name') + code_component = similar.get('code_component') + + if confidence >= 0.7: + # Strong similarity - suggest extending existing + opportunities.append({ + 'figma_component': figma_name, + 'existing_component': code_component, + 'code_path': similar.get('code_path'), + 'similarity': confidence, + 'recommendation': f"Extend {code_component} with new variant/prop instead of creating new component", + 'estimated_time_saved': '2-3 hours' + }) + elif confidence >= 0.5: + # Moderate similarity - suggest reviewing for shared patterns + opportunities.append({ + 'figma_component': figma_name, + 'existing_component': code_component, + 'code_path': similar.get('code_path'), + 'similarity': confidence, + 'recommendation': f"Review {code_component} for shared patterns before implementing", + 'estimated_time_saved': '1-2 hours' + }) + + return opportunities + + +def audit_tailwind_config(tokens: Dict[str, Any], tailwind_config_path: str = None) -> Dict[str, Any]: + """ + Audit Tailwind config alignment with design tokens. 
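+    Note: this is a heuristic pass - it groups tokens by path prefix and emits
+    recommendations for manual review instead of parsing tailwind.config.js.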
+ + Args: + tokens: Design tokens (DTCG format) + tailwind_config_path: Path to tailwind.config.js (optional) + + Returns: + Tailwind alignment report + """ + # This is a simplified version - real implementation would parse tailwind.config.js + # For now, return structure for manual audit + + alignment = { + 'status': 'manual_audit_required', + 'recommendations': [] + } + + def flatten_tokens(tokens, prefix=''): + flat = {} + for key, value in tokens.items(): + path = f"{prefix}.{key}" if prefix else key + if isinstance(value, dict) and '$value' in value: + flat[path] = value + elif isinstance(value, dict): + flat.update(flatten_tokens(value, path)) + return flat + + flat_tokens = flatten_tokens(tokens) + + # Generate recommendations based on token types + color_tokens = [t for t in flat_tokens.keys() if t.startswith('color.')] + spacing_tokens = [t for t in flat_tokens.keys() if t.startswith('spacing.')] + typography_tokens = [t for t in flat_tokens.keys() if t.startswith('typography.')] + + if color_tokens: + alignment['recommendations'].append({ + 'category': 'colors', + 'action': f'Add {len(color_tokens)} color tokens to Tailwind theme.extend.colors', + 'example': f'"{color_tokens[0]}": "var(--{color_tokens[0].replace(".", "-")})"' + }) + + if spacing_tokens: + alignment['recommendations'].append({ + 'category': 'spacing', + 'action': f'Add {len(spacing_tokens)} spacing tokens to Tailwind theme.extend.spacing', + 'example': f'"{spacing_tokens[0].split(".")[-1]}": "var(--{spacing_tokens[0].replace(".", "-")})"' + }) + + if typography_tokens: + alignment['recommendations'].append({ + 'category': 'typography', + 'action': f'Add {len(typography_tokens)} typography tokens to Tailwind theme.extend.fontSize', + 'example': 'Use Style Dictionary to generate Tailwind @theme directive' + }) + + return alignment + + +def generate_audit_summary(token_alignment: Dict[str, Any], + component_reuse: List[Dict[str, Any]]) -> Dict[str, Any]: + """ + Generate overall audit summary with priority levels. 
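+    Priority scales with drift volume: more than 10 drifted tokens (or over
+    20% of the total) is critical, more than 5 drifted or 10 missing is high,
+    any drift or missing token is medium, otherwise low.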
+ + Args: + token_alignment: Token alignment report + component_reuse: Component reuse opportunities + + Returns: + Summary with priority levels and recommendations + """ + total_tokens = ( + len(token_alignment['in_sync']) + + len(token_alignment['drift_detected']) + + len(token_alignment['missing_in_code']) + + len(token_alignment['unused_in_design']) + ) + + drift_count = len(token_alignment['drift_detected']) + missing_count = len(token_alignment['missing_in_code']) + + # Determine priority + if drift_count > 10 or (drift_count / max(total_tokens, 1)) > 0.2: + priority = 'critical' + elif drift_count > 5 or missing_count > 10: + priority = 'high' + elif drift_count > 0 or missing_count > 0: + priority = 'medium' + else: + priority = 'low' + + summary = { + 'token_health': { + 'total': total_tokens, + 'in_sync': len(token_alignment['in_sync']), + 'drift_detected': drift_count, + 'missing_in_code': missing_count, + 'unused_in_design': len(token_alignment['unused_in_design']), + 'sync_percentage': f"{(len(token_alignment['in_sync']) / max(total_tokens, 1)) * 100:.1f}%" + }, + 'component_reuse': { + 'opportunities_found': len(component_reuse), + 'estimated_time_savings': f"{len(component_reuse) * 2}-{len(component_reuse) * 3} hours" + }, + 'priority': priority, + 'top_recommendations': generate_top_recommendations( + token_alignment, + component_reuse, + priority + ) + } + + return summary + + +def generate_top_recommendations(token_alignment: Dict[str, Any], + component_reuse: List[Dict[str, Any]], + priority: str) -> List[str]: + """Generate top 3-5 recommendations based on audit results.""" + recommendations = [] + + drift_count = len(token_alignment['drift_detected']) + missing_count = len(token_alignment['missing_in_code']) + + if drift_count > 0: + recommendations.append( + f"⚠️ Fix {drift_count} drifted tokens - update design-tokens.json with Figma values" + ) + + if missing_count > 0: + recommendations.append( + f"➕ Add {missing_count} new tokens to design system - run Style Dictionary build after" + ) + + if len(token_alignment['unused_in_design']) > 5: + recommendations.append( + f"🗑️ Clean up {len(token_alignment['unused_in_design'])} unused tokens in codebase" + ) + + if component_reuse: + top_reuse = component_reuse[0] + recommendations.append( + f"♻️ Reuse opportunity: Extend {top_reuse['existing_component']} instead of creating {top_reuse['figma_component']}" + ) + + if priority == 'low': + recommendations.append("✅ Design system is well-aligned - good maintenance!") + + return recommendations[:5] + + +def audit_design_system(figma_data: Dict[str, Any], + code_data: Dict[str, Any]) -> Dict[str, Any]: + """ + Main audit function: comprehensive design system health check. + + Args: + figma_data: Combined Figma data (tokens, components, mappings) + code_data: Combined code data (design-tokens.json, ui-kit-inventory, etc.) 
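+            Expected keys: 'design_tokens' and 'ui_kit_inventory'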
+ + Returns: + Complete audit report with recommendations + """ + # Extract data + figma_tokens = figma_data.get('tokens', {}) + figma_components = figma_data.get('components', []) + component_mappings = figma_data.get('component_mappings', {}) + + code_tokens = code_data.get('design_tokens', {}) + ui_kit_inventory = code_data.get('ui_kit_inventory', {}) + + # Run audits + token_alignment = audit_token_alignment(figma_tokens, code_tokens) + component_reuse = analyze_component_reuse(figma_components, component_mappings) + tailwind_alignment = audit_tailwind_config(code_tokens) + + # Generate summary + summary = generate_audit_summary(token_alignment, component_reuse) + + return { + 'token_alignment': token_alignment, + 'component_reuse_opportunities': component_reuse, + 'tailwind_alignment': tailwind_alignment, + 'summary': summary + } + + +def main(): + parser = argparse.ArgumentParser( + description='Audit design system for drift and reuse opportunities' + ) + parser.add_argument( + '--figma-data', + required=True, + help='Path to JSON file with Figma data (tokens, components, mappings)' + ) + parser.add_argument( + '--code-data', + required=True, + help='Path to JSON file with code data (design-tokens.json, ui-kit-inventory)' + ) + parser.add_argument( + '--output', + help='Output file path (default: stdout)' + ) + + args = parser.parse_args() + + # Load data + with open(args.figma_data, 'r') as f: + figma_data = json.load(f) + + with open(args.code_data, 'r') as f: + code_data = json.load(f) + + # Run audit + audit_results = audit_design_system(figma_data, code_data) + + # Output results + output_json = json.dumps(audit_results, indent=2) + + if args.output: + with open(args.output, 'w') as f: + f.write(output_json) + else: + print(output_json) + + +if __name__ == '__main__': + main() diff --git a/skills/product-design/functions/figma_mcp_client.py b/skills/product-design/functions/figma_mcp_client.py new file mode 100644 index 0000000..9b07837 --- /dev/null +++ b/skills/product-design/functions/figma_mcp_client.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 +""" +Figma MCP Client - Direct Python interface to Figma Desktop MCP server. + +This module provides a simple async interface to Figma's Model Context Protocol +server running locally at http://127.0.0.1:3845/mcp + +Usage: + async with FigmaMCPClient() as client: + # Get design tokens + tokens = await client.get_variable_defs() + + # Get component metadata + metadata = await client.get_metadata(node_id="1:23") + + # Get code mappings + mappings = await client.get_code_connect_map() + +Requirements: + - Figma Desktop app must be running + - MCP server enabled in Figma Preferences + - User logged into Figma + - pip install mcp +""" + +import json +import logging +from typing import Optional, Dict, Any, List + +try: + from mcp import ClientSession + from mcp.client.streamable_http import streamablehttp_client +except ImportError as e: + raise ImportError( + "MCP SDK not installed. Install with: pip install mcp" + ) from e + + +logger = logging.getLogger(__name__) + + +class FigmaMCPError(Exception): + """Base exception for Figma MCP client errors.""" + pass + + +class FigmaNotRunningError(FigmaMCPError): + """Raised when Figma Desktop is not running or MCP server not enabled.""" + pass + + +class FigmaMCPClient: + """ + Async client for Figma Desktop MCP server. + + Provides direct access to Figma's design data through the Model Context Protocol. + Use as async context manager to ensure proper connection lifecycle. 
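+    Each method wraps a single MCP tool call; responses are parsed as JSON
+    where possible and returned as raw text otherwise (see _call_tool).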
+ + Example: + async with FigmaMCPClient() as client: + variables = await client.get_variable_defs() + print(f"Found {len(variables)} design tokens") + """ + + def __init__(self, mcp_url: str = "http://127.0.0.1:3845/mcp"): + """ + Initialize Figma MCP client. + + Args: + mcp_url: URL of Figma Desktop MCP server (default: http://127.0.0.1:3845/mcp) + """ + self.mcp_url = mcp_url + self.session = None + self.transport = None + self.session_context = None + + async def __aenter__(self): + """Async context manager entry - establishes MCP connection.""" + try: + # Connect to Figma MCP server + self.transport = streamablehttp_client(self.mcp_url) + self.read_stream, self.write_stream, _ = await self.transport.__aenter__() + + # Create MCP session + self.session_context = ClientSession(self.read_stream, self.write_stream) + self.session = await self.session_context.__aenter__() + + # Initialize MCP protocol + init_result = await self.session.initialize() + logger.info( + f"Connected to {init_result.serverInfo.name} " + f"v{init_result.serverInfo.version}" + ) + + return self + + except Exception as e: + logger.error(f"Failed to connect to Figma MCP server: {e}") + raise FigmaNotRunningError( + "Could not connect to Figma Desktop MCP server. " + "Please ensure:\n" + " 1. Figma Desktop app is running\n" + " 2. MCP server is enabled in Figma → Preferences\n" + " 3. You are logged into Figma\n" + f"Error: {e}" + ) from e + + async def __aexit__(self, *args): + """Async context manager exit - closes MCP connection.""" + try: + if self.session_context: + await self.session_context.__aexit__(*args) + if self.transport: + await self.transport.__aexit__(*args) + logger.info("Disconnected from Figma MCP server") + except Exception as e: + logger.warning(f"Error during disconnect: {e}") + + async def _call_tool(self, tool_name: str, params: Optional[Dict[str, Any]] = None) -> Any: + """ + Internal method to call MCP tool and extract content. + + Args: + tool_name: Name of the MCP tool to call + params: Tool parameters + + Returns: + Tool response content (parsed as JSON if possible) + """ + if not self.session: + raise FigmaMCPError("Client not connected. Use 'async with FigmaMCPClient()'") + + try: + result = await self.session.call_tool(tool_name, params or {}) + + # Extract content from MCP response + if result.content and len(result.content) > 0: + content_item = result.content[0] + + # Handle different content types + if hasattr(content_item, 'text'): + # Text content (most common) + content = content_item.text + + # Try to parse as JSON + try: + return json.loads(content) + except (json.JSONDecodeError, TypeError): + # Return raw text if not JSON + return content + + elif hasattr(content_item, 'data'): + # Image or binary content + return content_item.data + + else: + # Unknown content type - return as-is + return content_item + + return None + + except Exception as e: + logger.error(f"Error calling {tool_name}: {e}") + raise FigmaMCPError(f"Failed to call {tool_name}: {e}") from e + + async def get_metadata(self, node_id: Optional[str] = None) -> Dict[str, Any]: + """ + Get metadata for a node or page in XML format. + + Includes node IDs, layer types, names, positions, and sizes. + Use this to discover component structure before fetching full details. 
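+        Prefer this over get_design_context for discovery: metadata responses
+        are small, while generated code can run to tens of thousands of tokens.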
+ + Args: + node_id: Specific node or page ID (e.g., "1:23" or "0:1") + If None, uses currently selected node in Figma + + Returns: + Metadata dictionary with node structure + + Example: + metadata = await client.get_metadata(node_id="0:1") + # Parse to find component node IDs + """ + params = {"nodeId": node_id} if node_id else {} + return await self._call_tool("get_metadata", params) + + async def get_variable_defs(self, node_id: Optional[str] = None) -> Dict[str, str]: + """ + Get design token variable definitions. + + Returns mapping of variable names to values. + + Args: + node_id: Specific node ID (if None, uses currently selected) + + Returns: + Dictionary mapping variable names to values + Example: {'icon/default/secondary': '#949494', 'spacing/md': '16px'} + + Example: + tokens = await client.get_variable_defs() + for name, value in tokens.items(): + print(f"{name}: {value}") + """ + params = {"nodeId": node_id} if node_id else {} + return await self._call_tool("get_variable_defs", params) + + async def get_code_connect_map(self, node_id: Optional[str] = None) -> Dict[str, Dict[str, str]]: + """ + Get mapping of Figma components to code components. + + Requires Figma Enterprise plan with Code Connect configured. + + Args: + node_id: Specific node ID (if None, uses currently selected) + + Returns: + Dictionary mapping node IDs to code locations + Example: { + '1:2': { + 'codeConnectSrc': 'https://github.com/foo/components/Button.tsx', + 'codeConnectName': 'Button' + } + } + + Example: + mappings = await client.get_code_connect_map() + for node_id, mapping in mappings.items(): + print(f"{node_id} → {mapping['codeConnectName']}") + """ + params = {"nodeId": node_id} if node_id else {} + return await self._call_tool("get_code_connect_map", params) + + async def get_design_context(self, node_id: Optional[str] = None) -> str: + """ + Generate UI code for a component. + + Returns React/Vue/HTML implementation code for the selected component. + Use sparingly - can return large responses (50-100k tokens). + + Args: + node_id: Specific node ID (if None, uses currently selected) + + Returns: + UI code as string (React/Vue/HTML) + + Example: + code = await client.get_design_context(node_id="1:23") + # Returns React component code + """ + params = {"nodeId": node_id} if node_id else {} + return await self._call_tool("get_design_context", params) + + async def get_screenshot(self, node_id: Optional[str] = None) -> str: + """ + Generate screenshot for a component. + + Args: + node_id: Specific node ID (if None, uses currently selected) + + Returns: + Screenshot image data (format depends on Figma response) + + Example: + screenshot = await client.get_screenshot(node_id="1:23") + # Save or process screenshot data + """ + params = {"nodeId": node_id} if node_id else {} + return await self._call_tool("get_screenshot", params) + + async def create_design_system_rules(self) -> str: + """ + Generate design system rules for the repository. + + Returns: + Prompt for design system rules generation + + Example: + rules = await client.create_design_system_rules() + """ + return await self._call_tool("create_design_system_rules") + + async def list_available_tools(self) -> List[str]: + """ + List all available MCP tools. + + Useful for debugging or discovering what Figma MCP supports. 
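+        The available tool set can vary with the Figma Desktop version, so
+        prefer this call over hard-coding tool names.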
+ + Returns: + List of tool names + + Example: + tools = await client.list_available_tools() + print(f"Available: {', '.join(tools)}") + """ + if not self.session: + raise FigmaMCPError("Client not connected") + + result = await self.session.list_tools() + return [tool.name for tool in result.tools] + + +# Convenience function for simple use cases +async def get_figma_variables() -> Dict[str, str]: + """ + Quick helper to fetch Figma design tokens. + + Returns: + Dictionary of variable name → value mappings + + Example: + tokens = await get_figma_variables() + """ + async with FigmaMCPClient() as client: + return await client.get_variable_defs() + + +async def get_figma_metadata(node_id: Optional[str] = None) -> Dict[str, Any]: + """ + Quick helper to fetch Figma node metadata. + + Args: + node_id: Specific node ID (if None, uses currently selected) + + Returns: + Metadata dictionary + + Example: + metadata = await get_figma_metadata(node_id="0:1") + """ + async with FigmaMCPClient() as client: + return await client.get_metadata(node_id) diff --git a/skills/product-design/functions/implementation_planner.py b/skills/product-design/functions/implementation_planner.py new file mode 100755 index 0000000..62aa025 --- /dev/null +++ b/skills/product-design/functions/implementation_planner.py @@ -0,0 +1,391 @@ +#!/usr/bin/env python3 +""" +Generate implementation task documentation from design review analysis. +Creates phased breakdown with acceptance criteria and complexity estimates. +""" + +import json +import argparse +from datetime import datetime +from typing import Dict, List, Any + + +def estimate_complexity(component_category: str, has_variants: bool, breaking_change: bool) -> tuple: + """ + Estimate implementation complexity and time. + + Args: + component_category: atom, molecule, organism, template + has_variants: Whether component has variants/props + breaking_change: Whether this is a breaking change + + Returns: + Tuple of (complexity_level, estimated_hours) + """ + base_hours = { + 'atom': 2, + 'molecule': 3, + 'organism': 5, + 'template': 8 + } + + hours = base_hours.get(component_category, 3) + + if has_variants: + hours += 1 + + if breaking_change: + hours += 2 + + if hours <= 2: + complexity = 'Low' + elif hours <= 4: + complexity = 'Medium' + else: + complexity = 'High' + + return complexity, hours + + +def generate_token_phase(new_tokens: List[Dict[str, Any]], + modified_tokens: List[Dict[str, Any]]) -> Dict[str, Any]: + """Generate Phase 1: Design Tokens implementation plan.""" + total_tokens = len(new_tokens) + len(modified_tokens) + hours = max(1, total_tokens // 10 + 1) # 10 tokens per hour estimate + + subtasks = [ + f"Add {len(new_tokens)} new tokens to design-tokens.json" if new_tokens else None, + f"Update {len(modified_tokens)} modified tokens" if modified_tokens else None, + "Run Style Dictionary build to generate platform outputs", + "Update Tailwind @theme with new variables", + "Verify token availability in Storybook tokens page" + ] + + acceptance_criteria = [ + f"All {total_tokens} new/modified tokens available in Tailwind utilities", + "No breaking changes to existing token references", + "Style Dictionary build completes without errors", + "Storybook tokens page shows all additions" + ] + + return { + 'name': 'Design Tokens', + 'priority': 'High', + 'estimated_hours': hours, + 'description': f'Add and update {total_tokens} design tokens', + 'subtasks': [task for task in subtasks if task], + 'acceptance_criteria': acceptance_criteria, + 'files_to_modify': [ + 
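            # Conventional Navigator locations; the last two entries are descriptive labels, not literal paths: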
'.agent/design-system/design-tokens.json', + 'tailwind.config.js (or CSS @theme)', + 'Storybook tokens documentation' + ] + } + + +def generate_component_phase(component: Dict[str, Any], phase_number: int) -> Dict[str, Any]: + """Generate component implementation phase.""" + comp_name = component.get('name') + category = component.get('category', 'molecule') + properties = component.get('properties', {}) + similar_to = component.get('similar_to', []) + + has_variants = bool(properties.get('variants')) + breaking_change = component.get('breaking_change', False) + + complexity, hours = estimate_complexity(category, has_variants, breaking_change) + + # Determine approach + if similar_to and similar_to[0]['similarity'] >= 0.7: + approach = f"Extend existing {similar_to[0]['name']} component" + action = 'extend' + else: + approach = f"Create new {category} component" + action = 'create' + + # Generate subtasks based on action + if action == 'extend': + subtasks = [ + f"Add new variant props to {similar_to[0]['name']}", + "Update TypeScript interface with new props", + "Add styles for new variants", + "Update existing tests", + "Add Storybook stories for new variants" + ] + files = [ + similar_to[0].get('path', f'src/components/{category}/{comp_name}.tsx'), + f"src/components/{category}/{comp_name}.test.tsx", + f"src/components/{category}/{comp_name}.stories.tsx" + ] + else: + subtasks = [ + f"Create {comp_name} component file", + "Implement TypeScript props interface", + "Add styles (CSS modules/Tailwind)", + "Write unit tests", + "Create Storybook stories", + "Add barrel export (index.ts)" + ] + files = [ + f"src/components/{category}/{comp_name}.tsx", + f"src/components/{category}/{comp_name}.test.tsx", + f"src/components/{category}/{comp_name}.stories.tsx", + f"src/components/{category}/index.ts" + ] + + acceptance_criteria = [ + f"{comp_name} renders correctly with all variants", + "100% test coverage for new props/variants" if action == 'extend' else "90%+ test coverage", + "Storybook shows all component states", + "No visual regression in existing components" if action == 'extend' else "Passes visual regression tests", + "Accessibility audit passes (a11y addon)" + ] + + if breaking_change: + acceptance_criteria.insert(0, "Migration guide created for breaking changes") + subtasks.append("Create migration documentation") + + return { + 'number': phase_number, + 'name': comp_name, + 'category': category, + 'priority': 'High' if breaking_change else 'Medium', + 'complexity': complexity, + 'estimated_hours': hours, + 'approach': approach, + 'subtasks': subtasks, + 'files_to_modify': files, + 'acceptance_criteria': acceptance_criteria, + 'breaking_change': breaking_change + } + + +def generate_task_document(task_id: str, + feature_name: str, + analysis_results: Dict[str, Any], + review_reference: str) -> str: + """ + Generate complete Navigator task document. 
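+    Phases are ordered tokens-first, then components, following the atomic
+    design hierarchy used throughout this skill.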
+
+    Args:
+        task_id: Task identifier (e.g., "TASK-16")
+        feature_name: Feature name (e.g., "Dashboard Redesign")
+        analysis_results: Combined analysis from all functions
+        review_reference: Path to design review report
+
+    Returns:
+        Markdown task document
+    """
+    date = datetime.now().strftime('%Y-%m-%d')
+
+    # Extract data
+    new_tokens = analysis_results.get('new_tokens', [])
+    modified_tokens = analysis_results.get('token_diff', {}).get('modified', [])
+    new_components = analysis_results.get('new_components', [])
+    similar_components = analysis_results.get('similar_components', [])
+    breaking_changes = analysis_results.get('breaking_changes', [])
+
+    # Generate phases
+    phases = []
+
+    # Phase 1: Always start with tokens if any exist
+    if new_tokens or modified_tokens:
+        phases.append(generate_token_phase(new_tokens, modified_tokens))
+
+    # Phase 2+: Component implementations
+    for i, comp in enumerate(new_components + similar_components, start=2):
+        phases.append(generate_component_phase(comp, i))
+
+    # Calculate totals
+    total_hours = sum(phase.get('estimated_hours', 0) for phase in phases)
+    total_complexity = 'High' if total_hours > 10 else 'Medium' if total_hours > 5 else 'Low'
+
+    # Build markdown document
+    doc = f"""# {task_id}: {feature_name} Implementation
+
+**Created**: {date}
+**Status**: Ready for Development
+**Priority**: High
+**Complexity**: {total_complexity}
+**Estimated Time**: {total_hours} hours
+
+---
+
+## Context
+
+Implement {feature_name} from Figma mockup with design system integration.
+
+**Design Review**: `{review_reference}`
+
+---
+
+## Overview
+
+**Changes Required**:
+- Design Tokens: {len(new_tokens)} new, {len(modified_tokens)} modified
+- Components: {len(new_components)} new, {len(similar_components)} to extend
+- Breaking Changes: {len(breaking_changes)}
+
+**Implementation Strategy**: Phased approach following atomic design hierarchy
+
+---
+
+## Implementation Phases
+
+"""
+
+    # Add each phase
+    for i, phase in enumerate(phases, start=1):
+        doc += f"""### Phase {i}: {phase['name']}
+
+**Priority**: {phase['priority']}
+**Complexity**: {phase.get('complexity', 'Medium')}
+**Estimated Time**: {phase['estimated_hours']} hours
+
+#### Approach
+{phase.get('approach', phase.get('description', 'Implement component following project patterns'))}
+
+#### Subtasks
+"""
+        for subtask in phase['subtasks']:
+            doc += f"- {subtask}\n"
+
+        doc += f"""
+#### Files to Modify
+"""
+        for file in phase.get('files_to_modify', []):
+            doc += f"- `{file}`\n"
+
+        doc += f"""
+**Acceptance Criteria**:
+"""
+        for criterion in phase['acceptance_criteria']:
+            doc += f"- [ ] {criterion}\n"
+
+        doc += "\n---\n\n"
+
+    # Add testing strategy and remaining sections; this must be an f-string so
+    # the counts and date in the closing sections interpolate correctly
+    doc += f"""## Testing Strategy
+
+### Unit Tests
+- All new/modified components
+- Test all variants and props
+- Error states and edge cases
+- Target: 90%+ coverage
+
+### Visual Regression
+- Chromatic for all component stories
+- Test all variants and states
+- Verify no regressions in existing components
+
+### Integration Tests
+- Test component composition
+- Verify design token usage
+- Test responsive behavior
+
+### Accessibility
+- Run a11y addon in Storybook
+- Keyboard navigation testing
+- Screen reader verification
+- WCAG 2.2 Level AA compliance
+
+---
+
+## Rollout Plan
+
+1. **Phase 1: Tokens** (no visual changes, safe to deploy)
+2. **Phase 2-N: Components** (incremental deployment)
+   - Deploy each component after testing
+   - Monitor for issues before next phase
+3. 
**Final: Integration** (full feature integration) + +**Rollback Strategy**: Each phase is independent and can be reverted + +--- + +## Success Metrics + +- [ ] 100% design fidelity vs Figma mockup +- [ ] All acceptance criteria met +- [ ] No visual regressions +- [ ] All accessibility checks pass +- [ ] Performance budget maintained (no layout shifts) + +--- + +## Design System Impact + +**UI Kit Inventory**: Update after each component completion + +**Token Additions**: {len(new_tokens)} new tokens added to design system + +**Component Reuse**: {len(similar_components)} opportunities to extend existing components + +--- + +## Notes + +{f"⚠️ **Breaking Changes**: {len(breaking_changes)} component(s) require migration - see phase details" if breaking_changes else "✅ No breaking changes - backward compatible implementation"} + +--- + +**Last Updated**: {date} +**Navigator Version**: 3.2.0 +""" + + return doc + + +def main(): + parser = argparse.ArgumentParser( + description='Generate implementation task document from design review' + ) + parser.add_argument( + '--task-id', + required=True, + help='Task identifier (e.g., TASK-16)' + ) + parser.add_argument( + '--feature-name', + required=True, + help='Feature name (e.g., "Dashboard Redesign")' + ) + parser.add_argument( + '--analysis-results', + required=True, + help='Path to JSON file with combined analysis results' + ) + parser.add_argument( + '--review-reference', + required=True, + help='Path to design review report' + ) + parser.add_argument( + '--output', + help='Output file path (default: stdout)' + ) + + args = parser.parse_args() + + # Load analysis results + with open(args.analysis_results, 'r') as f: + analysis_results = json.load(f) + + # Generate task document + task_doc = generate_task_document( + args.task_id, + args.feature_name, + analysis_results, + args.review_reference + ) + + # Output + if args.output: + with open(args.output, 'w') as f: + f.write(task_doc) + else: + print(task_doc) + + +if __name__ == '__main__': + main() diff --git a/skills/product-design/functions/test_mcp_connection.py b/skills/product-design/functions/test_mcp_connection.py new file mode 100644 index 0000000..6c511af --- /dev/null +++ b/skills/product-design/functions/test_mcp_connection.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +""" +Test Figma MCP connection - Quick validation script. + +Tests connection to Figma Desktop MCP server and lists available tools. 
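+
+Exits 0 on success and 1 on failure, so it can be gated in scripts (setup.sh
+uses it as its final verification step).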
+""" +import asyncio +import sys + +try: + from figma_mcp_client import FigmaMCPClient, FigmaNotRunningError +except ImportError: + print("❌ Error: figma_mcp_client not found") + print(" Ensure you're in the correct directory: skills/product-design/functions/") + sys.exit(1) + + +async def test_connection(): + """Test Figma MCP connection.""" + try: + async with FigmaMCPClient() as client: + # List available tools + tools = await client.list_available_tools() + + print("✅ Successfully connected to Figma MCP server") + print(f" Found {len(tools)} tools:") + for tool in tools: + print(f" - {tool}") + + return True + + except FigmaNotRunningError as e: + print("❌ Figma Desktop not running or MCP not enabled") + print(f" {e}") + return False + + except Exception as e: + print(f"❌ Unexpected error: {e}") + import traceback + traceback.print_exc() + return False + + +async def main(): + """Main entry point.""" + success = await test_connection() + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/skills/product-design/functions/token_extractor.py b/skills/product-design/functions/token_extractor.py new file mode 100755 index 0000000..5faf401 --- /dev/null +++ b/skills/product-design/functions/token_extractor.py @@ -0,0 +1,394 @@ +#!/usr/bin/env python3 +""" +Extract design tokens from Figma variables and convert to DTCG format. +Compares with existing tokens and generates diff summary. +""" + +import json +import sys +import argparse +from typing import Dict, List, Any, Tuple + + +def normalize_token_name(figma_name: str) -> str: + """ + Normalize Figma variable name to DTCG semantic naming. + + Examples: + "Primary 500" → "color.primary.500" + "Spacing MD" → "spacing.md" + "Font Heading Large" → "typography.heading.large" + + Args: + figma_name: Original Figma variable name + + Returns: + Normalized DTCG token path + """ + name = figma_name.strip() + + # Convert to lowercase and split + parts = name.lower().replace('-', ' ').replace('_', ' ').split() + + # Detect token type from name + if any(keyword in parts for keyword in ['color', 'colour']): + token_type = 'color' + parts = [p for p in parts if p not in ['color', 'colour']] + elif any(keyword in parts for keyword in ['spacing', 'space', 'gap', 'padding', 'margin']): + token_type = 'spacing' + parts = [p for p in parts if p not in ['spacing', 'space', 'gap', 'padding', 'margin']] + elif any(keyword in parts for keyword in ['font', 'typography', 'text']): + token_type = 'typography' + parts = [p for p in parts if p not in ['font', 'typography', 'text']] + elif any(keyword in parts for keyword in ['radius', 'border']): + token_type = 'radius' + parts = [p for p in parts if p not in ['radius', 'border']] + elif any(keyword in parts for keyword in ['shadow', 'elevation']): + token_type = 'shadow' + parts = [p for p in parts if p not in ['shadow', 'elevation']] + else: + # Infer from first part + first_part = parts[0] if parts else '' + if first_part in ['primary', 'secondary', 'success', 'error', 'warning', 'info']: + token_type = 'color' + elif first_part in ['xs', 'sm', 'md', 'lg', 'xl', '2xl', '3xl']: + token_type = 'spacing' + else: + token_type = 'other' + + # Build token path + if parts: + return f"{token_type}.{'.'.join(parts)}" + else: + return token_type + + +def detect_token_type(name: str, value: Any) -> str: + """ + Detect DTCG token type from name and value. 
+ + Args: + name: Token name + value: Token value + + Returns: + DTCG type string + """ + name_lower = name.lower() + + # Check by name first + if 'color' in name_lower or 'colour' in name_lower: + return 'color' + elif 'spacing' in name_lower or 'gap' in name_lower or 'padding' in name_lower or 'margin' in name_lower: + return 'dimension' + elif 'font' in name_lower or 'typography' in name_lower: + if isinstance(value, dict): + return 'typography' + else: + return 'fontFamily' if 'family' in name_lower else 'dimension' + elif 'radius' in name_lower or 'border' in name_lower: + return 'dimension' + elif 'shadow' in name_lower or 'elevation' in name_lower: + return 'shadow' + elif 'duration' in name_lower or 'transition' in name_lower: + return 'duration' + elif 'opacity' in name_lower or 'alpha' in name_lower: + return 'number' + + # Infer from value + if isinstance(value, str): + if value.startswith('#') or value.startswith('rgb'): + return 'color' + elif value.endswith('px') or value.endswith('rem') or value.endswith('em'): + return 'dimension' + elif value.endswith('ms') or value.endswith('s'): + return 'duration' + elif isinstance(value, (int, float)): + return 'number' + elif isinstance(value, dict): + if 'fontFamily' in value or 'fontSize' in value: + return 'typography' + elif 'x' in value and 'y' in value: + return 'shadow' + + return 'other' + + +def convert_to_dtcg(figma_variables: Dict[str, Any]) -> Dict[str, Any]: + """ + Convert Figma variables to DTCG format. + + Args: + figma_variables: Figma get_variable_defs response + + Returns: + DTCG formatted tokens + """ + dtcg_tokens = {} + + for var_name, var_data in figma_variables.items(): + # Extract value and type + if isinstance(var_data, dict): + value = var_data.get('$value') or var_data.get('value') + var_type = var_data.get('$type') or var_data.get('type') + description = var_data.get('$description') or var_data.get('description', '') + else: + value = var_data + var_type = None + description = '' + + # Detect type if not provided + if not var_type: + var_type = detect_token_type(var_name, value) + + # Normalize token name to DTCG path + token_path = normalize_token_name(var_name) + + # Build nested structure + path_parts = token_path.split('.') + current = dtcg_tokens + + for i, part in enumerate(path_parts): + if i == len(path_parts) - 1: + # Last part - add token definition + current[part] = { + '$value': value, + '$type': var_type + } + if description: + current[part]['$description'] = description + else: + # Intermediate path - create nested dict + if part not in current: + current[part] = {} + current = current[part] + + return dtcg_tokens + + +def generate_diff(new_tokens: Dict[str, Any], + existing_tokens: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]: + """ + Generate diff between new and existing tokens. 
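+    Tokens are compared by flattened dot-notation path; a token is marked
+    modified only when its $value differs - type-only changes are not flagged.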
+
+    Args:
+        new_tokens: New tokens from Figma (DTCG format)
+        existing_tokens: Existing tokens from design-tokens.json
+
+    Returns:
+        Diff summary with added, modified, removed, unchanged
+    """
+    diff = {
+        'added': [],
+        'modified': [],
+        'removed': [],
+        'unchanged': []
+    }
+
+    # Flatten tokens for comparison
+    new_flat = flatten_tokens(new_tokens)
+    existing_flat = flatten_tokens(existing_tokens)
+
+    # Find added and modified
+    for token_path, token_data in new_flat.items():
+        if token_path not in existing_flat:
+            diff['added'].append({
+                'path': token_path,
+                'value': token_data.get('$value'),
+                'type': token_data.get('$type')
+            })
+        else:
+            existing_value = existing_flat[token_path].get('$value')
+            new_value = token_data.get('$value')
+
+            if existing_value != new_value:
+                diff['modified'].append({
+                    'path': token_path,
+                    'old_value': existing_value,
+                    'new_value': new_value,
+                    'type': token_data.get('$type')
+                })
+            else:
+                diff['unchanged'].append({
+                    'path': token_path,
+                    'value': new_value
+                })
+
+    # Find removed
+    for token_path, token_data in existing_flat.items():
+        if token_path not in new_flat:
+            diff['removed'].append({
+                'path': token_path,
+                'value': token_data.get('$value'),
+                'type': token_data.get('$type')
+            })
+
+    return diff
+
+
+def flatten_tokens(tokens: Dict[str, Any], prefix: str = '') -> Dict[str, Any]:
+    """
+    Flatten nested DTCG tokens to dot notation paths.
+
+    Args:
+        tokens: Nested DTCG token structure
+        prefix: Current path prefix
+
+    Returns:
+        Flattened dictionary with dot notation keys
+    """
+    flat = {}
+
+    for key, value in tokens.items():
+        current_path = f"{prefix}.{key}" if prefix else key
+
+        if isinstance(value, dict) and '$value' in value:
+            # This is a token definition
+            flat[current_path] = value
+        elif isinstance(value, dict):
+            # This is a nested group
+            flat.update(flatten_tokens(value, current_path))
+
+    return flat
+
+
+def generate_summary(diff: Dict[str, List[Dict[str, Any]]]) -> Dict[str, Any]:
+    """
+    Generate summary statistics from diff.
+
+    Args:
+        diff: Token diff
+
+    Returns:
+        Summary statistics
+    """
+    # Modified tokens exist in the new (Figma) set too, so count them on both sides
+    total_new = len(diff['added']) + len(diff['modified']) + len(diff['unchanged'])
+    total_existing = len(diff['modified']) + len(diff['removed']) + len(diff['unchanged'])
+
+    return {
+        'total_new_tokens': total_new,
+        'total_existing_tokens': total_existing,
+        'added_count': len(diff['added']),
+        'modified_count': len(diff['modified']),
+        'removed_count': len(diff['removed']),
+        'unchanged_count': len(diff['unchanged']),
+        'sync_status': 'in_sync' if len(diff['added']) == 0 and len(diff['modified']) == 0 and len(diff['removed']) == 0 else 'drift_detected',
+        'drift_percentage': f"{((len(diff['modified']) + len(diff['removed'])) / max(total_existing, 1)) * 100:.1f}%"
+    }
+
+
+def extract_tokens(figma_variables: Dict[str, Any],
+                   existing_tokens: Dict[str, Any] = None) -> Dict[str, Any]:
+    """
+    Main extraction function: convert Figma variables to DTCG and generate diff. 
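+    When no existing tokens are supplied, every extracted token is reported as
+    added and sync_status is set to 'initial_extraction'.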
+ + Args: + figma_variables: Figma get_variable_defs response + existing_tokens: Current design-tokens.json (optional) + + Returns: + Extraction results with DTCG tokens, diff, and summary + """ + # Convert to DTCG format + dtcg_tokens = convert_to_dtcg(figma_variables) + + # Generate diff if existing tokens provided + if existing_tokens: + diff = generate_diff(dtcg_tokens, existing_tokens) + summary = generate_summary(diff) + else: + # No existing tokens - all are new + flat = flatten_tokens(dtcg_tokens) + diff = { + 'added': [ + { + 'path': path, + 'value': data.get('$value'), + 'type': data.get('$type') + } + for path, data in flat.items() + ], + 'modified': [], + 'removed': [], + 'unchanged': [] + } + summary = { + 'total_new_tokens': len(flat), + 'total_existing_tokens': 0, + 'added_count': len(flat), + 'modified_count': 0, + 'removed_count': 0, + 'unchanged_count': 0, + 'sync_status': 'initial_extraction', + 'drift_percentage': '0.0%' + } + + return { + 'dtcg_tokens': dtcg_tokens, + 'diff': diff, + 'summary': summary + } + + +def main(): + parser = argparse.ArgumentParser( + description='Extract design tokens from Figma and convert to DTCG format' + ) + parser.add_argument( + '--figma-variables', + required=True, + help='Path to JSON file with Figma variables (get_variable_defs response)' + ) + parser.add_argument( + '--existing-tokens', + help='Path to existing design-tokens.json (optional)' + ) + parser.add_argument( + '--output', + help='Output file path (default: stdout)' + ) + parser.add_argument( + '--format', + choices=['full', 'tokens-only', 'diff-only'], + default='full', + help='Output format (default: full)' + ) + + args = parser.parse_args() + + # Load Figma variables + with open(args.figma_variables, 'r') as f: + figma_variables = json.load(f) + + # Load existing tokens if provided + existing_tokens = None + if args.existing_tokens: + with open(args.existing_tokens, 'r') as f: + existing_tokens = json.load(f) + + # Run extraction + results = extract_tokens(figma_variables, existing_tokens) + + # Format output based on --format flag + if args.format == 'tokens-only': + output = results['dtcg_tokens'] + elif args.format == 'diff-only': + output = { + 'diff': results['diff'], + 'summary': results['summary'] + } + else: + output = results + + output_json = json.dumps(output, indent=2) + + # Write output + if args.output: + with open(args.output, 'w') as f: + f.write(output_json) + else: + print(output_json) + + +if __name__ == '__main__': + main() diff --git a/skills/product-design/requirements.txt b/skills/product-design/requirements.txt new file mode 100644 index 0000000..1290454 --- /dev/null +++ b/skills/product-design/requirements.txt @@ -0,0 +1,9 @@ +# Navigator Product Design Skill - Python Dependencies + +# MCP SDK for direct Figma Desktop connection +mcp>=1.2.1 + +# Already included as transitive dependencies of mcp, but listing for clarity: +# anyio>=4.0.0 # Async I/O +# httpx>=0.25.0 # HTTP client +# pydantic>=2.0.0 # Data validation diff --git a/skills/product-design/setup.sh b/skills/product-design/setup.sh new file mode 100755 index 0000000..f054e48 --- /dev/null +++ b/skills/product-design/setup.sh @@ -0,0 +1,95 @@ +#!/bin/bash +# +# Navigator Product Design Skill - Setup Script +# +# Automatically installs Python dependencies and verifies Figma MCP connection. 
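+# Safe to re-run: the virtual environment is reused if it already exists.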
+#
+# Usage:
+#   chmod +x setup.sh
+#   ./setup.sh
+
+set -e
+
+SKILL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+FUNCTIONS_DIR="$SKILL_DIR/functions"
+
+echo "=========================================="
+echo "Navigator Product Design Skill - Setup"
+echo "=========================================="
+echo ""
+
+# Step 1: Check Python version
+echo "[1/5] Checking Python version..."
+PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}')
+PYTHON_MAJOR=$(echo "$PYTHON_VERSION" | cut -d. -f1)
+PYTHON_MINOR=$(echo "$PYTHON_VERSION" | cut -d. -f2)
+
+if [ "$PYTHON_MAJOR" -lt 3 ] || ([ "$PYTHON_MAJOR" -eq 3 ] && [ "$PYTHON_MINOR" -lt 10 ]); then
+    echo "❌ Python 3.10+ required (found $PYTHON_VERSION)"
+    exit 1
+fi
+echo "✅ Python $PYTHON_VERSION"
+echo ""
+
+# Step 2: Create virtual environment (optional but recommended)
+echo "[2/5] Setting up Python environment..."
+if [ ! -d "$SKILL_DIR/venv" ]; then
+    echo "Creating virtual environment..."
+    python3 -m venv "$SKILL_DIR/venv"
+    echo "✅ Virtual environment created at $SKILL_DIR/venv"
+else
+    echo "✅ Virtual environment already exists"
+fi
+
+# Activate venv
+source "$SKILL_DIR/venv/bin/activate"
+echo ""
+
+# Step 3: Install dependencies
+echo "[3/5] Installing Python dependencies..."
+pip install --quiet --upgrade pip
+pip install --quiet -r "$SKILL_DIR/requirements.txt"
+echo "✅ Dependencies installed (mcp>=1.2.1)"
+echo ""
+
+# Step 4: Verify Figma Desktop is running
+echo "[4/5] Checking Figma Desktop status..."
+if command -v lsof &> /dev/null; then
+    if lsof -i :3845 &> /dev/null; then
+        echo "✅ Figma MCP server detected (port 3845)"
+    else
+        echo "⚠️ Figma MCP server not detected"
+        echo "   Please ensure:"
+        echo "   1. Figma Desktop app is running"
+        echo "   2. MCP server enabled: Figma → Preferences → Enable local MCP Server"
+        echo "   3. You are logged into Figma"
+        echo ""
+        echo "   Setup will continue, but MCP features won't work until Figma is running."
+    fi
+else
+    echo "⚠️ Cannot check port (lsof not available)"
+    echo "   Please manually verify Figma Desktop is running"
+fi
+echo ""
+
+# Step 5: Test MCP connection
+echo "[5/5] Testing Figma MCP connection..."
+# Don't let set -e abort setup if the test fails (e.g., Figma not running);
+# the test can be re-run once Figma Desktop is up.
+python3 "$FUNCTIONS_DIR/test_mcp_connection.py" 2>&1 || echo "⚠️ Connection test failed - start Figma Desktop and re-run test_mcp_connection.py"
+echo ""
+
+echo "=========================================="
+echo "✅ Setup Complete!"
+echo "=========================================="
+echo ""
+echo "Next steps:"
+echo "  1. Ensure Figma Desktop is running"
+echo "  2. Enable MCP: Figma → Preferences → Enable local MCP Server"
+echo "  3. 
Try the skill: \"Review this Figma design: [URL]\"" +echo "" +echo "To activate the virtual environment manually:" +echo " source $SKILL_DIR/venv/bin/activate" +echo "" +echo "Documentation:" +echo " - SKILL.md: Skill usage guide" +echo " - functions/figma_mcp_client.py: MCP client API" +echo "" diff --git a/skills/product-design/templates/design-review-report.md b/skills/product-design/templates/design-review-report.md new file mode 100644 index 0000000..12eee07 --- /dev/null +++ b/skills/product-design/templates/design-review-report.md @@ -0,0 +1,230 @@ +# Design Review: {{FEATURE_NAME}} + +**Date**: {{DATE}} +**Figma**: {{FIGMA_LINK}} +**Reviewer**: Navigator Product Design Skill + +--- + +## Summary + +{{SUMMARY_DESCRIPTION}} + +**Changes Overview**: +- Design Tokens: {{NEW_TOKENS_COUNT}} new, {{MODIFIED_TOKENS_COUNT}} modified +- Components: {{NEW_COMPONENTS_COUNT}} new, {{EXTEND_COMPONENTS_COUNT}} to extend +- Breaking Changes: {{BREAKING_CHANGES_COUNT}} + +--- + +## New Design Tokens + +### Colors +{{#each NEW_COLOR_TOKENS}} +- **{{name}}**: `{{value}}` ({{type}}) + {{#if description}}_{{description}}_{{/if}} +{{/each}} + +### Spacing +{{#each NEW_SPACING_TOKENS}} +- **{{name}}**: `{{value}}` +{{/each}} + +### Typography +{{#each NEW_TYPOGRAPHY_TOKENS}} +- **{{name}}**: `{{value}}` +{{/each}} + +### Other Tokens +{{#each OTHER_TOKENS}} +- **{{name}}**: `{{value}}` ({{type}}) +{{/each}} + +--- + +## Modified Design Tokens + +{{#each MODIFIED_TOKENS}} +### {{path}} +- **Old Value**: `{{old_value}}` +- **New Value**: `{{new_value}}` +- **Impact**: {{impact_description}} +{{/each}} + +{{#if NO_MODIFIED_TOKENS}} +_No tokens modified - all changes are additive._ +{{/if}} + +--- + +## New Components Required + +### Atoms (Basic Elements) + +{{#each ATOM_COMPONENTS}} +#### {{name}} + +**Purpose**: {{purpose}} +**Variants**: {{variants}} +**States**: {{states}} +**Similar to**: {{similar_component}} ({{similarity_score}}% match) + +**Recommendation**: {{recommendation}} +{{/each}} + +### Molecules (Simple Combinations) + +{{#each MOLECULE_COMPONENTS}} +#### {{name}} + +**Purpose**: {{purpose}} +**Composition**: {{composition}} +**Variants**: {{variants}} +**Similar to**: {{similar_component}} ({{similarity_score}}% match) + +**Recommendation**: {{recommendation}} +{{/each}} + +### Organisms (Complex Components) + +{{#each ORGANISM_COMPONENTS}} +#### {{name}} + +**Purpose**: {{purpose}} +**Composition**: {{composition}} +**Responsive**: {{responsive_behavior}} +**Similar to**: {{similar_component}} ({{similarity_score}}% match) + +**Recommendation**: {{recommendation}} +{{/each}} + +--- + +## Component Reuse Opportunities + +{{#each REUSE_OPPORTUNITIES}} +### {{figma_component}} → Extend {{existing_component}} + +**Similarity**: {{similarity}}% +**Recommendation**: {{recommendation}} +**Time Saved**: {{time_saved}} + +**Approach**: {{approach_description}} +{{/each}} + +{{#if NO_REUSE_OPPORTUNITIES}} +_No reuse opportunities identified - all components are net new._ +{{/if}} + +--- + +## Design System Impact + +### Token Health + +- **In Sync**: {{IN_SYNC_COUNT}} tokens +- **Drift Detected**: {{DRIFT_COUNT}} tokens +- **Missing in Code**: {{MISSING_COUNT}} tokens +- **Unused in Design**: {{UNUSED_COUNT}} tokens + +**Sync Status**: {{SYNC_STATUS}} +**Priority Level**: {{PRIORITY_LEVEL}} + +### High Impact Changes + +{{#each HIGH_IMPACT_CHANGES}} +- {{change_description}} + - **Impact**: {{impact_type}} + - **Action Required**: {{action_required}} +{{/each}} + +### Low Impact Changes + +{{#each 
LOW_IMPACT_CHANGES}} +- {{change_description}} +{{/each}} + +--- + +## Implementation Recommendations + +### Phased Approach + +**Phase 1: Design Tokens** ({{PHASE_1_HOURS}} hours) +- Priority: {{PHASE_1_PRIORITY}} +- Add {{NEW_TOKENS_COUNT}} new tokens to design-tokens.json +- Update {{MODIFIED_TOKENS_COUNT}} existing tokens +- Run Style Dictionary build +- Update Tailwind @theme + +**Phase 2: Atomic Components** ({{PHASE_2_HOURS}} hours) +- Priority: {{PHASE_2_PRIORITY}} +{{#each ATOM_COMPONENTS}} +- Implement {{name}} ({{complexity}}, {{estimated_hours}}h) +{{/each}} + +**Phase 3: Molecule Components** ({{PHASE_3_HOURS}} hours) +- Priority: {{PHASE_3_PRIORITY}} +{{#each MOLECULE_COMPONENTS}} +- Implement {{name}} ({{complexity}}, {{estimated_hours}}h) +{{/each}} + +**Phase 4: Organism Components** ({{PHASE_4_HOURS}} hours) +- Priority: {{PHASE_4_PRIORITY}} +{{#each ORGANISM_COMPONENTS}} +- Implement {{name}} ({{complexity}}, {{estimated_hours}}h) +{{/each}} + +### Total Estimated Time + +**{{TOTAL_HOURS}} hours** ({{TOTAL_COMPLEXITY}} complexity) + +--- + +## Breaking Changes + +{{#each BREAKING_CHANGES}} +### {{component_name}} + +**Issue**: {{issue_description}} +**Previous Mapping**: `{{previous_mapping}}` +**Recommendation**: {{recommendation}} + +**Migration Steps**: +{{#each migration_steps}} +- {{step}} +{{/each}} +{{/each}} + +{{#if NO_BREAKING_CHANGES}} +✅ **No breaking changes** - all updates are backward compatible. +{{/if}} + +--- + +## Next Steps + +1. **Review Implementation Plan**: `.agent/tasks/TASK-{{TASK_NUMBER}}-{{FEATURE_SLUG}}.md` +2. **Update Design Tokens**: Phase 1 implementation +3. **Implement Components**: Follow atomic design hierarchy +4. **Test & Verify**: Visual regression, accessibility, unit tests +5. **Update UI Kit Inventory**: After each component completion + +--- + +## Design Fidelity Checklist + +- [ ] All design tokens extracted and added to design system +- [ ] Component structure matches Figma composition +- [ ] Variants and states implemented correctly +- [ ] Responsive behavior preserved +- [ ] Spacing and layout match pixel-perfect +- [ ] Typography styles applied correctly +- [ ] Colors and themes consistent +- [ ] Interactive states (hover, active, disabled) implemented + +--- + +**Generated**: {{TIMESTAMP}} +**Navigator Version**: {{NAVIGATOR_VERSION}} +**Next Review**: After implementation completion diff --git a/skills/visual-regression/SKILL.md b/skills/visual-regression/SKILL.md new file mode 100644 index 0000000..8c23e0c --- /dev/null +++ b/skills/visual-regression/SKILL.md @@ -0,0 +1,545 @@ +# Visual Regression Testing Setup Skill + +**Auto-invokes when user says**: +- "Set up visual regression" +- "Add Chromatic tests" +- "Create visual tests for [component]" +- "Configure visual regression testing" +- "Add screenshot testing" +- "Set up Percy" +- "Add BackstopJS" + +--- + +## Skill Purpose + +Generate complete visual regression testing setup with Storybook stories, configuration files, and CI/CD workflows. + +**Supports**: Chromatic, Percy, BackstopJS +**Frameworks**: React, Vue, Svelte (TypeScript/JavaScript) +**CI/CD**: GitHub Actions, GitLab CI, CircleCI + +--- + +## What This Skill Does + +1. **Detects existing setup**: Storybook version, VR tool, CI platform +2. **Validates component**: Extract props, variants, states +3. **Generates stories**: Complete `.stories.tsx` with all variants +4. **Creates config files**: Chromatic, Percy, or BackstopJS configuration +5. **Sets up CI/CD**: Auto-generate workflow files +6. 
**Provides instructions**: Next steps for API tokens, first baseline + +--- + +## Workflow + +### Step 1: Validate Project Setup + +**Execute**: `vr_setup_validator.py` + +**Check**: +- Framework (React/Vue/Svelte) from package.json +- Existing Storybook config (.storybook/ directory) +- Existing VR tool (chromatic, percy, backstopjs in dependencies) +- CI platform (.github/, .gitlab-ci.yml, .circleci/) +- Component file exists and is valid + +**Output**: +```json +{ + "framework": "react", + "storybook_version": "7.6.0", + "vr_tool": "chromatic", + "ci_platform": "github", + "component": { + "path": "src/components/ProfileCard.tsx", + "name": "ProfileCard", + "props": [...], + "valid": true + }, + "dependencies": { + "installed": ["@storybook/react", "@storybook/addon-essentials"], + "missing": ["chromatic", "@chromatic-com/storybook"] + } +} +``` + +**If Storybook not found**: Ask user if they want to install Storybook first, provide setup instructions. + +**If multiple VR tools found**: Ask user which to use (Chromatic recommended). + +--- + +### Step 2: Generate Storybook Stories + +**Execute**: `story_generator.py` + +**Process**: +1. Parse component file (TypeScript/JSX/Vue SFC) +2. Extract props, prop types, default values +3. Identify variants (size, variant, disabled, etc.) +4. Generate story file from template +5. Add accessibility tests (@storybook/addon-a11y) +6. Add interaction tests (if @storybook/test available) + +**Template**: `templates/story-template.tsx.j2` + +**Example output** (`ProfileCard.stories.tsx`): +```typescript +import type { Meta, StoryObj } from '@storybook/react'; +import { ProfileCard } from './ProfileCard'; + +const meta = { + title: 'Components/ProfileCard', + component: ProfileCard, + parameters: { + layout: 'centered', + }, + tags: ['autodocs'], + argTypes: { + size: { control: 'select', options: ['sm', 'md', 'lg'] }, + variant: { control: 'select', options: ['default', 'compact'] }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + name: 'John Doe', + avatar: 'https://example.com/avatar.jpg', + bio: 'Software Engineer', + size: 'md', + variant: 'default', + }, +}; + +export const Small: Story = { + args: { + ...Default.args, + size: 'sm', + }, +}; + +export const Large: Story = { + args: { + ...Default.args, + size: 'lg', + }, +}; + +export const Compact: Story = { + args: { + ...Default.args, + variant: 'compact', + }, +}; + +// Accessibility test +Default.parameters = { + a11y: { + config: { + rules: [ + { id: 'color-contrast', enabled: true }, + { id: 'label', enabled: true }, + ], + }, + }, +}; +``` + +**Write to**: `{component_directory}/{ComponentName}.stories.tsx` + +--- + +### Step 3: Generate Configuration Files + +**Execute**: `chromatic_config_generator.py` (or percy/backstop equivalent) + +#### For Chromatic: + +**Generate 3 files**: + +1. **chromatic.config.json**: +```json +{ + "projectId": "", + "buildScriptName": "build-storybook", + "exitZeroOnChanges": true, + "exitOnceUploaded": true, + "onlyChanged": true, + "externals": ["public/**"], + "skip": "dependabot/**", + "ignoreLastBuildOnBranch": "main" +} +``` + +2. 
**Update .storybook/main.js** (add addon): +```javascript +module.exports = { + stories: ['../src/**/*.stories.@(js|jsx|ts|tsx)'], + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', + '@chromatic-com/storybook', // ← Added + '@storybook/addon-interactions', + ], + framework: { + name: '@storybook/react-vite', + options: {}, + }, +}; +``` + +3. **Update package.json** (add scripts): +```json +{ + "scripts": { + "chromatic": "npx chromatic", + "chromatic:ci": "npx chromatic --exit-zero-on-changes" + } +} +``` + +**For Percy**: Generate `.percy.yml` instead +**For BackstopJS**: Generate `backstop.config.js` instead + +--- + +### Step 4: Generate CI/CD Workflow + +**Execute**: `ci_workflow_generator.py` + +**Detect CI platform** from existing files: +- `.github/workflows/` → GitHub Actions +- `.gitlab-ci.yml` → GitLab CI +- `.circleci/config.yml` → CircleCI +- None → Ask user, default to GitHub Actions + +#### GitHub Actions Example: + +**Generate**: `.github/workflows/chromatic.yml` + +```yaml +name: Visual Regression Tests + +on: + push: + branches: [main, develop] + pull_request: + branches: [main] + +jobs: + chromatic: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Required for Chromatic + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Run Chromatic + uses: chromaui/action@latest + with: + projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }} + exitZeroOnChanges: true + onlyChanged: true +``` + +**For GitLab CI**: Add job to `.gitlab-ci.yml` +**For CircleCI**: Add job to `.circleci/config.yml` + +--- + +### Step 5: Provide Setup Instructions + +**Output to user**: + +````markdown +✅ Visual regression testing setup complete! + +## Files Created/Modified + +✅ {ComponentName}.stories.tsx (Storybook story with variants) +✅ chromatic.config.json (Chromatic configuration) +✅ .storybook/main.js (Added @chromatic-com/storybook addon) +✅ package.json (Added chromatic scripts) +✅ .github/workflows/chromatic.yml (CI workflow) + +## Next Steps + +### 1. Install Dependencies + +```bash +npm install --save-dev chromatic @chromatic-com/storybook +``` + +### 2. Create Chromatic Project + +1. Go to https://www.chromatic.com/start +2. Sign in with GitHub +3. Create new project +4. Copy project token + +### 3. Add Secret to GitHub + +1. Go to repository Settings → Secrets and variables → Actions +2. Create secret: `CHROMATIC_PROJECT_TOKEN` +3. Paste your project token + +### 4. Update chromatic.config.json + +Replace `` with your actual project ID from Chromatic dashboard. + +### 5. Create Baseline + +```bash +npm run chromatic +``` + +This captures the initial screenshots as your baseline. + +### 6. Test Visual Regression + +1. Make a visual change to ProfileCard +2. Commit and push +3. CI will run Chromatic automatically +4. Review changes in Chromatic dashboard + +## Documentation + +See `.agent/sops/testing/visual-regression-setup.md` for detailed workflow. 
+ +## Troubleshooting + +**Storybook build fails**: Ensure all component dependencies are installed +**Chromatic upload fails**: Check project token in secrets +**No changes detected**: Chromatic only runs on changed stories (use `--force-rebuild` to test) +```` + +--- + +## Predefined Functions Reference + +### vr_setup_validator.py + +```python +def detect_storybook_config(project_root: str) -> dict +def detect_vr_tool(project_root: str) -> str +def validate_component_path(component_path: str) -> dict +def check_dependencies(project_root: str) -> dict +``` + +**Returns**: Validation report with detected setup and missing dependencies + +### story_generator.py + +```python +def analyze_component(component_path: str, framework: str) -> dict +def generate_story(component_info: dict, template_path: str) -> str +def create_accessibility_tests(component_info: dict) -> str +def create_interaction_tests(component_info: dict) -> str +``` + +**Returns**: Generated story file content + +### chromatic_config_generator.py + +```python +def generate_chromatic_config(project_info: dict) -> str +def generate_storybook_config(existing_config: dict) -> str +def generate_package_scripts(existing_scripts: dict) -> dict +def generate_percy_config(project_info: dict) -> str # Percy alternative +def generate_backstop_config(project_info: dict) -> str # BackstopJS alternative +``` + +**Returns**: Configuration file contents as strings + +### ci_workflow_generator.py + +```python +def detect_ci_platform(project_root: str) -> str +def generate_github_workflow(project_info: dict) -> str +def generate_gitlab_ci(project_info: dict) -> str +def generate_circleci_config(project_info: dict) -> str +``` + +**Returns**: CI workflow file contents + +--- + +## Templates Reference + +- **story-template.tsx.j2**: React/TypeScript story template +- **story-template.vue.j2**: Vue SFC story template +- **chromatic-config.json.j2**: Chromatic configuration +- **percy-config.yml.j2**: Percy configuration +- **github-workflow.yml.j2**: GitHub Actions workflow +- **gitlab-ci.yml.j2**: GitLab CI job +- **storybook-main.js.j2**: Storybook addon configuration + +--- + +## Examples + +### Example 1: Simple Component + +``` +User: "Set up visual regression for ProfileCard component" + +→ Detects: React, existing Storybook, no VR tool +→ Generates: ProfileCard.stories.tsx with 4 variants +→ Creates: Chromatic config, GitHub workflow +→ Outputs: Setup instructions +``` + +See: `examples/simple-component-vr.md` + +### Example 2: Full Design System + +``` +User: "Set up visual regression for entire design system" + +→ Detects: React, Storybook, components in src/components/ +→ Generates: Stories for all components (Button, Input, Card, etc.) +→ Creates: Chromatic config with design token validation +→ Outputs: Bulk setup instructions +``` + +See: `examples/design-system-vr.md` + +### Example 3: Existing Storybook + +``` +User: "Add Chromatic to existing Storybook" + +→ Detects: Storybook v7, existing stories +→ Adds: @chromatic-com/storybook addon +→ Creates: Chromatic config, CI workflow +→ Preserves: Existing stories and configuration +``` + +See: `examples/existing-storybook-vr.md` + +--- + +## Integration with product-design Skill + +After `product-design` generates implementation plan, suggest visual regression: + +``` +"Implementation plan created! Consider setting up visual regression testing: + + 'Set up visual regression for {ComponentName}' + +This ensures pixel-perfect implementation and prevents visual drift." 
+``` + +--- + +## Tool Comparison + +### Chromatic (Recommended) +- ✅ Purpose-built for Storybook +- ✅ Component-focused testing +- ✅ UI review workflow +- ✅ Free tier: 5,000 snapshots/month +- ❌ Requires cloud service + +### Percy +- ✅ Multi-framework support +- ✅ Responsive testing +- ✅ Visual reviews +- ❌ More expensive +- ❌ Less Storybook-specific + +### BackstopJS +- ✅ Open source, self-hosted +- ✅ No cloud dependency +- ✅ Free +- ❌ More manual setup +- ❌ Less automation + +**Default**: Chromatic (best Storybook integration) + +--- + +## Error Handling + +### Component Not Found +``` +Error: Component file not found at {path} + +Please provide correct path: + "Set up visual regression for src/components/ProfileCard.tsx" +``` + +### Storybook Not Installed +``` +Storybook not detected. Install first: + + npm install --save-dev @storybook/react @storybook/addon-essentials + npx storybook init + +Then retry: "Set up visual regression for ProfileCard" +``` + +### Multiple VR Tools Detected +``` +Multiple VR tools found: chromatic, percy + +Which should I use? + - "Use Chromatic for visual regression" + - "Use Percy for visual regression" +``` + +--- + +## Best Practices + +1. **Start with key components**: Don't test everything, focus on design system primitives +2. **Use interaction tests**: Combine visual + functional testing +3. **Baseline on main**: Always merge baselines to main branch +4. **Review changes**: Don't auto-accept visual changes +5. **Test states**: Capture hover, focus, error states +6. **Accessibility**: Include a11y tests in all stories + +--- + +## Token Efficiency + +**Traditional approach** (50k tokens): +1. Read Storybook docs (20k) +2. Read Chromatic docs (15k) +3. Write stories manually (10k) +4. Configure CI (5k) + +**With visual-regression skill** (3k tokens): +1. Skill auto-invokes (0 tokens) +2. Instructions load (3k tokens) +3. Functions execute (0 tokens) + +**Savings**: 94% (47k tokens) + +--- + +## Version History + +- **v3.3.0**: Initial release with Chromatic support +- **Future**: Percy, BackstopJS, Vue, Svelte support + +--- + +**Last Updated**: 2025-10-21 +**Skill Type**: Project-specific +**Generator**: nav-skill-creator (self-improving) diff --git a/skills/visual-regression/examples/design-system-vr.md b/skills/visual-regression/examples/design-system-vr.md new file mode 100644 index 0000000..4993e4e --- /dev/null +++ b/skills/visual-regression/examples/design-system-vr.md @@ -0,0 +1,107 @@ +# Example: Visual Regression for Full Design System + +Setup visual regression for entire design system with token validation. + +--- + +## Scenario + +You have a design system with multiple components: +- Button, Input, Card, Avatar, Badge, Modal, etc. +- Design tokens extracted from Figma (via product-design skill) +- Want to ensure pixel-perfect implementation + +--- + +## Usage + +``` +"Set up visual regression for entire design system in src/components" +``` + +--- + +## What Skill Does + +1. **Discovers components**: Scans `src/components/` directory +2. **Generates stories**: Creates `.stories.tsx` for each component +3. **Token validation**: Compares CSS values to design tokens +4. **Bulk setup**: Single Chromatic config for all components + +--- + +## Generated Files + +``` +src/components/ +├── Button/ +│ ├── Button.tsx +│ └── Button.stories.tsx # ← Generated +├── Input/ +│ ├── Input.tsx +│ └── Input.stories.tsx # ← Generated +├── Card/ +│ ├── Card.tsx +│ └── Card.stories.tsx # ← Generated +... 
+ +chromatic.config.json # ← Generated +.github/workflows/chromatic.yml # ← Generated +``` + +--- + +## Integration with product-design Skill + +If you used `product-design` skill to extract Figma tokens: + +``` +1. "Review this design from Figma" + → Extracts tokens to tokens.json + +2. "Set up visual regression for design system" + → Generates stories with token values + → Validates implementation matches tokens +``` + +--- + +## Token Validation Example + +**Design token** (from Figma): +```json +{ + "color": { + "primary": { + "value": "#3B82F6" + } + } +} +``` + +**Story validation**: +```typescript +export const Primary: Story = { + args: { variant: 'primary' }, + play: async ({ canvasElement }) => { + const button = within(canvasElement).getByRole('button'); + const computedStyle = window.getComputedStyle(button); + expect(computedStyle.backgroundColor).toBe('rgb(59, 130, 246)'); // #3B82F6 + }, +}; +``` + +--- + +## Benefits + +- **Prevent drift**: Catch when code diverges from designs +- **Scale testing**: Test 50+ components in one workflow +- **Token enforcement**: Ensure design tokens are used correctly +- **Design review**: Designers see visual diffs in Chromatic + +--- + +**Time saved**: 6-10 hours → 15 minutes (95% reduction) +**Components**: All in design system +**Tokens validated**: Automatically diff --git a/skills/visual-regression/examples/existing-storybook-vr.md b/skills/visual-regression/examples/existing-storybook-vr.md new file mode 100644 index 0000000..daf9ec9 --- /dev/null +++ b/skills/visual-regression/examples/existing-storybook-vr.md @@ -0,0 +1,109 @@ +# Example: Add Chromatic to Existing Storybook + +Add visual regression to project that already has Storybook configured. + +--- + +## Scenario + +- Storybook 7.x already installed and configured +- Existing `.stories.tsx` files for components +- Want to add Chromatic without breaking existing setup + +--- + +## Usage + +``` +"Add Chromatic to existing Storybook" +``` + +--- + +## What Skill Does + +1. **Detects existing setup**: Reads `.storybook/main.js`, existing stories +2. **Non-destructive update**: Only adds Chromatic addon +3. **Preserves config**: Keeps existing addons, framework, settings +4. **CI integration**: Generates workflow + +--- + +## Changes Made + +### Before + +**.storybook/main.js**: +```javascript +module.exports = { + stories: ['../src/**/*.stories.@(js|jsx|ts|tsx)'], + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', + '@storybook/addon-interactions', + ], + framework: { + name: '@storybook/react-vite', + options: {}, + }, +}; +``` + +### After + +```javascript +module.exports = { + stories: ['../src/**/*.stories.@(js|jsx|ts|tsx)'], + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', + '@chromatic-com/storybook', // ← Added + '@storybook/addon-interactions', + ], + framework: { + name: '@storybook/react-vite', + options: {}, + }, +}; +``` + +--- + +## Additional Files + +- `chromatic.config.json` (new) +- `.github/workflows/chromatic.yml` (new) +- `package.json` scripts updated + +--- + +## No Stories Generated + +Skill detects existing stories and **skips generation**: + +``` +✅ Existing Storybook detected +✅ Found 23 existing story files +✅ Skipping story generation +✅ Adding Chromatic configuration only +``` + +--- + +## Next Steps + +```bash +# Install Chromatic +npm install --save-dev chromatic @chromatic-com/storybook + +# Run on existing stories +npm run chromatic +``` + +All existing stories are captured as baseline automatically. 
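+
+For reference, the detection step is conceptually just a glob over story files. A minimal sketch (function and path names here are illustrative, not the skill's actual API):
+
+```python
+from pathlib import Path
+
+def find_existing_stories(src_dir: str) -> list[str]:
+    """Collect existing Storybook story files under src_dir."""
+    patterns = ("*.stories.tsx", "*.stories.ts", "*.stories.jsx", "*.stories.js")
+    stories: list[str] = []
+    for pattern in patterns:
+        # rglob walks the tree recursively, matching each story extension
+        stories.extend(str(p) for p in Path(src_dir).rglob(pattern))
+    return sorted(stories)
+
+# A non-empty result (e.g., 23 files) tells the skill to skip story
+# generation and add only the Chromatic configuration.
+```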
+ +--- + +**Time saved**: 1-2 hours → 3 minutes +**Stories affected**: 0 (uses existing) +**Breaking changes**: None diff --git a/skills/visual-regression/examples/simple-component-vr.md b/skills/visual-regression/examples/simple-component-vr.md new file mode 100644 index 0000000..4bd13c5 --- /dev/null +++ b/skills/visual-regression/examples/simple-component-vr.md @@ -0,0 +1,297 @@ +# Example: Visual Regression for Simple Component + +This example shows how to set up visual regression testing for a single React component. + +--- + +## Scenario + +You have a `ProfileCard` component that you want to test for visual regressions. + +**Component**: `src/components/ProfileCard.tsx` + +```typescript +interface ProfileCardProps { + name: string; + avatar: string; + bio: string; + size?: 'sm' | 'md' | 'lg'; + variant?: 'default' | 'compact'; +} + +export function ProfileCard({ name, avatar, bio, size = 'md', variant = 'default' }: ProfileCardProps) { + // Component implementation +} +``` + +--- + +## Usage + +### Step 1: Invoke Skill + +``` +"Set up visual regression for ProfileCard component" +``` + +### Step 2: Skill Auto-Invokes + +Navigator detects the command and invokes `visual-regression` skill. + +### Step 3: Validation + +Skill executes `vr_setup_validator.py`: + +```json +{ + "framework": "react", + "storybook": { + "installed": true, + "version": "7.6.0" + }, + "vr_tool": null, + "ci_platform": "github", + "component": { + "valid": true, + "path": "src/components/ProfileCard.tsx", + "name": "ProfileCard" + }, + "dependencies": { + "installed": ["@storybook/react", "@storybook/addon-essentials"], + "missing": ["chromatic", "@chromatic-com/storybook"] + }, + "ready": true +} +``` + +### Step 4: Generate Story + +Skill executes `story_generator.py`: + +**Created**: `src/components/ProfileCard.stories.tsx` + +```typescript +import type { Meta, StoryObj } from '@storybook/react'; +import { ProfileCard } from './ProfileCard'; + +const meta = { + title: 'Components/ProfileCard', + component: ProfileCard, + parameters: { + layout: 'centered', + }, + tags: ['autodocs'], + argTypes: { + size: { control: 'select', options: ['sm', 'md', 'lg'] }, + variant: { control: 'select', options: ['default', 'compact'] }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + name: 'John Doe', + avatar: 'https://via.placeholder.com/150', + bio: 'Software Engineer', + size: 'md', + variant: 'default', + }, +}; + +export const Small: Story = { + args: { + ...Default.args, + size: 'sm', + }, +}; + +export const Large: Story = { + args: { + ...Default.args, + size: 'lg', + }, +}; + +export const Compact: Story = { + args: { + ...Default.args, + variant: 'compact', + }, +}; + +// Accessibility tests +Default.parameters = { + a11y: { + config: { + rules: [ + { id: 'color-contrast', enabled: true }, + { id: 'label', enabled: true }, + ], + }, + }, +}; +``` + +### Step 5: Generate Chromatic Config + +Skill executes `chromatic_config_generator.py`: + +**Created**: `chromatic.config.json` + +```json +{ + "projectId": "", + "buildScriptName": "build-storybook", + "exitZeroOnChanges": true, + "exitOnceUploaded": true, + "onlyChanged": true, + "externals": ["public/**"], + "skip": "dependabot/**", + "ignoreLastBuildOnBranch": "main" +} +``` + +**Updated**: `.storybook/main.js` + +```diff + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', ++ '@chromatic-com/storybook', + '@storybook/addon-interactions', + ], +``` + +**Updated**: 
`package.json` + +```diff + "scripts": { ++ "chromatic": "npx chromatic", ++ "chromatic:ci": "npx chromatic --exit-zero-on-changes", + "build-storybook": "storybook build" + } +``` + +### Step 6: Generate CI Workflow + +Skill executes `ci_workflow_generator.py`: + +**Created**: `.github/workflows/chromatic.yml` + +```yaml +name: Visual Regression Tests + +on: + push: + branches: ['main', 'develop'] + pull_request: + branches: ['main'] + +jobs: + chromatic: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Run Chromatic + uses: chromaui/action@latest + with: + projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }} + exitZeroOnChanges: true + onlyChanged: true + autoAcceptChanges: 'main' +``` + +### Step 7: Setup Instructions + +Skill outputs: + +```markdown +✅ Visual regression testing setup complete! + +## Files Created/Modified + +✅ ProfileCard.stories.tsx (Storybook story with 4 variants) +✅ chromatic.config.json (Chromatic configuration) +✅ .storybook/main.js (Added @chromatic-com/storybook addon) +✅ package.json (Added chromatic scripts) +✅ .github/workflows/chromatic.yml (CI workflow) + +## Next Steps + +### 1. Install Dependencies + +```bash +npm install --save-dev chromatic @chromatic-com/storybook +``` + +### 2. Create Chromatic Project + +1. Go to https://www.chromatic.com/start +2. Sign in with GitHub +3. Create new project +4. Copy project token + +### 3. Add Secret to GitHub + +1. Go to repository Settings → Secrets and variables → Actions +2. Create secret: `CHROMATIC_PROJECT_TOKEN` +3. Paste your project token + +### 4. Update chromatic.config.json + +Replace `` with your actual project ID. + +### 5. Create Baseline + +```bash +npm run chromatic +``` + +### 6. Test Visual Regression + +1. Make a visual change to ProfileCard +2. Commit and push +3. CI will run Chromatic automatically +4. Review changes in Chromatic dashboard +``` + +--- + +## Result + +**Time saved**: 2-3 hours → 5 minutes (96% reduction) + +**Files created**: 4 +**Lines of code generated**: ~150 +**Token usage**: 3k tokens (vs 50k manual) + +--- + +## Follow-up Tasks + +After setup, you can: + +1. **Add more variants**: Edit `ProfileCard.stories.tsx` to add edge cases +2. **Customize Chromatic**: Adjust `chromatic.config.json` settings +3. **Add interaction tests**: Use `@storybook/test` for click/focus states +4. **Review visual diffs**: Monitor Chromatic dashboard for changes + +--- + +**Skill used**: visual-regression +**Time to complete**: ~5 minutes +**Automated**: Story generation, config creation, CI setup diff --git a/skills/visual-regression/functions/chromatic_config_generator.py b/skills/visual-regression/functions/chromatic_config_generator.py new file mode 100644 index 0000000..298c81f --- /dev/null +++ b/skills/visual-regression/functions/chromatic_config_generator.py @@ -0,0 +1,363 @@ +#!/usr/bin/env python3 +""" +Chromatic Configuration Generator + +Generates Chromatic config files, updates Storybook configuration, +and adds package.json scripts for visual regression testing. + +Usage: + python chromatic_config_generator.py [vr_tool] +""" + +import json +import os +import re +import sys +from pathlib import Path +from typing import Dict, Optional + + +def generate_chromatic_config(project_info: Dict) -> str: + """ + Generate chromatic.config.json content. 
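+
+    Note: "projectId" is deliberately left empty; the user replaces it
+    with their real Chromatic project ID after creating the project.
+    "ignoreLastBuildOnBranch" is set to the detected main branch.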
+ + Args: + project_info: Project information (main_branch, etc.) + + Returns: + JSON config as string + """ + main_branch = project_info.get('main_branch', 'main') + + config = { + "projectId": "", + "buildScriptName": "build-storybook", + "exitZeroOnChanges": True, + "exitOnceUploaded": True, + "onlyChanged": True, + "externals": ["public/**"], + "skip": "dependabot/**", + "ignoreLastBuildOnBranch": main_branch + } + + return json.dumps(config, indent=2) + + +def generate_percy_config(project_info: Dict) -> str: + """ + Generate .percy.yml content for Percy. + + Args: + project_info: Project information + + Returns: + YAML config as string + """ + config = """version: 2 +static: + build-dir: storybook-static + clean-urls: false +snapshot: + widths: + - 375 + - 768 + - 1280 + min-height: 1024 + percy-css: '' +""" + return config + + +def generate_backstop_config(project_info: Dict) -> str: + """ + Generate backstop.config.js for BackstopJS. + + Args: + project_info: Project information + + Returns: + JS config as string + """ + config = """module.exports = { + id: 'backstop_default', + viewports: [ + { + label: 'phone', + width: 375, + height: 667 + }, + { + label: 'tablet', + width: 768, + height: 1024 + }, + { + label: 'desktop', + width: 1280, + height: 1024 + } + ], + scenarios: [], + paths: { + bitmaps_reference: 'backstop_data/bitmaps_reference', + bitmaps_test: 'backstop_data/bitmaps_test', + engine_scripts: 'backstop_data/engine_scripts', + html_report: 'backstop_data/html_report', + ci_report: 'backstop_data/ci_report' + }, + report: ['browser'], + engine: 'puppeteer', + engineOptions: { + args: ['--no-sandbox'] + }, + asyncCaptureLimit: 5, + asyncCompareLimit: 50, + debug: false, + debugWindow: false +}; +""" + return config + + +def update_storybook_main_config(main_js_path: str, vr_tool: str = 'chromatic') -> str: + """ + Update .storybook/main.js to include VR tool addon. + + Args: + main_js_path: Path to main.js file + vr_tool: VR tool name ('chromatic', 'percy', 'backstopjs') + + Returns: + Updated main.js content + """ + # Read existing config + if not os.path.exists(main_js_path): + # Generate new config if doesn't exist + return generate_new_storybook_config(vr_tool) + + with open(main_js_path, 'r') as f: + content = f.read() + + # Determine addon to add + if vr_tool == 'chromatic': + addon = '@chromatic-com/storybook' + elif vr_tool == 'percy': + addon = '@percy/storybook' + else: + return content # BackstopJS doesn't need addon + + # Check if addon already exists + if addon in content: + return content # Already configured + + # Find addons array and insert + addons_pattern = r'addons:\s*\[(.*?)\]' + match = re.search(addons_pattern, content, re.DOTALL) + + if match: + existing_addons = match.group(1).strip() + # Add new addon + updated_addons = f"{existing_addons},\n '{addon}'" + updated_content = content.replace(match.group(0), f"addons: [\n {updated_addons}\n ]") + return updated_content + else: + # No addons array found - append at end + return content + f"\n// Added by Navigator visual-regression skill\nmodule.exports.addons.push('{addon}');\n" + + +def generate_new_storybook_config(vr_tool: str = 'chromatic') -> str: + """ + Generate new .storybook/main.js from scratch. 
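+
+    Note: the generated config assumes a React + Vite project with
+    stories under src/; other setups need a different framework entry.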
+ + Args: + vr_tool: VR tool name + + Returns: + main.js content + """ + addon = '@chromatic-com/storybook' if vr_tool == 'chromatic' else '@percy/storybook' + + config = f"""module.exports = {{ + stories: ['../src/**/*.stories.@(js|jsx|ts|tsx)'], + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', + '{addon}', + '@storybook/addon-interactions', + ], + framework: {{ + name: '@storybook/react-vite', + options: {{}}, + }}, +}}; +""" + return config + + +def update_package_json_scripts(package_json_path: str, vr_tool: str = 'chromatic') -> Dict: + """ + Add VR tool scripts to package.json. + + Args: + package_json_path: Path to package.json + vr_tool: VR tool name + + Returns: + Updated package.json data + """ + with open(package_json_path, 'r') as f: + package_data = json.load(f) + + scripts = package_data.get('scripts', {}) + + # Add VR tool scripts + if vr_tool == 'chromatic': + scripts['chromatic'] = 'npx chromatic' + scripts['chromatic:ci'] = 'npx chromatic --exit-zero-on-changes' + elif vr_tool == 'percy': + scripts['percy'] = 'percy storybook storybook-static' + scripts['percy:ci'] = 'percy storybook storybook-static --partial' + elif vr_tool == 'backstopjs': + scripts['backstop:reference'] = 'backstop reference' + scripts['backstop:test'] = 'backstop test' + scripts['backstop:approve'] = 'backstop approve' + + # Ensure build-storybook script exists + if 'build-storybook' not in scripts: + scripts['build-storybook'] = 'storybook build' + + package_data['scripts'] = scripts + + return package_data + + +def detect_main_branch(project_root: str) -> str: + """ + Detect main branch name from git. + + Args: + project_root: Project root directory + + Returns: + Branch name ('main' or 'master') + """ + git_head = Path(project_root) / '.git' / 'HEAD' + + if git_head.exists(): + with open(git_head, 'r') as f: + content = f.read().strip() + if 'refs/heads/main' in content: + return 'main' + elif 'refs/heads/master' in content: + return 'master' + + return 'main' # Default + + +def generate_configs(project_root: str, vr_tool: str = 'chromatic') -> Dict: + """ + Generate all configuration files for VR setup. 
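+
+    Note: this only assembles file paths and contents; nothing is
+    written to disk here, so the caller decides what to persist.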
+ + Args: + project_root: Project root directory + vr_tool: VR tool to configure + + Returns: + Dict with file paths and contents + """ + project_info = { + 'main_branch': detect_main_branch(project_root) + } + + result = { + 'configs_generated': [], + 'configs_updated': [], + 'errors': [] + } + + # Generate tool-specific config + if vr_tool == 'chromatic': + config_path = os.path.join(project_root, 'chromatic.config.json') + config_content = generate_chromatic_config(project_info) + result['configs_generated'].append({ + 'path': config_path, + 'content': config_content + }) + + elif vr_tool == 'percy': + config_path = os.path.join(project_root, '.percy.yml') + config_content = generate_percy_config(project_info) + result['configs_generated'].append({ + 'path': config_path, + 'content': config_content + }) + + elif vr_tool == 'backstopjs': + config_path = os.path.join(project_root, 'backstop.config.js') + config_content = generate_backstop_config(project_info) + result['configs_generated'].append({ + 'path': config_path, + 'content': config_content + }) + + # Update Storybook main.js + storybook_dir = Path(project_root) / '.storybook' + main_js_candidates = [ + storybook_dir / 'main.js', + storybook_dir / 'main.ts' + ] + + main_js_path = None + for candidate in main_js_candidates: + if candidate.exists(): + main_js_path = str(candidate) + break + + if main_js_path: + main_js_content = update_storybook_main_config(main_js_path, vr_tool) + result['configs_updated'].append({ + 'path': main_js_path, + 'content': main_js_content + }) + elif storybook_dir.exists(): + # Create new main.js + main_js_path = str(storybook_dir / 'main.js') + main_js_content = generate_new_storybook_config(vr_tool) + result['configs_generated'].append({ + 'path': main_js_path, + 'content': main_js_content + }) + + # Update package.json + package_json_path = os.path.join(project_root, 'package.json') + if os.path.exists(package_json_path): + updated_package = update_package_json_scripts(package_json_path, vr_tool) + result['configs_updated'].append({ + 'path': package_json_path, + 'content': json.dumps(updated_package, indent=2) + }) + + return result + + +def main(): + """CLI entry point.""" + if len(sys.argv) < 2: + print("Usage: python chromatic_config_generator.py [vr_tool]", file=sys.stderr) + sys.exit(1) + + project_root = sys.argv[1] + vr_tool = sys.argv[2] if len(sys.argv) > 2 else 'chromatic' + + if vr_tool not in ['chromatic', 'percy', 'backstopjs']: + print(f"Unsupported VR tool: {vr_tool}. Use: chromatic, percy, or backstopjs", file=sys.stderr) + sys.exit(1) + + result = generate_configs(project_root, vr_tool) + + # Output as JSON + print(json.dumps(result, indent=2)) + + +if __name__ == '__main__': + main() diff --git a/skills/visual-regression/functions/ci_workflow_generator.py b/skills/visual-regression/functions/ci_workflow_generator.py new file mode 100644 index 0000000..6c62e94 --- /dev/null +++ b/skills/visual-regression/functions/ci_workflow_generator.py @@ -0,0 +1,490 @@ +#!/usr/bin/env python3 +""" +CI/CD Workflow Generator for Visual Regression + +Generates GitHub Actions, GitLab CI, and CircleCI workflows for Chromatic/Percy/BackstopJS. + +Usage: + python ci_workflow_generator.py +""" + +import json +import os +import sys +from pathlib import Path +from typing import Dict + + +def detect_node_version(project_root: str) -> str: + """ + Detect Node.js version from .nvmrc or package.json. 
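+
+    .nvmrc takes precedence over package.json "engines.node";
+    falls back to '20' when neither specifies a version.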
+ + Args: + project_root: Project root directory + + Returns: + Node version string (default: '20') + """ + # Check .nvmrc + nvmrc = Path(project_root) / '.nvmrc' + if nvmrc.exists(): + with open(nvmrc, 'r') as f: + return f.read().strip() + + # Check package.json engines.node + package_json = Path(project_root) / 'package.json' + if package_json.exists(): + with open(package_json, 'r') as f: + try: + data = json.load(f) + node_version = data.get('engines', {}).get('node') + if node_version: + # Extract version number (handle ">=18.0.0" format) + import re + match = re.search(r'\d+', node_version) + if match: + return match.group(0) + except json.JSONDecodeError: + pass + + return '20' # Default + + +def detect_package_manager(project_root: str) -> str: + """ + Detect package manager from lock files. + + Args: + project_root: Project root directory + + Returns: + Package manager name ('npm', 'yarn', 'pnpm') + """ + root = Path(project_root) + + if (root / 'pnpm-lock.yaml').exists(): + return 'pnpm' + elif (root / 'yarn.lock').exists(): + return 'yarn' + else: + return 'npm' + + +def get_install_command(package_manager: str) -> str: + """Get install command for package manager.""" + commands = { + 'npm': 'npm ci', + 'yarn': 'yarn install --frozen-lockfile', + 'pnpm': 'pnpm install --frozen-lockfile' + } + return commands.get(package_manager, 'npm ci') + + +def detect_branches(project_root: str) -> list: + """ + Detect main branches from git config. + + Args: + project_root: Project root directory + + Returns: + List of branch names + """ + git_head = Path(project_root) / '.git' / 'HEAD' + + if git_head.exists(): + with open(git_head, 'r') as f: + content = f.read().strip() + if 'refs/heads/main' in content: + return ['main', 'develop'] + elif 'refs/heads/master' in content: + return ['master', 'develop'] + + return ['main', 'develop'] + + +def generate_github_workflow_chromatic(project_info: Dict) -> str: + """ + Generate GitHub Actions workflow for Chromatic. + + Args: + project_info: Project information + + Returns: + YAML workflow content + """ + node_version = project_info.get('node_version', '20') + package_manager = project_info.get('package_manager', 'npm') + install_command = get_install_command(package_manager) + branches = project_info.get('branches', ['main', 'develop']) + + workflow = f"""name: Visual Regression Tests + +on: + push: + branches: {json.dumps(branches)} + pull_request: + branches: ['main'] + +jobs: + chromatic: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Required for Chromatic + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '{node_version}' + cache: '{package_manager}' + + - name: Install dependencies + run: {install_command} + + - name: Run Chromatic + uses: chromaui/action@latest + with: + projectToken: ${{{{ secrets.CHROMATIC_PROJECT_TOKEN }}}} + exitZeroOnChanges: true + onlyChanged: true + autoAcceptChanges: 'main' # Auto-accept on main branch +""" + return workflow + + +def generate_github_workflow_percy(project_info: Dict) -> str: + """ + Generate GitHub Actions workflow for Percy. 
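+
+    Unlike the Chromatic workflow, Percy needs an explicit
+    build-storybook step and expects a PERCY_TOKEN repository secret.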
+ + Args: + project_info: Project information + + Returns: + YAML workflow content + """ + node_version = project_info.get('node_version', '20') + package_manager = project_info.get('package_manager', 'npm') + install_command = get_install_command(package_manager) + branches = project_info.get('branches', ['main', 'develop']) + + workflow = f"""name: Visual Regression Tests + +on: + push: + branches: {json.dumps(branches)} + pull_request: + branches: ['main'] + +jobs: + percy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '{node_version}' + cache: '{package_manager}' + + - name: Install dependencies + run: {install_command} + + - name: Build Storybook + run: npm run build-storybook + + - name: Run Percy + run: npx percy storybook storybook-static + env: + PERCY_TOKEN: ${{{{ secrets.PERCY_TOKEN }}}} +""" + return workflow + + +def generate_github_workflow_backstop(project_info: Dict) -> str: + """ + Generate GitHub Actions workflow for BackstopJS. + + Args: + project_info: Project information + + Returns: + YAML workflow content + """ + node_version = project_info.get('node_version', '20') + package_manager = project_info.get('package_manager', 'npm') + install_command = get_install_command(package_manager) + branches = project_info.get('branches', ['main', 'develop']) + + workflow = f"""name: Visual Regression Tests + +on: + push: + branches: {json.dumps(branches)} + pull_request: + branches: ['main'] + +jobs: + backstop: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '{node_version}' + cache: '{package_manager}' + + - name: Install dependencies + run: {install_command} + + - name: Run BackstopJS + run: npm run backstop:test + + - name: Upload test results + if: failure() + uses: actions/upload-artifact@v3 + with: + name: backstop-results + path: backstop_data/ +""" + return workflow + + +def generate_gitlab_ci_chromatic(project_info: Dict) -> str: + """ + Generate GitLab CI job for Chromatic. + + Args: + project_info: Project information + + Returns: + YAML job content + """ + node_version = project_info.get('node_version', '20') + install_command = get_install_command(project_info.get('package_manager', 'npm')) + + job = f"""# Add to .gitlab-ci.yml + +chromatic: + stage: test + image: node:{node_version} + cache: + paths: + - node_modules/ + script: + - {install_command} + - npx chromatic --exit-zero-on-changes --only-changed + variables: + CHROMATIC_PROJECT_TOKEN: $CHROMATIC_PROJECT_TOKEN + only: + - main + - develop + - merge_requests +""" + return job + + +def generate_gitlab_ci_percy(project_info: Dict) -> str: + """ + Generate GitLab CI job for Percy. + + Args: + project_info: Project information + + Returns: + YAML job content + """ + node_version = project_info.get('node_version', '20') + install_command = get_install_command(project_info.get('package_manager', 'npm')) + + job = f"""# Add to .gitlab-ci.yml + +percy: + stage: test + image: node:{node_version} + cache: + paths: + - node_modules/ + script: + - {install_command} + - npm run build-storybook + - npx percy storybook storybook-static + variables: + PERCY_TOKEN: $PERCY_TOKEN + only: + - main + - develop + - merge_requests +""" + return job + + +def generate_circleci_config_chromatic(project_info: Dict) -> str: + """ + Generate CircleCI job for Chromatic. 
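+
+    Returned as a snippet to merge into .circleci/config.yml
+    (see the leading comment in the generated YAML).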
+ + Args: + project_info: Project information + + Returns: + YAML job content + """ + node_version = project_info.get('node_version', '20') + install_command = get_install_command(project_info.get('package_manager', 'npm')) + + config = f"""# Add to .circleci/config.yml + +version: 2.1 + +executors: + node: + docker: + - image: cimg/node:{node_version} + +jobs: + chromatic: + executor: node + steps: + - checkout + - restore_cache: + keys: + - v1-dependencies-{{{{ checksum "package.json" }}}} + - run: {install_command} + - save_cache: + paths: + - node_modules + key: v1-dependencies-{{{{ checksum "package.json" }}}} + - run: + name: Run Chromatic + command: npx chromatic --exit-zero-on-changes --only-changed + environment: + CHROMATIC_PROJECT_TOKEN: $CHROMATIC_PROJECT_TOKEN + +workflows: + version: 2 + test: + jobs: + - chromatic +""" + return config + + +def generate_workflow(project_root: str, ci_platform: str, vr_tool: str) -> Dict: + """ + Generate CI/CD workflow for specified platform and VR tool. + + Args: + project_root: Project root directory + ci_platform: CI platform ('github', 'gitlab', 'circleci') + vr_tool: VR tool ('chromatic', 'percy', 'backstopjs') + + Returns: + Dict with workflow path and content + """ + # Gather project info + project_info = { + 'node_version': detect_node_version(project_root), + 'package_manager': detect_package_manager(project_root), + 'branches': detect_branches(project_root) + } + + result = { + 'platform': ci_platform, + 'vr_tool': vr_tool, + 'workflow_path': None, + 'workflow_content': None, + 'instructions': None + } + + # Generate workflow based on platform and tool + if ci_platform == 'github': + workflow_dir = Path(project_root) / '.github' / 'workflows' + workflow_file = 'chromatic.yml' if vr_tool == 'chromatic' else f'{vr_tool}.yml' + result['workflow_path'] = str(workflow_dir / workflow_file) + + if vr_tool == 'chromatic': + result['workflow_content'] = generate_github_workflow_chromatic(project_info) + elif vr_tool == 'percy': + result['workflow_content'] = generate_github_workflow_percy(project_info) + elif vr_tool == 'backstopjs': + result['workflow_content'] = generate_github_workflow_backstop(project_info) + + result['instructions'] = f""" +GitHub Actions workflow created: {result['workflow_path']} + +Next steps: +1. Add secret: Repository Settings → Secrets → Actions +2. Create secret: CHROMATIC_PROJECT_TOKEN (or PERCY_TOKEN) +3. Commit and push this file +4. Workflow will run automatically on push/PR +""" + + elif ci_platform == 'gitlab': + result['workflow_path'] = str(Path(project_root) / '.gitlab-ci.yml') + + if vr_tool == 'chromatic': + result['workflow_content'] = generate_gitlab_ci_chromatic(project_info) + elif vr_tool == 'percy': + result['workflow_content'] = generate_gitlab_ci_percy(project_info) + + result['instructions'] = """ +GitLab CI job generated. Add to your .gitlab-ci.yml file. + +Next steps: +1. Add variable: Project Settings → CI/CD → Variables +2. Create variable: CHROMATIC_PROJECT_TOKEN (or PERCY_TOKEN) +3. Commit and push .gitlab-ci.yml +4. Pipeline will run automatically +""" + + elif ci_platform == 'circleci': + result['workflow_path'] = str(Path(project_root) / '.circleci' / 'config.yml') + + if vr_tool == 'chromatic': + result['workflow_content'] = generate_circleci_config_chromatic(project_info) + + result['instructions'] = """ +CircleCI job generated. Add to your .circleci/config.yml file. + +Next steps: +1. Add environment variable in CircleCI project settings +2. 
Variable name: CHROMATIC_PROJECT_TOKEN +3. Commit and push config.yml +4. Build will run automatically +""" + + return result + + +def main(): + """CLI entry point.""" + if len(sys.argv) < 4: + print("Usage: python ci_workflow_generator.py ", file=sys.stderr) + print(" ci_platform: github, gitlab, circleci", file=sys.stderr) + print(" vr_tool: chromatic, percy, backstopjs", file=sys.stderr) + sys.exit(1) + + project_root = sys.argv[1] + ci_platform = sys.argv[2].lower() + vr_tool = sys.argv[3].lower() + + if ci_platform not in ['github', 'gitlab', 'circleci']: + print(f"Unsupported CI platform: {ci_platform}", file=sys.stderr) + sys.exit(1) + + if vr_tool not in ['chromatic', 'percy', 'backstopjs']: + print(f"Unsupported VR tool: {vr_tool}", file=sys.stderr) + sys.exit(1) + + result = generate_workflow(project_root, ci_platform, vr_tool) + + # Output as JSON + print(json.dumps(result, indent=2)) + + +if __name__ == '__main__': + main() diff --git a/skills/visual-regression/functions/story_generator.py b/skills/visual-regression/functions/story_generator.py new file mode 100644 index 0000000..6adb165 --- /dev/null +++ b/skills/visual-regression/functions/story_generator.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python3 +""" +Storybook Story Generator + +Analyzes React/Vue/Svelte components and generates comprehensive Storybook stories +with variants, accessibility tests, and interaction tests. + +Usage: + python story_generator.py [template_path] +""" + +import json +import os +import re +import sys +from pathlib import Path +from typing import Dict, List, Optional + + +def extract_component_name(file_path: str) -> str: + """Extract component name from file path.""" + return Path(file_path).stem + + +def analyze_react_component(component_path: str) -> Dict: + """ + Analyze React/TypeScript component to extract props and metadata. + + Args: + component_path: Path to component file + + Returns: + Dict with component info: name, props, prop_types, exports + """ + with open(component_path, 'r') as f: + content = f.read() + + component_name = extract_component_name(component_path) + + result = { + 'name': component_name, + 'path': component_path, + 'props': [], + 'has_typescript': component_path.endswith(('.tsx', '.ts')), + 'is_default_export': False, + 'story_title': f'Components/{component_name}' + } + + # Check for default export + if re.search(r'export\s+default\s+' + component_name, content): + result['is_default_export'] = True + + # Extract TypeScript interface/type props + if result['has_typescript']: + # Match interface or type definition + interface_pattern = r'(?:interface|type)\s+' + component_name + r'Props\s*{([^}]+)}' + match = re.search(interface_pattern, content, re.DOTALL) + + if match: + props_block = match.group(1) + # Parse each prop + prop_pattern = r'(\w+)(\?)?:\s*([^;]+);?' + for prop_match in re.finditer(prop_pattern, props_block): + prop_name = prop_match.group(1) + is_optional = prop_match.group(2) == '?' 
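+                # Heuristic: assumes one "name: type;" pair per match; nested
+                # object literals or generics can confuse this simple regex.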
+ prop_type = prop_match.group(3).strip() + + # Determine control type based on prop type + control = infer_control_type(prop_type) + + # Extract possible values for enums + values = extract_enum_values(prop_type) + + result['props'].append({ + 'name': prop_name, + 'type': prop_type, + 'optional': is_optional, + 'control': control, + 'values': values, + 'default': infer_default_value(prop_type, prop_name) + }) + + # Fallback: extract props from function signature + if not result['props']: + func_pattern = r'(?:function|const)\s+' + component_name + r'\s*(?:<[^>]+>)?\s*\(\s*{\s*([^}]+)\s*}' + match = re.search(func_pattern, content) + + if match: + props_str = match.group(1) + # Simple extraction of prop names + prop_names = [p.strip().split(':')[0].strip() for p in props_str.split(',')] + + for prop_name in prop_names: + result['props'].append({ + 'name': prop_name, + 'type': 'any', + 'optional': False, + 'control': 'text', + 'values': None, + 'default': None + }) + + return result + + +def infer_control_type(prop_type: str) -> str: + """ + Infer Storybook control type from TypeScript type. + + Args: + prop_type: TypeScript type string + + Returns: + Storybook control type + """ + prop_type_lower = prop_type.lower() + + # Boolean + if 'boolean' in prop_type_lower: + return 'boolean' + + # Number + if 'number' in prop_type_lower: + return 'number' + + # Union types (enums) + if '|' in prop_type: + return 'select' + + # Objects + if prop_type_lower in ['object', 'record']: + return 'object' + + # Arrays + if '[]' in prop_type or prop_type.startswith('array'): + return 'object' + + # Functions + if '=>' in prop_type or prop_type.startswith('('): + return 'function' + + # Default to text + return 'text' + + +def extract_enum_values(prop_type: str) -> Optional[List[str]]: + """ + Extract possible values from union type. + + Args: + prop_type: TypeScript type string (e.g., "'sm' | 'md' | 'lg'") + + Returns: + List of possible values or None + """ + if '|' not in prop_type: + return None + + # Extract string literals + values = re.findall(r"['\"]([^'\"]+)['\"]", prop_type) + + return values if values else None + + +def infer_default_value(prop_type: str, prop_name: str) -> any: + """ + Infer reasonable default value for prop. + + Args: + prop_type: TypeScript type string + prop_name: Prop name + + Returns: + Default value + """ + prop_type_lower = prop_type.lower() + prop_name_lower = prop_name.lower() + + # Boolean + if 'boolean' in prop_type_lower: + return False + + # Number + if 'number' in prop_type_lower: + if 'count' in prop_name_lower: + return 0 + return 1 + + # Union types - return first value + values = extract_enum_values(prop_type) + if values: + return values[0] + + # Strings - context-aware defaults + if 'name' in prop_name_lower: + return 'John Doe' + if 'title' in prop_name_lower: + return 'Example Title' + if 'description' in prop_name_lower or 'bio' in prop_name_lower: + return 'This is an example description' + if 'email' in prop_name_lower: + return 'user@example.com' + if 'url' in prop_name_lower or 'href' in prop_name_lower: + return 'https://example.com' + if 'image' in prop_name_lower or 'avatar' in prop_name_lower: + return 'https://via.placeholder.com/150' + + return 'Example text' + + +def generate_variants(component_info: Dict) -> List[Dict]: + """ + Generate story variants based on component props. 
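+
+    Emits one variant per non-default enum value, plus one "enabled"
+    variant for each boolean prop that defaults to False.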
+ + Args: + component_info: Component analysis result + + Returns: + List of variant definitions + """ + variants = [] + + # Generate variants for enum props + for prop in component_info['props']: + if prop['values'] and len(prop['values']) > 1: + # Create variant for each enum value + for value in prop['values']: + if value != prop['default']: # Skip default (already in Default story) + variant_name = value.capitalize() + variants.append({ + 'name': variant_name, + 'prop_name': prop['name'], + 'value': value + }) + + # Generate boolean state variants + for prop in component_info['props']: + if prop['type'].lower() == 'boolean' and not prop['default']: + variant_name = prop['name'].capitalize() + variants.append({ + 'name': variant_name, + 'prop_name': prop['name'], + 'value': True + }) + + return variants + + +def generate_story_content(component_info: Dict, framework: str = 'react') -> str: + """ + Generate complete Storybook story file content. + + Args: + component_info: Component analysis result + framework: Framework name ('react', 'vue', 'svelte') + + Returns: + Story file content as string + """ + if framework == 'react': + return generate_react_story(component_info) + elif framework == 'vue': + return generate_vue_story(component_info) + elif framework == 'svelte': + return generate_svelte_story(component_info) + else: + raise ValueError(f"Unsupported framework: {framework}") + + +def generate_react_story(component_info: Dict) -> str: + """Generate React/TypeScript story.""" + name = component_info['name'] + props = component_info['props'] + variants = generate_variants(component_info) + + # Build imports + imports = f"""import type {{ Meta, StoryObj }} from '@storybook/react'; +import {{ {name} }} from './{name}'; +""" + + # Build argTypes + arg_types = [] + for prop in props: + if prop['values']: + arg_types.append(f" {prop['name']}: {{ control: '{prop['control']}', options: {json.dumps(prop['values'])} }}") + else: + arg_types.append(f" {prop['name']}: {{ control: '{prop['control']}' }}") + + arg_types_str = ',\n'.join(arg_types) if arg_types else '' + + # Build default args + default_args = [] + for prop in props: + if prop['default'] is not None: + if isinstance(prop['default'], str): + default_args.append(f" {prop['name']}: '{prop['default']}'") + else: + default_args.append(f" {prop['name']}: {json.dumps(prop['default'])}") + + default_args_str = ',\n'.join(default_args) if default_args else '' + + # Build meta + meta = f""" +const meta = {{ + title: '{component_info['story_title']}', + component: {name}, + parameters: {{ + layout: 'centered', + }}, + tags: ['autodocs'], + argTypes: {{ +{arg_types_str} + }}, +}} satisfies Meta; + +export default meta; +type Story = StoryObj; +""" + + # Default story + default_story = f""" +export const Default: Story = {{ + args: {{ +{default_args_str} + }}, +}}; +""" + + # Variant stories + variant_stories = [] + for variant in variants: + if isinstance(variant['value'], str): + value_str = f"'{variant['value']}'" + else: + value_str = json.dumps(variant['value']) + + variant_stories.append(f""" +export const {variant['name']}: Story = {{ + args: {{ + ...Default.args, + {variant['prop_name']}: {value_str}, + }}, +}}; +""") + + variant_stories_str = ''.join(variant_stories) + + # Accessibility tests + a11y = f""" +// Accessibility tests +Default.parameters = {{ + a11y: {{ + config: {{ + rules: [ + {{ id: 'color-contrast', enabled: true }}, + {{ id: 'label', enabled: true }}, + ], + }}, + }}, +}}; +""" + + return imports + meta + 
default_story + variant_stories_str + a11y + + +def generate_vue_story(component_info: Dict) -> str: + """Generate Vue story (simplified).""" + name = component_info['name'] + + return f"""import type {{ Meta, StoryObj }} from '@storybook/vue3'; +import {name} from './{name}.vue'; + +const meta = {{ + title: 'Components/{name}', + component: {name}, + tags: ['autodocs'], +}} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = {{ + args: {{}}, +}}; +""" + + +def generate_svelte_story(component_info: Dict) -> str: + """Generate Svelte story (simplified).""" + name = component_info['name'] + + return f"""import type {{ Meta, StoryObj }} from '@storybook/svelte'; +import {name} from './{name}.svelte'; + +const meta = {{ + title: 'Components/{name}', + component: {name}, + tags: ['autodocs'], +}} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = {{ + args: {{}}, +}}; +""" + + +def write_story_file(component_path: str, story_content: str) -> str: + """ + Write story file next to component file. + + Args: + component_path: Path to component file + story_content: Generated story content + + Returns: + Path to created story file + """ + component_file = Path(component_path) + story_file = component_file.parent / f"{component_file.stem}.stories{component_file.suffix}" + + with open(story_file, 'w') as f: + f.write(story_content) + + return str(story_file) + + +def main(): + """CLI entry point.""" + if len(sys.argv) < 3: + print("Usage: python story_generator.py ", file=sys.stderr) + sys.exit(1) + + component_path = sys.argv[1] + framework = sys.argv[2].lower() + + if framework not in ['react', 'vue', 'svelte']: + print(f"Unsupported framework: {framework}. Use: react, vue, or svelte", file=sys.stderr) + sys.exit(1) + + if not os.path.exists(component_path): + print(f"Component file not found: {component_path}", file=sys.stderr) + sys.exit(1) + + # Analyze component + if framework == 'react': + component_info = analyze_react_component(component_path) + else: + # Simplified for Vue/Svelte + component_info = { + 'name': extract_component_name(component_path), + 'path': component_path, + 'props': [], + 'story_title': f'Components/{extract_component_name(component_path)}' + } + + # Generate story + story_content = generate_story_content(component_info, framework) + + # Write story file + story_file_path = write_story_file(component_path, story_content) + + # Output result + result = { + 'component': component_info, + 'story_file': story_file_path, + 'success': True + } + + print(json.dumps(result, indent=2)) + + +if __name__ == '__main__': + main() diff --git a/skills/visual-regression/functions/vr_setup_validator.py b/skills/visual-regression/functions/vr_setup_validator.py new file mode 100644 index 0000000..5ab095a --- /dev/null +++ b/skills/visual-regression/functions/vr_setup_validator.py @@ -0,0 +1,409 @@ +#!/usr/bin/env python3 +""" +Visual Regression Setup Validator + +Detects existing Storybook setup, VR tools, CI platform, and validates component paths. +Returns comprehensive validation report to guide skill execution. + +Usage: + python vr_setup_validator.py [component_path] +""" + +import json +import os +import sys +from pathlib import Path +from typing import Dict, List, Optional + + +def detect_framework(project_root: str) -> Optional[str]: + """ + Detect frontend framework from package.json dependencies. 
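+
+    Merges "dependencies" and "devDependencies"; if several frameworks
+    are present, React wins (checked first). Returns None on parse errors.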
+ + Args: + project_root: Path to project root directory + + Returns: + Framework name ('react', 'vue', 'svelte') or None + """ + package_json_path = Path(project_root) / 'package.json' + + if not package_json_path.exists(): + return None + + try: + with open(package_json_path, 'r') as f: + package_data = json.load(f) + + dependencies = { + **package_data.get('dependencies', {}), + **package_data.get('devDependencies', {}) + } + + if 'react' in dependencies: + return 'react' + elif 'vue' in dependencies: + return 'vue' + elif 'svelte' in dependencies: + return 'svelte' + + return None + except (json.JSONDecodeError, FileNotFoundError): + return None + + +def detect_storybook_config(project_root: str) -> Dict: + """ + Detect Storybook version and configuration. + + Args: + project_root: Path to project root directory + + Returns: + Dict with version, addons, framework, and config path + """ + storybook_dir = Path(project_root) / '.storybook' + package_json_path = Path(project_root) / 'package.json' + + result = { + 'installed': False, + 'version': None, + 'config_path': None, + 'main_js_path': None, + 'addons': [], + 'framework': None + } + + # Check if .storybook directory exists + if not storybook_dir.exists(): + return result + + result['installed'] = True + result['config_path'] = str(storybook_dir) + + # Check for main.js or main.ts + main_js = storybook_dir / 'main.js' + main_ts = storybook_dir / 'main.ts' + + if main_js.exists(): + result['main_js_path'] = str(main_js) + elif main_ts.exists(): + result['main_js_path'] = str(main_ts) + + # Extract version from package.json + if package_json_path.exists(): + try: + with open(package_json_path, 'r') as f: + package_data = json.load(f) + + dependencies = { + **package_data.get('dependencies', {}), + **package_data.get('devDependencies', {}) + } + + # Find Storybook version + for dep in dependencies: + if dep.startswith('@storybook/'): + result['version'] = dependencies[dep].replace('^', '').replace('~', '') + break + + # Extract addons from dependencies + result['addons'] = [ + dep for dep in dependencies.keys() + if dep.startswith('@storybook/addon-') or dep == '@chromatic-com/storybook' + ] + except (json.JSONDecodeError, FileNotFoundError): + pass + + # Try to parse main.js for framework + if result['main_js_path']: + try: + with open(result['main_js_path'], 'r') as f: + content = f.read() + if '@storybook/react' in content: + result['framework'] = 'react' + elif '@storybook/vue' in content: + result['framework'] = 'vue' + elif '@storybook/svelte' in content: + result['framework'] = 'svelte' + except FileNotFoundError: + pass + + return result + + +def detect_vr_tool(project_root: str) -> Optional[str]: + """ + Detect existing visual regression tool from package.json. 
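+
+    Checks Chromatic first, then Percy, then BackstopJS; the first
+    match wins.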
+ + Args: + project_root: Path to project root directory + + Returns: + Tool name ('chromatic', 'percy', 'backstopjs') or None + """ + package_json_path = Path(project_root) / 'package.json' + + if not package_json_path.exists(): + return None + + try: + with open(package_json_path, 'r') as f: + package_data = json.load(f) + + dependencies = { + **package_data.get('dependencies', {}), + **package_data.get('devDependencies', {}) + } + + if 'chromatic' in dependencies or '@chromatic-com/storybook' in dependencies: + return 'chromatic' + elif '@percy/cli' in dependencies or '@percy/storybook' in dependencies: + return 'percy' + elif 'backstopjs' in dependencies: + return 'backstopjs' + + return None + except (json.JSONDecodeError, FileNotFoundError): + return None + + +def detect_ci_platform(project_root: str) -> Optional[str]: + """ + Detect CI/CD platform from existing configuration files. + + Args: + project_root: Path to project root directory + + Returns: + Platform name ('github', 'gitlab', 'circleci', 'bitbucket') or None + """ + root = Path(project_root) + + # GitHub Actions + if (root / '.github' / 'workflows').exists(): + return 'github' + + # GitLab CI + if (root / '.gitlab-ci.yml').exists(): + return 'gitlab' + + # CircleCI + if (root / '.circleci' / 'config.yml').exists(): + return 'circleci' + + # Bitbucket Pipelines + if (root / 'bitbucket-pipelines.yml').exists(): + return 'bitbucket' + + return None + + +def validate_component_path(component_path: str, project_root: str = '.') -> Dict: + """ + Validate component file exists and extract basic information. + + Args: + component_path: Path to component file (relative or absolute) + project_root: Project root directory + + Returns: + Dict with validation status and component info + """ + # Handle relative paths + if not os.path.isabs(component_path): + component_path = os.path.join(project_root, component_path) + + component_file = Path(component_path) + + result = { + 'valid': False, + 'path': component_path, + 'name': None, + 'extension': None, + 'directory': None, + 'error': None + } + + # Check if file exists + if not component_file.exists(): + result['error'] = f"Component file not found: {component_path}" + return result + + # Check if it's a file (not directory) + if not component_file.is_file(): + result['error'] = f"Path is not a file: {component_path}" + return result + + # Validate extension + valid_extensions = ['.tsx', '.ts', '.jsx', '.js', '.vue', '.svelte'] + if component_file.suffix not in valid_extensions: + result['error'] = f"Invalid file extension. Expected one of: {', '.join(valid_extensions)}" + return result + + # Extract component name (filename without extension) + result['name'] = component_file.stem + result['extension'] = component_file.suffix + result['directory'] = str(component_file.parent) + result['valid'] = True + + return result + + +def check_dependencies(project_root: str, vr_tool: Optional[str] = 'chromatic') -> Dict: + """ + Check which required dependencies are installed. 
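+
+    Always checks the core Storybook addons plus the extra packages
+    required by the chosen vr_tool.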
+ + Args: + project_root: Path to project root directory + vr_tool: VR tool to check for ('chromatic', 'percy', 'backstopjs') + + Returns: + Dict with installed and missing dependencies + """ + package_json_path = Path(project_root) / 'package.json' + + result = { + 'installed': [], + 'missing': [] + } + + if not package_json_path.exists(): + result['missing'] = ['package.json not found'] + return result + + try: + with open(package_json_path, 'r') as f: + package_data = json.load(f) + + dependencies = { + **package_data.get('dependencies', {}), + **package_data.get('devDependencies', {}) + } + + # Core Storybook dependencies + required_deps = [ + '@storybook/addon-essentials', + '@storybook/addon-interactions', + ] + + # Add VR tool specific dependencies + if vr_tool == 'chromatic': + required_deps.extend(['chromatic', '@chromatic-com/storybook']) + elif vr_tool == 'percy': + required_deps.extend(['@percy/cli', '@percy/storybook']) + elif vr_tool == 'backstopjs': + required_deps.append('backstopjs') + + # Check each dependency + for dep in required_deps: + if dep in dependencies: + result['installed'].append(dep) + else: + result['missing'].append(dep) + + except (json.JSONDecodeError, FileNotFoundError): + result['missing'] = ['Error reading package.json'] + + return result + + +def get_package_manager(project_root: str) -> str: + """ + Detect package manager from lock files. + + Args: + project_root: Path to project root directory + + Returns: + Package manager name ('npm', 'yarn', 'pnpm') + """ + root = Path(project_root) + + if (root / 'pnpm-lock.yaml').exists(): + return 'pnpm' + elif (root / 'yarn.lock').exists(): + return 'yarn' + else: + return 'npm' # Default to npm + + +def validate_setup(project_root: str, component_path: Optional[str] = None) -> Dict: + """ + Comprehensive validation of VR setup requirements. + + Args: + project_root: Path to project root directory + component_path: Optional path to component file + + Returns: + Complete validation report + """ + report = { + 'project_root': project_root, + 'framework': detect_framework(project_root), + 'storybook': detect_storybook_config(project_root), + 'vr_tool': detect_vr_tool(project_root), + 'ci_platform': detect_ci_platform(project_root), + 'package_manager': get_package_manager(project_root), + 'component': None, + 'dependencies': None, + 'ready': False, + 'warnings': [], + 'errors': [] + } + + # Validate component if path provided + if component_path: + report['component'] = validate_component_path(component_path, project_root) + if not report['component']['valid']: + report['errors'].append(report['component']['error']) + + # Check framework + if not report['framework']: + report['errors'].append('Framework not detected. Ensure React, Vue, or Svelte is installed.') + + # Check Storybook + if not report['storybook']['installed']: + report['errors'].append('Storybook not installed. 
Run: npx storybook init')
+
+    # Determine VR tool (use detected or default to Chromatic)
+    vr_tool = report['vr_tool'] or 'chromatic'
+    report['dependencies'] = check_dependencies(project_root, vr_tool)
+
+    # Add warnings for missing dependencies
+    if report['dependencies']['missing']:
+        report['warnings'].append(
+            f"Missing dependencies: {', '.join(report['dependencies']['missing'])}"
+        )
+
+    # Determine if ready to proceed
+    report['ready'] = (
+        report['framework'] is not None and
+        report['storybook']['installed'] and
+        len(report['errors']) == 0
+    )
+
+    return report
+
+
+def main():
+    """CLI entry point."""
+    if len(sys.argv) < 2:
+        print("Usage: python vr_setup_validator.py <project_root> [component_path]", file=sys.stderr)
+        sys.exit(1)
+
+    project_root = sys.argv[1]
+    component_path = sys.argv[2] if len(sys.argv) > 2 else None
+
+    report = validate_setup(project_root, component_path)
+
+    # Output as JSON
+    print(json.dumps(report, indent=2))
+
+    # Exit with error code if not ready
+    sys.exit(0 if report['ready'] else 1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/skills/visual-regression/templates/chromatic-config.json.j2 b/skills/visual-regression/templates/chromatic-config.json.j2
new file mode 100644
index 0000000..c60257f
--- /dev/null
+++ b/skills/visual-regression/templates/chromatic-config.json.j2
@@ -0,0 +1,10 @@
+{
+  "projectId": "{{ project_id | default('') }}",
+  "buildScriptName": "build-storybook",
+  "exitZeroOnChanges": true,
+  "exitOnceUploaded": true,
+  "onlyChanged": true,
+  "externals": ["public/**"],
+  "skip": "{{ skip_pattern | default('dependabot/**') }}",
+  "ignoreLastBuildOnBranch": "{{ main_branch | default('main') }}"
+}
diff --git a/skills/visual-regression/templates/github-workflow.yml.j2 b/skills/visual-regression/templates/github-workflow.yml.j2
new file mode 100644
index 0000000..39ee269
--- /dev/null
+++ b/skills/visual-regression/templates/github-workflow.yml.j2
@@ -0,0 +1,80 @@
+name: Visual Regression Tests
+
+on:
+  push:
+    branches: {{ branches | default(['main', 'develop']) | tojson }}
+  pull_request:
+    branches: ['main']
+
+jobs:
+  {% if vr_tool == 'chromatic' -%}
+  chromatic:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0 # Required for Chromatic
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '{{ node_version | default('20') }}'
+          cache: '{{ package_manager | default('npm') }}'
+
+      - name: Install dependencies
+        run: {{ install_command | default('npm ci') }}
+
+      - name: Run Chromatic
+        uses: chromaui/action@latest
+        with:
+          projectToken: {% raw %}${{ secrets.CHROMATIC_PROJECT_TOKEN }}{% endraw %}
+          exitZeroOnChanges: true
+          onlyChanged: true
+          autoAcceptChanges: 'main'
+  {% elif vr_tool == 'percy' -%}
+  percy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '{{ node_version | default('20') }}'
+          cache: '{{ package_manager | default('npm') }}'
+
+      - name: Install dependencies
+        run: {{ install_command | default('npm ci') }}
+
+      - name: Build Storybook
+        run: npm run build-storybook
+
+      - name: Run Percy
+        run: npx percy storybook storybook-static
+        env:
+          PERCY_TOKEN: {% raw %}${{ secrets.PERCY_TOKEN }}{% endraw %}
+  {% elif vr_tool == 'backstopjs' -%}
+  backstop:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '{{ node_version | default('20') }}'
+          cache: '{{ package_manager | default('npm') }}'
+
+      - name: Install dependencies
+        run: {{ install_command | default('npm ci') }}
+
+      - name: Run BackstopJS
+        run: npm run backstop:test
+
+      - name: Upload test results
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: backstop-results
+          path: backstop_data/
+  {% endif -%}
diff --git a/skills/visual-regression/templates/gitlab-ci.yml.j2 b/skills/visual-regression/templates/gitlab-ci.yml.j2
new file mode 100644
index 0000000..a1ce73b
--- /dev/null
+++ b/skills/visual-regression/templates/gitlab-ci.yml.j2
@@ -0,0 +1,54 @@
+# Add to .gitlab-ci.yml
+
+{% if vr_tool == 'chromatic' -%}
+chromatic:
+  stage: test
+  image: node:{{ node_version | default('20') }}
+  cache:
+    paths:
+      - node_modules/
+  script:
+    - {{ install_command | default('npm ci') }}
+    - npx chromatic --exit-zero-on-changes --only-changed
+  variables:
+    CHROMATIC_PROJECT_TOKEN: $CHROMATIC_PROJECT_TOKEN
+  only:
+    - main
+    - develop
+    - merge_requests
+{% elif vr_tool == 'percy' -%}
+percy:
+  stage: test
+  image: node:{{ node_version | default('20') }}
+  cache:
+    paths:
+      - node_modules/
+  script:
+    - {{ install_command | default('npm ci') }}
+    - npm run build-storybook
+    - npx percy storybook storybook-static
+  variables:
+    PERCY_TOKEN: $PERCY_TOKEN
+  only:
+    - main
+    - develop
+    - merge_requests
+{% elif vr_tool == 'backstopjs' -%}
+backstop:
+  stage: test
+  image: node:{{ node_version | default('20') }}
+  cache:
+    paths:
+      - node_modules/
+  script:
+    - {{ install_command | default('npm ci') }}
+    - npm run backstop:test
+  artifacts:
+    when: on_failure
+    paths:
+      - backstop_data/
+  only:
+    - main
+    - develop
+    - merge_requests
+{% endif -%}
diff --git a/skills/visual-regression/templates/story-template.tsx.j2 b/skills/visual-regression/templates/story-template.tsx.j2
new file mode 100644
index 0000000..af4ffb6
--- /dev/null
+++ b/skills/visual-regression/templates/story-template.tsx.j2
@@ -0,0 +1,60 @@
+import type { Meta, StoryObj } from '@storybook/react';
+import { {{ component_name }} } from './{{ component_name }}';
+
+const meta = {
+  title: '{{ story_title }}',
+  component: {{ component_name }},
+  parameters: {
+    layout: 'centered',
+  },
+  tags: ['autodocs'],
+  argTypes: {
+    {% for prop in props -%}
+    {% if prop.values -%}
+    {{ prop.name }}: { control: '{{ prop.control }}', options: {{ prop.values | tojson }} },
+    {% else -%}
+    {{ prop.name }}: { control: '{{ prop.control }}' },
+    {% endif -%}
+    {% endfor %}
+  },
+} satisfies Meta<typeof {{ component_name }}>;
+
+export default meta;
+type Story = StoryObj<typeof meta>;
+
+export const Default: Story = {
+  args: {
+    {% for prop in props -%}
+    {% if prop.default is string -%}
+    {{ prop.name }}: '{{ prop.default }}',
+    {% elif prop.default is not none -%}
+    {{ prop.name }}: {{ prop.default | tojson }},
+    {% endif -%}
+    {% endfor %}
+  },
+};
+
+{% for variant in variants %}
+export const {{ variant.name }}: Story = {
+  args: {
+    ...Default.args,
+    {% if variant.value is string -%}
+    {{ variant.prop_name }}: '{{ variant.value }}',
+    {% else -%}
+    {{ variant.prop_name }}: {{ variant.value | tojson }},
+    {% endif -%}
+  },
+};
+{% endfor %}
+
+// Accessibility tests
+Default.parameters = {
+  a11y: {
+    config: {
+      rules: [
+        { id: 'color-contrast', enabled: true },
+        { id: 'label', enabled: true },
+      ],
+    },
+  },
+};
diff --git a/skills/visual-regression/templates/storybook-main.js.j2 b/skills/visual-regression/templates/storybook-main.js.j2
new file mode 100644
index 0000000..9273669
--- /dev/null
+++ b/skills/visual-regression/templates/storybook-main.js.j2
@@ -0,0 +1,17 @@
+module.exports = { + stories: ['../src/**/*.stories.@(js|jsx|ts|tsx)'], + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', + {% if vr_tool == 'chromatic' -%} + '@chromatic-com/storybook', + {% elif vr_tool == 'percy' -%} + '@percy/storybook', + {% endif -%} + '@storybook/addon-interactions', + ], + framework: { + name: '@storybook/{{ framework | default('react') }}-vite', + options: {}, + }, +};
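
A minimal sketch of driving the validator above programmatically rather than through its CLI, assuming vr_setup_validator.py is importable from the working directory (the component path here is hypothetical):

    import json
    from vr_setup_validator import validate_setup

    # Validate the project root and one component in a single pass
    report = validate_setup('.', component_path='src/components/Button.tsx')

    if not report['ready']:
        for err in report['errors']:
            print(f'ERROR: {err}')
        for warn in report['warnings']:
            print(f'WARN: {warn}')

    # Same JSON shape that main() prints for the CLI
    print(json.dumps(report, indent=2))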
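The templates render with stock Jinja2; the context keys mirror the template variables. A minimal sketch, assuming the jinja2 package is installed and using storybook-main.js.j2 as the example:

    from pathlib import Path
    from jinja2 import Template

    # Render the Storybook main.js for a React project using Chromatic
    source = Path('skills/visual-regression/templates/storybook-main.js.j2').read_text()
    print(Template(source).render(vr_tool='chromatic', framework='react'))

With vr_tool='chromatic' the rendered addons array gains '@chromatic-com/storybook'; with vr_tool='percy' it gains '@percy/storybook' instead.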