commit 8a4be47b6e751211ae98234c046957881c27bf91 Author: Zhongwei Li Date: Sun Nov 30 08:28:42 2025 +0800 Initial commit diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..28d115d --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,11 @@ +{ + "name": "adw-bootstrap", + "description": "Bootstrap AI Developer Workflows (ADWs) infrastructure in any codebase. Enables programmatic agent orchestration via subprocess/SDK, reusable workflow templates, multi-phase workflows, and structured observability for agent executions.", + "version": "1.0.0", + "author": { + "name": "Joshua Oliphant" + }, + "skills": [ + "./skills" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..c8fbc27 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# adw-bootstrap + +Bootstrap AI Developer Workflows (ADWs) infrastructure in any codebase. Enables programmatic agent orchestration via subprocess/SDK, reusable workflow templates, multi-phase workflows, and structured observability for agent executions. diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..328cf4a --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,201 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:JoshuaOliphant/claude-plugins:plugins/adw-bootstrap", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "5cabbb5fa325befac242e31d5cc2c7bbd0098f86", + "treeHash": "8641345ac841491b7cda8b9bb7add7ebca64762a92333ec0e7d258aaa77f534d", + "generatedAt": "2025-11-28T10:11:48.671239Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "adw-bootstrap", + "description": "Bootstrap AI Developer Workflows (ADWs) infrastructure in any codebase. 
Enables programmatic agent orchestration via subprocess/SDK, reusable workflow templates, multi-phase workflows, and structured observability for agent executions.", + "version": "1.0.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "003654b939848d151d818943f93610e52a4c4979300c4fecf6ada102df06f25a" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "791fdfce7290b8c9436527ef3c1848d42d8ae2cb2ed97ed67ac3bc7551db8f03" + }, + { + "path": "skills/adw-bootstrap/README.md", + "sha256": "7a8098a86f6db893a8db8d25f83b0ae180891bfea6e003f27699efa646e9a2ec" + }, + { + "path": "skills/adw-bootstrap/SKILL.md", + "sha256": "3b27d7277a8325b89a86636ed8575cc2e85a0473c48add878760b42bf7cf43a7" + }, + { + "path": "skills/adw-bootstrap/utils/validator.py", + "sha256": "afbe3ee2a06fcdad8e1d485f6d67f4fad2fab35810ed5bae81011592bf518c97" + }, + { + "path": "skills/adw-bootstrap/docs/examples.md", + "sha256": "4ce278b531bf15243eded55d8b048a0231ceaece3c2df576013fd81626e323df" + }, + { + "path": "skills/adw-bootstrap/docs/principles.md", + "sha256": "efc2d1c55cd4a894a3d76c5f608a92528fb1b9e18322bb13384574569c0e4611" + }, + { + "path": "skills/adw-bootstrap/docs/architecture.md", + "sha256": "7336a3fb85e48f351acd40d7a4d749cb6dd2474c753f577f2f3c6ad1b788c738" + }, + { + "path": "skills/adw-bootstrap/docs/upgrades.md", + "sha256": "7c92c058bfbad4ae2c015d4ce6d587c83417c1d80c5488aa2f21d1010406278b" + }, + { + "path": "skills/adw-bootstrap/docs/usage-modes.md", + "sha256": "730dae1e6d66a210094c06021c33b6eb5e16158a4c8f3544a27888f00838ed9d" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/adw_modules/git_ops.py", + "sha256": "d4cae7aab3225f758c00a2456560adb9dbf839423729b2edb81f18d5451435ee" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/adw_modules/worktree_ops.py", + "sha256": "73bcd3f6b5ca3d472c3d237727ebf3e5b7c5e01d63c19f24e3e5cf4b3333ccd1" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/adw_modules/github.py", + "sha256": "81eb3c1a37faaef061a8198de2fc2c91033b01eb1e3edbc947d12be51bf817b6" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/adw_modules/workflow_ops.py", + "sha256": "d26a94a36bdd33a679738e3d8a2dbd51b9a737c6d6284aaba4e0fe6e63163c93" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/adw_modules/state.py", + "sha256": "b8cb7a2594cb89bb31ba1d21088c8a80f0d4a1ad77f3b94f98aaf7840d3682b9" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/adw_modules/beads_integration.py", + "sha256": "3612f1a9e588d51921e6650e8b812bfa7aeba1cfe1feecac512a4250e4953989" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/workflows/adw_sdlc_iso.py", + "sha256": "8e92c55290ec146d4b5482c11a48ad2005f3fce86e045dc51f70444e53b11d85" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/workflows/adw_plan_build_test_review_iso.py", + "sha256": "c44e391d7fbeb714015b257954fd0d00be281fd5673c38cb031e3bc12618319e" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/workflows/adw_ship_iso.py", + "sha256": "e8b748c41d6899e878fdff2d21ee157da9ba1f9d7d05248c7b69cd036716569e" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/document.md", + "sha256": "bab34ef5f000014996a46cde650511dc4e2fc879cde386eb4b68a673de0aa74a" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/generate_branch_name.md", + "sha256": "f5d222f0012db24064b94da38318bb3105bd14d424f9b8aede01f67ea26e6ad3" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/install_worktree.md", + "sha256": 
"34a83c1c8cf2c8513020783542b9a960a861fce9633c1feb2860d7b5c6fad807" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/classify_adw.md", + "sha256": "59e05519f80e8664a5b4ca9427ecc4bc271d650f538a7a06ca643d5da0623b95" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/patch.md", + "sha256": "e272195711c74449d8a65b5faa0ceb555c4a41e5d73787541d0579fbc06bba77" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/bug.md", + "sha256": "d6ad5c4c3feb98771b7146ff5f5b9607f3f01ef2d19e62acf60d29dbe14f7251" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/pull_request.md", + "sha256": "664fecc77c511c2d6929f650f432e70d761bf176cc78d5da607b6150af151ec4" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/cleanup_worktrees.md", + "sha256": "54a1905447bc77f60c5e966ced086e9b6b0da08fd642af18a9a37483a96a3e50" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/review.md", + "sha256": "6842bf7a43d04905da5f4c36da8111e60c605c75722112b26e9549311a5cb733" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/test.md", + "sha256": "a94a7df8799ecd3c604723ad4fac3410bad82595de7723e2181dc25a3736401a" + }, + { + "path": "skills/adw-bootstrap/reference/scaled/commands/classify_issue.md", + "sha256": "bbd227d5f750cc7203fe0c8716d2579703ed8c9842165cd308c18c5e5bbca9fb" + }, + { + "path": "skills/adw-bootstrap/reference/minimal/.env.sample", + "sha256": "3313d0473497b6682468468e39d2241b64cb4f54897b522428ac45fd1ffbedc5" + }, + { + "path": "skills/adw-bootstrap/reference/minimal/adws/adw_prompt.py", + "sha256": "1d66634ddb00ea0a42e6f30dc580dd955410fc9c0996278a6d7c53d5daea1064" + }, + { + "path": "skills/adw-bootstrap/reference/minimal/adws/adw_modules/agent.py", + "sha256": "50fb52f53ef6c46b77a7f05caa80a13d78503e1d28f87cdcd63099e29152a9ef" + }, + { + "path": "skills/adw-bootstrap/reference/minimal/commands/implement.md", + "sha256": "eabbec1279e3ba53c7f5e8420dbbe6c5964204f78aaa6111c8e8e4193ab369b9" + }, + { + "path": "skills/adw-bootstrap/reference/minimal/commands/chore.md", + "sha256": "a5e1d64bdc5411b27cc319a92cb79767c7190a3f1e04a736cc86468a19858cb3" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/adws/adw_slash_command.py", + "sha256": "2fb9ede7d531c2d020ee3e4a3b28fdb8e77637975073e6c681f57e1ef42b2c06" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/adws/adw_sdk_prompt.py", + "sha256": "1de89c18e34d32ad0796cf577472742cb6dcff37aa40b1cc9eb4160228fb8382" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/adws/adw_chore_implement.py", + "sha256": "d2628ebdbbc5f353e30f162a26b73c07d96711f5cea431efaf816ec5eb581d80" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/adws/adw_modules/agent_sdk.py", + "sha256": "0beaa5e2ea0f086c0e00ea6b08df6ac3038a35bb524d5635cd5cc729531f42b3" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/commands/prime.md", + "sha256": "b402c3ed21472ab54918c47e138f75ba64ca9d3b789b1e1189222f3e75831655" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/commands/start.md", + "sha256": "856e2a5563edd8d0a48d62b39e498321e61fd356441b57d41896d3d7f1e278dc" + }, + { + "path": "skills/adw-bootstrap/reference/enhanced/commands/feature.md", + "sha256": "01b728070405d759442d2f784175bfff2d521e78ae67ecdadf3452fe2e1675d1" + } + ], + "dirSha256": "8641345ac841491b7cda8b9bb7add7ebca64762a92333ec0e7d258aaa77f534d" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/adw-bootstrap/README.md 
b/skills/adw-bootstrap/README.md new file mode 100644 index 0000000..34c0409 --- /dev/null +++ b/skills/adw-bootstrap/README.md @@ -0,0 +1,389 @@ +# ADW Bootstrap Skill + +A Claude skill that intelligently bootstraps **AI Developer Workflows (ADWs)** infrastructure in any codebase, enabling programmatic agent orchestration for automated development. + +## What It Does + +Transforms a regular project into one where AI agents can be invoked programmatically to plan, implement, test, and deploy features. + +**After setup, you can:** +- Execute prompts programmatically: `./adws/adw_prompt.py "implement feature X"` +- Use reusable templates: `./adws/adw_slash_command.py /chore "task"` +- Orchestrate multi-phase workflows: Plan → Implement → Test +- Track agent behavior with structured outputs in `agents/{id}/` +- Scale compute for parallel development + +## Installation + +### User Skill (Personal Use) + +```bash +# Clone or copy this skill to your Claude skills directory +cp -r adw-bootstrap ~/.claude/skills/ + +# Or create symlink +ln -s /path/to/adw-bootstrap ~/.claude/skills/adw-bootstrap +``` + +### Verify Installation + +The skill should appear when you type `/skills` in Claude Code. + +## Usage + +### Automatic Trigger + +The skill activates when you say: +- "Set up ADWs" +- "Bootstrap agentic workflows" +- "Add AI developer workflows" +- "Enable programmatic agent execution" +- "Initialize ADW infrastructure" + +### Manual Invocation + +```bash +# In Claude Code +/adw-bootstrap + +# Or invoke the skill programmatically +adw-bootstrap +``` + +### Options + +During setup, Claude will: +1. Analyze your project structure +2. Recommend a setup phase (minimal/enhanced/scaled) +3. Ask for confirmation +4. Create adapted infrastructure +5. Validate the setup + +## What Gets Created + +### Minimal Setup (Always) +``` +your-project/ +├── adws/ +│ ├── adw_modules/ +│ │ └── agent.py # Core subprocess execution +│ └── adw_prompt.py # CLI wrapper +├── .claude/commands/ +│ ├── chore.md # Planning template +│ └── implement.md # Implementation template +├── specs/ # Implementation plans +├── agents/ # Output observability +└── .env.sample # Configuration template +``` + +### Enhanced Setup (Recommended) +Adds: +- `agent_sdk.py` - SDK-based execution +- `adw_slash_command.py` - Command executor +- `adw_chore_implement.py` - Compound workflows +- Additional slash commands (feature.md, prime.md, start.md) + +### Scaled Setup (Production) +Adds: +- State management (`state.py`, `adw_state.json`) +- Git operations (`git_ops.py`) +- Worktree isolation (`worktree_ops.py`, `trees/`) +- GitHub integration (`github.py`) +- Workflow orchestration (`workflow_ops.py`) +- Multi-phase workflows (`adw_sdlc_iso.py`, `adw_ship_iso.py`) +- Advanced slash commands (20+ commands) +- Testing infrastructure + +## Upgrading Existing ADW Setup + +If you already have ADWs in your project, the skill can upgrade to a higher phase: + +### Upgrade Triggers + +Say: +- "Upgrade my ADWs to enhanced" +- "Add scaled ADW capabilities" +- "Upgrade ADW infrastructure" + +### Upgrade Process + +The skill will: +1. **Detect** current phase (minimal/enhanced/scaled) +2. **Report** what infrastructure you have +3. **Recommend** available upgrades +4. **Backup** existing setup (`.adw_backups/`) +5. **Add** new capabilities without overwriting customizations +6. **Validate** the upgrade +7. 
**Report** what was added + +**Safety Features:** +- Never overwrites customized files +- Creates timestamped backups +- Shows what will change before upgrading +- Rollback capability if upgrade fails + +### Example Upgrade Output + +``` +🔍 Existing ADW setup detected! + +Current Phase: Enhanced + +Found infrastructure: +- Core modules: agent.py, agent_sdk.py +- CLI scripts: adw_prompt.py, adw_sdk_prompt.py, adw_slash_command.py +- Slash commands: 7 commands +- Workflows: 2 workflows + +Available upgrades: +- Scaled: Adds state management, worktree isolation, GitHub integration, + multi-phase workflows, and 15+ advanced commands + +Would you like to upgrade to Scaled? (y/n) +``` + +After confirmation: +``` +✅ Created backup in .adw_backups/20251103_102530/ + +Adding Scaled capabilities: +✅ Added adws/adw_modules/state.py +✅ Added adws/adw_modules/git_ops.py +✅ Added adws/adw_modules/worktree_ops.py +✅ Added adws/adw_modules/workflow_ops.py +✅ Added adws/adw_modules/github.py +✅ Added adws/adw_sdlc_iso.py +✅ Added 15 new slash commands +⚠️ Preserved customized: adws/adw_prompt.py + +🎉 Upgrade to Scaled completed successfully! + +Try the new capabilities: +- ./adws/adw_sdlc_iso.py 123 # Complete SDLC for issue #123 +- ./adws/adw_ship_iso.py 123 abc12345 # Ship changes to main +``` + +## Usage After Bootstrap + +### Execute Prompts +```bash +./adws/adw_prompt.py "analyze this code" +./adws/adw_prompt.py "quick syntax check" --model haiku +./adws/adw_prompt.py "refactor for performance" --model opus +``` + +Three models available: +- `haiku` - Fast & economical (2x speed, 1/3 cost) +- `sonnet` - Balanced excellence (default) +- `opus` - Maximum intelligence + +### Use Slash Commands +```bash +# Create a plan +./adws/adw_slash_command.py /chore abc123 "add logging" + +# Implement a plan +./adws/adw_slash_command.py /implement specs/chore-abc123-*.md +``` + +### Compound Workflows +```bash +# Plan + implement in one command +./adws/adw_chore_implement.py "add error handling to API" +``` + +## Validation + +After setup, validate with: + +```bash +# Run validation suite +~/.claude/skills/adw-bootstrap/utils/validator.py + +# With test execution +~/.claude/skills/adw-bootstrap/utils/validator.py --test +``` + +## Documentation + +- **SKILL.md** - Main skill logic and instructions +- **docs/principles.md** - Core ADW concepts and philosophy +- **docs/architecture.md** - Technical architecture deep dive +- **docs/usage-modes.md** - Subscription vs API modes +- **docs/examples.md** - Real-world bootstrap examples +- **reference/** - Working code examples for adaptation + +## Key Features + +### 1. Intelligent Adaptation +- Analyzes project structure and conventions +- Adapts reference code to fit the target project +- Handles novel structures without rigid templates + +### 2. Progressive Enhancement +- Start minimal, add features as needed +- Clear upgrade path (minimal → enhanced → scaled) +- No over-engineering + +### 3. Mode Flexibility +- **Subscription Mode**: No API key needed, perfect for development +- **API Mode**: Headless automation for CI/CD, webhooks, cron jobs +- Same infrastructure supports both + +### 4. Project Agnostic +- Works on Python, TypeScript, Go, Rust, polyglot projects +- Adapts to any framework or structure +- Handles monorepos and single packages + +### 5. 
Built-in Observability +- Structured outputs in `agents/{id}/` +- Multiple formats (JSONL, JSON, summaries) +- Debug agent behavior easily + +## Architecture + +### Two-Layer Model + +**Agentic Layer** (`adws/`, `.claude/`, `specs/`) +- Templates engineering patterns +- Teaches agents how to operate +- Orchestrates workflows + +**Application Layer** (`apps/`, `src/`, etc.) +- Your actual application code +- What agents read and modify + +### Subprocess vs SDK + +- **Subprocess** (agent.py): Simple, universal, minimal dependencies +- **SDK** (agent_sdk.py): Type-safe, async/await, interactive sessions + +Both work seamlessly in the same infrastructure. + +## Requirements + +- **Claude Code CLI** installed and accessible +- **Python 3.10+** for ADW scripts +- **ANTHROPIC_API_KEY** (optional, for API mode) +- **uv** (recommended) or other Python package manager + +## Troubleshooting + +### Skill doesn't trigger +- Check skill is in `~/.claude/skills/adw-bootstrap/` +- Verify SKILL.md has frontmatter with trigger phrases +- Try manual invocation: `/adw-bootstrap` + +### Bootstrap fails +- Ensure Claude Code CLI is installed: `claude --version` +- Check project directory is readable +- Look for error messages in Claude's response + +### Validation fails +- Run: `~/.claude/skills/adw-bootstrap/utils/validator.py` +- Check specific failures and fix issues +- Ensure scripts are executable: `chmod +x adws/*.py` + +### Scripts don't execute +- Make executable: `chmod +x adws/adw_prompt.py` +- Check Python version: `python --version` (need 3.10+) +- For uv scripts, ensure uv is installed: `uv --version` + +## Examples + +### Bootstrap Python Project +``` +"Set up ADWs in this FastAPI project" +→ Analyzes pyproject.toml, detects FastAPI +→ Creates enhanced setup with uv +→ Adapts validation to use pytest, ruff +→ Ready to use! +``` + +### Bootstrap TypeScript Project +``` +"Initialize AI developer workflows" +→ Analyzes package.json, detects Next.js +→ Creates enhanced setup +→ Adapts validation to use npm scripts +→ Python ADWs work on TypeScript code! +``` + +### Upgrade Existing Setup +``` +"Upgrade my ADW setup to enhanced" +→ Detects existing minimal setup +→ Adds SDK support and compound workflows +→ Preserves existing customizations +→ Enhanced features now available! +``` + +## Development + +### Testing the Skill + +```bash +# Test on this project (dog-fooding) +cd /path/to/project +# In Claude Code: +"Set up ADWs here" + +# Validate +~/.claude/skills/adw-bootstrap/utils/validator.py + +# Try it +./adws/adw_prompt.py "test prompt" +``` + +### Modifying Reference Code + +Reference implementations in `reference/` are copied to target projects. To update: + +1. Modify files in `reference/` +2. Test changes in a sample project +3. Update SKILL.md instructions if needed +4. Document changes in this README + +### Adding New Features + +To add new capabilities: + +1. **New ADW script**: Add to `reference/enhanced/` or `reference/scaled/` +2. **New slash command**: Add to `reference/*/commands/` +3. **Update SKILL.md**: Add instructions for adaptation +4. **Update docs**: Document the feature + +## Philosophy + +> "Template your engineering patterns, teach agents how to operate your codebase, scale compute to scale impact." + +ADWs represent a paradigm shift from **writing code yourself** to **teaching agents to write code**. This skill makes that paradigm accessible to any project. + +## License + +This skill is part of the ADW framework project. + +## Contributing + +Improvements welcome! 
Key areas: + +- Additional reference implementations +- Project type adapters +- Enhanced validation +- More examples +- Better documentation + +## Support + +- Check documentation in `docs/` +- Review examples in `docs/examples.md` +- Validate setup with `utils/validator.py` +- Read generated CLAUDE.md in target projects + +## Version + +1.0.0 - Initial release + +## Credits + +Built from the patterns developed in the tac8_app1__agent_layer_primitives project, extracting universal patterns for any codebase. diff --git a/skills/adw-bootstrap/SKILL.md b/skills/adw-bootstrap/SKILL.md new file mode 100644 index 0000000..d3be3f1 --- /dev/null +++ b/skills/adw-bootstrap/SKILL.md @@ -0,0 +1,1684 @@ +--- +name: adw-bootstrap +description: | + Bootstrap AI Developer Workflows (ADWs) infrastructure in any codebase. + Use when user wants to: "set up ADWs", "bootstrap agentic workflows", + "add AI developer workflows", "enable programmatic agent execution", + "initialize ADW infrastructure", or "set up programmatic Claude Code". + + This enables programmatic agent orchestration via subprocess/SDK, + reusable workflow templates, multi-phase workflows, and structured + observability for agent executions. +allowed-tools: [Read, Write, Glob, Grep, Bash(mkdir:*), Bash(chmod:*), Bash(cp:*), Bash(mv:*), Bash(git:*), Bash(uv:*), Bash(python3:*), Bash(which:*), Bash(date:*), Bash(ls:*), Edit, TodoWrite, WebFetch] +--- + +# AI Developer Workflows Bootstrap Skill + +## Mission + +Bootstrap **AI Developer Workflows (ADWs)** infrastructure that enables programmatic agent orchestration in any codebase. Transform a regular project into one where AI agents can be invoked programmatically to plan, implement, test, and deploy features. + +## What ADWs Enable + +After setup, developers can: +- **Execute prompts programmatically**: `./adws/adw_prompt.py "implement feature X"` +- **Use reusable templates**: `./adws/adw_slash_command.py /chore "task"` +- **Orchestrate multi-phase workflows**: Plan → Implement → Test → Deploy +- **Track agent behavior**: Structured outputs in `agents/{id}/` for debugging +- **Scale compute**: Run multiple agents in parallel for complex tasks + +## Core Philosophy: Intelligence Over Templating + +**You are NOT executing a rigid template substitution.** + +You will: +1. Read working reference implementations +2. Understand the patterns they demonstrate +3. Analyze the target project's structure and conventions +4. Intelligently adapt the references to fit the target +5. Make contextual decisions based on project needs + +**Use your reasoning.** Handle novel structures, mixed languages, and edge cases that no template could anticipate. + +## Two-Layer Architecture + +ADWs create a **two-layer architecture**: + +1. **Agentic Layer** (`adws/`, `.claude/`, `specs/`) - Templates engineering patterns, teaches agents how to operate +2. **Application Layer** (`apps/`, `src/`, etc.) - The actual application code that agents operate on + +The agentic layer wraps the application layer, providing a programmatic interface for AI-driven development. 
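Because the agentic layer is ordinary scripts on disk, anything that can spawn a process — a Makefile target, a CI job, another service — can drive it. A minimal sketch, assuming the `adw_prompt.py` entry point and `--model` flag described above (exact flags and output layout may differ per project):

```python
#!/usr/bin/env python3
"""Drive the agentic layer from ordinary code (illustrative sketch)."""
import subprocess
import sys

def run_adw(prompt: str, model: str = "sonnet") -> int:
    """Invoke a one-off ADW prompt execution and return its exit code."""
    result = subprocess.run(
        ["./adws/adw_prompt.py", prompt, "--model", model],
        capture_output=True,
        text=True,
    )
    # Structured outputs land under agents/{adw_id}/ for later inspection.
    print(result.stdout)
    if result.returncode != 0:
        print(result.stderr, file=sys.stderr)
    return result.returncode

if __name__ == "__main__":
    sys.exit(run_adw("summarize the application layer entry points", model="haiku"))
```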
+ +## Progressive Enhancement Model + +Setup happens in phases based on project needs: + +- **Minimal** (Always): Core subprocess execution, basic prompts, essential commands +- **Enhanced** (Recommended for dev projects): SDK support, compound workflows, richer commands +- **Scaled** (Production/teams): State management, triggers, testing, worktree isolation + +## IMPORTANT: Upgrade Detection + +**BEFORE starting fresh setup, ALWAYS check if ADWs already exist in this project.** + +### Detect Existing ADW Setup + +Check for existence of: +```bash +# Primary indicator +adws/adw_modules/agent.py + +# If exists, this is an ADW project - proceed to classification +``` + +If `adws/` directory exists, **DO NOT run fresh setup**. Instead, **classify and offer upgrade**. + +### Classify Current Phase + +Use file presence to determine current phase: + +**Minimal Phase Indicators:** +- ✅ `adws/adw_modules/agent.py` (core module) +- ✅ `adws/adw_prompt.py` (basic CLI) +- ✅ `.claude/commands/chore.md` (basic templates) +- ✅ `.claude/commands/implement.md` +- ❌ No `adws/adw_modules/agent_sdk.py` +- ❌ No `adws/adw_modules/state.py` + +**Enhanced Phase Indicators:** +- ✅ Everything from Minimal +- ✅ `adws/adw_modules/agent_sdk.py` (SDK support) +- ✅ `adws/adw_sdk_prompt.py` (SDK CLI) +- ✅ `adws/adw_slash_command.py` (command executor) +- ✅ `adws/adw_chore_implement.py` (compound workflows) +- ✅ `adws/adw_plan_tdd.py` (TDD planning for large tasks) +- ✅ `.claude/commands/feature.md` (richer templates) +- ✅ `.claude/commands/plan-tdd.md` (TDD task breakdown) +- ✅ `.claude/commands/prime.md` +- ❌ No `adws/adw_modules/state.py` +- ❌ No `adws/adw_modules/worktree_ops.py` + +**Scaled Phase Indicators:** +- ✅ Everything from Enhanced +- ✅ `adws/adw_modules/state.py` (state management) +- ✅ `adws/adw_modules/git_ops.py` (git operations) +- ✅ `adws/adw_modules/worktree_ops.py` (worktree isolation) +- ✅ `adws/adw_modules/workflow_ops.py` (workflow composition) +- ✅ `adws/adw_modules/github.py` (GitHub integration) +- ✅ `adws/adw_sdlc_iso.py` (multi-phase workflows) +- ✅ `.claude/commands/classify_issue.md` (advanced templates) +- ✅ `.claude/commands/install_worktree.md` + +### Report Current Phase to User + +When existing ADW setup is detected: + +``` +🔍 Existing ADW setup detected! + +Current Phase: + +Found infrastructure: +- Core modules: agent.py +- CLI scripts: adw_prompt.py +- Slash commands: commands +- Workflows: workflows + +Available upgrades: +- : + +Would you like to: +1. Upgrade to +2. Keep current setup (no changes) +3. 
Add specific features +``` + +### Upgrade Execution Process + +When user confirms upgrade: + +#### Step 1: Safety Backup + +Create timestamped backup: +```bash +mkdir -p .adw_backups +cp -r adws .adw_backups/adws_$(date +%Y%m%d_%H%M%S) +cp -r .claude .adw_backups/.claude_$(date +%Y%m%d_%H%M%S) +``` + +Inform user: "✅ Created backup in .adw_backups/" + +#### Step 2: Read Reference Implementations + +Based on target phase, read appropriate references: + +**For Minimal → Enhanced upgrade:** +- Read @reference/enhanced/adws/adw_modules/agent_sdk.py +- Read @reference/enhanced/adws/adw_sdk_prompt.py +- Read @reference/enhanced/adws/adw_slash_command.py +- Read @reference/enhanced/adws/adw_chore_implement.py +- Read @reference/enhanced/adws/adw_plan_tdd.py +- Read @reference/enhanced/commands/*.md (especially plan-tdd.md) + +**For Enhanced → Scaled upgrade:** +- Read @reference/scaled/adw_modules/state.py +- Read @reference/scaled/adw_modules/git_ops.py +- Read @reference/scaled/adw_modules/worktree_ops.py +- Read @reference/scaled/adw_modules/workflow_ops.py +- Read @reference/scaled/adw_modules/github.py +- Read @reference/scaled/workflows/*.py +- Read @reference/scaled/commands/*.md + +#### Step 3: Detect Customizations + +Before adding files, check if target paths exist: +```python +# Pseudocode for detection logic +for file_to_add in new_files: + if file_exists(file_to_add): + # Compare with reference + if file_is_customized(file_to_add): + # Skip or ask user + print(f"⚠️ {file_to_add} appears customized - preserving") + else: + # Can safely update + print(f"📝 Updating {file_to_add}") +``` + +**Never overwrite:** +- Any file with modification timestamp significantly after installation +- Any file with content that differs from known reference versions +- Any file in a `custom_` directory +- When in doubt, preserve and create `.new` instead + +#### Step 4: Add New Capabilities + +**For Enhanced upgrade**, add: +- `adws/adw_modules/agent_sdk.py` (if not exists) +- `adws/adw_sdk_prompt.py` (if not exists) +- `adws/adw_slash_command.py` (if not exists) +- `adws/adw_chore_implement.py` (if not exists) +- `adws/adw_plan_tdd.py` (if not exists) +- `.claude/commands/feature.md` (if not exists) +- `.claude/commands/plan-tdd.md` (if not exists) +- `.claude/commands/prime.md` (if not exists) +- `specs/plans/` directory (if not exists) + +**For Scaled upgrade**, add: +- `adws/adw_modules/state.py` (if not exists) +- `adws/adw_modules/git_ops.py` (if not exists) +- `adws/adw_modules/worktree_ops.py` (if not exists) +- `adws/adw_modules/workflow_ops.py` (if not exists) +- `adws/adw_modules/github.py` (if not exists) +- `adws/adw_modules/beads_integration.py` (if not exists) - Beads issue tracker integration +- `adws/adw_modules/data_types.py` (if not exists or needs extension) +- `adws/adw_modules/utils.py` (if not exists) +- `adws/adw_plan_iso.py` (if not exists) - Individual planning phase +- `adws/adw_build_iso.py` (if not exists) - Individual build/implementation phase +- `adws/adw_test_iso.py` (if not exists) - Individual testing phase +- `adws/adw_review_iso.py` (if not exists) - Individual review phase +- `adws/adw_document_iso.py` (if not exists) - Individual documentation phase +- `adws/adw_sdlc_iso.py` (if not exists) - Composite SDLC workflow +- `adws/adw_plan_build_test_review_iso.py` (if not exists) - Composite workflow without documentation +- `adws/adw_ship_iso.py` (if not exists) - Shipping/merge workflow +- `adws/adw_beads_ready.py` (if not exists) - Interactive beads task picker +- 
`.claude/commands/classify_issue.md` (if not exists) +- `.claude/commands/classify_adw.md` (if not exists) +- `.claude/commands/generate_branch_name.md` (if not exists) +- `.claude/commands/patch.md` (if not exists) +- `.claude/commands/install_worktree.md` (if not exists) +- `.claude/commands/cleanup_worktrees.md` (if not exists) +- `.claude/commands/test.md` (if not exists) +- `.claude/commands/review.md` (if not exists) +- `.claude/commands/document.md` (if not exists) +- `.claude/commands/pull_request.md` (if not exists) +- `.claude/commands/bug.md` (if not exists) +- `trees/` directory (create if not exists, add to .gitignore) + +#### Step 5: Update Dependencies + +**For Enhanced upgrade:** +- Check if scripts use uv inline deps (PEP 723) +- If agent_sdk.py is added, ensure claude-code-sdk is in dependencies + +**For Scaled upgrade:** +- Ensure gh CLI is available (for GitHub operations) +- Create data_types.py with extended models if needed +- Add any missing utility functions + +#### Step 6: Update Documentation + +Add new sections to CLAUDE.md (if it exists): +- Document new capabilities added +- Show examples of new workflows +- Update command reference + +Create/update README sections showing new usage patterns. + +#### Step 7: Validate Upgrade + +Run validation checks: +```bash +# Check all scripts are executable +# Verify imports resolve +# Test a simple prompt +./adws/adw_prompt.py "test upgrade" --model haiku +``` + +If validation passes: ✅ +If validation fails: Show error and offer to rollback + +#### Step 8: Report Upgrade Results + +``` +🎉 Upgrade to completed successfully! + +Added: +- new modules +- new workflows +- new slash commands + +Your customizations were preserved: +- + +Backup location: .adw_backups/ + +Try the new capabilities: +- +- +- + +To rollback: cp -r .adw_backups//* ./ +``` + +### Upgrade Special Cases + +**Minimal → Scaled (skip Enhanced):** +If user wants to jump directly to Scaled, add both Enhanced and Scaled capabilities in one upgrade. + +**Customized setups:** +If setup has significant customizations, offer to create new files with `.new` extension and let user merge manually. + +**Failed upgrades:** +If any step fails, automatically rollback to backup and report error. + +## Issue Tracking System Support + +ADWs work with **any issue tracking system** - not just GitHub. The Scaled phase includes abstractions for multiple issue trackers. + +### Supported Systems + +**Built-in Support:** +- **GitHub Issues** - Full integration with gh CLI +- **Beads** - Local SQLite-based issue tracking (offline-first) + +**Extensible Pattern:** +- GitLab Issues +- Jira +- Linear +- Notion +- Asana +- Custom systems + +### Issue Tracking Abstraction Pattern + +The key insight: All issue trackers provide similar information: +- Issue ID/number +- Title +- Description/body +- Status (open, in progress, closed) +- Labels/tags +- Assignees + +Create adapters that convert from tracker-specific format to a common `Issue` interface. + +#### Implementation Pattern + +**1. 
Define Common Issue Interface** (in `data_types.py`): + +```python +from typing import Optional, List +from pydantic import BaseModel + +class Issue(BaseModel): + """Universal issue representation across all trackers.""" + id: str # Can be number or string depending on tracker + title: str + description: Optional[str] = None + status: str # "open", "in_progress", "closed" + labels: List[str] = [] + assignees: List[str] = [] + url: Optional[str] = None + + # Tracker-specific metadata (optional) + tracker_type: str # "github", "beads", "gitlab", etc. + raw_data: Optional[dict] = None # Original response +``` + +**2. Create Tracker-Specific Integration Modules**: + +Each tracker gets its own module (e.g., `github_integration.py`, `beads_integration.py`): + +```python +# adws/adw_modules/beads_integration.py structure +def is_beads_issue(issue_id: str) -> bool: + """Check if issue ID is from beads tracker.""" + return "-" in issue_id and not issue_id.isdigit() + +def fetch_beads_issue(issue_id: str) -> tuple[Optional[Issue], Optional[str]]: + """Fetch beads issue and convert to universal Issue format.""" + # Run beads command + # Parse output + # Return Issue object or error + +def get_ready_beads_tasks() -> tuple[List[str], Optional[str]]: + """Get list of ready task IDs.""" + +def update_beads_status(issue_id: str, status: str) -> tuple[bool, Optional[str]]: + """Update issue status.""" + +def close_beads_issue(issue_id: str, reason: str) -> tuple[bool, Optional[str]]: + """Close issue with reason.""" +``` + +**3. Create Unified Fetch Function** (in `workflow_ops.py`): + +```python +def fetch_issue_unified(issue_id: str, logger) -> tuple[Optional[Issue], Optional[str]]: + """Fetch issue from any tracker, returns universal Issue object.""" + # Detect tracker type + if is_beads_issue(issue_id): + return fetch_beads_issue(issue_id) + elif issue_id.isdigit() or "/" in issue_id: + return fetch_github_issue(issue_id) + # Add more trackers... + else: + return None, f"Unknown issue format: {issue_id}" +``` + +**4. 
Usage in Workflows**: + +```python +# Workflows use the unified interface +issue, error = fetch_issue_unified(issue_number, logger) +if error: + logger.error(f"Failed to fetch issue: {error}") + sys.exit(1) + +# Work with universal Issue object +logger.info(f"Processing: {issue.title}") +logger.info(f"Status: {issue.status}") +``` + +#### Adding Support for New Trackers + +To add support for a new issue tracker (GitLab, Jira, etc.): + +**Step 1**: Create integration module `adws/adw_modules/{tracker}_integration.py` + +```python +# Example: gitlab_integration.py +import subprocess +from typing import Optional, List +from .data_types import Issue + +def is_gitlab_issue(issue_id: str) -> bool: + """Detect GitLab issue format (e.g., 'project#123').""" + return "#" in issue_id + +def fetch_gitlab_issue(issue_id: str) -> tuple[Optional[Issue], Optional[str]]: + """Fetch from GitLab API or CLI.""" + try: + # Parse project and issue number + project, number = issue_id.split("#") + + # Call GitLab CLI or API + result = subprocess.run( + ["glab", "issue", "view", number, "-R", project], + capture_output=True, text=True + ) + + if result.returncode != 0: + return None, f"GitLab CLI error: {result.stderr}" + + # Parse output and create Issue object + issue = Issue( + id=issue_id, + title=parse_title(result.stdout), + description=parse_description(result.stdout), + status=parse_status(result.stdout), + labels=parse_labels(result.stdout), + tracker_type="gitlab", + url=f"https://gitlab.com/{project}/-/issues/{number}" + ) + + return issue, None + + except Exception as e: + return None, str(e) + +def get_ready_gitlab_issues(project: str) -> tuple[List[str], Optional[str]]: + """Get ready issues from GitLab.""" + # Implementation... + +def update_gitlab_status(issue_id: str, status: str) -> tuple[bool, Optional[str]]: + """Update GitLab issue status.""" + # Implementation... +``` + +**Step 2**: Update detection in `workflow_ops.py`: + +```python +from .gitlab_integration import is_gitlab_issue, fetch_gitlab_issue + +def fetch_issue_unified(issue_id: str, logger) -> tuple[Optional[Issue], Optional[str]]: + """Fetch issue from any tracker.""" + if is_beads_issue(issue_id): + return fetch_beads_issue(issue_id) + elif is_gitlab_issue(issue_id): # Add detection + return fetch_gitlab_issue(issue_id) + elif issue_id.isdigit() or "/" in issue_id: + return fetch_github_issue(issue_id) + else: + return None, f"Unknown issue format: {issue_id}" +``` + +**Step 3**: Add interactive selector (optional): + +```python +# adws/adw_gitlab_ready.py +# Similar to adw_beads_ready.py but for GitLab +``` + +**Step 4**: Update documentation in CLAUDE.md with new tracker usage. + +#### Testing Integration Modules + +Each integration module should handle errors gracefully: + +```python +# Test cases to verify: +# 1. Issue not found +# 2. Tracker CLI not installed +# 3. Network errors (for API-based) +# 4. Invalid issue format +# 5. Missing fields in response +# 6. Empty or malformed output + +# Example error handling: +def fetch_tracker_issue(issue_id: str) -> tuple[Optional[Issue], Optional[str]]: + # Check CLI installed + if not shutil.which("tracker-cli"): + return None, "Tracker CLI not installed. Install with: " + + try: + result = subprocess.run(...) 
+ + if result.returncode != 0: + return None, f"Tracker error: {result.stderr}" + + # Validate required fields present + if not title: + return None, "Could not parse issue title from tracker output" + + return issue, None + + except Exception as e: + return None, f"Unexpected error: {e}" +``` + +#### Configuration Management + +Store user's tracker preference during setup: + +```python +# In state.py or config +ISSUE_TRACKER_CONFIG = { + "type": "beads", # or "github", "gitlab", etc. + "default_project": None, # For GitLab, Jira, etc. + "cli_path": None, # Optional custom CLI path +} + +# Load from .adw_config.json or similar +def get_tracker_config() -> dict: + """Load issue tracker configuration.""" + config_path = Path(".adw_config.json") + if config_path.exists(): + return json.loads(config_path.read_text()) + return {"type": "github"} # default +``` + +This pattern ensures: +- ✅ Consistent interface across all workflows +- ✅ Easy to add new trackers +- ✅ Graceful error handling +- ✅ User can choose their preferred system +- ✅ No vendor lock-in + +### Setup Question: Issue Tracking + +**ALWAYS ASK during Scaled setup:** + +``` +How do you track issues for this project? + +1. GitHub Issues (most common) +2. Beads (local/offline SQLite tracking) +3. GitLab Issues +4. Jira +5. Linear +6. Other (specify) + +Enter choice [1-6]: +``` + +Based on response, set up appropriate integration module. + +## Setup Process (Fresh Installation) + +### PHASE 0: Discover Issue Tracking Preference + +**Before analyzing the project, understand how they track work.** + +Ask the user: +``` +🎯 Issue Tracking Setup + +ADW workflows can integrate with your issue tracking system. +How does your team track issues? + +1. GitHub Issues (default for GitHub repos) +2. Beads (local SQLite - great for offline/personal projects) +3. GitLab Issues +4. Jira +5. Linear +6. Notion +7. Other (manual - you'll provide issue details) +8. None (pure prompt-driven, no issue integration) + +Choice [1-8]: +``` + +**Store the choice** for later setup phases. + +### PHASE 1: Analyze Target Project + +Before creating anything, deeply understand the target project. + +#### 1.1 Read Project Structure + +Use Glob to explore: +```bash +# Find configuration files +**/{package.json,pyproject.toml,go.mod,Cargo.toml,pom.xml} + +# Find source code locations +**/src/** +**/app/** +**/lib/** + +# Find existing tooling +**/{Dockerfile,docker-compose.yml,.github,Makefile} +``` + +#### 1.2 Identify Key Characteristics + +**Primary language(s)**: +- Python? (pyproject.toml, requirements.txt, setup.py) +- JavaScript/TypeScript? (package.json, tsconfig.json) +- Go? (go.mod, *.go files) +- Rust? (Cargo.toml, *.rs files) +- Polyglot? (multiple indicators) + +**Application layer location**: +- Where does the actual application code live? +- `src/`, `app/`, `apps/`, `lib/`, `pkg/`, root? +- Monorepo with multiple packages? +- Single package structure? + +**Package manager in use**: +- Python: uv, poetry, pip, pipenv? +- JavaScript: npm, yarn, pnpm, bun? +- Look at lock files and existing scripts + +**Framework/runtime**: +- FastAPI, Flask, Django? +- Express, Next.js, Nest.js? +- Framework-specific patterns to follow? + +**Existing development patterns**: +- How do they run the app currently? +- How do they run tests? +- What's their code style? (tabs vs spaces, line length, etc.) +- Any linters or formatters configured? + +**Project maturity**: +- Mature project with existing conventions? +- Greenfield project needing structure? 
+- Legacy code needing modernization? + +#### 1.3 Determine Setup Phase + +Based on analysis, recommend: + +**Minimal** if: +- Simple project or proof of concept +- User explicitly requests basic setup +- Just need adhoc prompt execution + +**Enhanced** if: +- Active development project (most common case) +- Team collaboration +- Need workflow automation +- Python or TypeScript project + +**Scaled** if: +- Production system +- Complex SDLC requirements +- Need CI/CD integration +- Large team or enterprise + +**Ask the user** which phase to install if unclear. + +### PHASE 2: Read Reference Implementations + +Before creating anything, read and understand the reference code. + +#### 2.1 Always Read (Minimal Phase) + +**Read @reference/minimal/adws/adw_modules/agent.py** + +This is the **core pattern**. Understand: +- How Claude Code CLI is invoked via subprocess +- How environment variables are filtered for security +- How JSONL streaming output is captured to files +- How output is parsed into structured JSON +- How retry logic handles transient failures +- How unique IDs track execution lineage +- How error messages are truncated to prevent flooding + +**Key abstractions**: +- `AgentPromptRequest` - Configuration for prompt execution +- `AgentPromptResponse` - Results with success/failure/retry info +- `prompt_claude_code()` - Core execution function +- `prompt_claude_code_with_retry()` - Execution with automatic retry +- `RetryCode` enum - Different error types for retry decisions + +**Read @reference/minimal/adws/adw_prompt.py** + +This shows how to **wrap agent.py for CLI use**. Understand: +- uv inline dependency management (`# /// script`) +- Click CLI parameter handling +- Rich console output for user feedback +- Unique ID generation per execution +- Output directory structure (`agents/{adw_id}/{agent_name}/`) +- Multiple output formats (JSONL, JSON array, final object, summary) + +**Read @reference/minimal/commands/chore.md** + +This shows how to **structure slash command templates**. Understand: +- Variable substitution ($1, $2, $ARGUMENTS) +- Embedded codebase context +- Step-by-step instructions format +- Validation command patterns +- Output specifications + +**Read @reference/minimal/commands/implement.md** + +Simple implementation template showing minimal structure. + +**Read @reference/minimal/env.sample** + +Shows configuration for both usage modes: +- Mode A: Subscription (no API key needed) +- Mode B: API-based (requires ANTHROPIC_API_KEY) + +#### 2.2 Read for Enhanced Phase + +**Read @reference/enhanced/adws/adw_modules/agent_sdk.py** + +This shows the **SDK-based approach**. Understand: +- Native async/await patterns +- Typed message objects (AssistantMessage, ResultMessage, etc.) +- SDK-specific error handling +- Interactive session support via ClaudeSDKClient +- Streaming with progress callbacks +- When to use SDK vs subprocess + +**Read @reference/enhanced/adws/adw_slash_command.py** + +Shows how to execute slash commands programmatically. 
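The pattern is worth having in mind before reading: a slash command is just a prompt whose first token names a template in `.claude/commands/`, so the executor composes that string and sends it through the same execution path as a plain prompt. A standalone sketch of the idea (not the reference implementation, which adds retries, environment filtering, and `agents/{id}/` output capture on top):

```python
"""Illustrative sketch only — see the reference for the real executor."""
import shlex
import subprocess

def run_slash_command(command: str, *args: str) -> int:
    """Compose "/command arg1 arg2 ..." and hand it to Claude Code non-interactively."""
    tokens = [f"/{command.lstrip('/')}"] + [shlex.quote(a) for a in args]
    prompt = " ".join(tokens)
    # `claude -p` executes a single prompt; Claude Code resolves the leading
    # /command against .claude/commands/{command}.md with $1, $2 substitution.
    result = subprocess.run(["claude", "-p", prompt], capture_output=True, text=True)
    print(result.stdout)
    return result.returncode

# Example: run_slash_command("chore", "abc123", "add logging")
```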
+ +**Read @reference/enhanced/adws/adw_chore_implement.py** + +Shows **compound workflow orchestration**: +- Multi-phase execution (planning + implementation) +- Output parsing between phases +- Comprehensive observability +- Workflow summary generation + +**Read @reference/enhanced/adws/adw_plan_tdd.py** + +Shows **TDD planning workflow** for breaking large tasks into agent-sized chunks: +- Subprocess execution with model selection (haiku/sonnet/opus) +- Breaks specifications into GitHub issue-sized tasks +- Agent-centric complexity metrics (context load, iterations, not human time) +- Dependency tracking and parallelization analysis +- Outputs to `specs/plans/plan-{id}.md` +- Smart Claude CLI detection (checks common install locations) + +**Key Insight**: Complexity measures **context switching cost** and **iteration depth**: +- **Size S**: Read 1-2 files, modify 1-2, write 5-10 tests, 1-2 iterations +- **Size M**: Read 3-5 files, modify 2-4, write 10-20 tests, 2-4 iterations +- **Size L**: Read 6+ files, modify 3-5, write 20+ tests, 4-6+ iterations + +**Read @reference/enhanced/commands/plan-tdd.md** + +Template for breaking down large specifications with: +- Agent-centric task sizing philosophy +- TDD approach (Red-Green-Refactor) for each task +- Dependency graph and implementation phases +- Critical path analysis +- Parallelization opportunities + +**Read @reference/enhanced/commands/feature.md** + +More comprehensive planning template with: +- User stories +- Problem/solution statements +- Multi-phase implementation plans +- Acceptance criteria + +**Read @reference/enhanced/commands/prime.md** + +Context loading pattern for priming Claude with project knowledge. + +### PHASE 3: Create Minimal Infrastructure + +Now create the ADW infrastructure, adapted to the target project. + +#### 3.1 Create Directory Structure + +```bash +mkdir -p adws/adw_modules +mkdir -p .claude/commands +mkdir -p specs +mkdir -p agents # For output observability +``` + +If the project already has any of these, note and work with existing structure. 
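A small check like the following keeps the creation step idempotent — existing directories are reported and left untouched (paths are the defaults from the layout above; adjust to the target project):

```python
from pathlib import Path

# Default ADW layout; adapt if the target project already uses different paths.
ADW_DIRS = ["adws/adw_modules", ".claude/commands", "specs", "agents"]

def ensure_adw_dirs(project_root: str = ".") -> None:
    for rel in ADW_DIRS:
        path = Path(project_root, rel)
        if path.exists():
            print(f"exists, leaving as-is: {rel}")  # work with existing structure
        else:
            path.mkdir(parents=True)
            print(f"created: {rel}")
```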
+ +#### 3.2 Create adws/adw_modules/agent.py + +**Do NOT just copy the reference.** Adapt it: + +**Understand the core patterns** from the reference, then create a version that: +- Uses paths appropriate to this project's structure +- Matches this project's code style (if established) +- Includes inline documentation explaining patterns +- Has the project root detection logic that makes sense + +**Key adaptations**: +- If project uses specific directory structure, adjust `project_root` calculation +- If project has special environment needs, adapt `get_safe_subprocess_env()` +- Keep all the core patterns: subprocess execution, JSONL parsing, retry logic +- Add comments explaining "why" for future maintainers + +**Essential components to preserve**: +- `AgentPromptRequest` and `AgentPromptResponse` data models +- `prompt_claude_code()` core function +- `prompt_claude_code_with_retry()` with retry logic +- JSONL to JSON conversion +- Error handling and truncation +- Environment variable filtering + +#### 3.3 Create adws/adw_prompt.py + +Adapt the reference to this project: + +**Dependencies**: +- If project uses uv, use uv script headers (PEP 723) +- If project uses poetry, adapt for `poetry run` +- If project uses npm, this might be a TypeScript version + +**Paths**: +- Adjust output paths to make sense for this project +- Adjust working directory defaults +- Adjust imports to find agent.py + +**Style**: +- Match the project's Python style if established +- Use their preferred CLI framework if they have one +- Follow their naming conventions + +**Make it executable**: +```bash +chmod +x adws/adw_prompt.py +``` + +#### 3.4 Create .claude/commands/chore.md + +Adapt the reference template: + +**Codebase Structure section**: +- Replace with ACTUAL structure of this project +- List where their app code actually lives +- Reference their actual README, docs, etc. + +**Plan Format**: +- Keep the core structure (metadata, description, tasks, validation) +- Adapt validation commands to this project's tooling + +**Validation Commands**: +- Use their actual test commands +- Use their actual linting commands +- Use their package manager + +**Example adaptation**: +```markdown +# Before (generic): +- `uv run python -m py_compile apps/*.py` + +# After (adapted to Next.js project): +- `npm run type-check` - Verify TypeScript types +- `npm run lint` - Run ESLint +- `npm run build` - Ensure build succeeds +``` + +#### 3.5 Create .claude/commands/implement.md + +Simple implementation template - minimal changes needed, maybe adapt the validation reporting to match their tooling. + +#### 3.6 Create .env.sample + +Adapt to show both usage modes: + +```bash +# Mode A: Claude Max Subscription (default - recommended for interactive use) +# No configuration needed if you have Claude Max subscription +# Claude Code will authenticate through your subscription + +# Mode B: API-Based Programmatic Execution (for automation, CI/CD, webhooks) +# Required for headless/automated workflows +# ANTHROPIC_API_KEY=sk-ant-... + +# Optional: Claude Code Path (auto-detected if not set) +# CLAUDE_CODE_PATH=claude + +# Optional: Maintain working directory +# CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR=true +``` + +**Note**: The agent module includes smart Claude CLI detection via `find_claude_cli()`: +1. Checks `CLAUDE_CODE_PATH` environment variable +2. Runs `which claude` command +3. Checks common install locations (~/.claude/local/claude, /usr/local/bin/claude, etc.) +4. 
Falls back to "claude" (assumes in PATH) + +#### 3.7 Update CLAUDE.md + +If CLAUDE.md exists, add ADW section. If not, create it with: + +**Essential Commands section**: +```markdown +## AI Developer Workflows (ADWs) + +Execute Claude Code prompts programmatically: + +```bash +# Direct prompt execution +./adws/adw_prompt.py "your prompt here" +./adws/adw_prompt.py "analyze this module" --model opus + +# Run slash commands (after enhanced setup) +./adws/adw_slash_command.py /chore "add feature X" +./adws/adw_slash_command.py /implement specs/chore-*.md +``` +``` + +**Architecture section**: +Explain the two-layer model, observability in `agents/` directory, etc. + +Use examples from THIS project structure. + +### PHASE 4: Validate Minimal Setup + +Before moving forward, validate everything works. + +#### 4.1 Check Prerequisites + +```bash +# Verify Claude Code installed +claude --version + +# Check if it's available (should show help) +claude --help +``` + +If not installed, guide user to install: +- macOS/Linux: Installation instructions +- Windows: Installation instructions + +#### 4.2 Test Prompt Execution + +```bash +# Try a simple prompt +./adws/adw_prompt.py "What is 2 + 2?" +``` + +Expected: +- ✓ Script executes +- ✓ Creates output in `agents/{id}/oneoff/` +- ✓ Multiple output files created (JSONL, JSON, summary) +- ✓ Returns success + +If subscription mode, should work with no API key. +If API mode, requires ANTHROPIC_API_KEY. + +#### 4.3 Verify Output Structure + +Check that `agents/{adw_id}/oneoff/` contains: +- `cc_raw_output.jsonl` - Raw streaming output +- `cc_raw_output.json` - Parsed JSON array +- `cc_final_object.json` - Final result object +- `custom_summary_output.json` - High-level summary + +#### 4.4 Report to User + +Show: +- ✅ What was created +- ✅ How to use it +- ✅ Test results +- ✅ Next steps (enhance if desired) + +#### 4.5 Run Automated Validation Tests (NEW) + +**Test the installation systematically:** + +```bash +# Test 1: Basic execution +./adws/adw_prompt.py "What is 2 + 2?" --model haiku --no-retry + +# Test 2: Empty prompt validation +./adws/adw_prompt.py "" --no-retry +# Should show error and exit with code 1 + +# Test 3: Output structure +ls -la agents/*/oneoff/ +# Should show 4 files: JSONL, JSON, final, summary + +# Test 4: Module imports +python3 << 'PYEOF' +import sys +sys.path.insert(0, 'adws') +from adw_modules.agent import prompt_claude_code +print("✓ Agent module imports correctly") +PYEOF +``` + +**Create test report:** +- Document which tests passed/failed +- Save results to `specs/minimal-validation-results.md` +- If any critical tests fail, STOP and fix before proceeding + +**Success Criteria:** +- ✓ All 4 tests pass +- ✓ No import errors +- ✓ Output files created correctly +- ✓ Error handling works (empty prompt rejected) + +### PHASE 5: Create Enhanced Infrastructure (If Requested) + +Only proceed if user wants enhanced setup or you recommended it. 
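At its core, the enhanced phase layers composition on top of the minimal primitives: one script chains a planning run and an implementation run. A rough, hypothetical sketch of that chaining (the real `adw_chore_implement.py` parses the plan path from the planning phase's structured output and records everything under `agents/{adw_id}/`):

```python
"""Hypothetical sketch of plan-then-implement chaining; not the reference wiring."""
import subprocess
import sys
import uuid
from pathlib import Path

def run(prompt: str) -> subprocess.CompletedProcess:
    return subprocess.run(["./adws/adw_prompt.py", prompt], capture_output=True, text=True)

def chore_then_implement(task: str) -> int:
    adw_id = uuid.uuid4().hex[:8]
    planning = run(f"/chore {adw_id} {task}")  # phase 1: produce a plan under specs/
    if planning.returncode != 0:
        print(planning.stderr, file=sys.stderr)
        return planning.returncode
    # Simplification: take the newest matching plan file; the reference extracts
    # the exact path from the planning output instead.
    plan = max(Path("specs").glob(f"chore-{adw_id}-*.md"), key=lambda p: p.stat().st_mtime)
    return run(f"/implement {plan}").returncode  # phase 2: execute the plan
```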
+ +#### 5.1 Add SDK Support (adws/adw_modules/agent_sdk.py) + +Adapt the SDK reference: + +**Dependencies**: +- Requires `claude-code-sdk` Python package +- Add to project dependencies or inline script deps + +**Adaptation**: +- Keep all the SDK patterns (async/await, typed messages, error handling) +- Adjust imports if needed for project structure +- Match project style +- Add documentation explaining when to use SDK vs subprocess + +**When to use SDK approach**: +- Interactive sessions (multi-turn conversations) +- Better type safety needed +- Async workflows +- Native Python integration + +**When to use subprocess approach**: +- Simple one-shot prompts +- Shell script compatibility +- Lower dependencies +- Easier debugging + +#### 5.2 Add Slash Command Executor (adws/adw_slash_command.py) + +Adapt for this project: +- Adjust paths +- Match style +- Use their package manager +- Make executable + +#### 5.3 Add Compound Workflow (adws/adw_chore_implement.py) + +This orchestrates: planning (/chore) → implementation (/implement) + +Adapt: +- Paths and imports +- Package manager +- Output formatting to match project conventions +- Error handling to project standards + +#### 5.4 Add Enhanced Commands + +**Create .claude/commands/feature.md**: +- Adapt codebase structure section to this project +- Adapt validation commands to their tooling +- Keep the comprehensive planning structure + +**Create .claude/commands/prime.md**: +- Update to read THIS project's docs +- Point to their actual README, architecture docs, etc. + +**Create .claude/commands/start.md** (if applicable): +- Update with commands to run THIS project's apps +- Their actual run commands, not generic ones + +#### 5.5 Update Documentation + +Add to CLAUDE.md: + +**Enhanced Commands**: +```markdown +### Compound Workflows + +# Plan and implement in one command +./adws/adw_chore_implement.py "add error handling to API" + +# Feature development +./adws/adw_slash_command.py /feature "user authentication" + +# Prime Claude with context +./adws/adw_slash_command.py /prime +``` + +```markdown +### TDD Planning for Large Tasks + +# Break down large spec into agent-sized tasks +./adws/adw_plan_tdd.py "Add user authentication with JWT and OAuth2" + +# From a spec file +./adws/adw_plan_tdd.py specs/feature-auth.md --spec-file + +# Use Opus for complex architecture planning +./adws/adw_plan_tdd.py "Build real-time collaboration system" --model opus + +# Output: specs/plans/plan-{id}.md with: +# - 25 tasks broken down (agent-optimized sizing) +# - Dependency graph and phases +# - TDD guidance for each task +# - Agent-centric complexity metrics +``` + +**Architecture Deep Dive**: +- Explain subprocess vs SDK approaches +- Show workflow orchestration patterns +- Document output observability structure + +#### 5.6 Validate Enhanced Setup + +```bash +# Test slash command execution +./adws/adw_slash_command.py /prime + +# Test compound workflow +./adws/adw_chore_implement.py "add a hello world endpoint" +``` + +Check outputs, verify everything works. 
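A quick way to "check outputs" programmatically is to confirm the latest run produced the four files described in Phase 4.3 (adjust the names if you changed the output layout during bootstrap):

```python
from pathlib import Path

EXPECTED = [
    "cc_raw_output.jsonl",
    "cc_raw_output.json",
    "cc_final_object.json",
    "custom_summary_output.json",
]

def check_latest_run(agents_dir: str = "agents") -> bool:
    runs = [p for p in Path(agents_dir).glob("*/*") if p.is_dir()]
    if not runs:
        print("no agent output directories found")
        return False
    latest = max(runs, key=lambda p: p.stat().st_mtime)
    missing = [name for name in EXPECTED if not (latest / name).exists()]
    if missing:
        print(f"{latest}: missing {missing}")
        return False
    print(f"{latest}: all expected outputs present")
    return True
```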
+ +#### 5.7 Run Comprehensive Enhanced Validation (NEW) + +**Automated test suite:** + +```bash +# Run all Enhanced regression tests +./adws/adw_prompt.py "test" --model haiku --no-retry # Base works +./adws/adw_slash_command.py /prime # Commands work +./adws/adw_chore_implement.py "hello world" --model haiku # Workflows work +./adws/adw_plan_tdd.py "test planning" --model haiku # TDD works + +# Verify critical patterns +grep -A3 "if not plan_file.exists()" adws/adw_plan_tdd.py | grep "sys.exit(1)" +# Must show exit(1) NOT exit(0) + +grep "Prompt cannot be empty" adws/adw_prompt.py +# Must have empty prompt validation +``` + +**Create Enhanced validation report:** +- Save results to `specs/enhanced-validation-results.md` +- Document pass/fail for each test +- Compare against Minimal (check for regressions) +- List any issues found + +**If failures occur:** +1. Document the failure with reproduction steps +2. Check reference implementation for correct pattern +3. Fix the issue +4. Re-run all tests +5. Document the fix + +**Success rate target:** 100% for Enhanced base functionality + +### PHASE 6: Create Scaled Infrastructure (If Requested) + +Only for production/enterprise needs. + +This adds: +- State management across workflow phases +- Git worktree isolation for safe operations +- Workflow orchestration helpers +- Trigger systems (webhooks, cron) +- Comprehensive testing infrastructure +- Database for agent execution history + +**Note**: This is advanced. Most projects won't need it initially. + +Guide through adding: +- `adws/adw_modules/state.py` - Workflow state tracking +- `adws/adw_modules/workflow_ops.py` - Orchestration helpers +- `adws/adw_triggers/` - Event-driven invocation +- `adws/adw_tests/` - Testing suite +- `trees/` - Git worktree isolation +- `.claude/hooks/` - Event handlers +- `.claude/settings.json` - Hook configuration + +### PHASE 7: Validate Scaled Installation (NEW) + +**IMPORTANT:** Scaled phase has many components. Systematic validation is critical. 
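The checks in 7.1–7.2 below are easy to script. A small harness like this runs each one, tallies results, and writes the summary file described in 7.3 (the command list is illustrative — populate it from the categories below):

```python
import subprocess
from pathlib import Path

# Illustrative subset of the Phase 7.2 checks; extend with the full categories.
CHECKS = {
    "prompt regression": './adws/adw_prompt.py "2+2" --model haiku --no-retry',
    "sdlc workflow help": "./adws/adw_sdlc_iso.py --help",
    "github cli present": "which gh",
}

def run_checks() -> None:
    results = {
        name: subprocess.run(cmd, shell=True, capture_output=True).returncode == 0
        for name, cmd in CHECKS.items()
    }
    passed = sum(results.values())
    lines = [
        "# Scaled Validation Results",
        "",
        f"Total tests: {len(results)} | Passed: {passed} | Failed: {len(results) - passed}",
        "",
    ]
    lines += [f"- {'✓' if ok else '❌'} {name}" for name, ok in results.items()]
    Path("specs").mkdir(exist_ok=True)
    Path("specs/scaled-validation-results.md").write_text("\n".join(lines) + "\n")

if __name__ == "__main__":
    run_checks()
```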
+ +#### 7.1 Create Test Plan + +Generate comprehensive test cases covering: + +**Base (Regression):** +- All Minimal tests still pass +- All Enhanced tests still pass + +**Scaled Modules:** +- state.py imports +- git_ops.py imports +- worktree_ops.py imports +- workflow_ops.py imports +- github.py imports +- beads_integration.py imports (if using Beads) + +**Scaled Workflows:** +- adw_plan_iso.py --help works +- adw_build_iso.py --help works +- adw_test_iso.py --help works +- adw_review_iso.py --help works +- adw_document_iso.py --help works +- adw_sdlc_iso.py --help works +- adw_ship_iso.py --help works + +**Issue Tracker Integration:** +- Beads CLI detected (if selected) +- GitHub CLI detected (if selected) +- Integration modules import correctly + +#### 7.2 Run Systematic Validation + +```bash +# Category 1: Regression Tests +./adws/adw_prompt.py "2+2" --model haiku --no-retry +./adws/adw_slash_command.py /prime +./adws/adw_plan_tdd.py "test" --model haiku + +# Category 2: Module Imports +python3 << 'PYEOF' +import sys +sys.path.insert(0, 'adws') +from adw_modules import state, git_ops, worktree_ops, workflow_ops +from adw_modules import github, beads_integration +print("✓ All Scaled modules import") +PYEOF + +# Category 3: Workflow Scripts Executable +for script in adw_plan_iso adw_build_iso adw_test_iso adw_review_iso adw_document_iso adw_sdlc_iso adw_ship_iso; do + ./adws/${script}.py --help >/dev/null 2>&1 && echo "✓ ${script}.py" || echo "❌ ${script}.py FAILED" +done + +# Category 4: Issue Tracker +which bd && echo "✓ Beads CLI found" || echo "⚠️ Beads CLI not found" +which gh && echo "✓ GitHub CLI found" || echo "⚠️ GitHub CLI not found" + +# Category 5: Slash Commands Exist +ls .claude/commands/*.md | wc -l +# Should show 17+ command files +``` + +#### 7.3 Document Results + +Create `specs/scaled-validation-results.md` with: + +**Test Summary:** +- Total tests: [count] +- Passed: [count] ([percentage]%) +- Failed: [count] +- Warnings: [count] + +**Failures (if any):** +For each failure: +- Test ID and name +- Command that failed +- Error message +- Reproduction steps +- Impact (Critical/High/Medium/Low) + +**Success Criteria:** +- ✓ 100% regression tests pass (Minimal + Enhanced) +- ✓ 95%+ Scaled module tests pass +- ✓ All critical workflows executable +- ✓ Issue tracker integration works + +#### 7.4 Fix and Iterate + +If test pass rate < 95%: + +**Step 1: Triage failures** +- Critical: Blocks all workflows +- High: Breaks major features +- Medium: Breaks optional features +- Low: Minor issues + +**Step 2: Fix critical issues first** +- Review reference implementation +- Compare with working version +- Apply fix +- Re-test + +**Step 3: Create iteration report** +Document in `specs/scaled-iteration-N-results.md`: +- Issues found +- Fixes applied +- Test results before/after +- Remaining issues + +**Step 4: Re-run full test suite** +- Don't skip regression tests +- Verify fixes didn't break other features +- Update test pass rate + +**Step 5: Repeat until 95%+ pass rate** + +**When to stop iterating:** +- 100% critical tests pass +- 95%+ overall pass rate +- All known issues documented +- User can use system productively + +## Special Adaptations for Different Project Types + +### Python Projects + +**Package manager detection**: +- uv? Use `# /// script` headers with inline deps +- poetry? Use `poetry add` and `poetry run` +- pip? Use requirements.txt or pip install -e . 
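For the uv case above, note that the reference ADW scripts already ship with PEP 723 inline metadata, so adapted scripts can carry their own dependencies in the file header instead of editing pyproject.toml. The header looks like this (the dependencies shown are the ones the reference scripts declare; trim or extend to match the project):

```python
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "pydantic",
#     "python-dotenv",
#     "click",
#     "rich",
# ]
# ///
# uv resolves and installs the listed dependencies automatically the first time
# the script is executed directly (e.g. ./adws/adw_prompt.py "...").
```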
+ +**Style matching**: +- Check for black, ruff, mypy configs +- Match their line length, quote style +- Follow their typing conventions + +**Validation commands**: +- Their test runner (pytest, unittest, etc.) +- Their linter (ruff, flake8, pylint) +- Their type checker (mypy, pyright) + +### TypeScript/JavaScript Projects + +**Consider TypeScript version of ADWs**: +- Could create TypeScript equivalents +- Or keep Python scripts (they work on any project) + +**Package manager**: +- npm, yarn, pnpm, or bun? +- Use their lock file type + +**Validation commands**: +- `npm run test` or their test command +- `npm run type-check` or equivalent +- `npm run lint` or equivalent +- `npm run build` to ensure builds + +### Monorepo Projects + +**Structure awareness**: +- Multiple packages in `packages/` or `apps/`? +- Each package has own ADWs? Or shared at root? +- Recommend root-level ADWs that can target specific packages + +**Adapt paths**: +- Commands need to specify which package +- Working directories may vary per operation + +### Polyglot Projects + +**Flexibility**: +- ADWs work on any code (they orchestrate Claude Code) +- But validation commands must cover all languages +- Documentation must explain multi-language structure + +## Usage Mode Configuration + +### Mode A: Claude Max Subscription (Recommended for Users) + +**How it works**: +- User has Claude Max subscription +- Claude Code authenticates through subscription +- No API key needed +- ADWs invoke `claude -p "prompt"` and it just works + +**Setup**: +- No `.env` file needed +- Scripts work out of the box +- Perfect for interactive development + +**Limitations**: +- User must be logged in +- Not suitable for fully automated/headless workflows +- Can't run in CI/CD without additional setup + +### Mode B: API-Based (For Automation) + +**How it works**: +- User has ANTHROPIC_API_KEY +- Scripts set the API key in subprocess environment +- Claude Code uses API for programmatic execution +- Enables headless automation + +**Setup**: +```bash +# Create .env file +echo "ANTHROPIC_API_KEY=sk-ant-..." > .env + +# Or set in environment +export ANTHROPIC_API_KEY=sk-ant-... +``` + +**Use cases**: +- CI/CD pipelines +- Webhook-triggered workflows +- Scheduled tasks (cron) +- Server-side automation + +### Detection and Configuration + +In `agent.py`, the environment handling already supports both: + +```python +def get_safe_subprocess_env(): + env = {...} + + # Only add API key if it exists + api_key = os.getenv("ANTHROPIC_API_KEY") + if api_key: + env["ANTHROPIC_API_KEY"] = api_key + + # Claude Code will use subscription if no key provided + return env +``` + +**Guide users**: +- Default to subscription mode (simpler) +- Document API mode for automation needs +- Show both in .env.sample with clear comments + +## Best Practices to Embed + +### 1. Environment Safety +- Filter environment variables before subprocess +- Only pass required vars +- Never leak secrets + +### 2. Observability First +- Always create structured output directories +- Multiple output formats (JSONL, JSON, summary) +- Include metadata (adw_id, session_id, timestamps) + +### 3. Error Handling +- Retry logic for transient failures +- Truncate error messages (prevent flooding) +- Clear error messages to users +- Distinguish retry-able from non-retry-able errors + +### 4. Type Safety +- Use Pydantic models for data +- Use SDK types when available +- Document expected shapes + +### 5. 
Documentation +- Inline code comments explain "why" +- CLAUDE.md with project-specific examples +- README in adws/ directory +- Reference upstream docs + +### 6. Progressive Enhancement +- Start simple (minimal) +- Add features as needed (enhanced) +- Scale for production (scaled) +- Don't over-engineer initially + +## Reporting to User + +After setup, tell the user: + +### ✅ What Was Created + +``` +AI Developer Workflows infrastructure is set up! + +Created: +- adws/adw_modules/agent.py - Core subprocess execution engine +- adws/adw_prompt.py - CLI wrapper for adhoc prompts +- .claude/commands/ - Slash command templates (chore, implement) +- specs/ - Directory for implementation plans +- agents/ - Observability outputs directory +- .env.sample - Configuration template +- CLAUDE.md - Updated with ADW documentation +``` + +### 📚 How to Use + +```bash +# Execute an adhoc prompt +./adws/adw_prompt.py "analyze the database schema" + +# Create a plan for a chore +./adws/adw_slash_command.py /chore $(uuidgen | cut -c1-8) "add logging" + +# Implement a plan +./adws/adw_slash_command.py /implement specs/chore-abc123-*.md + +# Or do both in one command (enhanced setup) +./adws/adw_chore_implement.py "add error handling" +``` + +### 🔍 Observability + +``` +Agent outputs saved to: +agents/{adw_id}/{agent_name}/ + cc_raw_output.jsonl - Raw streaming output + cc_raw_output.json - Parsed JSON array + cc_final_object.json - Final result object + custom_summary_output.json - High-level summary +``` + +### 📖 Documentation + +``` +See CLAUDE.md for: +- Complete command reference +- Architecture explanation +- Examples for this project +- Extension patterns +``` + +### 🚀 Next Steps + +``` +1. Try a simple prompt: + ./adws/adw_prompt.py "what does this project do?" + +2. Create your first plan: + ./adws/adw_slash_command.py /chore test "add a new feature" + +3. Read CLAUDE.md for more examples + +4. (Optional) Upgrade to enhanced setup for more features: + - SDK support for better type safety + - Compound workflows (plan + implement in one command) + - Richer slash commands (feature planning, testing) +``` + +### ⚙️ Configuration (If Needed) + +``` +For API-based automation (CI/CD, webhooks): +1. Create .env file: cp .env.sample .env +2. Add your API key: ANTHROPIC_API_KEY=sk-ant-... + +For interactive use with Claude Max subscription: +- No configuration needed! Just use the scripts. 
+``` + +## Troubleshooting + +### Claude Code not found + +```bash +# Check if installed +claude --version + +# If not, guide to installation +``` + +### Permission denied + +```bash +# Make scripts executable +chmod +x adws/*.py +``` + +### Import errors + +```bash +# Check dependencies +# For uv scripts, they auto-install on first run +# For poetry projects, run: poetry install +``` + +### API key issues + +```bash +# Verify key is set +echo $ANTHROPIC_API_KEY + +# Or check .env file +cat .env +``` + +## Success Criteria + +✅ Directory structure created correctly +✅ Reference code adapted to project context +✅ Scripts are executable +✅ **Automated validation tests run successfully** (NEW) +✅ **Test pass rate meets threshold:** (NEW) + - Minimal: 100% + - Enhanced: 100% + - Scaled: 95%+ +✅ **Test results documented in specs/** (NEW) +✅ **Critical issues fixed and verified** (NEW) +✅ Output directories created properly +✅ Documentation updated with project-specific examples +✅ User understands how to use the system +✅ User knows how to extend the system +✅ **User can iterate if issues found** (NEW) + +## Validation and Iteration Best Practices + +### Run Tests Immediately After Installation + +Don't wait - validate right after setup: +1. Create test plan for the phase being installed +2. Run all critical tests first +3. Document results in `specs/` +4. Fix critical issues before proceeding + +### Use Dogfooding Methodology + +Test the installation by actually using it: +- Don't just check if files exist +- Run actual workflows +- Test error handling +- Verify edge cases + +### Document Everything + +Create detailed test reports: +- Commands run +- Expected vs actual results +- Pass/fail status +- Error messages and stack traces +- Reproduction steps for failures + +### Iterate Until Production-Ready + +For each iteration: +1. Run full test suite +2. Document results +3. Fix highest-priority issues +4. Re-test (including regression tests) +5. Update test report +6. Repeat until success criteria met + +### Compare Across Phases + +Check for regressions: +- Minimal tests should pass in Enhanced +- Enhanced tests should pass in Scaled +- Each phase builds on previous +- Never break working features + +### Set Clear Success Thresholds + +Know when to stop: +- Minimal/Enhanced: 100% pass rate (smaller scope) +- Scaled: 95%+ pass rate (acceptable for large phase) +- All critical tests must pass +- Remaining failures documented with workarounds + +## Remember + +- **Use your intelligence** - Don't just copy/paste +- **Understand the project** - Every project is different +- **Adapt thoughtfully** - Make it fit their conventions +- **Document well** - Future maintainers will thank you +- **Test thoroughly** - Ensure everything works before finishing +- **Validate systematically** - Run automated tests after each phase (NEW) +- **Iterate on failures** - Don't accept broken installations (NEW) +- **Guide the user** - Show them how to use what you created + +You're not installing a template. You're teaching a codebase how to work with programmatic agents. diff --git a/skills/adw-bootstrap/docs/architecture.md b/skills/adw-bootstrap/docs/architecture.md new file mode 100644 index 0000000..faade34 --- /dev/null +++ b/skills/adw-bootstrap/docs/architecture.md @@ -0,0 +1,687 @@ +# ADW Architecture Deep Dive + +## System Overview + +ADW infrastructure creates a **programmatic interface** for AI-driven development by wrapping the application layer with an agentic layer that templates engineering patterns. 
+ +``` +┌─────────────────────────────────────────────────────────────┐ +│ Agentic Layer │ +│ ┌───────────────────────────────────────────────────────┐ │ +│ │ ADW Scripts (adws/) │ │ +│ │ - Execute prompts programmatically │ │ +│ │ - Orchestrate multi-phase workflows │ │ +│ │ - Invoke Claude Code CLI or SDK │ │ +│ └───────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ ┌───────────────────────────────────────────────────────┐ │ +│ │ Slash Commands (.claude/commands/) │ │ +│ │ - Reusable prompt templates │ │ +│ │ - Structured instructions for agents │ │ +│ │ - Variable substitution │ │ +│ └───────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ ┌───────────────────────────────────────────────────────┐ │ +│ │ Specifications (specs/) │ │ +│ │ - Implementation plans │ │ +│ │ - Step-by-step tasks │ │ +│ │ - Validation commands │ │ +│ └───────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ ┌───────────────────────────────────────────────────────┐ │ +│ │ Claude Code (subprocess or SDK) │ │ +│ │ - Executes prompts with tools │ │ +│ │ - Modifies application code │ │ +│ │ - Returns structured results │ │ +│ └───────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ ┌───────────────────────────────────────────────────────┐ │ +│ │ Observability (agents/) │ │ +│ │ - Structured outputs │ │ +│ │ - Execution tracking │ │ +│ │ - Debug artifacts │ │ +│ └───────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ Application Layer │ +│ - Your actual application code (apps/, src/, lib/) │ +│ - Tests, configs, documentation │ +│ - What agents read and modify │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Core Modules + +### agent.py - Subprocess Execution Engine + +**Purpose**: Core module for executing Claude Code CLI as subprocess + +**Key Components**: + +```python +# Data Models +class AgentPromptRequest(BaseModel): + prompt: str # What to execute + adw_id: str # Unique workflow ID + agent_name: str # Agent identifier + model: Literal["sonnet", "opus"] + output_file: str # Where to save output + working_dir: Optional[str] # Where to run + +class AgentPromptResponse(BaseModel): + output: str # Result text + success: bool # Did it succeed? + session_id: Optional[str] # Claude session ID + retry_code: RetryCode # Should we retry? + +# Core Functions +def prompt_claude_code(request: AgentPromptRequest) -> AgentPromptResponse +def prompt_claude_code_with_retry(request: AgentPromptRequest) -> AgentPromptResponse +def execute_template(request: AgentTemplateRequest) -> AgentPromptResponse +``` + +**Execution Flow**: + +1. **Build Command** + ```python + cmd = [ + "claude", + "-p", prompt, + "--model", model, + "--output-format", "stream-json", + "--verbose" + ] + ``` + +2. **Filter Environment** + ```python + env = get_safe_subprocess_env() + # Only passes essential variables + # Prevents environment leakage + ``` + +3. **Execute & Stream** + ```python + subprocess.run( + cmd, + stdout=output_file, # Stream to file + stderr=subprocess.PIPE, + env=env + ) + ``` + +4. **Parse JSONL Output** + ```python + messages = [json.loads(line) for line in f] + result_message = find_result_message(messages) + ``` + +5. **Convert to Multiple Formats** + - `cc_raw_output.jsonl` - Raw streaming JSONL + - `cc_raw_output.json` - Parsed JSON array + - `cc_final_object.json` - Final result object + +6. 
**Return Response** + ```python + return AgentPromptResponse( + output=result_text, + success=not is_error, + session_id=session_id, + retry_code=RetryCode.NONE + ) + ``` + +**Error Handling**: +- Retry codes distinguish transient from permanent errors +- Output truncation prevents console flooding +- Graceful degradation on parse failures + +### agent_sdk.py - SDK Execution Engine + +**Purpose**: Type-safe, async execution using Claude Code Python SDK + +**Key Patterns**: + +```python +# One-shot query +async def simple_query(prompt: str) -> str: + options = ClaudeCodeOptions(model="claude-sonnet-4-20250514") + texts = [] + async for message in query(prompt=prompt, options=options): + if isinstance(message, AssistantMessage): + texts.append(extract_text(message)) + return "\n".join(texts) + +# Interactive session +@asynccontextmanager +async def create_session(): + client = ClaudeSDKClient(options=options) + await client.connect() + try: + yield client + finally: + await client.disconnect() +``` + +**Advantages**: +- Native async/await patterns +- Typed message objects (AssistantMessage, ResultMessage) +- SDK-specific error handling +- Interactive session support +- Better IDE integration + +**When to Use**: +- Interactive multi-turn conversations +- Need better type safety +- Async workflow integration +- Native Python integration preferred + +## Data Flow Patterns + +### Pattern 1: Direct Prompt Execution + +``` +User Input + ↓ +./adws/adw_prompt.py "analyze this" + ↓ +AgentPromptRequest created + ↓ +prompt_claude_code_with_retry() + ↓ +subprocess: claude -p "analyze this" + ↓ +JSONL output streamed to file + ↓ +Parse JSONL → JSON conversion + ↓ +AgentPromptResponse returned + ↓ +Display to user + save summary +``` + +### Pattern 2: Slash Command Execution + +``` +User Input + ↓ +./adws/adw_slash_command.py /chore abc123 "task" + ↓ +Read .claude/commands/chore.md + ↓ +Substitute variables ($1, $2) + ↓ +AgentTemplateRequest created + ↓ +execute_template() → prompt_claude_code_with_retry() + ↓ +subprocess: claude -p "" + ↓ +Parse results + ↓ +Extract spec path from output + ↓ +Display + save +``` + +### Pattern 3: Compound Workflow + +``` +User Input + ↓ +./adws/adw_chore_implement.py "feature X" + ↓ +Generate unique adw_id + ↓ +┌─────────────────────────────────────┐ +│ Phase 1: Planning │ +├─────────────────────────────────────┤ +│ execute_template("/chore", args) │ +│ ↓ │ +│ Create plan in specs/ │ +│ ↓ │ +│ Parse output for plan path │ +└─────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────┐ +│ Phase 2: Implementation │ +├─────────────────────────────────────┤ +│ execute_template("/implement", path) │ +│ ↓ │ +│ Read plan from specs/ │ +│ ↓ │ +│ Execute step by step │ +│ ↓ │ +│ Modify application code │ +└─────────────────────────────────────┘ + ↓ +Workflow summary saved + ↓ +Display results to user +``` + +## Directory Structure + +### Minimal Setup + +``` +project/ +├── adws/ +│ ├── adw_modules/ +│ │ └── agent.py # Core execution +│ └── adw_prompt.py # CLI wrapper +├── .claude/commands/ +│ ├── chore.md # Planning template +│ └── implement.md # Implementation template +├── specs/ +│ └── *.md # Generated plans +├── agents/ # Output directory +│ └── {adw_id}/ +│ └── {agent_name}/ +│ ├── cc_raw_output.jsonl +│ ├── cc_raw_output.json +│ ├── cc_final_object.json +│ └── custom_summary_output.json +└── .env.sample # Configuration template +``` + +### Enhanced Setup + +``` +project/ +├── adws/ +│ ├── adw_modules/ +│ │ ├── agent.py # Subprocess execution +│ │ └── 
agent_sdk.py # SDK execution +│ ├── adw_prompt.py # Direct prompts +│ ├── adw_slash_command.py # Command executor +│ └── adw_chore_implement.py # Compound workflow +├── .claude/commands/ +│ ├── chore.md +│ ├── implement.md +│ ├── feature.md # Feature planning +│ ├── test.md # Test creation +│ ├── prime.md # Context loading +│ └── start.md # App startup +├── specs/ +│ ├── chore-*.md +│ └── feature-*.md +├── agents/ +│ └── {adw_id}/ +│ ├── planner/ # Planning agent outputs +│ ├── builder/ # Building agent outputs +│ └── workflow_summary.json # Overall summary +└── CLAUDE.md # Updated with ADW docs +``` + +### Scaled Setup (Production) + +``` +project/ +├── adws/ +│ ├── adw_modules/ +│ │ ├── agent.py # Subprocess execution +│ │ ├── agent_sdk.py # SDK execution +│ │ ├── data_types.py # Type definitions +│ │ ├── state.py # State management (adw_state.json) +│ │ ├── git_ops.py # Git operations (branch, commit, push, PR) +│ │ ├── worktree_ops.py # Worktree isolation management +│ │ ├── workflow_ops.py # High-level orchestration +│ │ ├── github.py # GitHub integration (gh CLI) +│ │ └── utils.py # Shared utilities +│ ├── adw_sdlc_iso.py # Complete SDLC (plan→build→test→review→doc) +│ ├── adw_plan_build_test_review_iso.py # Compound isolated workflow +│ ├── adw_ship_iso.py # Merge validation and shipping +│ ├── adw_plan_iso.py # Isolated planning phase +│ ├── adw_build_iso.py # Isolated build phase +│ ├── adw_test_iso.py # Isolated testing phase +│ ├── adw_review_iso.py # Isolated review phase +│ └── adw_document_iso.py # Isolated documentation phase +├── .claude/commands/ +│ ├── chore.md +│ ├── bug.md +│ ├── feature.md +│ ├── implement.md +│ ├── classify_issue.md # Issue classification +│ ├── classify_adw.md # ADW workflow selection +│ ├── generate_branch_name.md # Branch naming +│ ├── patch.md # Patch planning +│ ├── test.md # Test execution +│ ├── review.md # Code review +│ ├── document.md # Documentation generation +│ ├── pull_request.md # PR creation +│ ├── install_worktree.md # Worktree environment setup +│ ├── cleanup_worktrees.md # Worktree cleanup +│ ├── commit.md # Commit message generation +│ ├── prime.md +│ └── start.md +├── specs/ +│ ├── chore-*.md +│ ├── bug-*.md +│ ├── feature-*.md +│ └── patch/ # Patch plans +├── agents/ +│ └── {adw_id}/ +│ ├── {agent_name}/ # Per-agent outputs +│ ├── adw_state.json # Persistent state +│ └── workflow_summary.json +├── trees/ # Git worktree isolation +│ └── {adw_id}/ # Isolated working directory +│ ├── .ports.env # Port configuration +│ └── +├── .adw_backups/ # Safety backups during upgrades +├── CLAUDE.md # Updated with ADW docs +└── .env.sample # Configuration +``` + +**Key Scaled Phase Features:** + +1. **State Management**: Persistent state across workflow phases via `adw_state.json` +2. **Git Worktree Isolation**: Each ADW runs in isolated `trees/{adw_id}/` directory +3. **Port Management**: Deterministic port allocation (9100-9114 backend, 9200-9214 frontend) +4. **GitHub Integration**: Issue operations, PR management, comment posting via gh CLI +5. **Multi-Phase Workflows**: Complete SDLC automation (plan, build, test, review, document, ship) +6. **Workflow Composition**: High-level functions for issue classification, branch generation, etc. +7. 
**Advanced Commands**: Rich library of 20+ specialized slash commands + +## Output Structure + +### Observability Artifacts + +Each ADW execution creates: + +**cc_raw_output.jsonl** +- Line-delimited JSON +- Streaming output from Claude Code +- Each line is a message object +- Last line is result message + +**cc_raw_output.json** +- JSON array of all messages +- Easier for programmatic processing +- Contains full conversation history + +**cc_final_object.json** +- Just the last message (result) +- Quick access to final output +- Contains success/failure info + +**custom_summary_output.json** +- High-level execution summary +- Metadata (adw_id, prompt, model) +- Success status +- Session ID for debugging + +### Workflow Tracking + +For compound workflows: + +**workflow_summary.json** +```json +{ + "workflow": "chore_implement", + "adw_id": "abc12345", + "prompt": "add error handling", + "phases": { + "planning": { + "success": true, + "session_id": "session_xyz", + "agent": "planner", + "output_dir": "agents/abc12345/planner/" + }, + "implementation": { + "success": true, + "session_id": "session_abc", + "agent": "builder", + "output_dir": "agents/abc12345/builder/" + } + }, + "overall_success": true +} +``` + +## Retry Logic Architecture + +### Retry Code Classification + +```python +class RetryCode(str, Enum): + CLAUDE_CODE_ERROR = "claude_code_error" # Retry + TIMEOUT_ERROR = "timeout_error" # Retry + EXECUTION_ERROR = "execution_error" # Retry + ERROR_DURING_EXECUTION = "error_during_execution" # Retry + NONE = "none" # Don't retry +``` + +### Retry Decision Flow + +``` +Execute prompt + ↓ +Success? → Return response + ↓ No +Check retry_code + ↓ +NONE? → Return response (don't retry) + ↓ No +Retryable error? + ↓ Yes +Attempts < max_retries? + ↓ Yes +Wait (exponential backoff: 1s, 3s, 5s) + ↓ +Retry execution + ↓ +(Loop back to top) + ↓ +Max retries reached? 
→ Return last response +``` + +### Default Configuration + +```python +max_retries = 3 +retry_delays = [1, 3, 5] # seconds + +# Exponential backoff: +# Attempt 1: fail → wait 1s +# Attempt 2: fail → wait 3s +# Attempt 3: fail → wait 5s +# Attempt 4: return failure +``` + +## Environment Safety + +### Security Model + +```python +def get_safe_subprocess_env() -> Dict[str, str]: + """Only pass essential environment variables.""" + safe_vars = { + # Authentication + "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"), + + # System essentials + "HOME": os.getenv("HOME"), + "USER": os.getenv("USER"), + "PATH": os.getenv("PATH"), + "SHELL": os.getenv("SHELL"), + + # Python-specific + "PYTHONPATH": os.getenv("PYTHONPATH"), + "PYTHONUNBUFFERED": "1", + + # Working directory + "PWD": os.getcwd(), + } + + # Filter out None values + return {k: v for k, v in safe_vars.items() if v is not None} +``` + +**Why this matters**: +- Prevents leaking sensitive variables +- Subprocess isolation +- Explicit allowlist vs implicit inheritance +- Security boundary between layers + +## Subprocess vs SDK: Decision Matrix + +| Criteria | Subprocess (agent.py) | SDK (agent_sdk.py) | +|----------|----------------------|-------------------| +| **Type Safety** | Basic (dicts) | Strong (typed objects) | +| **Error Handling** | Generic exceptions | SDK-specific exceptions | +| **Async Support** | Subprocess management | Native async/await | +| **Dependencies** | Minimal (subprocess, json) | claude-code-sdk package | +| **Debugging** | Read JSONL files | SDK message inspection | +| **Interactive Sessions** | ❌ Not supported | ✅ ClaudeSDKClient | +| **Shell Compatibility** | ✅ Works everywhere | Python only | +| **Use Case** | Simple prompts, automation | Complex workflows, sessions | +| **Learning Curve** | Low (familiar subprocess) | Medium (SDK concepts) | +| **Performance** | Process spawn overhead | Native Python speed | + +## Scalability Patterns + +### Horizontal Scaling + +Run multiple ADWs in parallel: + +```bash +# Terminal 1 +./adws/adw_chore_implement.py "feature A" & + +# Terminal 2 +./adws/adw_chore_implement.py "feature B" & + +# Terminal 3 +./adws/adw_chore_implement.py "feature C" & + +# Each gets unique adw_id +# Outputs don't conflict +# Scale compute → scale features +``` + +### Vertical Scaling + +Break complex tasks into phases: + +``` +Large Feature + ↓ +Phase 1: Architecture Design + ↓ +Phase 2: Core Implementation + ↓ +Phase 3: Testing + ↓ +Phase 4: Documentation + ↓ +Phase 5: Deployment +``` + +Each phase is separate ADW execution with clear handoffs. + +## Key Design Decisions + +### 1. JSONL as Interchange Format +- Streamable (process as it arrives) +- Line-delimited (easy to parse) +- Standard format (widely supported) + +### 2. Unique ID per Execution +- Enables parallel execution +- Clear output isolation +- Audit trail maintenance +- Debugging support + +### 3. Multiple Output Formats +- JSONL for streaming +- JSON for processing +- Final object for quick access +- Summary for humans + +### 4. Subprocess First, SDK Optional +- Lower barrier to entry +- Subprocess is universal +- SDK adds sophistication later +- Progressive enhancement + +### 5. Environment Filtering +- Security boundary +- Explicit allowlist +- Prevents accidents +- Isolation guarantee + +## Extension Points + +Where to extend the architecture: + +1. **New ADW Scripts** (`adws/adw_*.py`) + - Add new orchestration patterns + - Implement domain-specific workflows + +2. 
**New Slash Commands** (`.claude/commands/*.md`) + - Template new engineering patterns + - Capture team conventions + +3. **New Agent Modules** (`adws/adw_modules/*.py`) + - Add state management + - Implement workflow helpers + - Add integrations + +4. **Hooks** (`.claude/hooks/*.py`) + - Pre/post tool use events + - Notification systems + - Validation gates + +5. **Triggers** (`adws/adw_triggers/*.py`) + - Webhook endpoints + - Cron jobs + - Event handlers + +6. **Custom Subagents** (`.claude/agents/*.md`) + - Specialized agent configurations + - Domain experts + - Tool restrictions + +## Performance Considerations + +### Bottlenecks + +1. **Claude Code Execution Time** + - Dominant factor + - Minutes per complex task + - Mitigate: Run in parallel + +2. **Subprocess Spawn Overhead** + - Minimal (~100ms) + - Negligible compared to execution + - SDK slightly faster but not significant + +3. **JSONL Parsing** + - Fast (JSON is efficient) + - Linear in message count + - Not a bottleneck in practice + +### Optimization Strategies + +1. **Parallel Execution** + - Run independent tasks concurrently + - Each gets own adw_id + - No shared state conflicts + +2. **Appropriate Model Selection** + - Use Sonnet for most tasks (faster, cheaper) + - Use Opus only for complex reasoning + - 2-3x speed difference + +3. **Caching** + - Claude Code has built-in caching + - Repeated prompts are faster + - Design for cache reuse + +4. **Progressive Enhancement** + - Start minimal (faster setup) + - Add features as needed + - Don't over-engineer initially diff --git a/skills/adw-bootstrap/docs/examples.md b/skills/adw-bootstrap/docs/examples.md new file mode 100644 index 0000000..f8a7e24 --- /dev/null +++ b/skills/adw-bootstrap/docs/examples.md @@ -0,0 +1,81 @@ +# ADW Bootstrap Examples + +This document shows real-world examples of bootstrapping ADW infrastructure in different project types. + +## Example 1: Python FastAPI Project + +### Before Bootstrap +``` +my-api/ +├── src/ +│ ├── main.py +│ └── routes/ +├── tests/ +├── pyproject.toml +└── README.md +``` + +### After Bootstrap (Enhanced) +``` +my-api/ +├── adws/ +│ ├── adw_modules/ +│ │ ├── agent.py +│ │ └── agent_sdk.py +│ ├── adw_prompt.py +│ ├── adw_slash_command.py +│ └── adw_chore_implement.py +├── .claude/commands/ +│ ├── chore.md # Adapted to FastAPI +│ ├── implement.md +│ ├── feature.md +│ └── prime.md +├── specs/ +├── agents/ +├── .env.sample +└── CLAUDE.md # Updated +``` + +### Usage +```bash +# Fast exploration with Haiku +./adws/adw_prompt.py "what authentication methods are used?" --model haiku + +# Implementation with Sonnet (default) +./adws/adw_chore_implement.py "add JWT authentication" + +# Security review with Opus +./adws/adw_prompt.py "review security in auth module" --model opus +``` + +## Example 2: Next.js TypeScript Project + +### Key Adaptations +- ADW scripts remain Python (work on any project) +- Validation commands use `npm run type-check`, `npm run build` +- start.md uses `npm run dev` + +### Usage +```bash +./adws/adw_chore_implement.py "add dark mode toggle" +./adws/adw_prompt.py "analyze component architecture" +``` + +## Example 3: Minimal Setup + +Small projects get minimal setup only: +- Just agent.py and adw_prompt.py +- Basic slash commands +- Can upgrade later if needed + +## Testing Your Setup + +Run validation: +```bash +~/.claude/skills/adw-bootstrap/utils/validator.py +``` + +Try a simple prompt: +```bash +./adws/adw_prompt.py "what does this project do?" 
+``` diff --git a/skills/adw-bootstrap/docs/principles.md b/skills/adw-bootstrap/docs/principles.md new file mode 100644 index 0000000..c2b576c --- /dev/null +++ b/skills/adw-bootstrap/docs/principles.md @@ -0,0 +1,300 @@ +# AI Developer Workflows (ADWs): Core Principles + +## What Are ADWs? + +**AI Developer Workflows (ADWs)** are executable scripts that combine deterministic code with non-deterministic, compute-scalable AI agents to perform complex development tasks programmatically. + +Instead of directly modifying code yourself, you **template your engineering patterns** and **teach agents how to operate your codebase**. This allows you to scale impact by scaling compute, not just effort. + +## The Two-Layer Architecture + +### Agentic Layer +**Purpose**: Template engineering patterns and teach agents how to operate + +**Components**: +- `adws/` - AI Developer Workflow scripts and modules +- `.claude/` - Slash commands, skills, hooks, agent configurations +- `specs/` - Implementation plans and specifications +- `agents/` - Agent execution outputs (observability) + +**Role**: Provides the programmatic interface for AI-driven development + +### Application Layer +**Purpose**: The actual application code that agents operate on + +**Components**: +- `apps/`, `src/`, `lib/`, etc. - Your application code +- `tests/` - Your test suites +- Application-specific configuration and resources + +**Role**: What the agents modify, test, and deploy + +## The 12 Leverage Points of Agentic Coding + +### In Agent (Core Four) +These directly control agent behavior during execution: + +1. **Context** - What information the agent has access to +2. **Model** - Which AI model processes the prompt (Sonnet vs Opus) +3. **Prompt** - The specific instructions given to the agent +4. **Tools** - What actions the agent can take (Read, Write, Bash, etc.) + +### Through Agent (Structural Eight) +These shape how you structure work for agents: + +5. **Standard Output** - Structured agent outputs for observability +6. **Types** - Data models and schemas that define interfaces +7. **Docs** - Documentation that provides context to agents +8. **Tests** - Validation that ensures agent work is correct +9. **Architecture** - System design that guides agent decisions +10. **Plans** - Specifications that break work into steps +11. **Templates** - Reusable prompt patterns (slash commands) +12. 
**AI Developer Workflows** - Orchestrated multi-agent processes + +## Core Patterns + +### Pattern 1: Subprocess-Based Execution + +**When to use**: Simple prompts, shell compatibility, lower dependencies + +```python +from agent import prompt_claude_code, AgentPromptRequest + +request = AgentPromptRequest( + prompt="Analyze this module", + adw_id="abc12345", + agent_name="analyzer", + model="sonnet", + output_file="agents/abc12345/analyzer/cc_raw_output.jsonl" +) + +response = prompt_claude_code(request) +if response.success: + print(response.output) +``` + +**Characteristics**: +- Invokes Claude Code CLI as subprocess +- Streams output to JSONL files +- Parses results into structured JSON +- Implements retry logic for transient failures +- Works with both subscription and API modes + +### Pattern 2: SDK-Based Execution + +**When to use**: Interactive sessions, better type safety, async workflows + +```python +from agent_sdk import simple_query + +response = await simple_query("Implement feature X") +print(response) +``` + +**Characteristics**: +- Native Python async/await +- Typed message objects +- SDK-specific error handling +- Interactive session support +- Better IDE integration + +### Pattern 3: Template-Based Development + +**Slash commands** are reusable prompt templates: + +```markdown +# .claude/commands/chore.md +Create a plan using this format: + +## Variables +adw_id: $1 +description: $2 + +## Instructions +1. Analyze the codebase starting with README.md +2. Create a step-by-step plan +3. Save to specs/chore-{adw_id}-{name}.md +``` + +Execute: `./adws/adw_slash_command.py /chore abc123 "add logging"` + +### Pattern 4: Workflow Orchestration + +**Compound workflows** chain multiple agent invocations: + +```python +# Phase 1: Planning +chore_response = execute_template( + slash_command="/chore", + args=[adw_id, description] +) + +# Extract plan path from response +plan_path = extract_plan_path(chore_response.output) + +# Phase 2: Implementation +implement_response = execute_template( + slash_command="/implement", + args=[plan_path] +) +``` + +## Observability First + +Every ADW execution creates structured outputs: + +``` +agents/{adw_id}/{agent_name}/ + cc_raw_output.jsonl # Raw streaming JSONL + cc_raw_output.json # Parsed JSON array + cc_final_object.json # Final result object + custom_summary_output.json # High-level summary +``` + +**Why this matters**: +- Debug agent behavior by reading raw outputs +- Programmatically process results from JSON +- Track lineage with unique IDs +- Audit what agents did and why + +## Progressive Enhancement + +Start simple, add complexity as needed: + +### Minimal (Always) +- Core subprocess execution (`agent.py`) +- Basic prompt wrapper (`adw_prompt.py`) +- Essential slash commands (chore, implement) +- Spec directory for plans + +**Good for**: Proof of concepts, simple automation, getting started + +### Enhanced (Recommended) +- SDK support (`agent_sdk.py`) +- Compound workflows (`adw_chore_implement.py`) +- Rich slash commands (feature, test, prime) +- Slash command executor + +**Good for**: Active development, team collaboration, feature automation + +### Scaled (Production) +- State management across phases +- Git worktree isolation +- Trigger systems (webhooks, cron) +- Testing infrastructure +- Comprehensive observability + +**Good for**: Production systems, enterprise teams, complex SDLC + +## Best Practices + +### 1. 
Environment Safety +```python +# Filter environment variables +def get_safe_subprocess_env(): + return { + "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"), + "HOME": os.getenv("HOME"), + "PATH": os.getenv("PATH"), + # Only essential variables + } +``` + +**Why**: Prevents environment variable leakage, security isolation + +### 2. Retry Logic +```python +def prompt_claude_code_with_retry(request, max_retries=3): + for attempt in range(max_retries + 1): + response = prompt_claude_code(request) + if response.success or response.retry_code == RetryCode.NONE: + return response + time.sleep(retry_delays[attempt]) +``` + +**Why**: Handle transient failures (network issues, rate limits, timeouts) + +### 3. Error Truncation +```python +def truncate_output(output, max_length=500): + if len(output) <= max_length: + return output + return output[:max_length-15] + "... (truncated)" +``` + +**Why**: Prevent console flooding from huge error messages + +### 4. Unique ID Tracking +```python +def generate_short_id(): + return str(uuid.uuid4())[:8] + +adw_id = generate_short_id() # abc12345 +# All outputs go to: agents/abc12345/ +``` + +**Why**: Track execution lineage, associate outputs, debug flows + +### 5. Type Safety +```python +from pydantic import BaseModel + +class AgentPromptRequest(BaseModel): + prompt: str + adw_id: str + model: Literal["sonnet", "opus"] + output_file: str +``` + +**Why**: Catch errors early, better IDE support, self-documenting + +## Usage Modes + +### Mode A: Claude Max Subscription +- User has Claude Max subscription +- No API key needed +- Claude Code authenticates through subscription +- Perfect for interactive development + +### Mode B: API-Based Automation +- Requires `ANTHROPIC_API_KEY` +- Enables headless automation +- Required for CI/CD, webhooks, cron jobs +- Programmatic agent execution + +**Both modes work with the same ADW infrastructure.** The code detects which mode to use automatically. + +## Philosophy: Scale Compute, Not Just Effort + +Traditional development: **You write the code** +- Linear scaling: More features = more time +- Bottleneck: Your available hours +- Hard to parallelize + +Agentic development: **You template patterns, agents write the code** +- Compute scaling: More features = more parallel agents +- Bottleneck: Compute availability +- Easy to parallelize + +**This is the fundamental shift.** ADWs enable this paradigm. + +## Key Insights + +1. **Separation of Concerns**: Agentic layer (how to operate) vs Application layer (what to operate on) + +2. **Progressive Enhancement**: Start minimal, add features as needed, scale when required + +3. **Observability**: Structured outputs make agent behavior transparent and debuggable + +4. **Intelligence Over Templating**: Provide patterns, let Claude adapt intelligently + +5. **Mode Flexibility**: Support both interactive (subscription) and automated (API) workflows + +6. 
**Compute as Leverage**: Scale impact by scaling compute, not just working harder + +## Further Reading + +- `architecture.md` - Detailed architecture patterns +- `usage-modes.md` - Deep dive on subscription vs API modes +- Reference implementations in `reference/` directory +- Main skill logic in `SKILL.md` diff --git a/skills/adw-bootstrap/docs/upgrades.md b/skills/adw-bootstrap/docs/upgrades.md new file mode 100644 index 0000000..68191f5 --- /dev/null +++ b/skills/adw-bootstrap/docs/upgrades.md @@ -0,0 +1,326 @@ +# ADW Infrastructure Upgrades + +This document explains how to upgrade existing ADW infrastructure from one phase to another. + +## Overview + +ADW Bootstrap supports **progressive enhancement** with three phases: +- **Minimal** → **Enhanced** → **Scaled** + +You can upgrade at any time without losing customizations. + +## When to Upgrade + +### Minimal → Enhanced +Upgrade when you: +- Want SDK-based execution for better type safety +- Need compound workflows (plan + implement in one command) +- Want interactive session support +- Are building a production application + +**What it adds:** +- SDK execution module (`agent_sdk.py`) +- SDK CLI wrapper (`adw_sdk_prompt.py`) +- Slash command executor (`adw_slash_command.py`) +- Compound workflow script (`adw_chore_implement.py`) +- Richer slash commands (feature.md, prime.md) + +### Enhanced → Scaled +Upgrade when you: +- Need state management across workflow phases +- Want git worktree isolation for parallel development +- Require GitHub integration (issues, PRs, comments) +- Need complete SDLC automation +- Building enterprise/team workflows + +**What it adds:** +- State management module (`state.py`) +- Git operations module (`git_ops.py`) +- Worktree isolation module (`worktree_ops.py`) +- Workflow composition module (`workflow_ops.py`) +- GitHub integration module (`github.py`) +- Multi-phase workflows (`adw_sdlc_iso.py`, `adw_ship_iso.py`, etc.) +- 15+ advanced slash commands +- Worktree directory structure (`trees/`) + +## Upgrade Process + +### 1. Detection + +The skill automatically detects existing ADW setup by checking for: +- `adws/adw_modules/agent.py` (primary indicator) +- Other key files to determine current phase + +### 2. Classification + +Based on file presence, classifies as: +- **Minimal**: Has agent.py, basic commands, no SDK +- **Enhanced**: Has SDK support, compound workflows, no state management +- **Scaled**: Has state management, worktree ops, GitHub integration + +### 3. User Confirmation + +Shows current phase and available upgrades: +``` +🔍 Existing ADW setup detected! + +Current Phase: Enhanced + +Available upgrades: +- Scaled: Adds state management, worktree isolation, GitHub integration + +Would you like to upgrade to Scaled? +``` + +### 4. Safety Backup + +Before making changes: +```bash +mkdir -p .adw_backups +cp -r adws .adw_backups/adws_$(date +%Y%m%d_%H%M%S) +cp -r .claude .adw_backups/.claude_$(date +%Y%m%d_%H%M%S) +``` + +### 5. Customization Detection + +Before adding each file: +- Check if file already exists +- Compare content with reference version +- If customized: Preserve and warn user +- If not customized: Safe to update +- When in doubt: Create `.new` instead of overwriting + +**Files never overwritten:** +- Any file with recent modification timestamp +- Any file with content differing from reference +- Any file in a `custom_` directory + +### 6. 
Add New Capabilities + +Only adds files that don't exist or aren't customized: +- New modules in `adws/adw_modules/` +- New workflow scripts in `adws/` +- New slash commands in `.claude/commands/` +- Directory structure (trees/, .adw_backups/) + +### 7. Dependency Updates + +**For Enhanced:** +- Ensure `claude-code-sdk` is in script dependencies +- Update inline deps in uv scripts (PEP 723) + +**For Scaled:** +- Verify `gh` CLI is available +- Add extended data types if needed +- Create utility functions + +### 8. Documentation Updates + +Updates CLAUDE.md (if unmodified): +- Document new capabilities +- Add usage examples +- Update command reference + +### 9. Validation + +Runs checks: +```bash +# Check executability +chmod +x adws/*.py + +# Test a simple prompt +./adws/adw_prompt.py "test upgrade" --model haiku +``` + +If validation fails, offers automatic rollback. + +### 10. Report Results + +``` +🎉 Upgrade to Scaled completed successfully! + +Added: +- 5 new modules +- 3 new workflows +- 15 new slash commands + +Your customizations were preserved: +- adws/adw_prompt.py (customized) + +Backup location: .adw_backups/20251103_102530 + +Try the new capabilities: +- ./adws/adw_sdlc_iso.py 123 +- ./adws/adw_ship_iso.py 123 abc12345 + +To rollback: cp -r .adw_backups/20251103_102530/* ./ +``` + +## Rollback + +If upgrade fails or you want to revert: + +```bash +# List available backups +ls -la .adw_backups/ + +# Rollback to specific backup +cp -r .adw_backups/adws_20251103_102530 adws/ +cp -r .adw_backups/.claude_20251103_102530 .claude/ + +# Or restore from most recent +LATEST=$(ls -t .adw_backups/ | head -1) +cp -r .adw_backups/$LATEST/* ./ +``` + +## Skip Phases + +You can jump phases: + +**Minimal → Scaled (skip Enhanced):** +The skill adds both Enhanced and Scaled capabilities in one upgrade. + +This is safe because: +- All files are additive +- No breaking changes between phases +- Dependencies are properly managed + +## Customization Preservation + +The skill intelligently preserves customizations: + +**Safe to update:** +- Files identical to reference versions +- Files with only minor formatting differences +- New files being added + +**Preserved:** +- Files with significant code changes +- Files with custom functionality +- Files modified recently (within 7 days) +- Files in custom_* directories + +**Resolution options:** +1. Keep custom version, skip update +2. Create `.new` with new version, let user merge +3. Ask user to choose (for important files) + +## Testing Upgrades + +Before upgrading production: + +1. Test on a branch: +```bash +git checkout -b test-adw-upgrade +# Run upgrade +# Test new capabilities +git checkout main +``` + +2. Use backup feature: +```bash +# Upgrade creates automatic backup +# If issues: cp -r .adw_backups/latest/* ./ +``` + +3. Validate thoroughly: +```bash +./adws/adw_prompt.py "test" --model haiku +# Try new workflows +# Check customizations still work +``` + +## Upgrade Triggers + +The skill activates upgrade mode when you say: +- "Upgrade my ADWs" +- "Upgrade ADW infrastructure" +- "Add enhanced ADW features" +- "Upgrade to scaled ADWs" +- "Add scaled capabilities" + +## Advanced: Partial Upgrades + +If you only want specific features: +- "Add git worktree support to my ADWs" +- "Add state management to my ADWs" +- "Add GitHub integration" + +The skill can add individual modules without full phase upgrade. 
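As a concrete illustration of the customization check described in steps 5 and 6 of the upgrade process, the core logic reduces to a content comparison against the skill's reference copy. The sketch below is simplified: the reference path shown is an assumption that depends on where the plugin is installed, and the real skill also weighs modification timestamps and `custom_*` directories.

```python
import filecmp
import shutil
from pathlib import Path

# Assumed location of the shipped reference files; adjust to your installation.
REFERENCE = Path.home() / ".claude/skills/adw-bootstrap/reference/enhanced/adws"
LOCAL = Path("adws")

def upgrade_file(name: str) -> str:
    ref, local = REFERENCE / name, LOCAL / name
    if not local.exists() or filecmp.cmp(ref, local, shallow=False):
        shutil.copy2(ref, local)                    # new or unmodified: safe to update
        return f"updated {local}"
    staged = local.with_name(local.name + ".new")   # customized: never overwrite
    shutil.copy2(ref, staged)
    return f"preserved {local}, new version staged at {staged}"

print(upgrade_file("adw_prompt.py"))
```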
+ +## Troubleshooting + +### Upgrade fails mid-process +- Automatic rollback to backup +- Check error message for specific issue +- Common causes: file permissions, missing dependencies + +### New features don't work +- Check dependencies installed: `uv pip list` +- Verify gh CLI available: `gh --version` +- Check file permissions: `chmod +x adws/*.py` + +### Customizations lost +- Check `.adw_backups/` directory +- Rollback and report issue +- Manual merge may be needed + +### Validation fails +- Check error output +- Verify Claude Code CLI: `claude --version` +- Test manually: `./adws/adw_prompt.py "test"` + +## Best Practices + +1. **Commit before upgrading** + ```bash + git add . + git commit -m "Pre-ADW upgrade checkpoint" + ``` + +2. **Review what changed** + ```bash + git diff # After upgrade + ``` + +3. **Test new features** + - Try each new capability + - Verify existing workflows still work + - Update documentation + +4. **Clean up backups periodically** + ```bash + # Keep last 3 backups + cd .adw_backups && ls -t | tail -n +4 | xargs rm -rf + ``` + +5. **Document customizations** + - Add comments explaining changes + - Create custom_* files for major modifications + - Keep notes in CLAUDE.md + +## Phase Comparison + +| Feature | Minimal | Enhanced | Scaled | +|---------|---------|----------|--------| +| Subprocess execution | ✅ | ✅ | ✅ | +| Basic CLI | ✅ | ✅ | ✅ | +| Basic slash commands | ✅ | ✅ | ✅ | +| SDK execution | ❌ | ✅ | ✅ | +| Interactive sessions | ❌ | ✅ | ✅ | +| Compound workflows | ❌ | ✅ | ✅ | +| State management | ❌ | ❌ | ✅ | +| Git worktree isolation | ❌ | ❌ | ✅ | +| GitHub integration | ❌ | ❌ | ✅ | +| Multi-phase workflows | ❌ | ❌ | ✅ | +| Port management | ❌ | ❌ | ✅ | +| Advanced commands | 2 | 7 | 20+ | + +## Support + +If you encounter issues during upgrade: +1. Check `.adw_backups/` for rollback +2. Review error messages carefully +3. Verify all dependencies installed +4. Test in clean environment +5. Report issue with upgrade log diff --git a/skills/adw-bootstrap/docs/usage-modes.md b/skills/adw-bootstrap/docs/usage-modes.md new file mode 100644 index 0000000..ef9fab1 --- /dev/null +++ b/skills/adw-bootstrap/docs/usage-modes.md @@ -0,0 +1,504 @@ +# ADW Usage Modes: Subscription vs API + +## Overview + +ADW infrastructure supports two distinct usage modes, enabling both interactive development and automated workflows without code changes. + +## Mode A: Claude Max Subscription + +### What It Is + +Run Claude Code through your Claude Max subscription without needing API keys. + +### How It Works + +``` +User with Claude Max subscription + ↓ +Logged in to Claude Code CLI + ↓ +ADW executes: claude -p "prompt" + ↓ +Claude Code uses subscription auth + ↓ +Prompt executed, results returned +``` + +No API key configuration needed - authentication happens automatically through your subscription. 
+ +### Setup + +**Zero configuration required:** + +```bash +# Just run the ADWs +./adws/adw_prompt.py "analyze this code" + +# No .env file needed +# No API key needed +# Works immediately +``` + +### Requirements + +- Active Claude Max subscription +- Claude Code CLI installed and logged in +- User must be authenticated + +### Advantages + +✅ **Simple** - No configuration, just works +✅ **Secure** - No API keys to manage +✅ **Interactive** - Perfect for development +✅ **Immediate** - No setup friction + +### Limitations + +❌ **User presence required** - Can't run headless +❌ **Single user** - Tied to your subscription +❌ **Limited automation** - Not ideal for CI/CD +❌ **Session-dependent** - Must be logged in + +### Perfect For + +- Interactive development +- Local experimentation +- Learning ADWs +- Personal projects +- Quick prototyping + +### Example Workflow + +```bash +# Morning: Start development +./adws/adw_chore_implement.py "add user authentication" + +# Afternoon: Create another feature +./adws/adw_chore_implement.py "implement password reset" + +# Evening: Code review +./adws/adw_prompt.py "review security in auth module" + +# All executed through your subscription +# No API key management +# Just works +``` + +## Mode B: API-Based Programmatic Execution + +### What It Is + +Run Claude Code programmatically using API keys for automated, headless workflows. + +### How It Works + +``` +Environment has ANTHROPIC_API_KEY + ↓ +ADW reads API key from environment + ↓ +ADW executes: claude -p "prompt" + ↓ +Claude Code uses API key for auth + ↓ +Prompt executed, results returned +``` + +API key is passed through filtered environment to subprocess. + +### Setup + +**Step 1: Create .env file** + +```bash +# Copy template +cp .env.sample .env + +# Edit .env +nano .env +``` + +**Step 2: Add your API key** + +```bash +# .env file +ANTHROPIC_API_KEY=sk-ant-api03-... 
+ +# Optional configurations +CLAUDE_CODE_PATH=claude +CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR=true +``` + +**Step 3: Secure the file** + +```bash +# Never commit .env +echo ".env" >> .gitignore + +# Restrict permissions +chmod 600 .env +``` + +### Requirements + +- Anthropic API key (from console.anthropic.com) +- API access enabled on account +- Claude Code CLI installed + +### Advantages + +✅ **Headless** - Runs without user interaction +✅ **Automatable** - Perfect for CI/CD +✅ **Multi-user** - Not tied to single subscription +✅ **Scriptable** - Full programmatic control +✅ **Reliable** - No session dependencies + +### Limitations + +❌ **Requires API key** - Additional setup +❌ **Costs** - API usage charges +❌ **Key management** - Security consideration +❌ **Environment config** - Must configure .env + +### Perfect For + +- CI/CD pipelines +- Webhook handlers +- Scheduled tasks (cron jobs) +- Server-side automation +- Team automation workflows +- Production deployments + +### Example Workflows + +#### CI/CD Integration + +```yaml +# .github/workflows/ai-review.yml +name: AI Code Review + +on: [pull_request] + +jobs: + review: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up environment + run: | + echo "ANTHROPIC_API_KEY=${{ secrets.ANTHROPIC_API_KEY }}" > .env + + - name: Run AI review + run: | + ./adws/adw_prompt.py "review this PR for security issues" +``` + +#### Webhook Handler + +```python +# webhook_server.py +from flask import Flask, request +import subprocess + +app = Flask(__name__) + +@app.route('/webhook/feature-request', methods=['POST']) +def handle_feature_request(): + data = request.json + description = data['description'] + + # Execute ADW with API key from environment + subprocess.run([ + './adws/adw_chore_implement.py', + description + ]) + + return {'status': 'processing'} +``` + +#### Cron Job + +```bash +# crontab -e + +# Run daily code quality check at 2 AM +0 2 * * * cd /path/to/project && ./adws/adw_prompt.py "analyze code quality and suggest improvements" >> /var/log/adw-daily.log 2>&1 +``` + +## Mode Detection + +The ADW infrastructure automatically detects which mode to use: + +```python +# In agent.py +def get_safe_subprocess_env(): + env = { + # System variables + "HOME": os.getenv("HOME"), + "PATH": os.getenv("PATH"), + # ... + } + + # Only add API key if it exists + api_key = os.getenv("ANTHROPIC_API_KEY") + if api_key: + env["ANTHROPIC_API_KEY"] = api_key + + # If no API key, Claude Code uses subscription + return env +``` + +**Detection logic:** +1. Check if `ANTHROPIC_API_KEY` exists in environment +2. If yes → API mode (pass key to subprocess) +3. If no → Subscription mode (Claude Code uses logged-in session) + +**No code changes needed** - same scripts work in both modes. 
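To see which mode a given environment will fall into, the same test can be run by hand. This is a convenience sketch, not part of the ADW scripts; also note that the reference scripts declare python-dotenv, so a key that lives only in `.env` may still put them in API mode:

```python
import os

# Mirrors the detection logic above: a key in the environment means API mode,
# otherwise Claude Code falls back to the logged-in subscription session.
mode = "API" if os.getenv("ANTHROPIC_API_KEY") else "subscription"
print(f"ADW executions from this environment will use {mode} mode")
```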
+ +## Comparison Matrix + +| Feature | Subscription Mode | API Mode | +|---------|------------------|----------| +| **Setup Complexity** | None | Moderate (API key) | +| **User Presence** | Required | Not required | +| **CI/CD Integration** | ❌ Difficult | ✅ Easy | +| **Cost** | Subscription price | API usage charges | +| **Security** | No key to manage | Must secure API key | +| **Automation** | Limited | Full | +| **Interactive Use** | ✅ Excellent | ✅ Works fine | +| **Headless Use** | ❌ Not practical | ✅ Perfect | +| **Multi-user** | Per subscription | Shared key possible | +| **Session Management** | Must stay logged in | None needed | + +## Switching Between Modes + +### Subscription → API + +```bash +# Create .env file +cat > .env << 'EOF' +ANTHROPIC_API_KEY=sk-ant-... +EOF + +# Secure it +chmod 600 .env + +# Now all ADW executions use API +./adws/adw_prompt.py "test" +``` + +### API → Subscription + +```bash +# Remove .env file +rm .env + +# Or comment out API key +# ANTHROPIC_API_KEY=sk-ant-... + +# Now all ADW executions use subscription +./adws/adw_prompt.py "test" +``` + +**Same scripts work in both modes** - just change environment. + +## Best Practices + +### For Subscription Mode + +1. **Stay logged in** + ```bash + # Check auth status + claude --version + ``` + +2. **Personal projects** + - Use for local development + - No .env file needed + +3. **Quick iteration** + - Fast setup + - Immediate feedback + +### For API Mode + +1. **Secure your keys** + ```bash + # Never commit + echo ".env" >> .gitignore + + # Restrict permissions + chmod 600 .env + + # Use secrets management in production + ``` + +2. **Rotate regularly** + ```bash + # Update API key periodically + # Revoke old keys + ``` + +3. **Monitor usage** + - Track API consumption + - Set up billing alerts + - Monitor costs + +4. **Use environment-specific keys** + ```bash + # Development + ANTHROPIC_API_KEY=sk-ant-dev-... + + # Production + ANTHROPIC_API_KEY=sk-ant-prod-... + ``` + +### For Both Modes + +1. **Document which mode you're using** + ```markdown + # README.md + + ## Setup + + This project uses Claude Max subscription mode by default. + For CI/CD, configure ANTHROPIC_API_KEY in .env. + ``` + +2. **Test in both modes** + - Ensure portability + - Validate automation path + - Document differences + +3. **Choose based on use case** + - Interactive development → Subscription + - Automation/CI/CD → API + - Both are valid + +## Troubleshooting + +### Subscription Mode Issues + +**Problem**: "Authentication failed" +```bash +# Solution: Re-login to Claude Code +claude login +``` + +**Problem**: "Command not found: claude" +```bash +# Solution: Install Claude Code CLI +# [Installation instructions] +``` + +**Problem**: "Session expired" +```bash +# Solution: Re-authenticate +claude logout +claude login +``` + +### API Mode Issues + +**Problem**: "Invalid API key" +```bash +# Solution: Check .env file +cat .env | grep ANTHROPIC_API_KEY + +# Verify key format: sk-ant-api03-... 
+# Get new key from console.anthropic.com +``` + +**Problem**: "API key not found" +```bash +# Solution: Ensure .env is loaded +# Check file location +ls -la .env + +# Verify environment +echo $ANTHROPIC_API_KEY +``` + +**Problem**: "Rate limit exceeded" +```bash +# Solution: Implement backoff or use subscription mode +# Reduce parallel executions +# Contact Anthropic for higher limits +``` + +## Security Considerations + +### Subscription Mode + +✅ **Secure by default** - No secrets to manage +✅ **Session-based** - Expires automatically +✅ **User-specific** - Can't be shared accidentally + +⚠️ **Physical access** - Anyone with terminal access can use +⚠️ **Session hijacking** - Theoretical risk in shared environments + +### API Mode + +✅ **Revocable** - Can disable keys anytime +✅ **Trackable** - Monitor usage per key +✅ **Rotatable** - Easy to update + +⚠️ **Key exposure** - Must protect .env file +⚠️ **Commit risk** - Can accidentally commit +⚠️ **Scope risk** - Key has broad permissions + +### Protection Strategies + +1. **For .env files** + ```bash + # Always in .gitignore + echo ".env" >> .gitignore + + # Restrict permissions + chmod 600 .env + + # Use git secrets scanning + git secrets --scan + ``` + +2. **For production** + ```bash + # Use secrets managers + - AWS Secrets Manager + - HashiCorp Vault + - GitHub Secrets + + # Not .env files in production + ``` + +3. **For CI/CD** + ```yaml + # Use repository secrets + secrets: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + + # Never hardcode + # Never commit + ``` + +## Recommendation + +**Start with Subscription Mode** +- Simpler setup +- Perfect for learning +- Great for development +- Zero configuration + +**Migrate to API Mode when needed** +- Adding CI/CD +- Implementing webhooks +- Scheduling tasks +- Production deployment + +**Keep both available** +- Developers use subscription +- Automation uses API +- Best of both worlds + +Same ADW infrastructure supports both - choose based on context. diff --git a/skills/adw-bootstrap/reference/enhanced/adws/adw_chore_implement.py b/skills/adw-bootstrap/reference/enhanced/adws/adw_chore_implement.py new file mode 100755 index 0000000..c617c50 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/adws/adw_chore_implement.py @@ -0,0 +1,501 @@ +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "pydantic", +# "python-dotenv", +# "click", +# "rich", +# ] +# /// +""" +Run chore planning and implementation workflow. + +This script runs two slash commands in sequence: +1. /chore - Creates a plan based on the prompt +2. 
/implement - Implements the plan created by /chore + +Usage: + # Method 1: Direct execution (requires uv) + ./adws/adw_chore_implement.py "Add error handling to all API endpoints" + + # Method 2: Using uv run + uv run adws/adw_chore_implement.py "Refactor database connection logic" + +Examples: + # Run with specific model + ./adws/adw_chore_implement.py "Add logging to agent.py" --model opus + + # Run from a different working directory + ./adws/adw_chore_implement.py "Update documentation" --working-dir /path/to/project + + # Run with verbose output + ./adws/adw_chore_implement.py "Add tests" --verbose +""" + +import os +import sys +import json +import re +from pathlib import Path +import click +from rich.console import Console +from rich.panel import Panel +from rich.table import Table +from rich.rule import Rule + +# Add the adw_modules directory to the path so we can import agent +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules")) + +from agent import ( + AgentTemplateRequest, + AgentPromptResponse, + execute_template, + generate_short_id, +) + +# Output file name constants +OUTPUT_JSONL = "cc_raw_output.jsonl" +OUTPUT_JSON = "cc_raw_output.json" +FINAL_OBJECT_JSON = "cc_final_object.json" +SUMMARY_JSON = "custom_summary_output.json" + + +def extract_plan_path(output: str) -> str: + """Extract the plan file path from the chore command output. + + Looks for patterns like: + - specs/chore-12345678-update-readme.md + - Created plan at: specs/chore-... + - Plan file: specs/chore-... + """ + # Try multiple patterns to find the plan path + patterns = [ + r"specs/chore-[a-zA-Z0-9\-]+\.md", + r"Created plan at:\s*(specs/chore-[a-zA-Z0-9\-]+\.md)", + r"Plan file:\s*(specs/chore-[a-zA-Z0-9\-]+\.md)", + r"path.*?:\s*(specs/chore-[a-zA-Z0-9\-]+\.md)", + ] + + for pattern in patterns: + match = re.search(pattern, output, re.IGNORECASE | re.MULTILINE) + if match: + return match.group(1) if match.groups() else match.group(0) + + # If no match found, raise an error + raise ValueError("Could not find plan file path in chore output") + + +@click.command() +@click.argument("prompt", required=True) +@click.option( + "--model", + type=click.Choice(["sonnet", "opus", "haiku"]), + default="sonnet", + help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)", +) +@click.option( + "--working-dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True), + help="Working directory for command execution (default: current directory)", +) +def main( + prompt: str, + model: str, + working_dir: str, +): + """Run chore planning and implementation workflow.""" + console = Console() + + # Generate a unique ID for this workflow + adw_id = generate_short_id() + + # Use current directory if no working directory specified + if not working_dir: + working_dir = os.getcwd() + + # Set default agent names + planner_name = "planner" + builder_name = "builder" + + console.print( + Panel( + f"[bold blue]ADW Chore & Implement Workflow[/bold blue]\n\n" + f"[cyan]ADW ID:[/cyan] {adw_id}\n" + f"[cyan]Model:[/cyan] {model}\n" + f"[cyan]Working Dir:[/cyan] {working_dir}", + title="[bold blue]🚀 Workflow Configuration[/bold blue]", + border_style="blue", + ) + ) + console.print() + + # Phase 1: Run /chore command + console.print(Rule("[bold yellow]Phase 1: Planning (/chore)[/bold yellow]")) + console.print() + + # Create the chore request + chore_request = AgentTemplateRequest( + agent_name=planner_name, + slash_command="/chore", + args=[adw_id, prompt], + 
adw_id=adw_id, + model=model, + working_dir=working_dir, + ) + + # Display chore execution info + chore_info_table = Table(show_header=False, box=None, padding=(0, 1)) + chore_info_table.add_column(style="bold cyan") + chore_info_table.add_column() + + chore_info_table.add_row("ADW ID", adw_id) + chore_info_table.add_row("ADW Name", "adw_chore_implement (planning)") + chore_info_table.add_row("Command", "/chore") + chore_info_table.add_row("Args", f'{adw_id} "{prompt}"') + chore_info_table.add_row("Model", model) + chore_info_table.add_row("Agent", planner_name) + + console.print( + Panel( + chore_info_table, + title="[bold blue]🚀 Chore Inputs[/bold blue]", + border_style="blue", + ) + ) + console.print() + + plan_path = None + + try: + # Execute the chore command + with console.status("[bold yellow]Creating plan...[/bold yellow]"): + chore_response = execute_template(chore_request) + + # Display the chore result + if chore_response.success: + # Success panel + console.print( + Panel( + chore_response.output, + title="[bold green]✅ Planning Success[/bold green]", + border_style="green", + padding=(1, 2), + ) + ) + + # Extract the plan path from the output + try: + plan_path = extract_plan_path(chore_response.output) + console.print(f"\n[bold cyan]Plan created at:[/bold cyan] {plan_path}") + except ValueError as e: + console.print( + Panel( + f"[bold red]Could not extract plan path: {str(e)}[/bold red]\n\n" + "The chore command succeeded but the plan file path could not be found in the output.", + title="[bold red]❌ Parse Error[/bold red]", + border_style="red", + ) + ) + sys.exit(3) + + else: + # Error panel + console.print( + Panel( + chore_response.output, + title="[bold red]❌ Planning Failed[/bold red]", + border_style="red", + padding=(1, 2), + ) + ) + console.print( + "\n[bold red]Workflow aborted: Planning phase failed[/bold red]" + ) + sys.exit(1) + + # Save chore phase summary + chore_output_dir = f"./agents/{adw_id}/{planner_name}" + chore_summary_path = f"{chore_output_dir}/{SUMMARY_JSON}" + + with open(chore_summary_path, "w") as f: + json.dump( + { + "phase": "planning", + "adw_id": adw_id, + "slash_command": "/chore", + "args": [adw_id, prompt], + "path_to_slash_command_prompt": ".claude/commands/chore.md", + "model": model, + "working_dir": working_dir, + "success": chore_response.success, + "session_id": chore_response.session_id, + "retry_code": chore_response.retry_code, + "output": chore_response.output, + "plan_path": plan_path, + }, + f, + indent=2, + ) + + # Show chore output files + console.print() + + # Files saved panel for chore phase + chore_files_table = Table(show_header=True, box=None) + chore_files_table.add_column("File Type", style="bold cyan") + chore_files_table.add_column("Path", style="dim") + chore_files_table.add_column("Description", style="italic") + + chore_files_table.add_row( + "JSONL Stream", + f"{chore_output_dir}/{OUTPUT_JSONL}", + "Raw streaming output from Claude Code", + ) + chore_files_table.add_row( + "JSON Array", + f"{chore_output_dir}/{OUTPUT_JSON}", + "All messages as a JSON array", + ) + chore_files_table.add_row( + "Final Object", + f"{chore_output_dir}/{FINAL_OBJECT_JSON}", + "Last message entry (final result)", + ) + chore_files_table.add_row( + "Summary", + chore_summary_path, + "High-level execution summary with metadata", + ) + + console.print( + Panel( + chore_files_table, + title="[bold blue]📄 Planning Output Files[/bold blue]", + border_style="blue", + ) + ) + + console.print() + + # Phase 2: Run /implement command + 
console.print( + Rule("[bold yellow]Phase 2: Implementation (/implement)[/bold yellow]") + ) + console.print() + + # Create the implement request + implement_request = AgentTemplateRequest( + agent_name=builder_name, + slash_command="/implement", + args=[plan_path], + adw_id=adw_id, + model=model, + working_dir=working_dir, + ) + + # Display implement execution info + implement_info_table = Table(show_header=False, box=None, padding=(0, 1)) + implement_info_table.add_column(style="bold cyan") + implement_info_table.add_column() + + implement_info_table.add_row("ADW ID", adw_id) + implement_info_table.add_row("ADW Name", "adw_chore_implement (building)") + implement_info_table.add_row("Command", "/implement") + implement_info_table.add_row("Args", plan_path) + implement_info_table.add_row("Model", model) + implement_info_table.add_row("Agent", builder_name) + + console.print( + Panel( + implement_info_table, + title="[bold blue]🚀 Implement Inputs[/bold blue]", + border_style="blue", + ) + ) + console.print() + + # Execute the implement command + with console.status("[bold yellow]Implementing plan...[/bold yellow]"): + implement_response = execute_template(implement_request) + + # Display the implement result + if implement_response.success: + # Success panel + console.print( + Panel( + implement_response.output, + title="[bold green]✅ Implementation Success[/bold green]", + border_style="green", + padding=(1, 2), + ) + ) + + if implement_response.session_id: + console.print( + f"\n[bold cyan]Session ID:[/bold cyan] {implement_response.session_id}" + ) + else: + # Error panel + console.print( + Panel( + implement_response.output, + title="[bold red]❌ Implementation Failed[/bold red]", + border_style="red", + padding=(1, 2), + ) + ) + + # Save implement phase summary + implement_output_dir = f"./agents/{adw_id}/{builder_name}" + implement_summary_path = f"{implement_output_dir}/{SUMMARY_JSON}" + + with open(implement_summary_path, "w") as f: + json.dump( + { + "phase": "implementation", + "adw_id": adw_id, + "slash_command": "/implement", + "args": [plan_path], + "path_to_slash_command_prompt": ".claude/commands/implement.md", + "model": model, + "working_dir": working_dir, + "success": implement_response.success, + "session_id": implement_response.session_id, + "retry_code": implement_response.retry_code, + "output": implement_response.output, + }, + f, + indent=2, + ) + + # Show implement output files + console.print() + + # Files saved panel for implement phase + implement_files_table = Table(show_header=True, box=None) + implement_files_table.add_column("File Type", style="bold cyan") + implement_files_table.add_column("Path", style="dim") + implement_files_table.add_column("Description", style="italic") + + implement_files_table.add_row( + "JSONL Stream", + f"{implement_output_dir}/{OUTPUT_JSONL}", + "Raw streaming output from Claude Code", + ) + implement_files_table.add_row( + "JSON Array", + f"{implement_output_dir}/{OUTPUT_JSON}", + "All messages as a JSON array", + ) + implement_files_table.add_row( + "Final Object", + f"{implement_output_dir}/{FINAL_OBJECT_JSON}", + "Last message entry (final result)", + ) + implement_files_table.add_row( + "Summary", + implement_summary_path, + "High-level execution summary with metadata", + ) + + console.print( + Panel( + implement_files_table, + title="[bold blue]📄 Implementation Output Files[/bold blue]", + border_style="blue", + ) + ) + + # Show workflow summary + console.print() + console.print(Rule("[bold blue]Workflow Summary[/bold blue]")) 
+ console.print() + + summary_table = Table(show_header=True, box=None) + summary_table.add_column("Phase", style="bold cyan") + summary_table.add_column("Status", style="bold") + summary_table.add_column("Output Directory", style="dim") + + # Planning phase row + planning_status = "✅ Success" if chore_response.success else "❌ Failed" + summary_table.add_row( + "Planning (/chore)", + planning_status, + f"./agents/{adw_id}/{planner_name}/", + ) + + # Implementation phase row + implement_status = "✅ Success" if implement_response.success else "❌ Failed" + summary_table.add_row( + "Implementation (/implement)", + implement_status, + f"./agents/{adw_id}/{builder_name}/", + ) + + console.print(summary_table) + + # Create overall workflow summary + workflow_summary_path = f"./agents/{adw_id}/workflow_summary.json" + os.makedirs(f"./agents/{adw_id}", exist_ok=True) + + with open(workflow_summary_path, "w") as f: + json.dump( + { + "workflow": "chore_implement", + "adw_id": adw_id, + "prompt": prompt, + "model": model, + "working_dir": working_dir, + "plan_path": plan_path, + "phases": { + "planning": { + "success": chore_response.success, + "session_id": chore_response.session_id, + "agent": planner_name, + "output_dir": f"./agents/{adw_id}/{planner_name}/", + }, + "implementation": { + "success": implement_response.success, + "session_id": implement_response.session_id, + "agent": builder_name, + "output_dir": f"./agents/{adw_id}/{builder_name}/", + }, + }, + "overall_success": chore_response.success + and implement_response.success, + }, + f, + indent=2, + ) + + console.print( + f"\n[bold cyan]Workflow summary:[/bold cyan] {workflow_summary_path}" + ) + console.print() + + # Exit with appropriate code + if chore_response.success and implement_response.success: + console.print( + "[bold green]✅ Workflow completed successfully![/bold green]" + ) + sys.exit(0) + else: + console.print( + "[bold yellow]⚠️ Workflow completed with errors[/bold yellow]" + ) + sys.exit(1) + + except Exception as e: + console.print( + Panel( + f"[bold red]{str(e)}[/bold red]", + title="[bold red]❌ Unexpected Error[/bold red]", + border_style="red", + ) + ) + sys.exit(2) + + +if __name__ == "__main__": + main() diff --git a/skills/adw-bootstrap/reference/enhanced/adws/adw_modules/agent_sdk.py b/skills/adw-bootstrap/reference/enhanced/adws/adw_modules/agent_sdk.py new file mode 100644 index 0000000..dba9d02 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/adws/adw_modules/agent_sdk.py @@ -0,0 +1,436 @@ +""" +Claude Code SDK - The SDK Way + +This module demonstrates the idiomatic way to use the Claude Code Python SDK +for programmatic agent interactions. It focuses on clean, type-safe patterns +using the SDK's native abstractions. 
+ +Key Concepts: +- Use `query()` for one-shot operations +- Use `ClaudeSDKClient` for interactive sessions +- Work directly with SDK message types +- Leverage async/await for clean concurrency +- Configure options for your use case + +Example Usage: + # Simple query + async for message in query(prompt="What is 2 + 2?"): + if isinstance(message, AssistantMessage): + print(extract_text(message)) + + # With options + options = ClaudeCodeOptions( + model="claude-sonnet-4-20250514", + allowed_tools=["Read", "Write"], + permission_mode="bypassPermissions" + ) + async for message in query(prompt="Create hello.py", options=options): + process_message(message) + + # Interactive session + async with create_session() as client: + await client.query("Debug this error") + async for msg in client.receive_response(): + handle_message(msg) +""" + +import logging +from pathlib import Path +from typing import AsyncIterator, Optional, List +from contextlib import asynccontextmanager + +# Import all SDK components we'll use +from claude_code_sdk import ( + # Main functions + query, + ClaudeSDKClient, + + # Configuration + ClaudeCodeOptions, + PermissionMode, + + # Message types + Message, + AssistantMessage, + UserMessage, + SystemMessage, + ResultMessage, + + # Content blocks + ContentBlock, + TextBlock, + ToolUseBlock, + ToolResultBlock, + + # Errors + ClaudeSDKError, + CLIConnectionError, + CLINotFoundError, + ProcessError, +) + +# Set up logging +logger = logging.getLogger(__name__) + + +# ============================================================================ +# UTILITY FUNCTIONS +# ============================================================================ + +def extract_text(message: AssistantMessage) -> str: + """Extract all text content from an assistant message. + + The SDK way: Work directly with typed message objects. + + Args: + message: AssistantMessage with content blocks + + Returns: + Concatenated text from all text blocks + """ + texts = [] + for block in message.content: + if isinstance(block, TextBlock): + texts.append(block.text) + return "\n".join(texts) + + +def extract_tool_uses(message: AssistantMessage) -> List[ToolUseBlock]: + """Extract all tool use blocks from an assistant message. + + Args: + message: AssistantMessage with content blocks + + Returns: + List of ToolUseBlock objects + """ + return [ + block for block in message.content + if isinstance(block, ToolUseBlock) + ] + + +def get_result_text(messages: List[Message]) -> str: + """Extract final result text from a list of messages. + + Args: + messages: List of messages from a query + + Returns: + Result text or assistant responses + """ + # First check for ResultMessage + for msg in reversed(messages): + if isinstance(msg, ResultMessage) and msg.result: + return msg.result + + # Otherwise collect assistant text + texts = [] + for msg in messages: + if isinstance(msg, AssistantMessage): + text = extract_text(msg) + if text: + texts.append(text) + + return "\n".join(texts) + + +# ============================================================================ +# ONE-SHOT QUERIES (The Simple SDK Way) +# ============================================================================ + +async def simple_query(prompt: str, model: str = "claude-sonnet-4-5-20250929") -> str: + """Simple one-shot query with text response. + + The SDK way: Direct use of query() with minimal setup. 
+ + Args: + prompt: What to ask Claude + model: Which model to use + + Returns: + Text response from Claude + + Example: + response = await simple_query("What is 2 + 2?") + print(response) # "4" or "2 + 2 equals 4" + """ + options = ClaudeCodeOptions(model=model) + + texts = [] + async for message in query(prompt=prompt, options=options): + if isinstance(message, AssistantMessage): + text = extract_text(message) + if text: + texts.append(text) + + return "\n".join(texts) if texts else "No response" + + +async def query_with_tools( + prompt: str, + allowed_tools: List[str], + working_dir: Optional[Path] = None +) -> AsyncIterator[Message]: + """Query with specific tools enabled. + + The SDK way: Configure options for your use case. + + Args: + prompt: What to ask Claude + allowed_tools: List of tool names to allow + working_dir: Optional working directory + + Yields: + SDK message objects + + Example: + async for msg in query_with_tools( + "Create a Python script", + allowed_tools=["Write", "Read"] + ): + if isinstance(msg, AssistantMessage): + for block in msg.content: + if isinstance(block, ToolUseBlock): + print(f"Using tool: {block.name}") + """ + options = ClaudeCodeOptions( + allowed_tools=allowed_tools, + cwd=str(working_dir) if working_dir else None, + permission_mode="bypassPermissions" # For automated workflows + ) + + async for message in query(prompt=prompt, options=options): + yield message + + +async def collect_query_response( + prompt: str, + options: Optional[ClaudeCodeOptions] = None +) -> tuple[List[Message], Optional[ResultMessage]]: + """Collect all messages from a query. + + The SDK way: Async iteration with type checking. + + Args: + prompt: What to ask Claude + options: Optional configuration + + Returns: + Tuple of (all_messages, result_message) + + Example: + messages, result = await collect_query_response("List files") + if result and not result.is_error: + print("Success!") + for msg in messages: + process_message(msg) + """ + if options is None: + options = ClaudeCodeOptions() + + messages = [] + result = None + + async for message in query(prompt=prompt, options=options): + messages.append(message) + if isinstance(message, ResultMessage): + result = message + + return messages, result + + +# ============================================================================ +# INTERACTIVE SESSIONS (The SDK Client Way) +# ============================================================================ + +@asynccontextmanager +async def create_session( + model: str = "claude-sonnet-4-5-20250929", + working_dir: Optional[Path] = None +): + """Create an interactive session with Claude. + + The SDK way: Use context managers for resource management. + + Args: + model: Which model to use + working_dir: Optional working directory + + Yields: + Connected ClaudeSDKClient + + Example: + async with create_session() as client: + await client.query("Hello") + async for msg in client.receive_response(): + print(msg) + """ + options = ClaudeCodeOptions( + model=model, + cwd=str(working_dir) if working_dir else None, + permission_mode="bypassPermissions" + ) + + client = ClaudeSDKClient(options=options) + await client.connect() + + try: + yield client + finally: + await client.disconnect() + + +async def interactive_conversation(prompts: List[str]) -> List[Message]: + """Have an interactive conversation with Claude. + + The SDK way: Bidirectional communication with the client. 
+ + Args: + prompts: List of prompts to send in sequence + + Returns: + All messages from the conversation + + Example: + messages = await interactive_conversation([ + "What's the weather like?", + "Tell me more about clouds", + "How do they form?" + ]) + """ + all_messages = [] + + async with create_session() as client: + for prompt in prompts: + # Send prompt + await client.query(prompt) + + # Collect response + async for msg in client.receive_response(): + all_messages.append(msg) + if isinstance(msg, ResultMessage): + break + + return all_messages + + +# ============================================================================ +# ERROR HANDLING (The SDK Way) +# ============================================================================ + +async def safe_query(prompt: str) -> tuple[Optional[str], Optional[str]]: + """Query with comprehensive error handling. + + The SDK way: Handle specific SDK exceptions. + + Args: + prompt: What to ask Claude + + Returns: + Tuple of (response_text, error_message) + + Example: + response, error = await safe_query("Help me debug this") + if error: + print(f"Error: {error}") + else: + print(f"Response: {response}") + """ + try: + response = await simple_query(prompt) + return response, None + + except CLINotFoundError: + return None, "Claude Code CLI not found. Install with: npm install -g @anthropic-ai/claude-code" + + except CLIConnectionError as e: + return None, f"Connection error: {str(e)}" + + except ProcessError as e: + return None, f"Process error (exit code {e.exit_code}): {str(e)}" + + except ClaudeSDKError as e: + return None, f"SDK error: {str(e)}" + + except Exception as e: + return None, f"Unexpected error: {str(e)}" + + +# ============================================================================ +# ADVANCED PATTERNS (The SDK Way) +# ============================================================================ + +async def stream_with_progress( + prompt: str, + on_text: Optional[callable] = None, + on_tool: Optional[callable] = None +) -> ResultMessage: + """Stream query with progress callbacks. + + The SDK way: Process messages as they arrive. + + Args: + prompt: What to ask Claude + on_text: Callback for text blocks (optional) + on_tool: Callback for tool use blocks (optional) + + Returns: + Final ResultMessage + + Example: + result = await stream_with_progress( + "Analyze this codebase", + on_text=lambda text: print(f"Claude: {text}"), + on_tool=lambda tool: print(f"Using: {tool.name}") + ) + print(f"Cost: ${result.total_cost_usd:.4f}") + """ + result = None + + async for message in query(prompt=prompt): + if isinstance(message, AssistantMessage): + for block in message.content: + if isinstance(block, TextBlock) and on_text: + on_text(block.text) + elif isinstance(block, ToolUseBlock) and on_tool: + on_tool(block) + + elif isinstance(message, ResultMessage): + result = message + + return result + + +async def query_with_timeout(prompt: str, timeout_seconds: float = 30) -> Optional[str]: + """Query with timeout protection. + + The SDK way: Use asyncio for timeout control. 
+ + Args: + prompt: What to ask Claude + timeout_seconds: Maximum time to wait + + Returns: + Response text or None if timeout + + Example: + response = await query_with_timeout("Complex analysis", timeout_seconds=60) + if response is None: + print("Query timed out") + """ + import asyncio + + try: + # Create the query task + async def _query(): + return await simple_query(prompt) + + # Run with timeout + response = await asyncio.wait_for(_query(), timeout=timeout_seconds) + return response + + except asyncio.TimeoutError: + logger.warning(f"Query timed out after {timeout_seconds} seconds") + return None + diff --git a/skills/adw-bootstrap/reference/enhanced/adws/adw_sdk_prompt.py b/skills/adw-bootstrap/reference/enhanced/adws/adw_sdk_prompt.py new file mode 100755 index 0000000..5370634 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/adws/adw_sdk_prompt.py @@ -0,0 +1,470 @@ +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "pydantic", +# "python-dotenv", +# "click", +# "rich", +# "claude-code-sdk", +# "anyio", +# ] +# /// +""" +Run Claude Code prompts using the official Python SDK. + +This ADW demonstrates using the Claude Code Python SDK for both one-shot +and interactive sessions. The SDK provides better type safety, error handling, +and a more Pythonic interface compared to subprocess-based implementations. + +Usage: + # One-shot query (default) + ./adws/adw_sdk_prompt.py "Hello Claude Code" + + # Interactive session + ./adws/adw_sdk_prompt.py --interactive + + # Resume a previous session + ./adws/adw_sdk_prompt.py --interactive --session-id abc123 + + # With specific model + ./adws/adw_sdk_prompt.py "Create a FastAPI app" --model opus + + # From different directory + ./adws/adw_sdk_prompt.py "List files here" --working-dir /path/to/project + +Examples: + # Simple query + ./adws/adw_sdk_prompt.py "Explain async/await in Python" + + # Interactive debugging session + ./adws/adw_sdk_prompt.py --interactive --context "Debugging a memory leak" + + # Resume session with context + ./adws/adw_sdk_prompt.py --interactive --session-id abc123 --context "Continue debugging" + + # Query with tools + ./adws/adw_sdk_prompt.py "Create a Python web server" --tools Read,Write,Bash + +Key Features: + - Uses official Claude Code Python SDK + - Supports both one-shot and interactive modes + - Better error handling with typed exceptions + - Native async/await support + - Clean message type handling +""" + +import os +import sys +import json +import asyncio +from pathlib import Path +from typing import Optional, List +import click +from rich.console import Console +from rich.panel import Panel +from rich.table import Table +from rich.live import Live +from rich.spinner import Spinner +from rich.text import Text +from rich.prompt import Prompt + +# Add the adw_modules directory to the path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules")) + +# Import SDK functions from our clean module +from agent_sdk import ( + simple_query, + query_with_tools, + collect_query_response, + create_session, + safe_query, + stream_with_progress, + extract_text, + extract_tool_uses, +) + +# Import SDK types +from claude_code_sdk import ( + ClaudeCodeOptions, + AssistantMessage, + ResultMessage, + TextBlock, + ToolUseBlock, +) + + +def generate_short_id() -> str: + """Generate a short ID for tracking.""" + import uuid + + return str(uuid.uuid4())[:8] + + +async def run_one_shot_query( + prompt: str, + model: str, + working_dir: str, + 
allowed_tools: Optional[List[str]] = None, + session_id: Optional[str] = None, +) -> None: + """Run a one-shot query using the SDK.""" + console = Console() + adw_id = generate_short_id() + + # Display execution info + info_table = Table(show_header=False, box=None, padding=(0, 1)) + info_table.add_column(style="bold cyan") + info_table.add_column() + + info_table.add_row("ADW ID", adw_id) + info_table.add_row("Mode", "One-shot Query") + info_table.add_row("Prompt", prompt) + info_table.add_row("Model", model) + info_table.add_row("Working Dir", working_dir) + if allowed_tools: + info_table.add_row("Tools", ", ".join(allowed_tools)) + if session_id: + info_table.add_row("Session ID", session_id) + info_table.add_row("[bold green]SDK[/bold green]", "Claude Code Python SDK") + + console.print( + Panel( + info_table, + title="[bold blue]🚀 SDK Query Execution[/bold blue]", + border_style="blue", + ) + ) + console.print() + + try: + # Execute query based on whether tools are needed + with console.status("[bold yellow]Executing via SDK...[/bold yellow]"): + if allowed_tools: + # Query with tools + options = ClaudeCodeOptions( + model=model, + allowed_tools=allowed_tools, + cwd=working_dir, + permission_mode="bypassPermissions", + ) + if session_id: + options.resume = session_id + messages, result = await collect_query_response(prompt, options=options) + + # Extract response text + response_text = "" + tool_uses = [] + + for msg in messages: + if isinstance(msg, AssistantMessage): + text = extract_text(msg) + if text: + response_text += text + "\n" + for tool in extract_tool_uses(msg): + tool_uses.append(f"{tool.name} ({tool.id[:8]}...)") + + success = result and not result.is_error if result else False + + else: + # Simple query + response_text, error = await safe_query(prompt) + success = error is None + tool_uses = [] + + if error: + response_text = error + + # Display result + if success: + console.print( + Panel( + response_text.strip(), + title="[bold green]✅ SDK Success[/bold green]", + border_style="green", + padding=(1, 2), + ) + ) + + if tool_uses: + console.print( + f"\n[bold cyan]Tools used:[/bold cyan] {', '.join(tool_uses)}" + ) + else: + console.print( + Panel( + response_text, + title="[bold red]❌ SDK Error[/bold red]", + border_style="red", + padding=(1, 2), + ) + ) + + # Show cost and session info if available + if "result" in locals() and result: + if result.total_cost_usd: + console.print( + f"\n[bold cyan]Cost:[/bold cyan] ${result.total_cost_usd:.4f}" + ) + if hasattr(result, 'session_id') and result.session_id: + console.print( + f"[bold cyan]Session ID:[/bold cyan] {result.session_id}" + ) + console.print( + f"[dim]Resume with: --session-id {result.session_id}[/dim]" + ) + + except Exception as e: + console.print( + Panel( + f"[bold red]{str(e)}[/bold red]", + title="[bold red]❌ Unexpected Error[/bold red]", + border_style="red", + ) + ) + + +async def run_interactive_session( + model: str, + working_dir: str, + context: Optional[str] = None, + session_id: Optional[str] = None, +) -> None: + """Run an interactive session using the SDK.""" + console = Console() + adw_id = generate_short_id() + + # Display session info + info_table = Table(show_header=False, box=None, padding=(0, 1)) + info_table.add_column(style="bold cyan") + info_table.add_column() + + info_table.add_row("ADW ID", adw_id) + info_table.add_row("Mode", "Interactive Session") + info_table.add_row("Model", model) + info_table.add_row("Working Dir", working_dir) + if context: + info_table.add_row("Context", 
context) + if session_id: + info_table.add_row("Session ID", session_id) + info_table.add_row("[bold green]SDK[/bold green]", "Claude Code Python SDK") + + console.print( + Panel( + info_table, + title="[bold blue]💬 SDK Interactive Session[/bold blue]", + border_style="blue", + ) + ) + console.print() + + # Instructions + console.print("[bold yellow]Interactive Mode[/bold yellow]") + console.print("Commands: 'exit' or 'quit' to end session") + console.print("Just type your questions or requests\n") + + # Start session + options = ClaudeCodeOptions( + model=model, + cwd=working_dir, + permission_mode="bypassPermissions", + ) + if session_id: + options.resume = session_id + + from claude_code_sdk import ClaudeSDKClient + client = ClaudeSDKClient(options=options) + await client.connect() + + # Track session ID from results throughout the session + session_id_from_result = None + + try: + # Send initial context if provided + if context: + console.print(f"[dim]Setting context: {context}[/dim]\n") + await client.query(f"Context: {context}") + + # Consume the context response + async for msg in client.receive_response(): + if isinstance(msg, AssistantMessage): + text = extract_text(msg) + if text: + console.print(f"[dim]Claude: {text}[/dim]\n") + + # Interactive loop + while True: + # Get user input + try: + user_input = Prompt.ask("[bold cyan]You[/bold cyan]") + except (EOFError, KeyboardInterrupt): + console.print("\n[yellow]Session interrupted[/yellow]") + break + + if user_input.lower() in ["exit", "quit"]: + break + + # Send to Claude + await client.query(user_input) + + # Show response with progress + console.print() + response_parts = [] + tool_uses = [] + cost = None + session_id_from_result = None + + with Live( + Spinner("dots", text="Thinking..."), + console=console, + refresh_per_second=4, + ): + async for msg in client.receive_response(): + if isinstance(msg, AssistantMessage): + text = extract_text(msg) + if text: + response_parts.append(text) + + for tool in extract_tool_uses(msg): + tool_uses.append(f"{tool.name}") + + elif isinstance(msg, ResultMessage): + if msg.total_cost_usd: + cost = msg.total_cost_usd + if hasattr(msg, 'session_id') and msg.session_id: + session_id_from_result = msg.session_id + + # Display response + if response_parts: + console.print("[bold green]Claude:[/bold green]") + for part in response_parts: + console.print(part) + + if tool_uses: + console.print(f"\n[dim]Tools used: {', '.join(tool_uses)}[/dim]") + + if cost: + console.print(f"[dim]Cost: ${cost:.4f}[/dim]") + + if session_id_from_result: + console.print(f"[dim]Session ID: {session_id_from_result}[/dim]") + + console.print() + + finally: + await client.disconnect() + + console.print("\n[bold green]Session ended[/bold green]") + console.print(f"[dim]ADW ID: {adw_id}[/dim]") + if 'session_id_from_result' in locals() and session_id_from_result: + console.print(f"[bold cyan]Session ID:[/bold cyan] {session_id_from_result}") + console.print(f"[dim]Resume with: ./adws/adw_sdk_prompt.py --interactive --session-id {session_id_from_result}[/dim]") + + +@click.command() +@click.argument("prompt", required=False) +@click.option( + "--interactive", + "-i", + is_flag=True, + help="Start an interactive session instead of one-shot query", +) +@click.option( + "--model", + type=click.Choice(["sonnet", "opus", "haiku"]), + default="sonnet", + help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)", +) +@click.option( + "--working-dir", + type=click.Path(exists=True, 
file_okay=False, dir_okay=True, resolve_path=True), + help="Working directory (default: current directory)", +) +@click.option( + "--tools", + help="Comma-separated list of allowed tools (e.g., Read,Write,Bash)", +) +@click.option( + "--context", + help="Context for interactive session (e.g., 'Debugging a memory leak')", +) +@click.option( + "--session-id", + help="Resume a previous session by its ID", +) +def main( + prompt: Optional[str], + interactive: bool, + model: str, + working_dir: Optional[str], + tools: Optional[str], + context: Optional[str], + session_id: Optional[str], +): + """Run Claude Code prompts using the Python SDK. + + Examples: + # One-shot query + adw_sdk_prompt.py "What is 2 + 2?" + + # Interactive session + adw_sdk_prompt.py --interactive + + # Resume session + adw_sdk_prompt.py --interactive --session-id abc123 + + # Query with tools + adw_sdk_prompt.py "Create hello.py" --tools Write,Read + """ + if not working_dir: + working_dir = os.getcwd() + + # Convert model names + model_map = { + "sonnet": "claude-sonnet-4-5-20250929", + "opus": "claude-opus-4-20250514", + "haiku": "claude-haiku-4-5-20251001" + } + full_model = model_map.get(model, model) + + # Parse tools if provided + allowed_tools = None + if tools: + allowed_tools = [t.strip() for t in tools.split(",")] + + # Run appropriate mode + if interactive: + if prompt: + console = Console() + console.print( + "[yellow]Warning: Prompt ignored in interactive mode[/yellow]\n" + ) + + asyncio.run( + run_interactive_session( + model=full_model, + working_dir=working_dir, + context=context, + session_id=session_id, + ) + ) + else: + if not prompt: + console = Console() + console.print("[red]Error: Prompt required for one-shot mode[/red]") + console.print("Use --interactive for interactive session") + sys.exit(1) + + asyncio.run( + run_one_shot_query( + prompt=prompt, + model=full_model, + working_dir=working_dir, + allowed_tools=allowed_tools, + session_id=session_id, + ) + ) + + +if __name__ == "__main__": + main() diff --git a/skills/adw-bootstrap/reference/enhanced/adws/adw_slash_command.py b/skills/adw-bootstrap/reference/enhanced/adws/adw_slash_command.py new file mode 100755 index 0000000..5a67da7 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/adws/adw_slash_command.py @@ -0,0 +1,247 @@ +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "pydantic", +# "python-dotenv", +# "click", +# "rich", +# ] +# /// +""" +Run Claude Code slash commands from the command line. 
+ +Usage: + # Method 1: Direct execution (requires uv) + ./adws/adw_slash_command.py /chore "Update documentation" + + # Method 2: Using uv run + uv run adws/adw_slash_command.py /implement specs/.md + + uv run adws/adw_slash_command.py /start + + +Examples: + # Run a slash command + ./adws/adw_slash_command.py /chore "Add logging to agent.py" + + # Run with specific model + ./adws/adw_slash_command.py /implement plan.md --model opus + + # Run from a different working directory + ./adws/adw_slash_command.py /test --working-dir /path/to/project + + # Use custom agent name + ./adws/adw_slash_command.py /review --agent-name reviewer +""" + +import os +import sys +import json +from pathlib import Path +import click +from rich.console import Console +from rich.panel import Panel +from rich.table import Table + +# Add the adw_modules directory to the path so we can import agent +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules")) + +from agent import ( + AgentTemplateRequest, + AgentPromptResponse, + execute_template, + generate_short_id, +) + +# Output file name constants +OUTPUT_JSONL = "cc_raw_output.jsonl" +OUTPUT_JSON = "cc_raw_output.json" +FINAL_OBJECT_JSON = "cc_final_object.json" +SUMMARY_JSON = "custom_summary_output.json" + + +@click.command() +@click.argument("slash_command", required=True) +@click.argument("args", nargs=-1) # Accept multiple optional arguments +@click.option( + "--model", + type=click.Choice(["sonnet", "opus", "haiku"]), + default="sonnet", + help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)", +) +@click.option( + "--working-dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True), + help="Working directory for command execution (default: current directory)", +) +@click.option( + "--agent-name", + default="executor", + help="Agent name for tracking (default: executor)", +) +def main( + slash_command: str, + args: tuple, + model: str, + working_dir: str, + agent_name: str, +): + """Run Claude Code slash commands from the command line.""" + console = Console() + + # Generate a unique ID for this execution + adw_id = generate_short_id() + + # Use current directory if no working directory specified + if not working_dir: + working_dir = os.getcwd() + + # Create the template request + request = AgentTemplateRequest( + agent_name=agent_name, + slash_command=slash_command, + args=list(args), # Convert tuple to list + adw_id=adw_id, + model=model, + working_dir=working_dir, + ) + + # Create execution info table + info_table = Table(show_header=False, box=None, padding=(0, 1)) + info_table.add_column(style="bold cyan") + info_table.add_column() + + info_table.add_row("ADW ID", adw_id) + info_table.add_row("ADW Name", "adw_slash_command") + info_table.add_row("Command", slash_command) + info_table.add_row("Args", " ".join(args) if args else "(none)") + info_table.add_row("Model", model) + info_table.add_row("Working Dir", working_dir) + + console.print( + Panel( + info_table, + title="[bold blue]🚀 Inputs[/bold blue]", + border_style="blue", + ) + ) + console.print() + + try: + # Execute the slash command + with console.status("[bold yellow]Executing command...[/bold yellow]"): + response = execute_template(request) + + # Display the result + if response.success: + # Success panel + result_panel = Panel( + response.output, + title="[bold green]✅ Success[/bold green]", + border_style="green", + padding=(1, 2), + ) + console.print(result_panel) + + if response.session_id: + 
console.print( + f"\n[bold cyan]Session ID:[/bold cyan] {response.session_id}" + ) + else: + # Error panel + error_panel = Panel( + response.output, + title="[bold red]❌ Failed[/bold red]", + border_style="red", + padding=(1, 2), + ) + console.print(error_panel) + + if response.retry_code != "none": + console.print( + f"\n[bold yellow]Retry code:[/bold yellow] {response.retry_code}" + ) + + # Show output file info + console.print() + + # Output files are in agents/// + output_dir = f"./agents/{adw_id}/{agent_name}" + + # Create the simple JSON summary file + simple_json_output = f"{output_dir}/{SUMMARY_JSON}" + + # Determine the template file path + command_name = slash_command.lstrip("/") # Remove leading slash + path_to_slash_command_prompt = f".claude/commands/{command_name}.md" + + with open(simple_json_output, "w") as f: + json.dump( + { + "adw_id": adw_id, + "slash_command": slash_command, + "args": list(args), + "path_to_slash_command_prompt": path_to_slash_command_prompt, + "model": model, + "working_dir": working_dir, + "success": response.success, + "session_id": response.session_id, + "retry_code": response.retry_code, + "output": response.output, + }, + f, + indent=2, + ) + + # Files saved panel + files_table = Table(show_header=True, box=None) + files_table.add_column("File Type", style="bold cyan") + files_table.add_column("Path", style="dim") + files_table.add_column("Description", style="italic") + + files_table.add_row( + "JSONL Stream", + f"{output_dir}/{OUTPUT_JSONL}", + "Raw streaming output from Claude Code", + ) + files_table.add_row( + "JSON Array", + f"{output_dir}/{OUTPUT_JSON}", + "All messages as a JSON array", + ) + files_table.add_row( + "Final Object", + f"{output_dir}/{FINAL_OBJECT_JSON}", + "Last message entry (final result)", + ) + files_table.add_row( + "Summary", + simple_json_output, + "High-level execution summary with metadata", + ) + + console.print( + Panel( + files_table, + title="[bold blue]📄 Output Files[/bold blue]", + border_style="blue", + ) + ) + + # Exit with appropriate code + sys.exit(0 if response.success else 1) + + except Exception as e: + console.print( + Panel( + f"[bold red]{str(e)}[/bold red]", + title="[bold red]❌ Unexpected Error[/bold red]", + border_style="red", + ) + ) + sys.exit(2) + + +if __name__ == "__main__": + main() diff --git a/skills/adw-bootstrap/reference/enhanced/commands/feature.md b/skills/adw-bootstrap/reference/enhanced/commands/feature.md new file mode 100644 index 0000000..cc16a42 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/commands/feature.md @@ -0,0 +1,109 @@ +# Feature Planning + +Create a plan to implement the feature using the specified markdown `Plan Format`. Research the codebase and create a thorough plan. + +## Variables +adw_id: $1 +prompt: $2 + +## Instructions + +- If the adw_id or prompt is not provided, stop and ask the user to provide them. 
+- Create a plan to implement the feature described in the `prompt` +- The plan should be comprehensive, well-designed, and follow existing patterns +- Create the plan in the `specs/` directory with filename: `feature-{adw_id}-{descriptive-name}.md` + - Replace `{descriptive-name}` with a short, descriptive name based on the feature (e.g., "add-agent-logging", "implement-retry-logic", "create-workflow-api") +- Research the codebase starting with `README.md` +- Replace every in the `Plan Format` with the requested value +- Use your reasoning model: THINK HARD about the feature requirements, design, and implementation approach +- Follow existing patterns and conventions in the codebase +- Design for extensibility and maintainability + +## Codebase Structure + +- `README.md` - Project overview and instructions (start here) +- `adws/` - AI Developer Workflow scripts and modules +- `apps/` - Application layer you'll be working in +- `.claude/commands/` - Claude command templates +- `specs/` - Specification and plan documents + +## Plan Format + +```md +# Feature: + +## Metadata +adw_id: `{adw_id}` +prompt: `{prompt}` + +## Feature Description + + +## User Story +As a +I want to +So that + +## Problem Statement + + +## Solution Statement + + +## Relevant Files +Use these files to implement the feature: + + + +## Implementation Plan +### Phase 1: Foundation + + +### Phase 2: Core Implementation + + +### Phase 3: Integration + + +## Step by Step Tasks +IMPORTANT: Execute every step in order, top to bottom. + + + +### 1. +- +- + +### 2. +- +- + + + +## Testing Strategy +### Unit Tests + + +### Edge Cases + + +## Acceptance Criteria + + +## Validation Commands +Execute these commands to validate the feature is complete: + + +- Example: `uv run python -m py_compile apps/*.py` - Test to ensure the code compiles + + +## Notes + +``` + +## Feature +Use the feature description from the `prompt` variable. + +## Report + +Return the path to the plan file created. \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/enhanced/commands/prime.md b/skills/adw-bootstrap/reference/enhanced/commands/prime.md new file mode 100644 index 0000000..6102705 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/commands/prime.md @@ -0,0 +1,12 @@ +# Prime +Execute the `Run`, `Read` and `Report` sections to understand the codebase then summarize your understanding. + +## Run +git ls-files + +## Read +README.md +adws/README.md + +## Report +Summarize your understanding of the codebase. \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/enhanced/commands/start.md b/skills/adw-bootstrap/reference/enhanced/commands/start.md new file mode 100644 index 0000000..8861a96 --- /dev/null +++ b/skills/adw-bootstrap/reference/enhanced/commands/start.md @@ -0,0 +1,9 @@ +# Start your applications + +## Instructions + +- Run the commands in the `Run` section below top to bottom. 
+ +## Run +uv run apps/main.py +bun run apps/main.ts \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/minimal/.env.sample b/skills/adw-bootstrap/reference/minimal/.env.sample new file mode 100644 index 0000000..e4eebdb --- /dev/null +++ b/skills/adw-bootstrap/reference/minimal/.env.sample @@ -0,0 +1,8 @@ +# (REQUIRED) Anthropic Configuration to run Claude Code in programmatic mode +ANTHROPIC_API_KEY= + +# (Optional) Claude Code Path - if 'claude' does not work run 'which claude' and paste that value here +CLAUDE_CODE_PATH=claude + +# (Optional)( Returns claude code to the root directory after every command +CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR=true diff --git a/skills/adw-bootstrap/reference/minimal/adws/adw_modules/agent.py b/skills/adw-bootstrap/reference/minimal/adws/adw_modules/agent.py new file mode 100644 index 0000000..1e2ef75 --- /dev/null +++ b/skills/adw-bootstrap/reference/minimal/adws/adw_modules/agent.py @@ -0,0 +1,632 @@ +"""Claude Code agent module for executing prompts programmatically.""" + +import subprocess +import sys +import os +import json +import re +import logging +import time +import uuid +from typing import Optional, List, Dict, Any, Tuple, Final, Literal +from enum import Enum +from pydantic import BaseModel +from dotenv import load_dotenv + + +# Retry codes for Claude Code execution errors +class RetryCode(str, Enum): + """Codes indicating different types of errors that may be retryable.""" + CLAUDE_CODE_ERROR = "claude_code_error" # General Claude Code CLI error + TIMEOUT_ERROR = "timeout_error" # Command timed out + EXECUTION_ERROR = "execution_error" # Error during execution + ERROR_DURING_EXECUTION = "error_during_execution" # Agent encountered an error + NONE = "none" # No retry needed + + + + +class AgentPromptRequest(BaseModel): + """Claude Code agent prompt configuration.""" + prompt: str + adw_id: str + agent_name: str = "ops" + model: Literal["sonnet", "opus", "haiku"] = "sonnet" + dangerously_skip_permissions: bool = False + output_file: str + working_dir: Optional[str] = None + + +class AgentPromptResponse(BaseModel): + """Claude Code agent response.""" + output: str + success: bool + session_id: Optional[str] = None + retry_code: RetryCode = RetryCode.NONE + + +class AgentTemplateRequest(BaseModel): + """Claude Code agent template execution request.""" + agent_name: str + slash_command: str + args: List[str] + adw_id: str + model: Literal["sonnet", "opus", "haiku"] = "sonnet" + working_dir: Optional[str] = None + + +class ClaudeCodeResultMessage(BaseModel): + """Claude Code JSONL result message (last line).""" + type: str + subtype: str + is_error: bool + duration_ms: int + duration_api_ms: int + num_turns: int + result: str + session_id: str + total_cost_usd: float + + +def get_safe_subprocess_env() -> Dict[str, str]: + """Get filtered environment variables safe for subprocess execution. + + Returns only the environment variables needed based on .env.sample configuration. 
+ + Returns: + Dictionary containing only required environment variables + """ + safe_env_vars = { + # Anthropic Configuration (required) + "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"), + + # Claude Code Configuration + "CLAUDE_CODE_PATH": os.getenv("CLAUDE_CODE_PATH", "claude"), + "CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR": os.getenv( + "CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR", "true" + ), + + # Essential system environment variables + "HOME": os.getenv("HOME"), + "USER": os.getenv("USER"), + "PATH": os.getenv("PATH"), + "SHELL": os.getenv("SHELL"), + "TERM": os.getenv("TERM"), + "LANG": os.getenv("LANG"), + "LC_ALL": os.getenv("LC_ALL"), + + # Python-specific variables that subprocesses might need + "PYTHONPATH": os.getenv("PYTHONPATH"), + "PYTHONUNBUFFERED": "1", # Useful for subprocess output + + # Working directory tracking + "PWD": os.getcwd(), + } + + # Filter out None values + return {k: v for k, v in safe_env_vars.items() if v is not None} + + +# Load environment variables +load_dotenv() + +# Get Claude Code CLI path from environment +CLAUDE_PATH = os.getenv("CLAUDE_CODE_PATH", "claude") + +# Output file name constants (matching adw_prompt.py and adw_slash_command.py) +OUTPUT_JSONL = "cc_raw_output.jsonl" +OUTPUT_JSON = "cc_raw_output.json" +FINAL_OBJECT_JSON = "cc_final_object.json" +SUMMARY_JSON = "custom_summary_output.json" + + +def generate_short_id() -> str: + """Generate a short 8-character UUID for tracking.""" + return str(uuid.uuid4())[:8] + + + + +def truncate_output( + output: str, max_length: int = 500, suffix: str = "... (truncated)" +) -> str: + """Truncate output to a reasonable length for display. + + Special handling for JSONL data - if the output appears to be JSONL, + try to extract just the meaningful part. + + Args: + output: The output string to truncate + max_length: Maximum length before truncation (default: 500) + suffix: Suffix to add when truncated (default: "... 
(truncated)") + + Returns: + Truncated string if needed, original if shorter than max_length + """ + # Check if this looks like JSONL data + if output.startswith('{"type":') and '\n{"type":' in output: + # This is likely JSONL output - try to extract the last meaningful message + lines = output.strip().split("\n") + for line in reversed(lines): + try: + data = json.loads(line) + # Look for result message + if data.get("type") == "result": + result = data.get("result", "") + if result: + return truncate_output(result, max_length, suffix) + # Look for assistant message + elif data.get("type") == "assistant" and data.get("message"): + content = data["message"].get("content", []) + if isinstance(content, list) and content: + text = content[0].get("text", "") + if text: + return truncate_output(text, max_length, suffix) + except: + pass + # If we couldn't extract anything meaningful, just show that it's JSONL + return f"[JSONL output with {len(lines)} messages]{suffix}" + + # Regular truncation logic + if len(output) <= max_length: + return output + + # Try to find a good break point (newline or space) + truncate_at = max_length - len(suffix) + + # Look for newline near the truncation point + newline_pos = output.rfind("\n", truncate_at - 50, truncate_at) + if newline_pos > 0: + return output[:newline_pos] + suffix + + # Look for space near the truncation point + space_pos = output.rfind(" ", truncate_at - 20, truncate_at) + if space_pos > 0: + return output[:space_pos] + suffix + + # Just truncate at the limit + return output[:truncate_at] + suffix + + +def check_claude_installed() -> Optional[str]: + """Check if Claude Code CLI is installed. Return error message if not.""" + try: + result = subprocess.run( + [CLAUDE_PATH, "--version"], capture_output=True, text=True + ) + if result.returncode != 0: + return ( + f"Error: Claude Code CLI is not installed. Expected at: {CLAUDE_PATH}" + ) + except FileNotFoundError: + return f"Error: Claude Code CLI is not installed. Expected at: {CLAUDE_PATH}" + return None + + +def parse_jsonl_output( + output_file: str, +) -> Tuple[List[Dict[str, Any]], Optional[Dict[str, Any]]]: + """Parse JSONL output file and return all messages and the result message. + + Returns: + Tuple of (all_messages, result_message) where result_message is None if not found + """ + try: + with open(output_file, "r") as f: + # Read all lines and parse each as JSON + messages = [json.loads(line) for line in f if line.strip()] + + # Find the result message (should be the last one) + result_message = None + for message in reversed(messages): + if message.get("type") == "result": + result_message = message + break + + return messages, result_message + except Exception as e: + return [], None + + +def convert_jsonl_to_json(jsonl_file: str) -> str: + """Convert JSONL file to JSON array file. + + Creates a cc_raw_output.json file in the same directory as the JSONL file, + containing all messages as a JSON array. + + Returns: + Path to the created JSON file + """ + # Create JSON filename in the same directory + output_dir = os.path.dirname(jsonl_file) + json_file = os.path.join(output_dir, OUTPUT_JSON) + + # Parse the JSONL file + messages, _ = parse_jsonl_output(jsonl_file) + + # Write as JSON array + with open(json_file, "w") as f: + json.dump(messages, f, indent=2) + + return json_file + + +def save_last_entry_as_raw_result(json_file: str) -> Optional[str]: + """Save the last entry from a JSON array file as cc_final_object.json. 
+ + Args: + json_file: Path to the JSON array file + + Returns: + Path to the created cc_final_object.json file, or None if error + """ + try: + # Read the JSON array + with open(json_file, "r") as f: + messages = json.load(f) + + if not messages: + return None + + # Get the last entry + last_entry = messages[-1] + + # Create cc_final_object.json in the same directory + output_dir = os.path.dirname(json_file) + final_object_file = os.path.join(output_dir, FINAL_OBJECT_JSON) + + # Write the last entry + with open(final_object_file, "w") as f: + json.dump(last_entry, f, indent=2) + + return final_object_file + except Exception: + # Silently fail - this is a nice-to-have feature + return None + + +def get_claude_env() -> Dict[str, str]: + """Get only the required environment variables for Claude Code execution. + + This is a wrapper around get_safe_subprocess_env() for + backward compatibility. New code should use get_safe_subprocess_env() directly. + + Returns a dictionary containing only the necessary environment variables + based on .env.sample configuration. + """ + # Use the function defined above + return get_safe_subprocess_env() + + +def save_prompt(prompt: str, adw_id: str, agent_name: str = "ops") -> None: + """Save a prompt to the appropriate logging directory.""" + # Extract slash command from prompt + match = re.match(r"^(/\w+)", prompt) + if not match: + return + + slash_command = match.group(1) + # Remove leading slash for filename + command_name = slash_command[1:] + + # Create directory structure at project root (parent of adws) + # __file__ is in adws/adw_modules/, so we need to go up 3 levels to get to project root + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + prompt_dir = os.path.join(project_root, "agents", adw_id, agent_name, "prompts") + os.makedirs(prompt_dir, exist_ok=True) + + # Save prompt to file + prompt_file = os.path.join(prompt_dir, f"{command_name}.txt") + with open(prompt_file, "w") as f: + f.write(prompt) + + +def prompt_claude_code_with_retry( + request: AgentPromptRequest, + max_retries: int = 3, + retry_delays: List[int] = None, +) -> AgentPromptResponse: + """Execute Claude Code with retry logic for certain error types. 
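+    Responses with retry_code NONE (or success) are returned immediately; CLAUDE_CODE_ERROR, TIMEOUT_ERROR, EXECUTION_ERROR, and ERROR_DURING_EXECUTION trigger another attempt.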
+ + Args: + request: The prompt request configuration + max_retries: Maximum number of retry attempts (default: 3) + retry_delays: List of delays in seconds between retries (default: [1, 3, 5]) + + Returns: + AgentPromptResponse with output and retry code + """ + if retry_delays is None: + retry_delays = [1, 3, 5] + + # Ensure we have enough delays for max_retries + while len(retry_delays) < max_retries: + retry_delays.append(retry_delays[-1] + 2) # Add incrementing delays + + last_response = None + + for attempt in range(max_retries + 1): # +1 for initial attempt + if attempt > 0: + # This is a retry + delay = retry_delays[attempt - 1] + time.sleep(delay) + + response = prompt_claude_code(request) + last_response = response + + # Check if we should retry based on the retry code + if response.success or response.retry_code == RetryCode.NONE: + # Success or non-retryable error + return response + + # Check if this is a retryable error + if response.retry_code in [ + RetryCode.CLAUDE_CODE_ERROR, + RetryCode.TIMEOUT_ERROR, + RetryCode.EXECUTION_ERROR, + RetryCode.ERROR_DURING_EXECUTION, + ]: + if attempt < max_retries: + continue + else: + return response + + # Should not reach here, but return last response just in case + return last_response + + +def prompt_claude_code(request: AgentPromptRequest) -> AgentPromptResponse: + """Execute Claude Code with the given prompt configuration.""" + + # Check if Claude Code CLI is installed + error_msg = check_claude_installed() + if error_msg: + return AgentPromptResponse( + output=error_msg, + success=False, + session_id=None, + retry_code=RetryCode.NONE, # Installation error is not retryable + ) + + # Save prompt before execution + save_prompt(request.prompt, request.adw_id, request.agent_name) + + # Create output directory if needed + output_dir = os.path.dirname(request.output_file) + if output_dir: + os.makedirs(output_dir, exist_ok=True) + + # Build command - always use stream-json format and verbose + cmd = [CLAUDE_PATH, "-p", request.prompt] + cmd.extend(["--model", request.model]) + cmd.extend(["--output-format", "stream-json"]) + cmd.append("--verbose") + + # Check for MCP config in working directory + if request.working_dir: + mcp_config_path = os.path.join(request.working_dir, ".mcp.json") + if os.path.exists(mcp_config_path): + cmd.extend(["--mcp-config", mcp_config_path]) + + # Add dangerous skip permissions flag if enabled + if request.dangerously_skip_permissions: + cmd.append("--dangerously-skip-permissions") + + # Set up environment with only required variables + env = get_claude_env() + + try: + # Open output file for streaming + with open(request.output_file, "w") as output_f: + # Execute Claude Code and stream output to file + result = subprocess.run( + cmd, + stdout=output_f, # Stream directly to file + stderr=subprocess.PIPE, + text=True, + env=env, + cwd=request.working_dir, # Use working_dir if provided + ) + + if result.returncode == 0: + + # Parse the JSONL file + messages, result_message = parse_jsonl_output(request.output_file) + + # Convert JSONL to JSON array file + json_file = convert_jsonl_to_json(request.output_file) + + # Save the last entry as raw_result.json + save_last_entry_as_raw_result(json_file) + + if result_message: + # Extract session_id from result message + session_id = result_message.get("session_id") + + # Check if there was an error in the result + is_error = result_message.get("is_error", False) + subtype = result_message.get("subtype", "") + + # Handle error_during_execution case where there's no 
result field + if subtype == "error_during_execution": + error_msg = "Error during execution: Agent encountered an error and did not return a result" + return AgentPromptResponse( + output=error_msg, + success=False, + session_id=session_id, + retry_code=RetryCode.ERROR_DURING_EXECUTION, + ) + + result_text = result_message.get("result", "") + + # For error cases, truncate the output to prevent JSONL blobs + if is_error and len(result_text) > 1000: + result_text = truncate_output(result_text, max_length=800) + + return AgentPromptResponse( + output=result_text, + success=not is_error, + session_id=session_id, + retry_code=RetryCode.NONE, # No retry needed for successful or non-retryable errors + ) + else: + # No result message found, try to extract meaningful error + error_msg = "No result message found in Claude Code output" + + # Try to get the last few lines of output for context + try: + with open(request.output_file, "r") as f: + lines = f.readlines() + if lines: + # Get last 5 lines or less + last_lines = lines[-5:] if len(lines) > 5 else lines + # Try to parse each as JSON to find any error messages + for line in reversed(last_lines): + try: + data = json.loads(line.strip()) + if data.get("type") == "assistant" and data.get( + "message" + ): + # Extract text from assistant message + content = data["message"].get("content", []) + if isinstance(content, list) and content: + text = content[0].get("text", "") + if text: + error_msg = f"Claude Code output: {text[:500]}" # Truncate + break + except: + pass + except: + pass + + return AgentPromptResponse( + output=truncate_output(error_msg, max_length=800), + success=False, + session_id=None, + retry_code=RetryCode.NONE, + ) + else: + # Error occurred - stderr is captured, stdout went to file + stderr_msg = result.stderr.strip() if result.stderr else "" + + # Try to read the output file to check for errors in stdout + stdout_msg = "" + error_from_jsonl = None + try: + if os.path.exists(request.output_file): + # Parse JSONL to find error message + messages, result_message = parse_jsonl_output(request.output_file) + + if result_message and result_message.get("is_error"): + # Found error in result message + error_from_jsonl = result_message.get("result", "Unknown error") + elif messages: + # Look for error in last few messages + for msg in reversed(messages[-5:]): + if msg.get("type") == "assistant" and msg.get( + "message", {} + ).get("content"): + content = msg["message"]["content"] + if isinstance(content, list) and content: + text = content[0].get("text", "") + if text and ( + "error" in text.lower() + or "failed" in text.lower() + ): + error_from_jsonl = text[:500] # Truncate + break + + # If no structured error found, get last line only + if not error_from_jsonl: + with open(request.output_file, "r") as f: + lines = f.readlines() + if lines: + # Just get the last line instead of entire file + stdout_msg = lines[-1].strip()[ + :200 + ] # Truncate to 200 chars + except: + pass + + if error_from_jsonl: + error_msg = f"Claude Code error: {error_from_jsonl}" + elif stdout_msg and not stderr_msg: + error_msg = f"Claude Code error: {stdout_msg}" + elif stderr_msg and not stdout_msg: + error_msg = f"Claude Code error: {stderr_msg}" + elif stdout_msg and stderr_msg: + error_msg = f"Claude Code error: {stderr_msg}\nStdout: {stdout_msg}" + else: + error_msg = f"Claude Code error: Command failed with exit code {result.returncode}" + + # Always truncate error messages to prevent huge outputs + return AgentPromptResponse( + 
output=truncate_output(error_msg, max_length=800), + success=False, + session_id=None, + retry_code=RetryCode.CLAUDE_CODE_ERROR, + ) + + except subprocess.TimeoutExpired: + error_msg = "Error: Claude Code command timed out after 5 minutes" + return AgentPromptResponse( + output=error_msg, + success=False, + session_id=None, + retry_code=RetryCode.TIMEOUT_ERROR, + ) + except Exception as e: + error_msg = f"Error executing Claude Code: {e}" + return AgentPromptResponse( + output=error_msg, + success=False, + session_id=None, + retry_code=RetryCode.EXECUTION_ERROR, + ) + + +def execute_template(request: AgentTemplateRequest) -> AgentPromptResponse: + """Execute a Claude Code template with slash command and arguments. + + Example: + request = AgentTemplateRequest( + agent_name="planner", + slash_command="/implement", + args=["plan.md"], + adw_id="abc12345", + model="sonnet" # Explicitly set model + ) + response = execute_template(request) + """ + + # Construct prompt from slash command and args + prompt = f"{request.slash_command} {' '.join(request.args)}" + + # Create output directory with adw_id at project root + # __file__ is in adws/adw_modules/, so we need to go up 3 levels to get to project root + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + output_dir = os.path.join( + project_root, "agents", request.adw_id, request.agent_name + ) + os.makedirs(output_dir, exist_ok=True) + + # Build output file path + output_file = os.path.join(output_dir, OUTPUT_JSONL) + + # Create prompt request with specific parameters + prompt_request = AgentPromptRequest( + prompt=prompt, + adw_id=request.adw_id, + agent_name=request.agent_name, + model=request.model, + dangerously_skip_permissions=True, + output_file=output_file, + working_dir=request.working_dir, # Pass through working_dir + ) + + # Execute with retry logic and return response (prompt_claude_code now handles all parsing) + return prompt_claude_code_with_retry(prompt_request) diff --git a/skills/adw-bootstrap/reference/minimal/adws/adw_prompt.py b/skills/adw-bootstrap/reference/minimal/adws/adw_prompt.py new file mode 100755 index 0000000..f4ff419 --- /dev/null +++ b/skills/adw-bootstrap/reference/minimal/adws/adw_prompt.py @@ -0,0 +1,283 @@ +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "pydantic", +# "python-dotenv", +# "click", +# "rich", +# ] +# /// +""" +Run an adhoc Claude Code prompt from the command line. 
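+Each run is tagged with a short ADW ID; by default the JSONL stream, JSON array, final object, and summary files land in agents/<adw_id>/<agent_name>/.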
+ +Usage: + # Method 1: Direct execution (requires uv) + ./adw_prompt.py "Write a hello world Python script" + + # Method 2: Using uv run + uv run adw_prompt.py "Write a hello world Python script" + + # Method 3: Using Python directly (requires dependencies installed) + python adw_prompt.py "Write a hello world Python script" + +Examples: + # Run with specific model + ./adw_prompt.py "Explain this code" --model opus + + # Run with custom output file + ./adw_prompt.py "Create a FastAPI app" --output my_result.jsonl + + # Run from a different working directory + ./adw_prompt.py "List files here" --working-dir /path/to/project + + # Disable retry on failure + ./adw_prompt.py "Quick test" --no-retry + + # Use custom agent name + ./adw_prompt.py "Debug this" --agent-name debugger +""" + +import os +import sys +import json +from pathlib import Path +import click +from rich.console import Console +from rich.panel import Panel +from rich.table import Table +from rich.syntax import Syntax +from rich.text import Text + +# Add the adw_modules directory to the path so we can import agent +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules")) + +from agent import ( + prompt_claude_code, + AgentPromptRequest, + AgentPromptResponse, + prompt_claude_code_with_retry, + generate_short_id, +) + +# Output file name constants +OUTPUT_JSONL = "cc_raw_output.jsonl" +OUTPUT_JSON = "cc_raw_output.json" +FINAL_OBJECT_JSON = "cc_final_object.json" +SUMMARY_JSON = "custom_summary_output.json" + + +@click.command() +@click.argument("prompt", required=True) +@click.option( + "--model", + type=click.Choice(["sonnet", "opus", "haiku"]), + default="sonnet", + help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)", +) +@click.option( + "--output", + type=click.Path(), + help="Output file path (default: ./output/oneoff__output.jsonl)", +) +@click.option( + "--working-dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True), + help="Working directory for the prompt execution (default: current directory)", +) +@click.option("--no-retry", is_flag=True, help="Disable automatic retry on failure") +@click.option( + "--agent-name", default="oneoff", help="Agent name for tracking (default: oneoff)" +) +def main( + prompt: str, + model: str, + output: str, + working_dir: str, + no_retry: bool, + agent_name: str, +): + """Run an adhoc Claude Code prompt from the command line.""" + console = Console() + + # Validate prompt is not empty + if not prompt or not prompt.strip(): + console.print( + Panel( + "[bold red]Error: Prompt cannot be empty[/bold red]\n\n" + "Please provide a valid prompt string.", + title="❌ Invalid Input", + border_style="red" + ) + ) + sys.exit(1) + + # Generate a unique ID for this execution + adw_id = generate_short_id() + + # Set up output file path + if not output: + # Default: write to agents/// + output_dir = Path(f"./agents/{adw_id}/{agent_name}") + output_dir.mkdir(parents=True, exist_ok=True) + output = str(output_dir / OUTPUT_JSONL) + + # Use current directory if no working directory specified + if not working_dir: + working_dir = os.getcwd() + + # Create the prompt request + request = AgentPromptRequest( + prompt=prompt, + adw_id=adw_id, + agent_name=agent_name, + model=model, + dangerously_skip_permissions=True, + output_file=output, + working_dir=working_dir, + ) + + # Create execution info table + info_table = Table(show_header=False, box=None, padding=(0, 1)) + info_table.add_column(style="bold cyan") + 
info_table.add_column() + + info_table.add_row("ADW ID", adw_id) + info_table.add_row("ADW Name", "adw_prompt") + info_table.add_row("Prompt", prompt) + info_table.add_row("Model", model) + info_table.add_row("Working Dir", working_dir) + info_table.add_row("Output", output) + + console.print( + Panel( + info_table, + title="[bold blue]🚀 Inputs[/bold blue]", + border_style="blue", + ) + ) + console.print() + + response: AgentPromptResponse | None = None + + try: + # Execute the prompt + with console.status("[bold yellow]Executing prompt...[/bold yellow]"): + if no_retry: + # Direct execution without retry + + response = prompt_claude_code(request) + else: + # Execute with retry logic + response = prompt_claude_code_with_retry(request) + + # Display the result + if response.success: + # Success panel + result_panel = Panel( + response.output, + title="[bold green]✅ Success[/bold green]", + border_style="green", + padding=(1, 2), + ) + console.print(result_panel) + + if response.session_id: + console.print( + f"\n[bold cyan]Session ID:[/bold cyan] {response.session_id}" + ) + else: + # Error panel + error_panel = Panel( + response.output, + title="[bold red]❌ Failed[/bold red]", + border_style="red", + padding=(1, 2), + ) + console.print(error_panel) + + if response.retry_code != "none": + console.print( + f"\n[bold yellow]Retry code:[/bold yellow] {response.retry_code}" + ) + + # Show output file info + console.print() + + # Also create a JSON summary file with explicit error handling + try: + if output.endswith(f"/{OUTPUT_JSONL}"): + # Default path: save as custom_summary_output.json in same directory + simple_json_output = output.replace(f"/{OUTPUT_JSONL}", f"/{SUMMARY_JSON}") + else: + # Custom path: replace .jsonl with _summary.json + simple_json_output = output.replace(".jsonl", "_summary.json") + + # Create summary data + summary_data = { + "adw_id": adw_id, + "prompt": prompt, + "model": model, + "working_dir": working_dir, + "success": response.success, + "session_id": response.session_id, + "retry_code": response.retry_code, + "output": response.output, + } + + # Write summary file + with open(simple_json_output, "w") as f: + json.dump(summary_data, f, indent=2) + except Exception as e: + console.print( + f"[yellow]Warning: Could not create summary file: {e}[/yellow]" + ) + + # Files saved panel with descriptions + files_table = Table(show_header=True, box=None) + files_table.add_column("File Type", style="bold cyan") + files_table.add_column("Path", style="dim") + files_table.add_column("Description", style="italic") + + # Determine paths for all files + output_dir = os.path.dirname(output) + json_array_path = os.path.join(output_dir, OUTPUT_JSON) + final_object_path = os.path.join(output_dir, FINAL_OBJECT_JSON) + + files_table.add_row( + "JSONL Stream", output, "Raw streaming output from Claude Code" + ) + files_table.add_row( + "JSON Array", json_array_path, "All messages as a JSON array" + ) + files_table.add_row( + "Final Object", final_object_path, "Last message entry (final result)" + ) + files_table.add_row( + "Summary", simple_json_output, "High-level execution summary with metadata" + ) + + console.print( + Panel( + files_table, + title="[bold blue]📄 Output Files[/bold blue]", + border_style="blue", + ) + ) + + # Exit with appropriate code + sys.exit(0 if response.success else 1) + + except Exception as e: + console.print( + Panel( + f"[bold red]{str(e)}[/bold red]", + title="[bold red]❌ Unexpected Error[/bold red]", + border_style="red", + ) + ) + sys.exit(2) + + +if 
__name__ == "__main__": + main() diff --git a/skills/adw-bootstrap/reference/minimal/commands/chore.md b/skills/adw-bootstrap/reference/minimal/commands/chore.md new file mode 100644 index 0000000..d5b96e3 --- /dev/null +++ b/skills/adw-bootstrap/reference/minimal/commands/chore.md @@ -0,0 +1,72 @@ +# Chore Planning + +Create a plan to complete the chore using the specified markdown `Plan Format`. Research the codebase and create a thorough plan. + +## Variables +adw_id: $1 +prompt: $2 + +## Instructions + +- If the adw_id or prompt is not provided, stop and ask the user to provide them. +- Create a plan to complete the chore described in the `prompt` +- The plan should be simple, thorough, and precise +- Create the plan in the `specs/` directory with filename: `chore-{adw_id}-{descriptive-name}.md` + - Replace `{descriptive-name}` with a short, descriptive name based on the chore (e.g., "update-readme", "add-logging", "refactor-agent") +- Research the codebase starting with `README.md` +- Replace every in the `Plan Format` with the requested value + +## Codebase Structure + +- `README.md` - Project overview and instructions (start here) +- `adws/` - AI Developer Workflow scripts and modules +- `apps/` - Example applications +- `.claude/commands/` - Claude command templates +- `specs/` - Specification and plan documents + +## Plan Format + +```md +# Chore: + +## Metadata +adw_id: `{adw_id}` +prompt: `{prompt}` + +## Chore Description + + +## Relevant Files +Use these files to complete the chore: + + + +## Step by Step Tasks +IMPORTANT: Execute every step in order, top to bottom. + + + +### 1. +- +- + +### 2. +- +- + +## Validation Commands +Execute these commands to validate the chore is complete: + + +- Example: `uv run python -m py_compile apps/*.py` - Test to ensure the code compiles + +## Notes + +``` + +## Chore +Use the chore description from the `prompt` variable. + +## Report + +Return the path to the plan file created. \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/minimal/commands/implement.md b/skills/adw-bootstrap/reference/minimal/commands/implement.md new file mode 100644 index 0000000..d2bf88d --- /dev/null +++ b/skills/adw-bootstrap/reference/minimal/commands/implement.md @@ -0,0 +1,12 @@ +# Implement the following plan +Follow the `Instructions` to implement the `Plan` then `Report` the completed work. + +## Instructions +- Read the plan, think hard about the plan and implement the plan. + +## Plan +$ARGUMENTS + +## Report +- Summarize the work you've just done in a concise bullet point list. +- Report the files and total lines changed with `git diff --stat` \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/adw_modules/beads_integration.py b/skills/adw-bootstrap/reference/scaled/adw_modules/beads_integration.py new file mode 100755 index 0000000..487c41c --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/adw_modules/beads_integration.py @@ -0,0 +1,292 @@ +"""Beads Integration Module - AI Developer Workflow (ADW) + +This module provides beads issue management as an alternative to GitHub issues. +Allows ADW workflows to work with local beads tasks for offline development. 
+""" + +import os +import subprocess +import json +from typing import Tuple, Optional +from adw_modules.data_types import GitHubIssue +from datetime import datetime + + +def get_workspace_root() -> str: + """Get workspace root for beads operations.""" + # Assume workspace root is the parent of adws directory + return os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + + +def fetch_beads_issue(issue_id: str) -> Tuple[Optional[GitHubIssue], Optional[str]]: + """Fetch beads issue and convert to GitHubIssue format. + + Args: + issue_id: The beads issue ID + + Returns: + Tuple of (GitHubIssue, error_message) + """ + workspace_root = get_workspace_root() + + # Use bd show to get issue details + cmd = ["bd", "show", issue_id] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=workspace_root, + ) + + if result.returncode != 0: + return None, f"Failed to fetch beads issue: {result.stderr}" + + # Parse the output (bd show returns human-readable format) + # Format is: + # poc-fjw: Token Infrastructure & Redis Setup + # Status: in_progress + # Priority: P0 + # Type: feature + # ... + # Description: + # + output = result.stdout + + # Extract title, description, status from output + title = None + description = None + status = "open" + issue_type = "task" + in_description = False + description_lines = [] + + for line in output.split("\n"): + stripped = line.strip() + + # Skip empty lines + if not stripped: + continue + + # First line has format: "poc-fjw: Token Infrastructure & Redis Setup" + if not title and ":" in line and not line.startswith(" "): + parts = line.split(":", 1) + if len(parts) == 2 and parts[0].strip() == issue_id: + title = parts[1].strip() + continue + + # Status line + if stripped.startswith("Status:"): + status = stripped.split(":", 1)[1].strip() + in_description = False + # Type line + elif stripped.startswith("Type:"): + issue_type = stripped.split(":", 1)[1].strip() + in_description = False + # Description section + elif stripped.startswith("Description:"): + in_description = True + # Check if description is on same line + desc_text = stripped.split(":", 1)[1].strip() + if desc_text: + description_lines.append(desc_text) + elif in_description and stripped and not stripped.startswith("Dependents"): + description_lines.append(stripped) + elif stripped.startswith("Dependents") or stripped.startswith("Dependencies"): + in_description = False + + # Combine description lines + if description_lines: + description = "\n".join(description_lines) + + if not title: + return None, "Could not parse issue title from beads output" + + # Convert to GitHubIssue format for compatibility + # Use the issue_id as the number (extract numeric part if present) + try: + # Try to extract number from ID like "poc-123" + number_str = issue_id.split("-")[-1] + if number_str.isdigit(): + number = int(number_str) + else: + # Use hash of ID as fallback + number = hash(issue_id) % 10000 + except: + number = hash(issue_id) % 10000 + + # Create GitHubIssue-compatible object + issue = GitHubIssue( + number=number, + title=title or "Untitled Task", + body=description or "", + state=status, + author={"login": "beads"}, + assignees=[], + labels=[{"name": issue_type}], + milestone=None, + comments=[], + createdAt=datetime.now().isoformat(), + updatedAt=datetime.now().isoformat(), + closedAt=None, + url=f"beads://{issue_id}", + ) + + return issue, None + + except FileNotFoundError: + return None, "bd command not found. Is beads installed?" 
+ except Exception as e: + return None, f"Error fetching beads issue: {str(e)}" + + +def update_beads_status(issue_id: str, status: str) -> Tuple[bool, Optional[str]]: + """Update beads issue status. + + Args: + issue_id: The beads issue ID + status: New status (open, in_progress, blocked, closed) + + Returns: + Tuple of (success, error_message) + """ + workspace_root = get_workspace_root() + + cmd = ["bd", "update", issue_id, "--status", status] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=workspace_root, + ) + + if result.returncode != 0: + return False, f"Failed to update beads status: {result.stderr}" + + return True, None + + except FileNotFoundError: + return False, "bd command not found. Is beads installed?" + except Exception as e: + return False, f"Error updating beads status: {str(e)}" + + +def close_beads_issue(issue_id: str, reason: str = "Completed via ADW workflow") -> Tuple[bool, Optional[str]]: + """Close a beads issue. + + Args: + issue_id: The beads issue ID + reason: Reason for closing + + Returns: + Tuple of (success, error_message) + """ + workspace_root = get_workspace_root() + + cmd = ["bd", "close", issue_id, "--reason", reason] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=workspace_root, + ) + + if result.returncode != 0: + return False, f"Failed to close beads issue: {result.stderr}" + + return True, None + + except FileNotFoundError: + return False, "bd command not found. Is beads installed?" + except Exception as e: + return False, f"Error closing beads issue: {str(e)}" + + +def get_ready_beads_tasks(limit: int = 10) -> Tuple[Optional[list], Optional[str]]: + """Get ready beads tasks (no blockers). + + Args: + limit: Maximum number of tasks to return + + Returns: + Tuple of (task_list, error_message) + """ + workspace_root = get_workspace_root() + + cmd = ["bd", "ready", "--limit", str(limit)] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=workspace_root, + ) + + if result.returncode != 0: + return None, f"Failed to get ready tasks: {result.stderr}" + + # Parse output to extract task IDs + # bd ready returns format like: + # 📋 Ready work (1 issues with no blockers): + # + # 1. [P0] poc-pw3: Credit Consumption & Atomicity + # Assignee: La Boeuf + tasks = [] + + # Check if there are no ready tasks + if "No ready work found" in result.stdout or "(0 issues" in result.stdout: + return [], None + + for line in result.stdout.split("\n"): + line = line.strip() + # Skip empty lines, headers, and assignee lines + if not line or line.startswith("📋") or line.startswith("Assignee:"): + continue + + # Look for lines with format: "1. [P0] poc-pw3: Title" + # Extract the task ID (poc-pw3 in this case) + if ". [P" in line or ". [" in line: + # Split on ": " to get the ID part + parts = line.split(":") + if len(parts) >= 2: + # Get the part before the colon, then extract the ID + # Format: "1. [P0] poc-pw3" + id_part = parts[0].strip() + # Split by spaces and get the last token (the ID) + tokens = id_part.split() + if tokens: + task_id = tokens[-1] + # Verify it looks like a beads ID (has hyphen) + if "-" in task_id: + tasks.append(task_id) + + return tasks, None + + except FileNotFoundError: + return None, "bd command not found. Is beads installed?" + except Exception as e: + return None, f"Error getting ready tasks: {str(e)}" + + +def is_beads_issue(issue_identifier: str) -> bool: + """Check if an issue identifier is a beads issue. 
+ + Beads issues have format like: poc-abc, feat-123, etc. + GitHub issues are just numbers. + + Args: + issue_identifier: The issue identifier + + Returns: + True if it's a beads issue, False otherwise + """ + # Beads issues contain a hyphen + return "-" in issue_identifier and not issue_identifier.isdigit() diff --git a/skills/adw-bootstrap/reference/scaled/adw_modules/git_ops.py b/skills/adw-bootstrap/reference/scaled/adw_modules/git_ops.py new file mode 100644 index 0000000..85f902e --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/adw_modules/git_ops.py @@ -0,0 +1,316 @@ +"""Git operations for ADW composable architecture. + +Provides centralized git operations that build on top of github.py module. +""" + +import subprocess +import json +import logging +from typing import Optional, Tuple + +# Import GitHub functions from existing module +from adw_modules.github import get_repo_url, extract_repo_path, make_issue_comment + + +def get_current_branch(cwd: Optional[str] = None) -> str: + """Get current git branch name.""" + result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + capture_output=True, + text=True, + cwd=cwd, + ) + return result.stdout.strip() + + +def push_branch( + branch_name: str, cwd: Optional[str] = None +) -> Tuple[bool, Optional[str]]: + """Push current branch to remote. Returns (success, error_message).""" + result = subprocess.run( + ["git", "push", "-u", "origin", branch_name], + capture_output=True, + text=True, + cwd=cwd, + ) + if result.returncode != 0: + return False, result.stderr + return True, None + + +def check_pr_exists(branch_name: str) -> Optional[str]: + """Check if PR exists for branch. Returns PR URL if exists.""" + # Use github.py functions to get repo info + try: + repo_url = get_repo_url() + repo_path = extract_repo_path(repo_url) + except Exception as e: + return None + + result = subprocess.run( + [ + "gh", + "pr", + "list", + "--repo", + repo_path, + "--head", + branch_name, + "--json", + "url", + ], + capture_output=True, + text=True, + ) + if result.returncode == 0: + prs = json.loads(result.stdout) + if prs: + return prs[0]["url"] + return None + + +def create_branch( + branch_name: str, cwd: Optional[str] = None +) -> Tuple[bool, Optional[str]]: + """Create and checkout a new branch. Returns (success, error_message).""" + # Create branch + result = subprocess.run( + ["git", "checkout", "-b", branch_name], capture_output=True, text=True, cwd=cwd + ) + if result.returncode != 0: + # Check if error is because branch already exists + if "already exists" in result.stderr: + # Try to checkout existing branch + result = subprocess.run( + ["git", "checkout", branch_name], + capture_output=True, + text=True, + cwd=cwd, + ) + if result.returncode != 0: + return False, result.stderr + return True, None + return False, result.stderr + return True, None + + +def commit_changes( + message: str, cwd: Optional[str] = None +) -> Tuple[bool, Optional[str]]: + """Stage all changes and commit. 
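Succeeds as a no-op when git status --porcelain reports nothing to commit.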
Returns (success, error_message).""" + # Check if there are changes to commit + result = subprocess.run( + ["git", "status", "--porcelain"], capture_output=True, text=True, cwd=cwd + ) + if not result.stdout.strip(): + return True, None # No changes to commit + + # Stage all changes + result = subprocess.run( + ["git", "add", "-A"], capture_output=True, text=True, cwd=cwd + ) + if result.returncode != 0: + return False, result.stderr + + # Commit + result = subprocess.run( + ["git", "commit", "-m", message], capture_output=True, text=True, cwd=cwd + ) + if result.returncode != 0: + return False, result.stderr + return True, None + + +def get_pr_number(branch_name: str) -> Optional[str]: + """Get PR number for a branch. Returns PR number if exists.""" + # Use github.py functions to get repo info + try: + repo_url = get_repo_url() + repo_path = extract_repo_path(repo_url) + except Exception as e: + return None + + result = subprocess.run( + [ + "gh", + "pr", + "list", + "--repo", + repo_path, + "--head", + branch_name, + "--json", + "number", + "--limit", + "1", + ], + capture_output=True, + text=True, + ) + if result.returncode == 0: + prs = json.loads(result.stdout) + if prs: + return str(prs[0]["number"]) + return None + + +def approve_pr(pr_number: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]: + """Approve a PR. Returns (success, error_message).""" + try: + repo_url = get_repo_url() + repo_path = extract_repo_path(repo_url) + except Exception as e: + return False, f"Failed to get repo info: {e}" + + result = subprocess.run( + [ + "gh", + "pr", + "review", + pr_number, + "--repo", + repo_path, + "--approve", + "--body", + "ADW Ship workflow approved this PR after validating all state fields.", + ], + capture_output=True, + text=True, + ) + if result.returncode != 0: + return False, result.stderr + + logger.info(f"Approved PR #{pr_number}") + return True, None + + +def merge_pr( + pr_number: str, logger: logging.Logger, merge_method: str = "squash" +) -> Tuple[bool, Optional[str]]: + """Merge a PR. Returns (success, error_message). + + Args: + pr_number: The PR number to merge + logger: Logger instance + merge_method: One of 'merge', 'squash', 'rebase' (default: 'squash') + """ + try: + repo_url = get_repo_url() + repo_path = extract_repo_path(repo_url) + except Exception as e: + return False, f"Failed to get repo info: {e}" + + # First check if PR is mergeable + result = subprocess.run( + [ + "gh", + "pr", + "view", + pr_number, + "--repo", + repo_path, + "--json", + "mergeable,mergeStateStatus", + ], + capture_output=True, + text=True, + ) + if result.returncode != 0: + return False, f"Failed to check PR status: {result.stderr}" + + pr_status = json.loads(result.stdout) + if pr_status.get("mergeable") != "MERGEABLE": + return ( + False, + f"PR is not mergeable. 
Status: {pr_status.get('mergeStateStatus', 'unknown')}", + ) + + # Merge the PR + merge_cmd = [ + "gh", + "pr", + "merge", + pr_number, + "--repo", + repo_path, + f"--{merge_method}", + ] + + # Add auto-merge body + merge_cmd.extend( + ["--body", "Merged by ADW Ship workflow after successful validation."] + ) + + result = subprocess.run(merge_cmd, capture_output=True, text=True) + if result.returncode != 0: + return False, result.stderr + + logger.info(f"Merged PR #{pr_number} using {merge_method} method") + return True, None + + +def finalize_git_operations( + state: "ADWState", logger: logging.Logger, cwd: Optional[str] = None +) -> None: + """Standard git finalization: push branch and create/update PR.""" + branch_name = state.get("branch_name") + if not branch_name: + # Fallback: use current git branch if not main + current_branch = get_current_branch(cwd=cwd) + if current_branch and current_branch != "main": + logger.warning( + f"No branch name in state, using current branch: {current_branch}" + ) + branch_name = current_branch + else: + logger.error( + "No branch name in state and current branch is main, skipping git operations" + ) + return + + # Always push + success, error = push_branch(branch_name, cwd=cwd) + if not success: + logger.error(f"Failed to push branch: {error}") + return + + logger.info(f"Pushed branch: {branch_name}") + + # Handle PR + pr_url = check_pr_exists(branch_name) + issue_number = state.get("issue_number") + adw_id = state.get("adw_id") + + if pr_url: + logger.info(f"Found existing PR: {pr_url}") + # Post PR link for easy reference + if issue_number and adw_id: + make_issue_comment(issue_number, f"{adw_id}_ops: ✅ Pull request: {pr_url}") + else: + # Create new PR - fetch issue data first + if issue_number: + try: + repo_url = get_repo_url() + repo_path = extract_repo_path(repo_url) + from adw_modules.github import fetch_issue + + issue = fetch_issue(issue_number, repo_path) + + from adw_modules.workflow_ops import create_pull_request + + pr_url, error = create_pull_request(branch_name, issue, state, logger, cwd) + except Exception as e: + logger.error(f"Failed to fetch issue for PR creation: {e}") + pr_url, error = None, str(e) + else: + pr_url, error = None, "No issue number in state" + + if pr_url: + logger.info(f"Created PR: {pr_url}") + # Post new PR link + if issue_number and adw_id: + make_issue_comment( + issue_number, f"{adw_id}_ops: ✅ Pull request created: {pr_url}" + ) + else: + logger.error(f"Failed to create PR: {error}") diff --git a/skills/adw-bootstrap/reference/scaled/adw_modules/github.py b/skills/adw-bootstrap/reference/scaled/adw_modules/github.py new file mode 100644 index 0000000..f99f06c --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/adw_modules/github.py @@ -0,0 +1,312 @@ +#!/usr/bin/env -S uv run +# /// script +# dependencies = ["python-dotenv", "pydantic"] +# /// + +""" +GitHub Operations Module - AI Developer Workflow (ADW) + +This module contains all GitHub-related operations including: +- Issue fetching and manipulation +- Comment posting +- Repository path extraction +- Issue status management +""" + +import subprocess +import sys +import os +import json +from typing import Dict, List, Optional +from .data_types import GitHubIssue, GitHubIssueListItem, GitHubComment + +# Bot identifier to prevent webhook loops and filter bot comments +ADW_BOT_IDENTIFIER = "[ADW-AGENTS]" + + +def get_github_env() -> Optional[dict]: + """Get environment with GitHub token set up. Returns None if no GITHUB_PAT. 
+ + Subprocess env behavior: + - env=None → Inherits parent's environment (default) + - env={} → Empty environment (no variables) + - env=custom_dict → Only uses specified variables + + So this will work with gh authentication: + # These are equivalent: + result = subprocess.run(cmd, capture_output=True, text=True) + result = subprocess.run(cmd, capture_output=True, text=True, env=None) + + But this will NOT work (no PATH, no auth): + result = subprocess.run(cmd, capture_output=True, text=True, env={}) + """ + github_pat = os.getenv("GITHUB_PAT") + if not github_pat: + return None + + # Only create minimal env with GitHub token + env = { + "GH_TOKEN": github_pat, + "PATH": os.environ.get("PATH", ""), + } + return env + + +def get_repo_url() -> str: + """Get GitHub repository URL from git remote.""" + try: + result = subprocess.run( + ["git", "remote", "get-url", "origin"], + capture_output=True, + text=True, + check=True, + ) + return result.stdout.strip() + except subprocess.CalledProcessError: + raise ValueError( + "No git remote 'origin' found. Please ensure you're in a git repository with a remote." + ) + except FileNotFoundError: + raise ValueError("git command not found. Please ensure git is installed.") + + +def extract_repo_path(github_url: str) -> str: + """Extract owner/repo from GitHub URL.""" + # Handle both https://github.com/owner/repo and https://github.com/owner/repo.git + return github_url.replace("https://github.com/", "").replace(".git", "") + + +def fetch_issue(issue_number: str, repo_path: str) -> GitHubIssue: + """Fetch GitHub issue using gh CLI and return typed model.""" + # Use JSON output for structured data + cmd = [ + "gh", + "issue", + "view", + issue_number, + "-R", + repo_path, + "--json", + "number,title,body,state,author,assignees,labels,milestone,comments,createdAt,updatedAt,closedAt,url", + ] + + # Set up environment with GitHub token if available + env = get_github_env() + + try: + result = subprocess.run(cmd, capture_output=True, text=True, env=env) + + if result.returncode == 0: + # Parse JSON response into Pydantic model + issue_data = json.loads(result.stdout) + issue = GitHubIssue(**issue_data) + + return issue + else: + print(result.stderr, file=sys.stderr) + sys.exit(result.returncode) + except FileNotFoundError: + print("Error: GitHub CLI (gh) is not installed.", file=sys.stderr) + print("\nTo install gh:", file=sys.stderr) + print(" - macOS: brew install gh", file=sys.stderr) + print( + " - Linux: See https://github.com/cli/cli#installation", + file=sys.stderr, + ) + print( + " - Windows: See https://github.com/cli/cli#installation", file=sys.stderr + ) + print("\nAfter installation, authenticate with: gh auth login", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error parsing issue data: {e}", file=sys.stderr) + sys.exit(1) + + +def make_issue_comment(issue_id: str, comment: str) -> None: + """Post a comment to a GitHub issue using gh CLI.""" + # Get repo information from git remote + github_repo_url = get_repo_url() + repo_path = extract_repo_path(github_repo_url) + + # Ensure comment has ADW_BOT_IDENTIFIER to prevent webhook loops + if not comment.startswith(ADW_BOT_IDENTIFIER): + comment = f"{ADW_BOT_IDENTIFIER} {comment}" + + # Build command + cmd = [ + "gh", + "issue", + "comment", + issue_id, + "-R", + repo_path, + "--body", + comment, + ] + + # Set up environment with GitHub token if available + env = get_github_env() + + try: + result = subprocess.run(cmd, capture_output=True, text=True, env=env) + + if 
result.returncode == 0: + print(f"Successfully posted comment to issue #{issue_id}") + else: + print(f"Error posting comment: {result.stderr}", file=sys.stderr) + raise RuntimeError(f"Failed to post comment: {result.stderr}") + except Exception as e: + print(f"Error posting comment: {e}", file=sys.stderr) + raise + + +def mark_issue_in_progress(issue_id: str) -> None: + """Mark issue as in progress by adding label and comment.""" + # Get repo information from git remote + github_repo_url = get_repo_url() + repo_path = extract_repo_path(github_repo_url) + + # Add "in_progress" label + cmd = [ + "gh", + "issue", + "edit", + issue_id, + "-R", + repo_path, + "--add-label", + "in_progress", + ] + + # Set up environment with GitHub token if available + env = get_github_env() + + # Try to add label (may fail if label doesn't exist) + result = subprocess.run(cmd, capture_output=True, text=True, env=env) + if result.returncode != 0: + print(f"Note: Could not add 'in_progress' label: {result.stderr}") + + # Post comment indicating work has started + # make_issue_comment(issue_id, "🚧 ADW is working on this issue...") + + # Assign to self (optional) + cmd = [ + "gh", + "issue", + "edit", + issue_id, + "-R", + repo_path, + "--add-assignee", + "@me", + ] + result = subprocess.run(cmd, capture_output=True, text=True, env=env) + if result.returncode == 0: + print(f"Assigned issue #{issue_id} to self") + + +def fetch_open_issues(repo_path: str) -> List[GitHubIssueListItem]: + """Fetch all open issues from the GitHub repository.""" + try: + cmd = [ + "gh", + "issue", + "list", + "--repo", + repo_path, + "--state", + "open", + "--json", + "number,title,body,labels,createdAt,updatedAt", + "--limit", + "1000", + ] + + # Set up environment with GitHub token if available + env = get_github_env() + + # DEBUG level - not printing command + result = subprocess.run( + cmd, capture_output=True, text=True, check=True, env=env + ) + + issues_data = json.loads(result.stdout) + issues = [GitHubIssueListItem(**issue_data) for issue_data in issues_data] + print(f"Fetched {len(issues)} open issues") + return issues + + except subprocess.CalledProcessError as e: + print(f"ERROR: Failed to fetch issues: {e.stderr}", file=sys.stderr) + return [] + except json.JSONDecodeError as e: + print(f"ERROR: Failed to parse issues JSON: {e}", file=sys.stderr) + return [] + + +def fetch_issue_comments(repo_path: str, issue_number: int) -> List[Dict]: + """Fetch all comments for a specific issue.""" + try: + cmd = [ + "gh", + "issue", + "view", + str(issue_number), + "--repo", + repo_path, + "--json", + "comments", + ] + + # Set up environment with GitHub token if available + env = get_github_env() + + result = subprocess.run( + cmd, capture_output=True, text=True, check=True, env=env + ) + data = json.loads(result.stdout) + comments = data.get("comments", []) + + # Sort comments by creation time + comments.sort(key=lambda c: c.get("createdAt", "")) + + # DEBUG level - not printing + return comments + + except subprocess.CalledProcessError as e: + print( + f"ERROR: Failed to fetch comments for issue #{issue_number}: {e.stderr}", + file=sys.stderr, + ) + return [] + except json.JSONDecodeError as e: + print( + f"ERROR: Failed to parse comments JSON for issue #{issue_number}: {e}", + file=sys.stderr, + ) + return [] + + +def find_keyword_from_comment(keyword: str, issue: GitHubIssue) -> Optional[GitHubComment]: + """Find the latest comment containing a specific keyword. 
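+    Comments posted by ADW itself (containing ADW_BOT_IDENTIFIER) are skipped so the bot never matches its own output.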
+ + Args: + keyword: The keyword to search for in comments + issue: The GitHub issue containing comments + + Returns: + The latest GitHubComment containing the keyword, or None if not found + """ + # Sort comments by created_at date (newest first) + sorted_comments = sorted(issue.comments, key=lambda c: c.created_at, reverse=True) + + # Search through sorted comments (newest first) + for comment in sorted_comments: + # Skip ADW bot comments to prevent loops + if ADW_BOT_IDENTIFIER in comment.body: + continue + + if keyword in comment.body: + return comment + + return None diff --git a/skills/adw-bootstrap/reference/scaled/adw_modules/state.py b/skills/adw-bootstrap/reference/scaled/adw_modules/state.py new file mode 100644 index 0000000..1926521 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/adw_modules/state.py @@ -0,0 +1,172 @@ +"""State management for ADW composable architecture. + +Provides persistent state management via file storage and +transient state passing between scripts via stdin/stdout. +""" + +import json +import os +import sys +import logging +from typing import Dict, Any, Optional +from adw_modules.data_types import ADWStateData + + +class ADWState: + """Container for ADW workflow state with file persistence.""" + + STATE_FILENAME = "adw_state.json" + + def __init__(self, adw_id: str): + """Initialize ADWState with a required ADW ID. + + Args: + adw_id: The ADW ID for this state (required) + """ + if not adw_id: + raise ValueError("adw_id is required for ADWState") + + self.adw_id = adw_id + # Start with minimal state + self.data: Dict[str, Any] = {"adw_id": self.adw_id} + self.logger = logging.getLogger(__name__) + + def update(self, **kwargs): + """Update state with new key-value pairs.""" + # Filter to only our core fields + core_fields = {"adw_id", "issue_number", "branch_name", "plan_file", "issue_class", "worktree_path", "backend_port", "frontend_port", "model_set", "all_adws"} + for key, value in kwargs.items(): + if key in core_fields: + self.data[key] = value + + def get(self, key: str, default=None): + """Get value from state by key.""" + return self.data.get(key, default) + + def append_adw_id(self, adw_id: str): + """Append an ADW ID to the all_adws list if not already present.""" + all_adws = self.data.get("all_adws", []) + if adw_id not in all_adws: + all_adws.append(adw_id) + self.data["all_adws"] = all_adws + + def get_working_directory(self) -> str: + """Get the working directory for this ADW instance. + + Returns worktree_path if set (for isolated workflows), + otherwise returns the main repo path. 
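+        The main repo path is derived from this file's location (the parent of the adws/ directory).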
+ """ + worktree_path = self.data.get("worktree_path") + if worktree_path: + return worktree_path + + # Return main repo path (parent of adws directory) + return os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + + def get_state_path(self) -> str: + """Get path to state file.""" + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + return os.path.join(project_root, "agents", self.adw_id, self.STATE_FILENAME) + + def save(self, workflow_step: Optional[str] = None) -> None: + """Save state to file in agents/{adw_id}/adw_state.json.""" + state_path = self.get_state_path() + os.makedirs(os.path.dirname(state_path), exist_ok=True) + + # Create ADWStateData for validation + state_data = ADWStateData( + adw_id=self.data.get("adw_id"), + issue_number=self.data.get("issue_number"), + branch_name=self.data.get("branch_name"), + plan_file=self.data.get("plan_file"), + issue_class=self.data.get("issue_class"), + worktree_path=self.data.get("worktree_path"), + backend_port=self.data.get("backend_port"), + frontend_port=self.data.get("frontend_port"), + model_set=self.data.get("model_set", "base"), + all_adws=self.data.get("all_adws", []), + ) + + # Save as JSON + with open(state_path, "w") as f: + json.dump(state_data.model_dump(), f, indent=2) + + self.logger.info(f"Saved state to {state_path}") + if workflow_step: + self.logger.info(f"State updated by: {workflow_step}") + + @classmethod + def load( + cls, adw_id: str, logger: Optional[logging.Logger] = None + ) -> Optional["ADWState"]: + """Load state from file if it exists.""" + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + state_path = os.path.join(project_root, "agents", adw_id, cls.STATE_FILENAME) + + if not os.path.exists(state_path): + return None + + try: + with open(state_path, "r") as f: + data = json.load(f) + + # Validate with ADWStateData + state_data = ADWStateData(**data) + + # Create ADWState instance + state = cls(state_data.adw_id) + state.data = state_data.model_dump() + + if logger: + logger.info(f"🔍 Found existing state from {state_path}") + logger.info(f"State: {json.dumps(state_data.model_dump(), indent=2)}") + + return state + except Exception as e: + if logger: + logger.error(f"Failed to load state from {state_path}: {e}") + return None + + @classmethod + def from_stdin(cls) -> Optional["ADWState"]: + """Read state from stdin if available (for piped input). + + Returns None if no piped input is available (stdin is a tty). 
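+        Also returns None when the piped JSON is empty, malformed, or missing an adw_id.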
+ """ + if sys.stdin.isatty(): + return None + try: + input_data = sys.stdin.read() + if not input_data.strip(): + return None + data = json.loads(input_data) + adw_id = data.get("adw_id") + if not adw_id: + return None # No valid state without adw_id + state = cls(adw_id) + state.data = data + return state + except (json.JSONDecodeError, EOFError): + return None + + def to_stdout(self): + """Write state to stdout as JSON (for piping to next script).""" + # Only output core fields + output_data = { + "adw_id": self.data.get("adw_id"), + "issue_number": self.data.get("issue_number"), + "branch_name": self.data.get("branch_name"), + "plan_file": self.data.get("plan_file"), + "issue_class": self.data.get("issue_class"), + "worktree_path": self.data.get("worktree_path"), + "backend_port": self.data.get("backend_port"), + "frontend_port": self.data.get("frontend_port"), + "all_adws": self.data.get("all_adws", []), + } + print(json.dumps(output_data, indent=2)) diff --git a/skills/adw-bootstrap/reference/scaled/adw_modules/workflow_ops.py b/skills/adw-bootstrap/reference/scaled/adw_modules/workflow_ops.py new file mode 100644 index 0000000..2406bf0 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/adw_modules/workflow_ops.py @@ -0,0 +1,714 @@ +"""Shared AI Developer Workflow (ADW) operations.""" + +import glob +import json +import logging +import os +import subprocess +import re +from typing import Tuple, Optional +from adw_modules.data_types import ( + AgentTemplateRequest, + GitHubIssue, + AgentPromptResponse, + IssueClassSlashCommand, + ADWExtractionResult, +) +from adw_modules.agent import execute_template +from adw_modules.github import get_repo_url, extract_repo_path, ADW_BOT_IDENTIFIER +from adw_modules.state import ADWState +from adw_modules.utils import parse_json + + +# Agent name constants +AGENT_PLANNER = "sdlc_planner" +AGENT_IMPLEMENTOR = "sdlc_implementor" +AGENT_CLASSIFIER = "issue_classifier" +AGENT_BRANCH_GENERATOR = "branch_generator" +AGENT_PR_CREATOR = "pr_creator" + +# Available ADW workflows for runtime validation +AVAILABLE_ADW_WORKFLOWS = [ + # Isolated workflows (all workflows are now iso-based) + "adw_plan_iso", + "adw_patch_iso", + "adw_build_iso", + "adw_test_iso", + "adw_review_iso", + "adw_document_iso", + "adw_ship_iso", + "adw_sdlc_ZTE_iso", # Zero Touch Execution workflow + "adw_plan_build_iso", + "adw_plan_build_test_iso", + "adw_plan_build_test_review_iso", + "adw_plan_build_document_iso", + "adw_plan_build_review_iso", + "adw_sdlc_iso", +] + + +def format_issue_message( + adw_id: str, agent_name: str, message: str, session_id: Optional[str] = None +) -> str: + """Format a message for issue comments with ADW tracking and bot identifier.""" + # Always include ADW_BOT_IDENTIFIER to prevent webhook loops + if session_id: + return f"{ADW_BOT_IDENTIFIER} {adw_id}_{agent_name}_{session_id}: {message}" + return f"{ADW_BOT_IDENTIFIER} {adw_id}_{agent_name}: {message}" + + +def extract_adw_info(text: str, temp_adw_id: str) -> ADWExtractionResult: + """Extract ADW workflow, ID, and model_set from text using classify_adw agent. 
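+    Workflow commands not listed in AVAILABLE_ADW_WORKFLOWS are rejected and an empty result is returned.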
+ Returns ADWExtractionResult with workflow_command, adw_id, and model_set.""" + + # Use classify_adw to extract structured info + request = AgentTemplateRequest( + agent_name="adw_classifier", + slash_command="/classify_adw", + args=[text], + adw_id=temp_adw_id, + ) + + try: + response = execute_template(request) # No logger available in this function + + if not response.success: + print(f"Failed to classify ADW: {response.output}") + return ADWExtractionResult() # Empty result + + # Parse JSON response using utility that handles markdown + try: + data = parse_json(response.output, dict) + adw_command = data.get("adw_slash_command", "").replace( + "/", "" + ) # Remove slash + adw_id = data.get("adw_id") + model_set = data.get("model_set", "base") # Default to "base" + + # Validate command + if adw_command and adw_command in AVAILABLE_ADW_WORKFLOWS: + return ADWExtractionResult( + workflow_command=adw_command, + adw_id=adw_id, + model_set=model_set + ) + + return ADWExtractionResult() # Empty result + + except ValueError as e: + print(f"Failed to parse classify_adw response: {e}") + return ADWExtractionResult() # Empty result + + except Exception as e: + print(f"Error calling classify_adw: {e}") + return ADWExtractionResult() # Empty result + + +def classify_issue( + issue: GitHubIssue, adw_id: str, logger: logging.Logger +) -> Tuple[Optional[IssueClassSlashCommand], Optional[str]]: + """Classify GitHub issue and return appropriate slash command. + Returns (command, error_message) tuple.""" + + # Use the classify_issue slash command template with minimal payload + # Only include the essential fields: number, title, body + minimal_issue_json = issue.model_dump_json( + by_alias=True, include={"number", "title", "body"} + ) + + request = AgentTemplateRequest( + agent_name=AGENT_CLASSIFIER, + slash_command="/classify_issue", + args=[minimal_issue_json], + adw_id=adw_id, + ) + + logger.debug(f"Classifying issue: {issue.title}") + + response = execute_template(request) + + logger.debug( + f"Classification response: {response.model_dump_json(indent=2, by_alias=True)}" + ) + + if not response.success: + return None, response.output + + # Extract the classification from the response + output = response.output.strip() + + # Look for the classification pattern in the output + # Claude might add explanation, so we need to extract just the command + classification_match = re.search(r"(/chore|/bug|/feature|0)", output) + + if classification_match: + issue_command = classification_match.group(1) + else: + issue_command = output + + if issue_command == "0": + return None, f"No command selected: {response.output}" + + if issue_command not in ["/chore", "/bug", "/feature"]: + return None, f"Invalid command selected: {response.output}" + + return issue_command, None # type: ignore + + +def build_plan( + issue: GitHubIssue, + command: str, + adw_id: str, + logger: logging.Logger, + working_dir: Optional[str] = None, +) -> AgentPromptResponse: + """Build implementation plan for the issue using the specified command.""" + # Use minimal payload like classify_issue does + minimal_issue_json = issue.model_dump_json( + by_alias=True, include={"number", "title", "body"} + ) + + issue_plan_template_request = AgentTemplateRequest( + agent_name=AGENT_PLANNER, + slash_command=command, + args=[str(issue.number), adw_id, minimal_issue_json], + adw_id=adw_id, + working_dir=working_dir, + ) + + logger.debug( + f"issue_plan_template_request: {issue_plan_template_request.model_dump_json(indent=2, by_alias=True)}" + ) + + 
issue_plan_response = execute_template(issue_plan_template_request) + + logger.debug( + f"issue_plan_response: {issue_plan_response.model_dump_json(indent=2, by_alias=True)}" + ) + + return issue_plan_response + + +def implement_plan( + plan_file: str, + adw_id: str, + logger: logging.Logger, + agent_name: Optional[str] = None, + working_dir: Optional[str] = None, +) -> AgentPromptResponse: + """Implement the plan using the /implement command.""" + # Use provided agent_name or default to AGENT_IMPLEMENTOR + implementor_name = agent_name or AGENT_IMPLEMENTOR + + implement_template_request = AgentTemplateRequest( + agent_name=implementor_name, + slash_command="/implement", + args=[plan_file], + adw_id=adw_id, + working_dir=working_dir, + ) + + logger.debug( + f"implement_template_request: {implement_template_request.model_dump_json(indent=2, by_alias=True)}" + ) + + implement_response = execute_template(implement_template_request) + + logger.debug( + f"implement_response: {implement_response.model_dump_json(indent=2, by_alias=True)}" + ) + + return implement_response + + +def generate_branch_name( + issue: GitHubIssue, + issue_class: IssueClassSlashCommand, + adw_id: str, + logger: logging.Logger, +) -> Tuple[Optional[str], Optional[str]]: + """Generate a git branch name for the issue. + Returns (branch_name, error_message) tuple.""" + # Remove the leading slash from issue_class for the branch name + issue_type = issue_class.replace("/", "") + + # Use minimal payload like classify_issue does + minimal_issue_json = issue.model_dump_json( + by_alias=True, include={"number", "title", "body"} + ) + + request = AgentTemplateRequest( + agent_name=AGENT_BRANCH_GENERATOR, + slash_command="/generate_branch_name", + args=[issue_type, adw_id, minimal_issue_json], + adw_id=adw_id, + ) + + response = execute_template(request) + + if not response.success: + return None, response.output + + branch_name = response.output.strip() + logger.info(f"Generated branch name: {branch_name}") + return branch_name, None + + +def create_commit( + agent_name: str, + issue: GitHubIssue, + issue_class: IssueClassSlashCommand, + adw_id: str, + logger: logging.Logger, + working_dir: str, +) -> Tuple[Optional[str], Optional[str]]: + """Create a git commit with a properly formatted message. + Returns (commit_message, error_message) tuple.""" + # Remove the leading slash from issue_class + issue_type = issue_class.replace("/", "") + + # Create unique committer agent name by suffixing '_committer' + unique_agent_name = f"{agent_name}_committer" + + # Use minimal payload like classify_issue does + minimal_issue_json = issue.model_dump_json( + by_alias=True, include={"number", "title", "body"} + ) + + request = AgentTemplateRequest( + agent_name=unique_agent_name, + slash_command="/commit", + args=[agent_name, issue_type, minimal_issue_json], + adw_id=adw_id, + working_dir=working_dir, + ) + + response = execute_template(request) + + if not response.success: + return None, response.output + + commit_message = response.output.strip() + logger.info(f"Created commit message: {commit_message}") + return commit_message, None + + +def create_pull_request( + branch_name: str, + issue: Optional[GitHubIssue], + state: ADWState, + logger: logging.Logger, + working_dir: str, +) -> Tuple[Optional[str], Optional[str]]: + """Create a pull request for the implemented changes. 
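+    Accepts a GitHubIssue model, a plain dict, or None for `issue`, normalizing it to JSON
+    (a minimal number/title/body payload when possible) before invoking the /pull_request command.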
+ Returns (pr_url, error_message) tuple.""" + + # Get plan file from state (may be None for test runs) + plan_file = state.get("plan_file") or "No plan file (test run)" + adw_id = state.get("adw_id") + + # If we don't have issue data, try to construct minimal data + if not issue: + issue_data = state.get("issue", {}) + issue_json = json.dumps(issue_data) if issue_data else "{}" + elif isinstance(issue, dict): + # Try to reconstruct as GitHubIssue model which handles datetime serialization + from adw_modules.data_types import GitHubIssue + + try: + issue_model = GitHubIssue(**issue) + # Use minimal payload like classify_issue does + issue_json = issue_model.model_dump_json( + by_alias=True, include={"number", "title", "body"} + ) + except Exception: + # Fallback: use json.dumps with default str converter for datetime + issue_json = json.dumps(issue, default=str) + else: + # Use minimal payload like classify_issue does + issue_json = issue.model_dump_json( + by_alias=True, include={"number", "title", "body"} + ) + + request = AgentTemplateRequest( + agent_name=AGENT_PR_CREATOR, + slash_command="/pull_request", + args=[branch_name, issue_json, plan_file, adw_id], + adw_id=adw_id, + working_dir=working_dir, + ) + + response = execute_template(request) + + if not response.success: + return None, response.output + + pr_url = response.output.strip() + logger.info(f"Created pull request: {pr_url}") + return pr_url, None + + +def ensure_plan_exists(state: ADWState, issue_number: str) -> str: + """Find or error if no plan exists for issue. + Used by isolated build workflows in standalone mode.""" + # Check if plan file is in state + if state.get("plan_file"): + return state.get("plan_file") + + # Check current branch + from adw_modules.git_ops import get_current_branch + + branch = get_current_branch() + + # Look for plan in branch name + if f"-{issue_number}-" in branch: + # Look for plan file + plans = glob.glob(f"specs/*{issue_number}*.md") + if plans: + return plans[0] + + # No plan found + raise ValueError( + f"No plan found for issue {issue_number}. Run adw_plan_iso.py first." + ) + + +def ensure_adw_id( + issue_number: str, + adw_id: Optional[str] = None, + logger: Optional[logging.Logger] = None, +) -> str: + """Get ADW ID or create a new one and initialize state. 
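+
+    If an ADW ID is supplied but no saved state exists for it, a fresh state file is
+    created and saved under that ID.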
+ + Args: + issue_number: The issue number to find/create ADW ID for + adw_id: Optional existing ADW ID to use + logger: Optional logger instance + + Returns: + The ADW ID (existing or newly created) + """ + # If ADW ID provided, check if state exists + if adw_id: + state = ADWState.load(adw_id, logger) + if state: + if logger: + logger.info(f"Found existing ADW state for ID: {adw_id}") + else: + print(f"Found existing ADW state for ID: {adw_id}") + return adw_id + # ADW ID provided but no state exists, create state + state = ADWState(adw_id) + state.update(adw_id=adw_id, issue_number=issue_number) + state.save("ensure_adw_id") + if logger: + logger.info(f"Created new ADW state for provided ID: {adw_id}") + else: + print(f"Created new ADW state for provided ID: {adw_id}") + return adw_id + + # No ADW ID provided, create new one with state + from adw_modules.utils import make_adw_id + + new_adw_id = make_adw_id() + state = ADWState(new_adw_id) + state.update(adw_id=new_adw_id, issue_number=issue_number) + state.save("ensure_adw_id") + if logger: + logger.info(f"Created new ADW ID and state: {new_adw_id}") + else: + print(f"Created new ADW ID and state: {new_adw_id}") + return new_adw_id + + +def find_existing_branch_for_issue( + issue_number: str, adw_id: Optional[str] = None, cwd: Optional[str] = None +) -> Optional[str]: + """Find an existing branch for the given issue number. + Returns branch name if found, None otherwise.""" + # List all branches + result = subprocess.run( + ["git", "branch", "-a"], capture_output=True, text=True, cwd=cwd + ) + + if result.returncode != 0: + return None + + branches = result.stdout.strip().split("\n") + + # Look for branch with standardized pattern: *-issue-{issue_number}-adw-{adw_id}-* + for branch in branches: + branch = branch.strip().replace("* ", "").replace("remotes/origin/", "") + # Check for the standardized pattern + if f"-issue-{issue_number}-" in branch: + if adw_id and f"-adw-{adw_id}-" in branch: + return branch + elif not adw_id: + # Return first match if no adw_id specified + return branch + + return None + + +def find_plan_for_issue( + issue_number: str, adw_id: Optional[str] = None +) -> Optional[str]: + """Find plan file for the given issue number and optional adw_id. + Returns path to plan file if found, None otherwise.""" + import os + + # Get project root + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + agents_dir = os.path.join(project_root, "agents") + + if not os.path.exists(agents_dir): + return None + + # If adw_id is provided, check specific directory first + if adw_id: + plan_path = os.path.join(agents_dir, adw_id, AGENT_PLANNER, "plan.md") + if os.path.exists(plan_path): + return plan_path + + # Otherwise, search all agent directories + for agent_id in os.listdir(agents_dir): + agent_path = os.path.join(agents_dir, agent_id) + if os.path.isdir(agent_path): + plan_path = os.path.join(agent_path, AGENT_PLANNER, "plan.md") + if os.path.exists(plan_path): + # Check if this plan is for our issue by reading branch info or checking commits + # For now, return the first plan found (can be improved) + return plan_path + + return None + + +def create_or_find_branch( + issue_number: str, + issue: GitHubIssue, + state: ADWState, + logger: logging.Logger, + cwd: Optional[str] = None, +) -> Tuple[str, Optional[str]]: + """Create or find a branch for the given issue. + + 1. First checks state for existing branch name + 2. Then looks for existing branches matching the issue + 3. 
If none found, classifies the issue and creates a new branch + + Returns (branch_name, error_message) tuple. + """ + # 1. Check state for branch name + branch_name = state.get("branch_name") or state.get("branch", {}).get("name") + if branch_name: + logger.info(f"Found branch in state: {branch_name}") + # Check if we need to checkout + from adw_modules.git_ops import get_current_branch + + current = get_current_branch(cwd=cwd) + if current != branch_name: + result = subprocess.run( + ["git", "checkout", branch_name], + capture_output=True, + text=True, + cwd=cwd, + ) + if result.returncode != 0: + # Branch might not exist locally, try to create from remote + result = subprocess.run( + ["git", "checkout", "-b", branch_name, f"origin/{branch_name}"], + capture_output=True, + text=True, + cwd=cwd, + ) + if result.returncode != 0: + return "", f"Failed to checkout branch: {result.stderr}" + return branch_name, None + + # 2. Look for existing branch + adw_id = state.get("adw_id") + existing_branch = find_existing_branch_for_issue(issue_number, adw_id, cwd=cwd) + if existing_branch: + logger.info(f"Found existing branch: {existing_branch}") + # Checkout the branch + result = subprocess.run( + ["git", "checkout", existing_branch], + capture_output=True, + text=True, + cwd=cwd, + ) + if result.returncode != 0: + return "", f"Failed to checkout branch: {result.stderr}" + state.update(branch_name=existing_branch) + return existing_branch, None + + # 3. Create new branch - classify issue first + logger.info("No existing branch found, creating new one") + + # Classify the issue + issue_command, error = classify_issue(issue, adw_id, logger) + if error: + return "", f"Failed to classify issue: {error}" + + state.update(issue_class=issue_command) + + # Generate branch name + branch_name, error = generate_branch_name(issue, issue_command, adw_id, logger) + if error: + return "", f"Failed to generate branch name: {error}" + + # Create the branch + from adw_modules.git_ops import create_branch + + success, error = create_branch(branch_name, cwd=cwd) + if not success: + return "", f"Failed to create branch: {error}" + + state.update(branch_name=branch_name) + logger.info(f"Created and checked out new branch: {branch_name}") + + return branch_name, None + + +def find_spec_file(state: ADWState, logger: logging.Logger) -> Optional[str]: + """Find the spec file from state or by examining git diff. + + For isolated workflows, automatically uses worktree_path from state. 
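+
+    Resolution order: the plan_file recorded in state, then specs/*.md files that appear
+    in the git diff against origin/main, then a specs/issue-{n}-adw-{adw_id}*.md glob
+    derived from the branch name.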
+ """ + # Get worktree path if in isolated workflow + worktree_path = state.get("worktree_path") + + # Check if spec file is already in state (from plan phase) + spec_file = state.get("plan_file") + if spec_file: + # If worktree_path exists and spec_file is relative, make it absolute + if worktree_path and not os.path.isabs(spec_file): + spec_file = os.path.join(worktree_path, spec_file) + + if os.path.exists(spec_file): + logger.info(f"Using spec file from state: {spec_file}") + return spec_file + + # Otherwise, try to find it from git diff + logger.info("Looking for spec file in git diff") + result = subprocess.run( + ["git", "diff", "origin/main", "--name-only"], + capture_output=True, + text=True, + cwd=worktree_path, + ) + + if result.returncode == 0: + files = result.stdout.strip().split("\n") + spec_files = [f for f in files if f.startswith("specs/") and f.endswith(".md")] + + if spec_files: + # Use the first spec file found + spec_file = spec_files[0] + if worktree_path: + spec_file = os.path.join(worktree_path, spec_file) + logger.info(f"Found spec file: {spec_file}") + return spec_file + + # If still not found, try to derive from branch name + branch_name = state.get("branch_name") + if branch_name: + # Extract issue number from branch name + import re + + match = re.search(r"issue-(\d+)", branch_name) + if match: + issue_num = match.group(1) + adw_id = state.get("adw_id") + + # Look for spec files matching the pattern + import glob + + # Use worktree_path if provided, otherwise current directory + search_dir = worktree_path if worktree_path else os.getcwd() + pattern = os.path.join( + search_dir, f"specs/issue-{issue_num}-adw-{adw_id}*.md" + ) + spec_files = glob.glob(pattern) + + if spec_files: + spec_file = spec_files[0] + logger.info(f"Found spec file by pattern: {spec_file}") + return spec_file + + logger.warning("No spec file found") + return None + + +def create_and_implement_patch( + adw_id: str, + review_change_request: str, + logger: logging.Logger, + agent_name_planner: str, + agent_name_implementor: str, + spec_path: Optional[str] = None, + issue_screenshots: Optional[str] = None, + working_dir: Optional[str] = None, +) -> Tuple[Optional[str], AgentPromptResponse]: + """Create a patch plan and implement it. 
+ Returns (patch_file_path, implement_response) tuple.""" + + # Create patch plan using /patch command + args = [adw_id, review_change_request] + + # Add optional arguments in the correct order + if spec_path: + args.append(spec_path) + else: + args.append("") # Empty string for optional spec_path + + args.append(agent_name_planner) + + if issue_screenshots: + args.append(issue_screenshots) + + request = AgentTemplateRequest( + agent_name=agent_name_planner, + slash_command="/patch", + args=args, + adw_id=adw_id, + working_dir=working_dir, + ) + + logger.debug( + f"Patch plan request: {request.model_dump_json(indent=2, by_alias=True)}" + ) + + response = execute_template(request) + + logger.debug( + f"Patch plan response: {response.model_dump_json(indent=2, by_alias=True)}" + ) + + if not response.success: + logger.error(f"Error creating patch plan: {response.output}") + # Return None and a failed response + return None, AgentPromptResponse( + output=f"Failed to create patch plan: {response.output}", success=False + ) + + # Extract the patch plan file path from the response + patch_file_path = response.output.strip() + + # Validate that it looks like a file path + if "specs/patch/" not in patch_file_path or not patch_file_path.endswith(".md"): + logger.error(f"Invalid patch plan path returned: {patch_file_path}") + return None, AgentPromptResponse( + output=f"Invalid patch plan path: {patch_file_path}", success=False + ) + + logger.info(f"Created patch plan: {patch_file_path}") + + # Now implement the patch plan using the provided implementor agent name + implement_response = implement_plan( + patch_file_path, adw_id, logger, agent_name_implementor, working_dir=working_dir + ) + + return patch_file_path, implement_response diff --git a/skills/adw-bootstrap/reference/scaled/adw_modules/worktree_ops.py b/skills/adw-bootstrap/reference/scaled/adw_modules/worktree_ops.py new file mode 100644 index 0000000..aef6c39 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/adw_modules/worktree_ops.py @@ -0,0 +1,243 @@ +"""Worktree and port management operations for isolated ADW workflows. + +Provides utilities for creating and managing git worktrees under trees// +and allocating unique ports for each isolated instance. +""" + +import os +import subprocess +import logging +import socket +from typing import Tuple, Optional +from adw_modules.state import ADWState + + +def create_worktree(adw_id: str, branch_name: str, logger: logging.Logger) -> Tuple[str, Optional[str]]: + """Create a git worktree for isolated ADW execution. 
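+
+    The worktree is created under trees/{adw_id} at the project root, branching from
+    origin/main (falling back to the existing branch if it already exists).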
+ + Args: + adw_id: The ADW ID for this worktree + branch_name: The branch name to create the worktree from + logger: Logger instance + + Returns: + Tuple of (worktree_path, error_message) + worktree_path is the absolute path if successful, None if error + """ + # Get project root (parent of adws directory) + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + + # Create trees directory if it doesn't exist + trees_dir = os.path.join(project_root, "trees") + os.makedirs(trees_dir, exist_ok=True) + + # Construct worktree path + worktree_path = os.path.join(trees_dir, adw_id) + + # Check if worktree already exists + if os.path.exists(worktree_path): + logger.warning(f"Worktree already exists at {worktree_path}") + return worktree_path, None + + # First, fetch latest changes from origin + logger.info("Fetching latest changes from origin") + fetch_result = subprocess.run( + ["git", "fetch", "origin"], + capture_output=True, + text=True, + cwd=project_root + ) + if fetch_result.returncode != 0: + logger.warning(f"Failed to fetch from origin: {fetch_result.stderr}") + + # Create the worktree using git, branching from origin/main + # Use -b to create the branch as part of worktree creation + cmd = ["git", "worktree", "add", "-b", branch_name, worktree_path, "origin/main"] + result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root) + + if result.returncode != 0: + # If branch already exists, try without -b + if "already exists" in result.stderr: + cmd = ["git", "worktree", "add", worktree_path, branch_name] + result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root) + + if result.returncode != 0: + error_msg = f"Failed to create worktree: {result.stderr}" + logger.error(error_msg) + return None, error_msg + + logger.info(f"Created worktree at {worktree_path} for branch {branch_name}") + return worktree_path, None + + +def validate_worktree(adw_id: str, state: ADWState) -> Tuple[bool, Optional[str]]: + """Validate worktree exists in state, filesystem, and git. + + Performs three-way validation to ensure consistency: + 1. State has worktree_path + 2. Directory exists on filesystem + 3. Git knows about the worktree + + Args: + adw_id: The ADW ID to validate + state: The ADW state object + + Returns: + Tuple of (is_valid, error_message) + """ + # Check state has worktree_path + worktree_path = state.get("worktree_path") + if not worktree_path: + return False, "No worktree_path in state" + + # Check directory exists + if not os.path.exists(worktree_path): + return False, f"Worktree directory not found: {worktree_path}" + + # Check git knows about it + result = subprocess.run(["git", "worktree", "list"], capture_output=True, text=True) + if worktree_path not in result.stdout: + return False, "Worktree not registered with git" + + return True, None + + +def get_worktree_path(adw_id: str) -> str: + """Get absolute path to worktree. + + Args: + adw_id: The ADW ID + + Returns: + Absolute path to worktree directory + """ + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ) + return os.path.join(project_root, "trees", adw_id) + + +def remove_worktree(adw_id: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]: + """Remove a worktree and clean up. 
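+
+    If `git worktree remove --force` fails, a manual removal of the worktree directory
+    is attempted as a fallback.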
+ + Args: + adw_id: The ADW ID for the worktree to remove + logger: Logger instance + + Returns: + Tuple of (success, error_message) + """ + worktree_path = get_worktree_path(adw_id) + + # First remove via git + cmd = ["git", "worktree", "remove", worktree_path, "--force"] + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0: + # Try to clean up manually if git command failed + if os.path.exists(worktree_path): + try: + shutil.rmtree(worktree_path) + logger.warning(f"Manually removed worktree directory: {worktree_path}") + except Exception as e: + return False, f"Failed to remove worktree: {result.stderr}, manual cleanup failed: {e}" + + logger.info(f"Removed worktree at {worktree_path}") + return True, None + + +def setup_worktree_environment(worktree_path: str, backend_port: int, frontend_port: int, logger: logging.Logger) -> None: + """Set up worktree environment by creating .ports.env file. + + The actual environment setup (copying .env files, installing dependencies) is handled + by the install_worktree.md command which runs inside the worktree. + + Args: + worktree_path: Path to the worktree + backend_port: Backend port number + frontend_port: Frontend port number + logger: Logger instance + """ + # Create .ports.env file with port configuration + ports_env_path = os.path.join(worktree_path, ".ports.env") + + with open(ports_env_path, "w") as f: + f.write(f"BACKEND_PORT={backend_port}\n") + f.write(f"FRONTEND_PORT={frontend_port}\n") + f.write(f"VITE_BACKEND_URL=http://localhost:{backend_port}\n") + + logger.info(f"Created .ports.env with Backend: {backend_port}, Frontend: {frontend_port}") + + +# Port management functions + +def get_ports_for_adw(adw_id: str) -> Tuple[int, int]: + """Deterministically assign ports based on ADW ID. + + Args: + adw_id: The ADW ID + + Returns: + Tuple of (backend_port, frontend_port) + """ + # Convert first 8 chars of ADW ID to index (0-14) + # Using base 36 conversion and modulo to get consistent mapping + try: + # Take first 8 alphanumeric chars and convert from base 36 + id_chars = ''.join(c for c in adw_id[:8] if c.isalnum()) + index = int(id_chars, 36) % 15 + except ValueError: + # Fallback to simple hash if conversion fails + index = hash(adw_id) % 15 + + backend_port = 9100 + index + frontend_port = 9200 + index + + return backend_port, frontend_port + + +def is_port_available(port: int) -> bool: + """Check if a port is available for binding. + + Args: + port: Port number to check + + Returns: + True if port is available, False otherwise + """ + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.settimeout(1) + s.bind(('localhost', port)) + return True + except (socket.error, OSError): + return False + + +def find_next_available_ports(adw_id: str, max_attempts: int = 15) -> Tuple[int, int]: + """Find available ports starting from deterministic assignment. 
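+
+    For example (illustrative): if the deterministic slot for an ADW ID is index 3
+    (ports 9103/9203) but either port is busy, the search advances to index 4
+    (9104/9204) and so on, wrapping modulo 15 before giving up.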
+ + Args: + adw_id: The ADW ID + max_attempts: Maximum number of attempts (default 15) + + Returns: + Tuple of (backend_port, frontend_port) + + Raises: + RuntimeError: If no available ports found + """ + base_backend, base_frontend = get_ports_for_adw(adw_id) + base_index = base_backend - 9100 + + for offset in range(max_attempts): + index = (base_index + offset) % 15 + backend_port = 9100 + index + frontend_port = 9200 + index + + if is_port_available(backend_port) and is_port_available(frontend_port): + return backend_port, frontend_port + + raise RuntimeError("No available ports in the allocated range") \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/bug.md b/skills/adw-bootstrap/reference/scaled/commands/bug.md new file mode 100644 index 0000000..2dca305 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/bug.md @@ -0,0 +1,106 @@ +# Bug Planning + +Create a new plan to resolve the `Bug` using the exact specified markdown `Plan Format`. Follow the `Instructions` to create the plan use the `Relevant Files` to focus on the right files. + +## Variables +issue_number: $1 +adw_id: $2 +issue_json: $3 + +## Instructions + +- IMPORTANT: You're writing a plan to resolve a bug based on the `Bug` that will add value to the application. +- IMPORTANT: The `Bug` describes the bug that will be resolved but remember we're not resolving the bug, we're creating the plan that will be used to resolve the bug based on the `Plan Format` below. +- You're writing a plan to resolve a bug, it should be thorough and precise so we fix the root cause and prevent regressions. +- Create the plan in the `specs/` directory with filename: `issue-{issue_number}-adw-{adw_id}-sdlc_planner-{descriptive-name}.md` + - Replace `{descriptive-name}` with a short, descriptive name based on the bug (e.g., "fix-login-error", "resolve-timeout", "patch-memory-leak") +- Use the plan format below to create the plan. +- Research the codebase to understand the bug, reproduce it, and put together a plan to fix it. +- IMPORTANT: Replace every in the `Plan Format` with the requested value. Add as much detail as needed to fix the bug. +- Use your reasoning model: THINK HARD about the bug, its root cause, and the steps to fix it properly. +- IMPORTANT: Be surgical with your bug fix, solve the bug at hand and don't fall off track. +- IMPORTANT: We want the minimal number of changes that will fix and address the bug. +- Don't use decorators. Keep it simple. +- If you need a new library, use `uv add` and be sure to report it in the `Notes` section of the `Plan Format`. +- IMPORTANT: If the bug affects the UI or user interactions: + - Add a task in the `Step by Step Tasks` section to create a separate E2E test file in `.claude/commands/e2e/test_.md` based on examples in that directory + - Add E2E test validation to your Validation Commands section + - IMPORTANT: When you fill out the `Plan Format: Relevant Files` section, add an instruction to read `.claude/commands/test_e2e.md`, and `.claude/commands/e2e/test_basic_query.md` to understand how to create an E2E test file. List your new E2E test file to the `Plan Format: New Files` section. + - To be clear, we're not creating a new E2E test file, we're creating a task to create a new E2E test file in the `Plan Format` below +- Respect requested files in the `Relevant Files` section. +- Start your research by reading the `README.md` file. 
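+- For example (illustrative values only), issue `456` with adw_id `e5f6g7h8` for a login bug would produce a plan at `specs/issue-456-adw-e5f6g7h8-sdlc_planner-fix-login-error.md`.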
+ +## Relevant Files + +Focus on the following files: +- `README.md` - Contains the project overview and instructions. +- `app/server/**` - Contains the codebase server. +- `app/client/**` - Contains the codebase client. +- `scripts/**` - Contains the scripts to start and stop the server + client. +- `adws/**` - Contains the AI Developer Workflow (ADW) scripts. + +- Read `.claude/commands/conditional_docs.md` to check if your task requires additional documentation +- If your task matches any of the conditions listed, include those documentation files in the `Plan Format: Relevant Files` section of your plan + +Ignore all other files in the codebase. + +## Plan Format + +```md +# Bug: + +## Metadata +issue_number: `{issue_number}` +adw_id: `{adw_id}` +issue_json: `{issue_json}` + +## Bug Description + + +## Problem Statement + + +## Solution Statement + + +## Steps to Reproduce + + +## Root Cause Analysis + + +## Relevant Files +Use these files to fix the bug: + + + +## Step by Step Tasks +IMPORTANT: Execute every step in order, top to bottom. + + + +.md` that validates the bug is fixed, be specific with the steps to prove the bug is fixed. We want the minimal set of steps to validate the bug is fixed and screen shots to prove it if possible."> + + + +## Validation Commands +Execute every command to validate the bug is fixed with zero regressions. + + + +.md` test file to validate this functionality works."> + +- `cd app/server && uv run pytest` - Run server tests to validate the bug is fixed with zero regressions +- `cd app/client && bun tsc --noEmit` - Run frontend tests to validate the bug is fixed with zero regressions +- `cd app/client && bun run build` - Run frontend build to validate the bug is fixed with zero regressions + +## Notes + +``` + +## Bug +Extract the bug details from the `issue_json` variable (parse the JSON and use the title and body fields). + +## Report + +- IMPORTANT: Return exclusively the path to the plan file created and nothing else. \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/classify_adw.md b/skills/adw-bootstrap/reference/scaled/commands/classify_adw.md new file mode 100644 index 0000000..e614927 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/classify_adw.md @@ -0,0 +1,57 @@ +# ADW Workflow Extraction + +Extract ADW workflow information from the text below and return a JSON response. + +## Instructions + +- Look for ADW workflow commands in the text (e.g., `/adw_plan_iso`, `/adw_build_iso`, `/adw_test_iso`, `/adw_review_iso`, `/adw_document_iso`, `/adw_patch_iso`, `/adw_plan_build_iso`, `/adw_plan_build_test_iso`, `/adw_plan_build_test_review_iso`, `/adw_sdlc_iso`, `/adw_sdlc_ZTE_iso`) +- Also recognize commands without the `_iso` suffix and automatically add it (e.g., `/adw_plan` → `/adw_plan_iso`) +- Also recognize variations like `adw_plan_build`, `adw plan build`, `/adw plan then build`, etc. and map to the correct command +- Look for ADW IDs (8-character alphanumeric strings, often after "adw_id:" or "ADW ID:" or similar) +- Look for model set specification: "model_set base" or "model_set heavy" (case insensitive) + - Default to "base" if no model_set is specified + - Also recognize variations like "model set: heavy", "modelset heavy", etc. +- Return a JSON object with the extracted information +- If no ADW workflow is found, return empty JSON: `{}` +- IMPORTANT: DO NOT RUN the `adw_sdlc_ZTE_iso` workflows unless `ZTE` is EXPLICITLY uppercased. 
This is a dangerous workflow and it needs to be absolutely clear when we're running it. If zte is not capitalized, then run the non zte version `/adw_sdlc_iso`. + +## Valid ADW Commands + +- `/adw_plan_iso` - Planning only +- `/adw_build_iso` - Building only (requires adw_id) +- `/adw_test_iso` - Testing only (requires adw_id) +- `/adw_review_iso` - Review only (requires adw_id) +- `/adw_document_iso` - Documentation only (requires adw_id) +- `/adw_ship_iso` - Ship/approve and merge PR (requires adw_id) +- `/adw_patch_iso` - Direct patch from issue +- `/adw_plan_build_iso` - Plan + Build +- `/adw_plan_build_test_iso` - Plan + Build + Test +- `/adw_plan_build_review_iso` - Plan + Build + Review (skips test) +- `/adw_plan_build_document_iso` - Plan + Build + Document (skips test and review) +- `/adw_plan_build_test_review_iso` - Plan + Build + Test + Review +- `/adw_sdlc_iso` - Complete SDLC: Plan + Build + Test + Review + Document +- `/adw_sdlc_zte_iso` - Zero Touch Execution: Complete SDLC + auto-merge to production. Note: as per instructions, 'ZTE' must be capitalized. Do not run this if 'zte' is not capitalized. + +## Response Format + +Respond ONLY with a JSON object in this format: +```json +{ + "adw_slash_command": "/adw_plan", + "adw_id": "abc12345", + "model_set": "base" +} +``` + +Fields: +- `adw_slash_command`: The ADW command found (include the slash) +- `adw_id`: The 8-character ADW ID if found +- `model_set`: The model set to use ("base" or "heavy"), defaults to "base" if not specified + +If only some fields are found, include only those fields. +If nothing is found, return: `{}` +IMPORTANT: Always include `model_set` with value "base" if no model_set is explicitly mentioned in the text. + +## Text to Analyze + +$ARGUMENTS \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/classify_issue.md b/skills/adw-bootstrap/reference/scaled/commands/classify_issue.md new file mode 100644 index 0000000..51e80ec --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/classify_issue.md @@ -0,0 +1,22 @@ +# Github Issue Command Selection + +Based on the `Github Issue` below, follow the `Instructions` to select the appropriate command to execute based on the `Command Mapping`. + +## Instructions + +- Based on the details in the `Github Issue`, select the appropriate command to execute. +- IMPORTANT: Respond exclusively with '/' followed by the command to execute based on the `Command Mapping` below. +- Use the command mapping to help you decide which command to respond with. +- Don't examine the codebase just focus on the `Github Issue` and the `Command Mapping` below to determine the appropriate command to execute. + +## Command Mapping + +- Respond with `/chore` if the issue is a chore. +- Respond with `/bug` if the issue is a bug. +- Respond with `/feature` if the issue is a feature. +- Respond with `/patch` if the issue is a patch. +- Respond with `0` if the issue isn't any of the above. + +## Github Issue + +$ARGUMENTS \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/cleanup_worktrees.md b/skills/adw-bootstrap/reference/scaled/commands/cleanup_worktrees.md new file mode 100644 index 0000000..afe7f28 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/cleanup_worktrees.md @@ -0,0 +1,44 @@ +# Cleanup ADW Worktrees + +Clean up isolated ADW worktrees and their associated resources. 
+ +## Variables + +action: $1 (all|specific|list) +adw_id: $2 (optional, required if action is "specific") + +## Instructions + +Manage git worktrees created by isolated ADW workflows: +- If action is "list": Show all worktrees under trees/ directory +- If action is "specific": Remove the specific worktree for the given adw_id +- If action is "all": Remove all worktrees under trees/ directory + +## Run + +Based on the action: + +### List worktrees +If action is "list": +- Run `git worktree list | grep "trees/"` to show isolated worktrees +- List the contents of the trees/ directory with sizes + +### Remove specific worktree +If action is "specific" and adw_id is provided: +- Check if trees/{adw_id} exists +- Run `git worktree remove trees/{adw_id}` to remove it +- Report success or any errors + +### Remove all worktrees +If action is "all": +- First list all worktrees that will be removed +- For each worktree under trees/, run `git worktree remove` +- Clean up any remaining directories under trees/ +- Run `git worktree prune` to clean up any stale entries + +## Report + +Report the results of the cleanup operation: +- Number of worktrees removed +- Any errors encountered +- Current status after cleanup \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/document.md b/skills/adw-bootstrap/reference/scaled/commands/document.md new file mode 100644 index 0000000..19cdc35 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/document.md @@ -0,0 +1,129 @@ +# Document Feature + +Generate concise markdown documentation for implemented features by analyzing code changes and specifications. This command creates documentation in the `app_docs/` directory based on git diff analysis against the main branch and the original feature specification. + +## Variables + +adw_id: $1 +spec_path: $2 if provided, otherwise leave it blank +documentation_screenshots_dir: $3 if provided, otherwise leave it blank + +## Instructions + +### 1. Analyze Changes +- Run `git diff origin/main --stat` to see files changed and lines modified +- Run `git diff origin/main --name-only` to get the list of changed files +- For significant changes (>50 lines), run `git diff origin/main ` on specific files to understand the implementation details + +### 2. Read Specification (if provided) +- If `spec_path` is provided, read the specification file to understand: + - Original requirements and goals + - Expected functionality + - Success criteria +- Use this to frame the documentation around what was requested vs what was built + +### 3. Analyze and Copy Screenshots (if provided) +- If `documentation_screenshots_dir` is provided, list and examine screenshots +- Create `app_docs/assets/` directory if it doesn't exist +- Copy all screenshot files (*.png) from `documentation_screenshots_dir` to `app_docs/assets/` + - Preserve original filenames + - Use `cp` command to copy files +- Use visual context to better describe UI changes or visual features +- Reference screenshots in documentation using relative paths (e.g., `assets/screenshot-name.png`) + +### 4. 
Generate Documentation +- Create a new documentation file in `app_docs/` directory +- Filename format: `feature-{adw_id}-{descriptive-name}.md` + - Replace `{descriptive-name}` with a short feature name (e.g., "user-auth", "data-export", "search-ui") +- Follow the Documentation Format below +- Focus on: + - What was built (based on git diff) + - How it works (technical implementation) + - How to use it (user perspective) + - Any configuration or setup required + +### 5. Update Conditional Documentation +- After creating the documentation file, read `.claude/commands/conditional_docs.md` +- Add an entry for the new documentation file with appropriate conditions +- The entry should help future developers know when to read this documentation +- Format the entry following the existing pattern in the file + +### 6. Final Output +- When you finish writing the documentation and updating conditional_docs.md, return exclusively the path to the documentation file created and nothing else + +## Documentation Format + +```md +# + +**ADW ID:** +**Date:** +**Specification:** + +## Overview + +<2-3 sentence summary of what was built and why> + +## Screenshots + + + +![](assets/) + +## What Was Built + + + +- +- +- + +## Technical Implementation + +### Files Modified + + + +- ``: +- ``: + +### Key Changes + + + +## How to Use + + + +1. +2. +3. + +## Configuration + + + +## Testing + + + +## Notes + + +``` + +## Conditional Docs Entry Format + +After creating the documentation, add this entry to `.claude/commands/conditional_docs.md`: + +```md +- app_docs/.md + - Conditions: + - When working with + - When implementing + - When troubleshooting +``` + +## Report + +- IMPORTANT: Return exclusively the path to the documentation file created and nothing else. \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/generate_branch_name.md b/skills/adw-bootstrap/reference/scaled/commands/generate_branch_name.md new file mode 100644 index 0000000..b820a50 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/generate_branch_name.md @@ -0,0 +1,34 @@ +# Generate Git Branch Name + +Based on the `Instructions` below, take the `Variables` follow the `Run` section to generate a concise Git branch name following the specified format. Then follow the `Report` section to report the results of your work. + +## Variables + +issue_class: $1 +adw_id: $2 +issue: $3 + +## Instructions + +- Generate a branch name in the format: `-issue--adw--` +- The `` should be: + - 3-6 words maximum + - All lowercase + - Words separated by hyphens + - Descriptive of the main task/feature + - No special characters except hyphens +- Examples: + - `feat-issue-123-adw-a1b2c3d4-add-user-auth` + - `bug-issue-456-adw-e5f6g7h8-fix-login-error` + - `chore-issue-789-adw-i9j0k1l2-update-dependencies` + - `test-issue-323-adw-m3n4o5p6-fix-failing-tests` +- Extract the issue number, title, and body from the issue JSON + +## Run + +Generate the branch name based on the instructions above. +Do NOT create or checkout any branches - just generate the name. 
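+
+For context, here is a minimal sketch (illustrative value only) of how the Python caller, `generate_branch_name` in `adw_modules/workflow_ops.py` earlier in this diff, consumes this command's output: the raw agent response is stripped of whitespace and used verbatim as the branch name, so any extra text would corrupt it.
+
+```python
+# Hypothetical report text; the caller strips whitespace and uses the result as-is.
+raw_report = "bug-issue-456-adw-e5f6g7h8-fix-login-error\n"
+branch_name = raw_report.strip()
+assert branch_name == "bug-issue-456-adw-e5f6g7h8-fix-login-error"
+```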
+ +## Report + +Return ONLY the generated branch name (no other text) \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/install_worktree.md b/skills/adw-bootstrap/reference/scaled/commands/install_worktree.md new file mode 100644 index 0000000..417ca4b --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/install_worktree.md @@ -0,0 +1,82 @@ +# Install Worktree + +This command sets up an isolated worktree environment with custom port configuration. + +## Parameters +- Worktree path: {0} +- Backend port: {1} +- Frontend port: {2} + +## Read +- .env.sample (from parent repo) +- ./app/server/.env.sample (from parent repo) +- .mcp.json (from parent repo) +- playwright-mcp-config.json (from parent repo) + +## Steps + +1. **Navigate to worktree directory** + ```bash + cd {0} + ``` + +2. **Create port configuration file** + Create `.ports.env` with: + ``` + BACKEND_PORT={1} + FRONTEND_PORT={2} + VITE_BACKEND_URL=http://localhost:{1} + ``` + +3. **Copy and update .env files** + - Copy `.env` from parent repo if it exists + - Append `.ports.env` contents to `.env` + - Copy `app/server/.env` from parent repo if it exists + - Append `.ports.env` contents to `app/server/.env` + +4. **Copy and configure MCP files** + - Copy `.mcp.json` from parent repo if it exists + - Copy `playwright-mcp-config.json` from parent repo if it exists + - These files are needed for Model Context Protocol and Playwright automation + + After copying, update paths to use absolute paths: + - Get the absolute worktree path: `WORKTREE_PATH=$(pwd)` + - Update `.mcp.json`: + - Find the line containing `"./playwright-mcp-config.json"` + - Replace it with `"${WORKTREE_PATH}/playwright-mcp-config.json"` + - Use a JSON-aware tool or careful string replacement to maintain valid JSON + - Update `playwright-mcp-config.json`: + - Find the line containing `"dir": "./videos"` + - Replace it with `"dir": "${WORKTREE_PATH}/videos"` + - Create the videos directory: `mkdir -p ${WORKTREE_PATH}/videos` + - This ensures MCP configuration works correctly regardless of execution context + +5. **Install backend dependencies** + ```bash + cd app/server && uv sync --all-extras + ``` + +6. **Install frontend dependencies** + ```bash + cd ../client && bun install + ``` + +7. **Setup database** + ```bash + cd ../.. && ./scripts/reset_db.sh + ``` + +## Error Handling +- If parent .env files don't exist, create minimal versions from .env.sample files +- Ensure all paths are absolute to avoid confusion + +## Report +- List all files created/modified (including MCP configuration files) +- Show port assignments +- Confirm dependencies installed +- Note any missing parent .env files that need user attention +- Note any missing MCP configuration files +- Show the updated absolute paths in: + - `.mcp.json` (should show full path to playwright-mcp-config.json) + - `playwright-mcp-config.json` (should show full path to videos directory) +- Confirm videos directory was created \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/patch.md b/skills/adw-bootstrap/reference/scaled/commands/patch.md new file mode 100644 index 0000000..4f45549 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/patch.md @@ -0,0 +1,92 @@ +# Patch Plan + +Create a **focused patch plan** to resolve a specific issue based on the `review_change_request`. Follow the `Instructions` to create a concise plan that addresses the issue with minimal, targeted changes. 
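+
+For context, here is a minimal sketch (illustrative values only) of how `create_and_implement_patch` in `adw_modules/workflow_ops.py`, earlier in this diff, assembles the positional arguments this command receives:
+
+```python
+# Positional args as assembled by the Python caller; the concrete values are hypothetical.
+adw_id = "a1b2c3d4"
+review_change_request = "Fix the button color flagged in review"
+spec_path = ""                      # empty string when no spec file is supplied
+agent_name_planner = "patch_agent"  # default planner agent name
+args = [adw_id, review_change_request, spec_path, agent_name_planner]
+# issue_screenshots, when provided, is appended as a final comma-separated string of paths
+```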
+ +## Variables + +adw_id: $1 +review_change_request: $2 +spec_path: $3 if provided, otherwise leave it blank +agent_name: $4 if provided, otherwise use 'patch_agent' +issue_screenshots: $ARGUMENT (optional) - comma-separated list of screenshot paths if provided + +## Instructions + +- IMPORTANT: You're creating a patch plan to fix a specific review issue. Keep changes small, focused, and targeted +- Read the original specification (spec) file at `spec_path` if provided to understand the context and requirements +- IMPORTANT Use the `review_change_request` to understand exactly what needs and use it as the basis for your patch plan +- If `issue_screenshots` are provided, examine them to better understand the visual context of the issue +- Create the patch plan in `specs/patch/` directory with filename: `patch-adw-{adw_id}-{descriptive-name}.md` + - Replace `{descriptive-name}` with a short name based on the issue (e.g., "fix-button-color", "update-validation", "correct-layout") +- IMPORTANT: This is a PATCH - keep the scope minimal. Only fix what's described in the `review_change_request` and nothing more. Address only the `review_change_request`. +- Run `git diff --stat`. If changes are available, use them to understand what's been done in the codebase and so you can understand the exact changes you should detail in the patch plan. +- Ultra think about the most efficient way to implement the solution with minimal code changes +- Base your `Plan Format: Validation` on the validation steps from `spec_path` if provided + - If any tests fail in the validation steps, you must fix them. + - If not provided, READ `.claude/commands/test.md: ## Test Execution Sequence` and execute the tests to understand the tests that need to be run to validate the patch. +- Replace every in the `Plan Format` with specific implementation details +- IMPORTANT: When you finish writing the patch plan, return exclusively the path to the patch plan file created and nothing else. + +## Relevant Files + +Focus on the following files: +- `README.md` - Contains the project overview and instructions. +- `app/server/**` - Contains the codebase server. +- `app/client/**` - Contains the codebase client. +- `scripts/**` - Contains the scripts to start and stop the server + client. +- `adws/**` - Contains the AI Developer Workflow (ADW) scripts. + +- Read `.claude/commands/conditional_docs.md` to check if your task requires additional documentation +- If your task matches any of the conditions listed, reference those documentation files to understand the context better when creating your patch plan + +Ignore all other files in the codebase. + + +## Plan Format + +```md +# Patch: + +## Metadata +adw_id: `{adw_id}` +review_change_request: `{review_change_request}` + +## Issue Summary +**Original Spec:** +**Issue:** +**Solution:** + +## Files to Modify +Use these files to implement the patch: + + + +## Implementation Steps +IMPORTANT: Execute every step in order, top to bottom. + + + +### Step 1: +- +- + +### Step 2: +- +- + + + +## Validation +Execute every command to validate the patch is complete with zero regressions. + + + +## Patch Scope +**Lines of code to change:** +**Risk level:** +**Testing required:** +``` + +## Report + +- IMPORTANT: Return exclusively the path to the patch plan file created and nothing else. 
\ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/pull_request.md b/skills/adw-bootstrap/reference/scaled/commands/pull_request.md new file mode 100644 index 0000000..fd60995 --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/pull_request.md @@ -0,0 +1,41 @@ +# Create Pull Request + +Based on the `Instructions` below, take the `Variables` follow the `Run` section to create a pull request. Then follow the `Report` section to report the results of your work. + +## Variables + +branch_name: $1 +issue: $2 +plan_file: $3 +adw_id: $4 + +## Instructions + +- Generate a pull request title in the format: `: # - ` +- The PR body should include: + - A summary section with the issue context + - Link to the implementation `plan_file` if it exists + - Reference to the issue (Closes #) + - ADW tracking ID + - A checklist of what was done + - A summary of key changes made +- Extract issue number, type, and title from the issue JSON +- Examples of PR titles: + - `feat: #123 - Add user authentication` + - `bug: #456 - Fix login validation error` + - `chore: #789 - Update dependencies` + - `test: #1011 - Test xyz` +- Don't mention Claude Code in the PR body - let the author get credit for this. + +## Run + +1. Run `git diff origin/main...HEAD --stat` to see a summary of changed files +2. Run `git log origin/main..HEAD --oneline` to see the commits that will be included +3. Run `git diff origin/main...HEAD --name-only` to get a list of changed files +4. Run `git push -u origin ` to push the branch +5. Set GH_TOKEN environment variable from GITHUB_PAT if available, then run `gh pr create --title "" --body "" --base main` to create the PR +6. Capture the PR URL from the output + +## Report + +Return ONLY the PR URL that was created (no other text) \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/review.md b/skills/adw-bootstrap/reference/scaled/commands/review.md new file mode 100644 index 0000000..f786aaf --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/review.md @@ -0,0 +1,85 @@ +# Review + +Follow the `Instructions` below to **review work done against a specification file** (specs/*.md) to ensure implemented features match requirements. Use the spec file to understand the requirements and then use the git diff if available to understand the changes made. Capture screenshots of critical functionality paths as documented in the `Instructions` section. If there are issues, report them if not then report success. + +## Variables + +adw_id: $1 +spec_file: $2 +agent_name: $3 if provided, otherwise use 'review_agent' +review_image_dir: `/agents///review_img/` + +## Instructions + +- Check current git branch using `git branch` to understand context +- Run `git diff origin/main` to see all changes made in current branch. Continue even if there are no changes related to the spec file. +- Find the spec file by looking for specs/*.md files in the diff that match the current branch name +- Read the identified spec file to understand requirements +- IMPORTANT: If the work can be validated by UI validation then (if not skip the section): + - Use the playwright mcp server commands to validate the work. + - Look for corresponding e2e test files in ./claude/commands/e2e/test_*.md that mirror the feature name + - Use e2e test files only as navigation guides for screenshot locations, not for other purposes + - IMPORTANT: To be clear, we're not testing. We know the functionality works. 
We're reviewing the implementation against the spec to make sure it matches what was requested. + - IMPORTANT: Take screen shots along the way to showcase the new functionality and any issues you find + - Capture visual proof of working features through targeted screenshots + - Navigate to the application and capture screenshots of only the critical paths based on the spec + - Compare implemented changes with spec requirements to verify correctness + - Do not take screenshots of the entire process, only the critical points. + - IMPORTANT: Aim for `1-5` screenshots to showcase that the new functionality works as specified. + - If there is a review issue, take a screenshot of the issue and add it to the `review_issues` array. Describe the issue, resolution, and severity. + - Number your screenshots in the order they are taken like `01_.png`, `02_.png`, etc. + - IMPORTANT: Be absolutely sure to take a screen shot of the critical point of the new functionality + - IMPORTANT: Copy all screenshots to the provided `review_image_dir` + - IMPORTANT: Store the screenshots in the `review_image_dir` and be sure to use full absolute paths. + - Focus only on critical functionality paths - avoid unnecessary screenshots + - Ensure screenshots clearly demonstrate that features work as specified + - Use descriptive filenames that indicate what part of the change is being verified +- IMPORTANT: Issue Severity Guidelines + - Think hard about the impact of the issue on the feature and the user + - Guidelines: + - `skippable` - the issue is non-blocker for the work to be released but is still a problem + - `tech_debt` - the issue is non-blocker for the work to be released but will create technical debt that should be addressed in the future + - `blocker` - the issue is a blocker for the work to be released and should be addressed immediately. It will harm the user experience or will not function as expected. +- IMPORTANT: Return ONLY the JSON array with test results + - IMPORTANT: Output your result in JSON format based on the `Report` section below. + - IMPORTANT: Do not include any additional text, explanations, or markdown formatting + - We'll immediately run JSON.parse() on the output, so make sure it's valid JSON +- Ultra think as you work through the review process. Focus on the critical functionality paths and the user experience. Don't report issues if they are not critical to the feature. + +## Setup + +IMPORTANT: Read and **Execute** `.claude/commands/prepare_app.md` now to prepare the application for the review. +- Note: prepare_app.md will automatically detect and use ports from `.ports.env` if running in a worktree environment +- The application URL will be http://localhost:PORT where PORT is from `.ports.env` (FRONTEND_PORT) or default 5173 + +## Report + +- IMPORTANT: Return results exclusively as a JSON array based on the `Output Structure` section below. +- `success` should be `true` if there are NO BLOCKING issues (implementation matches spec for critical functionality) +- `success` should be `false` ONLY if there are BLOCKING issues that prevent the work from being released +- `review_issues` can contain issues of any severity (skippable, tech_debt, or blocker) +- `screenshots` should ALWAYS contain paths to screenshots showcasing the new functionality, regardless of success status. Use full absolute paths. 
+- This allows subsequent agents to quickly identify and resolve blocking errors while documenting all issues + +### Output Structure + +```json +{ + success: "boolean - true if there are NO BLOCKING issues (can have skippable/tech_debt issues), false if there are BLOCKING issues", + review_summary: "string - 2-4 sentences describing what was built and whether it matches the spec. Written as if reporting during a standup meeting. Example: 'The natural language query feature has been implemented with drag-and-drop file upload and interactive table display. The implementation matches the spec requirements for SQL injection protection and supports both CSV and JSON formats. Minor UI improvements could be made but all core functionality is working as specified.'", + review_issues: [ + { + "review_issue_number": "number - the issue number based on the index of this issue", + "screenshot_path": "string - /absolute/path/to/screenshot_that_shows_review_issue.png", + "issue_description": "string - description of the issue", + "issue_resolution": "string - description of the resolution", + "issue_severity": "string - severity of the issue between 'skippable', 'tech_debt', 'blocker'" + }, + ... + ], + screenshots: [ + "string - /absolute/path/to/screenshot_showcasing_functionality.png", + "string - /absolute/path/to/screenshot_showcasing_functionality.png", + "...", + ] +} \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/commands/test.md b/skills/adw-bootstrap/reference/scaled/commands/test.md new file mode 100644 index 0000000..e0d9f6d --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/commands/test.md @@ -0,0 +1,115 @@ +# Application Validation Test Suite + +Execute comprehensive validation tests for both frontend and backend components, returning results in a standardized JSON format for automated processing. + +## Purpose + +Proactively identify and fix issues in the application before they impact users or developers. By running this comprehensive test suite, you can: +- Detect syntax errors, type mismatches, and import failures +- Identify broken tests or security vulnerabilities +- Verify build processes and dependencies +- Ensure the application is in a healthy state + +## Variables + +TEST_COMMAND_TIMEOUT: 5 minutes + +## Instructions + +- Execute each test in the sequence provided below +- Capture the result (passed/failed) and any error messages +- IMPORTANT: Return ONLY the JSON array with test results + - IMPORTANT: Do not include any additional text, explanations, or markdown formatting + - We'll immediately run JSON.parse() on the output, so make sure it's valid JSON +- If a test passes, omit the error field +- If a test fails, include the error message in the error field +- Execute all tests even if some fail +- Error Handling: + - If a command returns non-zero exit code, mark as failed and immediately stop processing tests + - Capture stderr output for error field + - Timeout commands after `TEST_COMMAND_TIMEOUT` + - IMPORTANT: If a test fails, stop processing tests and return the results thus far +- Some tests may have dependencies (e.g., server must be stopped for port availability) +- API health check is required +- Test execution order is important - dependencies should be validated first +- All file paths are relative to the project root +- Always run `pwd` and `cd` before each test to ensure you're operating in the correct directory for the given test + +## Test Execution Sequence + +### Backend Tests + +1. 
**Python Syntax Check** + - Preparation Command: None + - Command: `cd app/server && uv run python -m py_compile server.py main.py core/*.py` + - test_name: "python_syntax_check" + - test_purpose: "Validates Python syntax by compiling source files to bytecode, catching syntax errors like missing colons, invalid indentation, or malformed statements" + +2. **Backend Code Quality Check** + - Preparation Command: None + - Command: `cd app/server && uv run ruff check .` + - test_name: "backend_linting" + - test_purpose: "Validates Python code quality, identifies unused imports, style violations, and potential bugs" + +3. **All Backend Tests** + - Preparation Command: None + - Command: `cd app/server && uv run pytest tests/ -v --tb=short` + - test_name: "all_backend_tests" + - test_purpose: "Validates all backend functionality including file processing, SQL security, LLM integration, and API endpoints" + +### Frontend Tests + +4. **TypeScript Type Check** + - Preparation Command: None + - Command: `cd app/client && bun tsc --noEmit` + - test_name: "typescript_check" + - test_purpose: "Validates TypeScript type correctness without generating output files, catching type errors, missing imports, and incorrect function signatures" + +5. **Frontend Build** + - Preparation Command: None + - Command: `cd app/client && bun run build` + - test_name: "frontend_build" + - test_purpose: "Validates the complete frontend build process including bundling, asset optimization, and production compilation" + +## Report + +- IMPORTANT: Return results exclusively as a JSON array based on the `Output Structure` section below. +- Sort the JSON array with failed tests (passed: false) at the top +- Include all tests in the output, both passed and failed +- The execution_command field should contain the exact command that can be run to reproduce the test +- This allows subsequent agents to quickly identify and resolve errors + +### Output Structure + +```json +[ + { + "test_name": "string", + "passed": boolean, + "execution_command": "string", + "test_purpose": "string", + "error": "optional string" + }, + ... +] +``` + +### Example Output + +```json +[ + { + "test_name": "frontend_build", + "passed": false, + "execution_command": "cd app/client && bun run build", + "test_purpose": "Validates TypeScript compilation, module resolution, and production build process for the frontend application", + "error": "TS2345: Argument of type 'string' is not assignable to parameter of type 'number'" + }, + { + "test_name": "all_backend_tests", + "passed": true, + "execution_command": "cd app/server && uv run pytest tests/ -v --tb=short", + "test_purpose": "Validates all backend functionality including file processing, SQL security, LLM integration, and API endpoints" + } +] +``` \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/workflows/adw_plan_build_test_review_iso.py b/skills/adw-bootstrap/reference/scaled/workflows/adw_plan_build_test_review_iso.py new file mode 100644 index 0000000..1553d4c --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/workflows/adw_plan_build_test_review_iso.py @@ -0,0 +1,132 @@ +#!/usr/bin/env -S uv run +# /// script +# dependencies = ["python-dotenv", "pydantic"] +# /// + +""" +ADW Plan Build Test Review Iso - Compositional workflow for isolated planning, building, testing, and reviewing + +Usage: uv run adw_plan_build_test_review_iso.py [adw-id] [--skip-e2e] [--skip-resolution] + +This script runs: +1. adw_plan_iso.py - Planning phase (isolated) +2. 
adw_build_iso.py - Implementation phase (isolated) +3. adw_test_iso.py - Testing phase (isolated) +4. adw_review_iso.py - Review phase (isolated) + +The scripts are chained together via persistent state (adw_state.json). +""" + +import subprocess +import sys +import os + +# Add the parent directory to Python path to import modules +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) +from adw_modules.workflow_ops import ensure_adw_id + + +def main(): + """Main entry point.""" + # Check for flags + skip_e2e = "--skip-e2e" in sys.argv + skip_resolution = "--skip-resolution" in sys.argv + + # Remove flags from argv + if skip_e2e: + sys.argv.remove("--skip-e2e") + if skip_resolution: + sys.argv.remove("--skip-resolution") + + if len(sys.argv) < 2: + print("Usage: uv run adw_plan_build_test_review_iso.py [adw-id] [--skip-e2e] [--skip-resolution]") + print("\nThis runs the isolated plan, build, test, and review workflow:") + print(" 1. Plan (isolated)") + print(" 2. Build (isolated)") + print(" 3. Test (isolated)") + print(" 4. Review (isolated)") + sys.exit(1) + + issue_number = sys.argv[1] + adw_id = sys.argv[2] if len(sys.argv) > 2 else None + + # Ensure ADW ID exists with initialized state + adw_id = ensure_adw_id(issue_number, adw_id) + print(f"Using ADW ID: {adw_id}") + + # Get the directory where this script is located + script_dir = os.path.dirname(os.path.abspath(__file__)) + + # Run isolated plan with the ADW ID + plan_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_plan_iso.py"), + issue_number, + adw_id, + ] + print(f"\n=== ISOLATED PLAN PHASE ===") + print(f"Running: {' '.join(plan_cmd)}") + plan = subprocess.run(plan_cmd) + if plan.returncode != 0: + print("Isolated plan phase failed") + sys.exit(1) + + # Run isolated build with the ADW ID + build_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_build_iso.py"), + issue_number, + adw_id, + ] + print(f"\n=== ISOLATED BUILD PHASE ===") + print(f"Running: {' '.join(build_cmd)}") + build = subprocess.run(build_cmd) + if build.returncode != 0: + print("Isolated build phase failed") + sys.exit(1) + + # Run isolated test with the ADW ID + test_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_test_iso.py"), + issue_number, + adw_id, + ] + if skip_e2e: + test_cmd.append("--skip-e2e") + + print(f"\n=== ISOLATED TEST PHASE ===") + print(f"Running: {' '.join(test_cmd)}") + test = subprocess.run(test_cmd) + if test.returncode != 0: + print("Isolated test phase failed") + sys.exit(1) + + # Run isolated review with the ADW ID + review_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_review_iso.py"), + issue_number, + adw_id, + ] + if skip_resolution: + review_cmd.append("--skip-resolution") + + print(f"\n=== ISOLATED REVIEW PHASE ===") + print(f"Running: {' '.join(review_cmd)}") + review = subprocess.run(review_cmd) + if review.returncode != 0: + print("Isolated review phase failed") + sys.exit(1) + + print(f"\n=== ISOLATED WORKFLOW COMPLETED ===") + print(f"ADW ID: {adw_id}") + print(f"All phases completed successfully!") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/workflows/adw_sdlc_iso.py b/skills/adw-bootstrap/reference/scaled/workflows/adw_sdlc_iso.py new file mode 100644 index 0000000..8db8d9f --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/workflows/adw_sdlc_iso.py @@ -0,0 +1,152 @@ +#!/usr/bin/env -S uv run +# /// script +# dependencies = ["python-dotenv", "pydantic"] +# /// + +""" +ADW SDLC Iso - Complete 
Software Development Life Cycle workflow with isolation + +Usage: uv run adw_sdlc_iso.py [adw-id] [--skip-e2e] [--skip-resolution] + +This script runs the complete ADW SDLC pipeline in isolation: +1. adw_plan_iso.py - Planning phase (isolated) +2. adw_build_iso.py - Implementation phase (isolated) +3. adw_test_iso.py - Testing phase (isolated) +4. adw_review_iso.py - Review phase (isolated) +5. adw_document_iso.py - Documentation phase (isolated) + +The scripts are chained together via persistent state (adw_state.json). +Each phase runs in its own git worktree with dedicated ports. +""" + +import subprocess +import sys +import os + +# Add the parent directory to Python path to import modules +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) +from adw_modules.workflow_ops import ensure_adw_id + + +def main(): + """Main entry point.""" + # Check for flags + skip_e2e = "--skip-e2e" in sys.argv + skip_resolution = "--skip-resolution" in sys.argv + + # Remove flags from argv + if skip_e2e: + sys.argv.remove("--skip-e2e") + if skip_resolution: + sys.argv.remove("--skip-resolution") + + if len(sys.argv) < 2: + print("Usage: uv run adw_sdlc_iso.py [adw-id] [--skip-e2e] [--skip-resolution]") + print("\nThis runs the complete isolated Software Development Life Cycle:") + print(" 1. Plan (isolated)") + print(" 2. Build (isolated)") + print(" 3. Test (isolated)") + print(" 4. Review (isolated)") + print(" 5. Document (isolated)") + sys.exit(1) + + issue_number = sys.argv[1] + adw_id = sys.argv[2] if len(sys.argv) > 2 else None + + # Ensure ADW ID exists with initialized state + adw_id = ensure_adw_id(issue_number, adw_id) + print(f"Using ADW ID: {adw_id}") + + # Get the directory where this script is located + script_dir = os.path.dirname(os.path.abspath(__file__)) + + # Run isolated plan with the ADW ID + plan_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_plan_iso.py"), + issue_number, + adw_id, + ] + print(f"\n=== ISOLATED PLAN PHASE ===") + print(f"Running: {' '.join(plan_cmd)}") + plan = subprocess.run(plan_cmd) + if plan.returncode != 0: + print("Isolated plan phase failed") + sys.exit(1) + + # Run isolated build with the ADW ID + build_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_build_iso.py"), + issue_number, + adw_id, + ] + print(f"\n=== ISOLATED BUILD PHASE ===") + print(f"Running: {' '.join(build_cmd)}") + build = subprocess.run(build_cmd) + if build.returncode != 0: + print("Isolated build phase failed") + sys.exit(1) + + # Run isolated test with the ADW ID + test_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_test_iso.py"), + issue_number, + adw_id, + "--skip-e2e", # Always skip E2E tests in SDLC workflows + ] + + print(f"\n=== ISOLATED TEST PHASE ===") + print(f"Running: {' '.join(test_cmd)}") + test = subprocess.run(test_cmd) + if test.returncode != 0: + print("Isolated test phase failed") + # Note: Continue anyway as some tests might be flaky + print("WARNING: Test phase failed but continuing with review") + + # Run isolated review with the ADW ID + review_cmd = [ + "uv", + "run", + os.path.join(script_dir, "adw_review_iso.py"), + issue_number, + adw_id, + ] + if skip_resolution: + review_cmd.append("--skip-resolution") + + print(f"\n=== ISOLATED REVIEW PHASE ===") + print(f"Running: {' '.join(review_cmd)}") + review = subprocess.run(review_cmd) + if review.returncode != 0: + print("Isolated review phase failed") + sys.exit(1) + + # Run isolated documentation with the ADW ID + document_cmd = [ + "uv", + "run", + os.path.join(script_dir, 
"adw_document_iso.py"), + issue_number, + adw_id, + ] + print(f"\n=== ISOLATED DOCUMENTATION PHASE ===") + print(f"Running: {' '.join(document_cmd)}") + document = subprocess.run(document_cmd) + if document.returncode != 0: + print("Isolated documentation phase failed") + sys.exit(1) + + print(f"\n=== ISOLATED SDLC COMPLETED ===") + print(f"ADW ID: {adw_id}") + print(f"All phases completed successfully!") + print(f"\nWorktree location: trees/{adw_id}/") + print(f"To clean up: ./scripts/purge_tree.sh {adw_id}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/skills/adw-bootstrap/reference/scaled/workflows/adw_ship_iso.py b/skills/adw-bootstrap/reference/scaled/workflows/adw_ship_iso.py new file mode 100755 index 0000000..faa34df --- /dev/null +++ b/skills/adw-bootstrap/reference/scaled/workflows/adw_ship_iso.py @@ -0,0 +1,340 @@ +#!/usr/bin/env -S uv run +# /// script +# dependencies = ["python-dotenv", "pydantic"] +# /// + +""" +ADW Ship Iso - AI Developer Workflow for shipping (merging) to main + +Usage: + uv run adw_ship_iso.py + +Workflow: +1. Load state and validate worktree exists +2. Validate ALL state fields are populated (not None) +3. Perform manual git merge in main repository: + - Fetch latest from origin + - Checkout main + - Merge feature branch + - Push to origin/main +4. Post success message to issue + +This workflow REQUIRES that all previous workflows have been run and that +every field in ADWState has a value. This is our final approval step. + +Note: Merge operations happen in the main repository root, not in the worktree, +to preserve the worktree's state. +""" + +import sys +import os +import logging +import json +import subprocess +from typing import Optional, Dict, Any, Tuple +from dotenv import load_dotenv + +from adw_modules.state import ADWState +from adw_modules.github import ( + make_issue_comment, + get_repo_url, + extract_repo_path, +) +from adw_modules.beads_integration import is_beads_issue, close_beads_issue +from adw_modules.workflow_ops import format_issue_message +from adw_modules.worktree_ops import validate_worktree +from adw_modules.data_types import ADWStateData + +# Setup logging +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + +# Agent name constant +AGENT_SHIPPER = "shipper" + + +def get_main_repo_root() -> str: + """Get the main repository root directory (parent of adws).""" + # This script is in adws/, so go up one level to get repo root + return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + + +def manual_merge_to_main(branch_name: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]: + """Manually merge a branch to main using git commands. + + This runs in the main repository root, not in a worktree. 
+ + Args: + branch_name: The feature branch to merge + logger: Logger instance + + Returns: + Tuple of (success, error_message) + """ + repo_root = get_main_repo_root() + logger.info(f"Performing manual merge in main repository: {repo_root}") + + try: + # Save current branch to restore later + result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + capture_output=True, text=True, cwd=repo_root + ) + original_branch = result.stdout.strip() + logger.debug(f"Original branch: {original_branch}") + + # Step 1: Fetch latest from origin + logger.info("Fetching latest from origin...") + result = subprocess.run( + ["git", "fetch", "origin"], + capture_output=True, text=True, cwd=repo_root + ) + if result.returncode != 0: + return False, f"Failed to fetch from origin: {result.stderr}" + + # Step 2: Checkout main + logger.info("Checking out main branch...") + result = subprocess.run( + ["git", "checkout", "main"], + capture_output=True, text=True, cwd=repo_root + ) + if result.returncode != 0: + return False, f"Failed to checkout main: {result.stderr}" + + # Step 3: Pull latest main + logger.info("Pulling latest main...") + result = subprocess.run( + ["git", "pull", "origin", "main"], + capture_output=True, text=True, cwd=repo_root + ) + if result.returncode != 0: + # Try to restore original branch + subprocess.run(["git", "checkout", original_branch], cwd=repo_root) + return False, f"Failed to pull latest main: {result.stderr}" + + # Step 4: Merge the feature branch (no-ff to preserve all commits) + logger.info(f"Merging branch {branch_name} (no-ff to preserve all commits)...") + result = subprocess.run( + ["git", "merge", branch_name, "--no-ff", "-m", f"Merge branch '{branch_name}' via ADW Ship workflow"], + capture_output=True, text=True, cwd=repo_root + ) + if result.returncode != 0: + # Try to restore original branch + subprocess.run(["git", "checkout", original_branch], cwd=repo_root) + return False, f"Failed to merge {branch_name}: {result.stderr}" + + # Step 5: Push to origin/main + logger.info("Pushing to origin/main...") + result = subprocess.run( + ["git", "push", "origin", "main"], + capture_output=True, text=True, cwd=repo_root + ) + if result.returncode != 0: + # Try to restore original branch + subprocess.run(["git", "checkout", original_branch], cwd=repo_root) + return False, f"Failed to push to origin/main: {result.stderr}" + + # Step 6: Restore original branch + logger.info(f"Restoring original branch: {original_branch}") + subprocess.run(["git", "checkout", original_branch], cwd=repo_root) + + logger.info("✅ Successfully merged and pushed to main!") + return True, None + + except Exception as e: + logger.error(f"Unexpected error during merge: {e}") + # Try to restore original branch + try: + subprocess.run(["git", "checkout", original_branch], cwd=repo_root) + except: + pass + return False, str(e) + + +def validate_state_completeness(state: ADWState, logger: logging.Logger) -> tuple[bool, list[str]]: + """Validate that all fields in ADWState have values (not None). 
+ + Returns: + tuple of (is_valid, missing_fields) + """ + # Expected fields (mirrors the ADWStateData model) + expected_fields = { + "adw_id", + "issue_number", + "branch_name", + "plan_file", + "issue_class", + "worktree_path", + "backend_port", + "frontend_port", + } + + missing_fields = [] + + for field in expected_fields: + value = state.get(field) + if value is None: + missing_fields.append(field) + logger.warning(f"Missing required field: {field}") + else: + logger.debug(f"✓ {field}: {value}") + + return len(missing_fields) == 0, missing_fields + + +def main(): + """Main entry point.""" + # Load environment variables + load_dotenv() + + # Parse command line args + # INTENTIONAL: adw-id is REQUIRED - we need it to find the worktree and state + if len(sys.argv) < 3: + print("Usage: uv run adw_ship_iso.py <issue-number> <adw-id>") + print("\nError: Both issue-number and adw-id are required") + print("Run the complete SDLC workflow before shipping") + sys.exit(1) + + issue_number = sys.argv[1] + adw_id = sys.argv[2] + + # Try to load existing state + state = ADWState.load(adw_id, logger) + if not state: + # No existing state found + logger.error(f"No state found for ADW ID: {adw_id}") + logger.error("Run the complete SDLC workflow before shipping") + print(f"\nError: No state found for ADW ID: {adw_id}") + print("Run the complete SDLC workflow before shipping") + sys.exit(1) + + # Update issue number from state if available + issue_number = state.get("issue_number", issue_number) + + # Track that this ADW workflow has run + state.append_adw_id("adw_ship_iso") + + logger.info(f"ADW Ship Iso starting - ID: {adw_id}, Issue: {issue_number}") + + # Check if this is a beads issue + is_beads = is_beads_issue(issue_number) + logger.info(f"Issue type: {'beads' if is_beads else 'GitHub'}") + + # Post initial status (only for GitHub issues) + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, "ops", f"🚢 Starting ship workflow\n" + f"📋 Validating state completeness...") + ) + + # Step 1: Validate state completeness + logger.info("Validating state completeness...") + is_valid, missing_fields = validate_state_completeness(state, logger) + + if not is_valid: + error_msg = f"State validation failed. 
Missing fields: {', '.join(missing_fields)}" + logger.error(error_msg) + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, AGENT_SHIPPER, f"❌ {error_msg}\n\n" + "Please ensure all workflows have been run:\n" + "- adw_plan_iso.py (creates plan_file, branch_name, issue_class)\n" + "- adw_build_iso.py (implements the plan)\n" + "- adw_test_iso.py (runs tests)\n" + "- adw_review_iso.py (reviews implementation)\n" + "- adw_document_iso.py (generates docs)") + ) + sys.exit(1) + + logger.info("✅ State validation passed - all fields have values") + + # Step 2: Validate worktree exists + valid, error = validate_worktree(adw_id, state) + if not valid: + logger.error(f"Worktree validation failed: {error}") + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, AGENT_SHIPPER, f"❌ Worktree validation failed: {error}") + ) + sys.exit(1) + + worktree_path = state.get("worktree_path") + logger.info(f"✅ Worktree validated at: {worktree_path}") + + # Step 3: Get branch name + branch_name = state.get("branch_name") + logger.info(f"Preparing to merge branch: {branch_name}") + + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, AGENT_SHIPPER, f"📋 State validation complete\n" + f"🔍 Preparing to merge branch: {branch_name}") + ) + + # Step 4: Perform manual merge + logger.info(f"Starting manual merge of {branch_name} to main...") + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, AGENT_SHIPPER, f"🔀 Merging {branch_name} to main...\n" + "Using manual git operations in main repository") + ) + + success, error = manual_merge_to_main(branch_name, logger) + + if not success: + logger.error(f"Failed to merge: {error}") + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, AGENT_SHIPPER, f"❌ Failed to merge: {error}") + ) + sys.exit(1) + + logger.info(f"✅ Successfully merged {branch_name} to main") + + # Step 5: Close beads issue if applicable + if is_beads: + logger.info(f"Closing beads issue: {issue_number}") + success, error = close_beads_issue( + issue_number, + f"Completed via ADW {adw_id} - merged to main" + ) + if not success: + logger.warning(f"Failed to close beads issue: {error}") + else: + logger.info(f"✅ Closed beads issue: {issue_number}") + + # Step 6: Post success message (only for GitHub issues) + if not is_beads: + make_issue_comment( + issue_number, + format_issue_message(adw_id, AGENT_SHIPPER, + f"🎉 **Successfully shipped!**\n\n" + f"✅ Validated all state fields\n" + f"✅ Merged branch `{branch_name}` to main\n" + f"✅ Pushed to origin/main\n\n" + f"🚢 Code has been deployed to production!") + ) + + # Save final state + state.save("adw_ship_iso") + + # Post final state summary (only for GitHub issues) + if not is_beads: + make_issue_comment( + issue_number, + f"{adw_id}_ops: 📋 Final ship state:\n```json\n{json.dumps(state.data, indent=2)}\n```" + ) + + logger.info("Ship workflow completed successfully") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/skills/adw-bootstrap/utils/validator.py b/skills/adw-bootstrap/utils/validator.py new file mode 100644 index 0000000..def33d6 --- /dev/null +++ b/skills/adw-bootstrap/utils/validator.py @@ -0,0 +1,410 @@ +#!/usr/bin/env python3 +""" +Validation utilities for ADW setup. + +Simple checks to ensure ADW infrastructure is correctly installed and functional. +These are mechanical validations that don't require AI reasoning. 
+""" + +import os +import subprocess +import sys +from pathlib import Path +from typing import List, Tuple + + +def check_claude_installed() -> Tuple[bool, str]: + """Check if Claude Code CLI is available. + + Returns: + Tuple of (success, message) + """ + try: + result = subprocess.run( + ["claude", "--version"], + capture_output=True, + text=True, + timeout=5 + ) + + if result.returncode == 0: + version = result.stdout.strip() + return (True, f"✓ Claude Code CLI installed: {version}") + else: + return (False, "✗ Claude Code CLI not responding correctly") + + except FileNotFoundError: + return (False, "✗ Claude Code CLI not found. Install from: https://claude.ai/code") + except subprocess.TimeoutExpired: + return (False, "✗ Claude Code CLI timeout") + except Exception as e: + return (False, f"✗ Error checking Claude Code: {e}") + + +def check_api_key_configured() -> Tuple[bool, str]: + """Check if ANTHROPIC_API_KEY is configured (optional). + + Returns: + Tuple of (configured, message) + """ + api_key = os.getenv("ANTHROPIC_API_KEY") + + if api_key: + # Mask the key for display + masked = api_key[:10] + "..." + api_key[-4:] if len(api_key) > 14 else "***" + return (True, f"✓ API key configured: {masked}") + else: + return (False, "ℹ No API key (using subscription mode)") + + +def validate_directory_structure(project_root: str) -> List[str]: + """Check that expected ADW directories exist. + + Args: + project_root: Path to project root + + Returns: + List of validation messages + """ + project_path = Path(project_root) + messages = [] + + # Required directories + required_dirs = [ + "adws", + "adws/adw_modules", + ".claude/commands", + "specs" + ] + + for dir_path in required_dirs: + full_path = project_path / dir_path + if full_path.exists() and full_path.is_dir(): + messages.append(f"✓ Directory exists: {dir_path}") + else: + messages.append(f"✗ Directory missing: {dir_path}") + + # Optional but expected directories + optional_dirs = [ + "agents", + ] + + for dir_path in optional_dirs: + full_path = project_path / dir_path + if full_path.exists() and full_path.is_dir(): + messages.append(f"✓ Directory exists: {dir_path}") + else: + messages.append(f"ℹ Directory not created yet: {dir_path} (will be created on first run)") + + return messages + + +def validate_core_files(project_root: str) -> List[str]: + """Check that core ADW files exist. 
+ + Args: + project_root: Path to project root + + Returns: + List of validation messages + """ + project_path = Path(project_root) + messages = [] + + # Core files for minimal setup + core_files = [ + "adws/adw_modules/agent.py", + "adws/adw_prompt.py", + ".claude/commands/chore.md", + ".claude/commands/implement.md", + ] + + for file_path in core_files: + full_path = project_path / file_path + if full_path.exists() and full_path.is_file(): + messages.append(f"✓ File exists: {file_path}") + else: + messages.append(f"✗ File missing: {file_path}") + + # Enhanced setup files (optional) + enhanced_files = [ + "adws/adw_modules/agent_sdk.py", + "adws/adw_slash_command.py", + "adws/adw_chore_implement.py", + ".claude/commands/feature.md", + ] + + enhanced_count = sum(1 for f in enhanced_files if (project_path / f).exists()) + if enhanced_count > 0: + messages.append(f"ℹ Enhanced setup detected ({enhanced_count}/{len(enhanced_files)} files)") + for file_path in enhanced_files: + full_path = project_path / file_path + if full_path.exists() and full_path.is_file(): + messages.append(f" ✓ {file_path}") + + return messages + + +def check_scripts_executable(project_root: str) -> List[str]: + """Verify ADW scripts have execute permissions. + + Args: + project_root: Path to project root + + Returns: + List of validation messages + """ + project_path = Path(project_root) + messages = [] + + # Find all adw_*.py scripts + adws_dir = project_path / "adws" + if not adws_dir.exists(): + return ["✗ adws/ directory not found"] + + scripts = list(adws_dir.glob("adw_*.py")) + + if not scripts: + messages.append("ℹ No ADW scripts found (adw_*.py)") + return messages + + for script in scripts: + is_executable = os.access(script, os.X_OK) + if is_executable: + messages.append(f"✓ Executable: {script.name}") + else: + messages.append(f"⚠ Not executable: {script.name} (run: chmod +x {script})") + + return messages + + +def test_prompt_execution(project_root: str, timeout: int = 30) -> Tuple[bool, str]: + """Try a simple prompt execution to verify setup works. + + Args: + project_root: Path to project root + timeout: Maximum seconds to wait + + Returns: + Tuple of (success, message) + """ + project_path = Path(project_root) + prompt_script = project_path / "adws" / "adw_prompt.py" + + if not prompt_script.exists(): + return (False, "✗ adw_prompt.py not found") + + try: + # Simple test prompt + result = subprocess.run( + [str(prompt_script), "What is 2 + 2?"], + cwd=project_root, + capture_output=True, + text=True, + timeout=timeout + ) + + if result.returncode == 0: + return (True, "✓ Test prompt executed successfully") + else: + error_msg = result.stderr[:200] if result.stderr else "Unknown error" + return (False, f"✗ Test prompt failed: {error_msg}") + + except subprocess.TimeoutExpired: + return (False, f"✗ Test prompt timeout (>{timeout}s)") + except Exception as e: + return (False, f"✗ Error executing test prompt: {e}") + + +def validate_output_structure(project_root: str) -> List[str]: + """Check if output directories are created correctly after execution. 
+ + Args: + project_root: Path to project root + + Returns: + List of validation messages + """ + project_path = Path(project_root) + agents_dir = project_path / "agents" + + if not agents_dir.exists(): + return ["ℹ No agents/ directory yet (created on first execution)"] + + messages = [f"✓ Output directory exists: agents/"] + + # Check for any execution directories + execution_dirs = [d for d in agents_dir.iterdir() if d.is_dir()] + + if not execution_dirs: + messages.append("ℹ No execution outputs yet") + return messages + + messages.append(f"ℹ Found {len(execution_dirs)} execution output(s)") + + # Check structure of first execution + first_exec = execution_dirs[0] + agent_dirs = [d for d in first_exec.iterdir() if d.is_dir()] + + if agent_dirs: + first_agent = agent_dirs[0] + expected_files = [ + "cc_raw_output.jsonl", + "cc_raw_output.json", + "cc_final_object.json", + "custom_summary_output.json" + ] + + for filename in expected_files: + if (first_agent / filename).exists(): + messages.append(f" ✓ Output file: {filename}") + else: + messages.append(f" ⚠ Missing output: {filename}") + + return messages + + +def run_full_validation(project_root: str = ".", verbose: bool = True) -> bool: + """Run complete validation suite. + + Args: + project_root: Path to project root + verbose: Print detailed messages + + Returns: + True if all critical checks pass + """ + project_root = os.path.abspath(project_root) + + if verbose: + print(f"\n{'='*60}") + print(f"ADW Setup Validation") + print(f"{'='*60}") + print(f"Project: {project_root}\n") + + all_passed = True + + # Check 1: Claude Code CLI + if verbose: + print("1. Claude Code CLI") + print("-" * 60) + + success, msg = check_claude_installed() + if verbose: + print(f" {msg}") + if not success: + all_passed = False + + # Check 2: API Key (optional) + if verbose: + print("\n2. API Configuration") + print("-" * 60) + + configured, msg = check_api_key_configured() + if verbose: + print(f" {msg}") + + # Check 3: Directory structure + if verbose: + print("\n3. Directory Structure") + print("-" * 60) + + dir_messages = validate_directory_structure(project_root) + for msg in dir_messages: + if verbose: + print(f" {msg}") + if msg.startswith("✗"): + all_passed = False + + # Check 4: Core files + if verbose: + print("\n4. Core Files") + print("-" * 60) + + file_messages = validate_core_files(project_root) + for msg in file_messages: + if verbose: + print(f" {msg}") + if msg.startswith("✗"): + all_passed = False + + # Check 5: Script permissions + if verbose: + print("\n5. Script Permissions") + print("-" * 60) + + perm_messages = check_scripts_executable(project_root) + for msg in perm_messages: + if verbose: + print(f" {msg}") + if msg.startswith("⚠"): + # Warning but not critical failure + pass + + # Check 6: Output structure (informational) + if verbose: + print("\n6. 
Output Structure") + print("-" * 60) + + output_messages = validate_output_structure(project_root) + for msg in output_messages: + if verbose: + print(f" {msg}") + + # Summary + if verbose: + print(f"\n{'='*60}") + if all_passed: + print("✓ Validation passed - ADW setup is ready!") + else: + print("✗ Validation failed - see errors above") + print(f"{'='*60}\n") + + return all_passed + + +def main(): + """CLI entry point for validation.""" + import argparse + + parser = argparse.ArgumentParser( + description="Validate ADW setup", + formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument( + "project_root", + nargs="?", + default=".", + help="Project root directory (default: current directory)" + ) + parser.add_argument( + "--quiet", + action="store_true", + help="Only show summary" + ) + parser.add_argument( + "--test", + action="store_true", + help="Run test prompt execution (may take 30s)" + ) + + args = parser.parse_args() + + # Run validation + passed = run_full_validation(args.project_root, verbose=not args.quiet) + + # Optionally test execution + if args.test and passed: + print("\nRunning test prompt execution...") + print("-" * 60) + success, msg = test_prompt_execution(args.project_root) + print(f" {msg}") + if not success: + passed = False + + # Exit with appropriate code + sys.exit(0 if passed else 1) + + +if __name__ == "__main__": + main()