Initial commit

Zhongwei Li
2025-11-30 08:28:42 +08:00
commit 8a4be47b6e
43 changed files with 10867 additions and 0 deletions

View File

@@ -0,0 +1,501 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "pydantic",
# "python-dotenv",
# "click",
# "rich",
# ]
# ///
"""
Run chore planning and implementation workflow.
This script runs two slash commands in sequence:
1. /chore - Creates a plan based on the prompt
2. /implement - Implements the plan created by /chore
Usage:
# Method 1: Direct execution (requires uv)
./adws/adw_chore_implement.py "Add error handling to all API endpoints"
# Method 2: Using uv run
uv run adws/adw_chore_implement.py "Refactor database connection logic"
Examples:
# Run with specific model
./adws/adw_chore_implement.py "Add logging to agent.py" --model opus
# Run from a different working directory
./adws/adw_chore_implement.py "Update documentation" --working-dir /path/to/project
# Run with verbose output
./adws/adw_chore_implement.py "Add tests" --verbose
"""
import os
import sys
import json
import re
from pathlib import Path
import click
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.rule import Rule
# Add the adw_modules directory to the path so we can import agent
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules"))
from agent import (
AgentTemplateRequest,
AgentPromptResponse,
execute_template,
generate_short_id,
)
# Output file name constants
OUTPUT_JSONL = "cc_raw_output.jsonl"
OUTPUT_JSON = "cc_raw_output.json"
FINAL_OBJECT_JSON = "cc_final_object.json"
SUMMARY_JSON = "custom_summary_output.json"
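# Each phase writes these files under agents/<adw_id>/<agent_name>/ (the planner and builder agents).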
def extract_plan_path(output: str) -> str:
"""Extract the plan file path from the chore command output.
Looks for patterns like:
- specs/chore-12345678-update-readme.md
- Created plan at: specs/chore-...
- Plan file: specs/chore-...
"""
# Try multiple patterns to find the plan path
patterns = [
r"specs/chore-[a-zA-Z0-9\-]+\.md",
r"Created plan at:\s*(specs/chore-[a-zA-Z0-9\-]+\.md)",
r"Plan file:\s*(specs/chore-[a-zA-Z0-9\-]+\.md)",
r"path.*?:\s*(specs/chore-[a-zA-Z0-9\-]+\.md)",
]
for pattern in patterns:
match = re.search(pattern, output, re.IGNORECASE | re.MULTILINE)
if match:
return match.group(1) if match.groups() else match.group(0)
# If no match found, raise an error
raise ValueError("Could not find plan file path in chore output")
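# Illustrative call (the exact /chore wording may vary; the patterns above only
# require a specs/chore-*.md path to appear somewhere in the output):
#   extract_plan_path("Created plan at: specs/chore-1a2b3c4d-update-readme.md")
#   -> "specs/chore-1a2b3c4d-update-readme.md"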
@click.command()
@click.argument("prompt", required=True)
@click.option(
"--model",
type=click.Choice(["sonnet", "opus", "haiku"]),
default="sonnet",
help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)",
)
@click.option(
"--working-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
help="Working directory for command execution (default: current directory)",
)
def main(
prompt: str,
model: str,
working_dir: str,
):
"""Run chore planning and implementation workflow."""
console = Console()
# Generate a unique ID for this workflow
adw_id = generate_short_id()
# Use current directory if no working directory specified
if not working_dir:
working_dir = os.getcwd()
# Set default agent names
planner_name = "planner"
builder_name = "builder"
console.print(
Panel(
f"[bold blue]ADW Chore & Implement Workflow[/bold blue]\n\n"
f"[cyan]ADW ID:[/cyan] {adw_id}\n"
f"[cyan]Model:[/cyan] {model}\n"
f"[cyan]Working Dir:[/cyan] {working_dir}",
title="[bold blue]🚀 Workflow Configuration[/bold blue]",
border_style="blue",
)
)
console.print()
# Phase 1: Run /chore command
console.print(Rule("[bold yellow]Phase 1: Planning (/chore)[/bold yellow]"))
console.print()
# Create the chore request
chore_request = AgentTemplateRequest(
agent_name=planner_name,
slash_command="/chore",
args=[adw_id, prompt],
adw_id=adw_id,
model=model,
working_dir=working_dir,
)
# Display chore execution info
chore_info_table = Table(show_header=False, box=None, padding=(0, 1))
chore_info_table.add_column(style="bold cyan")
chore_info_table.add_column()
chore_info_table.add_row("ADW ID", adw_id)
chore_info_table.add_row("ADW Name", "adw_chore_implement (planning)")
chore_info_table.add_row("Command", "/chore")
chore_info_table.add_row("Args", f'{adw_id} "{prompt}"')
chore_info_table.add_row("Model", model)
chore_info_table.add_row("Agent", planner_name)
console.print(
Panel(
chore_info_table,
title="[bold blue]🚀 Chore Inputs[/bold blue]",
border_style="blue",
)
)
console.print()
plan_path = None
try:
# Execute the chore command
with console.status("[bold yellow]Creating plan...[/bold yellow]"):
chore_response = execute_template(chore_request)
# Display the chore result
if chore_response.success:
# Success panel
console.print(
Panel(
chore_response.output,
title="[bold green]✅ Planning Success[/bold green]",
border_style="green",
padding=(1, 2),
)
)
# Extract the plan path from the output
try:
plan_path = extract_plan_path(chore_response.output)
console.print(f"\n[bold cyan]Plan created at:[/bold cyan] {plan_path}")
except ValueError as e:
console.print(
Panel(
f"[bold red]Could not extract plan path: {str(e)}[/bold red]\n\n"
"The chore command succeeded but the plan file path could not be found in the output.",
title="[bold red]❌ Parse Error[/bold red]",
border_style="red",
)
)
sys.exit(3)
else:
# Error panel
console.print(
Panel(
chore_response.output,
title="[bold red]❌ Planning Failed[/bold red]",
border_style="red",
padding=(1, 2),
)
)
console.print(
"\n[bold red]Workflow aborted: Planning phase failed[/bold red]"
)
sys.exit(1)
# Save chore phase summary
chore_output_dir = f"./agents/{adw_id}/{planner_name}"
chore_summary_path = f"{chore_output_dir}/{SUMMARY_JSON}"
with open(chore_summary_path, "w") as f:
json.dump(
{
"phase": "planning",
"adw_id": adw_id,
"slash_command": "/chore",
"args": [adw_id, prompt],
"path_to_slash_command_prompt": ".claude/commands/chore.md",
"model": model,
"working_dir": working_dir,
"success": chore_response.success,
"session_id": chore_response.session_id,
"retry_code": chore_response.retry_code,
"output": chore_response.output,
"plan_path": plan_path,
},
f,
indent=2,
)
# Show chore output files
console.print()
# Files saved panel for chore phase
chore_files_table = Table(show_header=True, box=None)
chore_files_table.add_column("File Type", style="bold cyan")
chore_files_table.add_column("Path", style="dim")
chore_files_table.add_column("Description", style="italic")
chore_files_table.add_row(
"JSONL Stream",
f"{chore_output_dir}/{OUTPUT_JSONL}",
"Raw streaming output from Claude Code",
)
chore_files_table.add_row(
"JSON Array",
f"{chore_output_dir}/{OUTPUT_JSON}",
"All messages as a JSON array",
)
chore_files_table.add_row(
"Final Object",
f"{chore_output_dir}/{FINAL_OBJECT_JSON}",
"Last message entry (final result)",
)
chore_files_table.add_row(
"Summary",
chore_summary_path,
"High-level execution summary with metadata",
)
console.print(
Panel(
chore_files_table,
title="[bold blue]📄 Planning Output Files[/bold blue]",
border_style="blue",
)
)
console.print()
# Phase 2: Run /implement command
console.print(
Rule("[bold yellow]Phase 2: Implementation (/implement)[/bold yellow]")
)
console.print()
# Create the implement request
implement_request = AgentTemplateRequest(
agent_name=builder_name,
slash_command="/implement",
args=[plan_path],
adw_id=adw_id,
model=model,
working_dir=working_dir,
)
# Display implement execution info
implement_info_table = Table(show_header=False, box=None, padding=(0, 1))
implement_info_table.add_column(style="bold cyan")
implement_info_table.add_column()
implement_info_table.add_row("ADW ID", adw_id)
implement_info_table.add_row("ADW Name", "adw_chore_implement (building)")
implement_info_table.add_row("Command", "/implement")
implement_info_table.add_row("Args", plan_path)
implement_info_table.add_row("Model", model)
implement_info_table.add_row("Agent", builder_name)
console.print(
Panel(
implement_info_table,
title="[bold blue]🚀 Implement Inputs[/bold blue]",
border_style="blue",
)
)
console.print()
# Execute the implement command
with console.status("[bold yellow]Implementing plan...[/bold yellow]"):
implement_response = execute_template(implement_request)
# Display the implement result
if implement_response.success:
# Success panel
console.print(
Panel(
implement_response.output,
title="[bold green]✅ Implementation Success[/bold green]",
border_style="green",
padding=(1, 2),
)
)
if implement_response.session_id:
console.print(
f"\n[bold cyan]Session ID:[/bold cyan] {implement_response.session_id}"
)
else:
# Error panel
console.print(
Panel(
implement_response.output,
title="[bold red]❌ Implementation Failed[/bold red]",
border_style="red",
padding=(1, 2),
)
)
# Save implement phase summary
implement_output_dir = f"./agents/{adw_id}/{builder_name}"
implement_summary_path = f"{implement_output_dir}/{SUMMARY_JSON}"
with open(implement_summary_path, "w") as f:
json.dump(
{
"phase": "implementation",
"adw_id": adw_id,
"slash_command": "/implement",
"args": [plan_path],
"path_to_slash_command_prompt": ".claude/commands/implement.md",
"model": model,
"working_dir": working_dir,
"success": implement_response.success,
"session_id": implement_response.session_id,
"retry_code": implement_response.retry_code,
"output": implement_response.output,
},
f,
indent=2,
)
# Show implement output files
console.print()
# Files saved panel for implement phase
implement_files_table = Table(show_header=True, box=None)
implement_files_table.add_column("File Type", style="bold cyan")
implement_files_table.add_column("Path", style="dim")
implement_files_table.add_column("Description", style="italic")
implement_files_table.add_row(
"JSONL Stream",
f"{implement_output_dir}/{OUTPUT_JSONL}",
"Raw streaming output from Claude Code",
)
implement_files_table.add_row(
"JSON Array",
f"{implement_output_dir}/{OUTPUT_JSON}",
"All messages as a JSON array",
)
implement_files_table.add_row(
"Final Object",
f"{implement_output_dir}/{FINAL_OBJECT_JSON}",
"Last message entry (final result)",
)
implement_files_table.add_row(
"Summary",
implement_summary_path,
"High-level execution summary with metadata",
)
console.print(
Panel(
implement_files_table,
title="[bold blue]📄 Implementation Output Files[/bold blue]",
border_style="blue",
)
)
# Show workflow summary
console.print()
console.print(Rule("[bold blue]Workflow Summary[/bold blue]"))
console.print()
summary_table = Table(show_header=True, box=None)
summary_table.add_column("Phase", style="bold cyan")
summary_table.add_column("Status", style="bold")
summary_table.add_column("Output Directory", style="dim")
# Planning phase row
planning_status = "✅ Success" if chore_response.success else "❌ Failed"
summary_table.add_row(
"Planning (/chore)",
planning_status,
f"./agents/{adw_id}/{planner_name}/",
)
# Implementation phase row
implement_status = "✅ Success" if implement_response.success else "❌ Failed"
summary_table.add_row(
"Implementation (/implement)",
implement_status,
f"./agents/{adw_id}/{builder_name}/",
)
console.print(summary_table)
# Create overall workflow summary
workflow_summary_path = f"./agents/{adw_id}/workflow_summary.json"
os.makedirs(f"./agents/{adw_id}", exist_ok=True)
with open(workflow_summary_path, "w") as f:
json.dump(
{
"workflow": "chore_implement",
"adw_id": adw_id,
"prompt": prompt,
"model": model,
"working_dir": working_dir,
"plan_path": plan_path,
"phases": {
"planning": {
"success": chore_response.success,
"session_id": chore_response.session_id,
"agent": planner_name,
"output_dir": f"./agents/{adw_id}/{planner_name}/",
},
"implementation": {
"success": implement_response.success,
"session_id": implement_response.session_id,
"agent": builder_name,
"output_dir": f"./agents/{adw_id}/{builder_name}/",
},
},
"overall_success": chore_response.success
and implement_response.success,
},
f,
indent=2,
)
console.print(
f"\n[bold cyan]Workflow summary:[/bold cyan] {workflow_summary_path}"
)
console.print()
# Exit with appropriate code
if chore_response.success and implement_response.success:
console.print(
"[bold green]✅ Workflow completed successfully![/bold green]"
)
sys.exit(0)
else:
console.print(
"[bold yellow]⚠️ Workflow completed with errors[/bold yellow]"
)
sys.exit(1)
except Exception as e:
console.print(
Panel(
f"[bold red]{str(e)}[/bold red]",
title="[bold red]❌ Unexpected Error[/bold red]",
border_style="red",
)
)
sys.exit(2)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,436 @@
"""
Claude Code SDK - The SDK Way
This module demonstrates the idiomatic way to use the Claude Code Python SDK
for programmatic agent interactions. It focuses on clean, type-safe patterns
using the SDK's native abstractions.
Key Concepts:
- Use `query()` for one-shot operations
- Use `ClaudeSDKClient` for interactive sessions
- Work directly with SDK message types
- Leverage async/await for clean concurrency
- Configure options for your use case
Example Usage:
# Simple query
async for message in query(prompt="What is 2 + 2?"):
if isinstance(message, AssistantMessage):
print(extract_text(message))
# With options
options = ClaudeCodeOptions(
model="claude-sonnet-4-20250514",
allowed_tools=["Read", "Write"],
permission_mode="bypassPermissions"
)
async for message in query(prompt="Create hello.py", options=options):
process_message(message)
# Interactive session
async with create_session() as client:
await client.query("Debug this error")
async for msg in client.receive_response():
handle_message(msg)
"""
import logging
from pathlib import Path
from typing import AsyncIterator, Optional, List
from contextlib import asynccontextmanager
# Import all SDK components we'll use
from claude_code_sdk import (
# Main functions
query,
ClaudeSDKClient,
# Configuration
ClaudeCodeOptions,
PermissionMode,
# Message types
Message,
AssistantMessage,
UserMessage,
SystemMessage,
ResultMessage,
# Content blocks
ContentBlock,
TextBlock,
ToolUseBlock,
ToolResultBlock,
# Errors
ClaudeSDKError,
CLIConnectionError,
CLINotFoundError,
ProcessError,
)
# Set up logging
logger = logging.getLogger(__name__)
# ============================================================================
# UTILITY FUNCTIONS
# ============================================================================
def extract_text(message: AssistantMessage) -> str:
"""Extract all text content from an assistant message.
The SDK way: Work directly with typed message objects.
Args:
message: AssistantMessage with content blocks
Returns:
Concatenated text from all text blocks
"""
texts = []
for block in message.content:
if isinstance(block, TextBlock):
texts.append(block.text)
return "\n".join(texts)
def extract_tool_uses(message: AssistantMessage) -> List[ToolUseBlock]:
"""Extract all tool use blocks from an assistant message.
Args:
message: AssistantMessage with content blocks
Returns:
List of ToolUseBlock objects
"""
return [
block for block in message.content
if isinstance(block, ToolUseBlock)
]
def get_result_text(messages: List[Message]) -> str:
"""Extract final result text from a list of messages.
Args:
messages: List of messages from a query
Returns:
Result text or assistant responses
"""
# First check for ResultMessage
for msg in reversed(messages):
if isinstance(msg, ResultMessage) and msg.result:
return msg.result
# Otherwise collect assistant text
texts = []
for msg in messages:
if isinstance(msg, AssistantMessage):
text = extract_text(msg)
if text:
texts.append(text)
return "\n".join(texts)
# ============================================================================
# ONE-SHOT QUERIES (The Simple SDK Way)
# ============================================================================
async def simple_query(prompt: str, model: str = "claude-sonnet-4-5-20250929") -> str:
"""Simple one-shot query with text response.
The SDK way: Direct use of query() with minimal setup.
Args:
prompt: What to ask Claude
model: Which model to use
Returns:
Text response from Claude
Example:
response = await simple_query("What is 2 + 2?")
print(response) # "4" or "2 + 2 equals 4"
"""
options = ClaudeCodeOptions(model=model)
texts = []
async for message in query(prompt=prompt, options=options):
if isinstance(message, AssistantMessage):
text = extract_text(message)
if text:
texts.append(text)
return "\n".join(texts) if texts else "No response"
async def query_with_tools(
prompt: str,
allowed_tools: List[str],
working_dir: Optional[Path] = None
) -> AsyncIterator[Message]:
"""Query with specific tools enabled.
The SDK way: Configure options for your use case.
Args:
prompt: What to ask Claude
allowed_tools: List of tool names to allow
working_dir: Optional working directory
Yields:
SDK message objects
Example:
async for msg in query_with_tools(
"Create a Python script",
allowed_tools=["Write", "Read"]
):
if isinstance(msg, AssistantMessage):
for block in msg.content:
if isinstance(block, ToolUseBlock):
print(f"Using tool: {block.name}")
"""
options = ClaudeCodeOptions(
allowed_tools=allowed_tools,
cwd=str(working_dir) if working_dir else None,
permission_mode="bypassPermissions" # For automated workflows
)
async for message in query(prompt=prompt, options=options):
yield message
async def collect_query_response(
prompt: str,
options: Optional[ClaudeCodeOptions] = None
) -> tuple[List[Message], Optional[ResultMessage]]:
"""Collect all messages from a query.
The SDK way: Async iteration with type checking.
Args:
prompt: What to ask Claude
options: Optional configuration
Returns:
Tuple of (all_messages, result_message)
Example:
messages, result = await collect_query_response("List files")
if result and not result.is_error:
print("Success!")
for msg in messages:
process_message(msg)
"""
if options is None:
options = ClaudeCodeOptions()
messages = []
result = None
async for message in query(prompt=prompt, options=options):
messages.append(message)
if isinstance(message, ResultMessage):
result = message
return messages, result
# ============================================================================
# INTERACTIVE SESSIONS (The SDK Client Way)
# ============================================================================
@asynccontextmanager
async def create_session(
model: str = "claude-sonnet-4-5-20250929",
working_dir: Optional[Path] = None
):
"""Create an interactive session with Claude.
The SDK way: Use context managers for resource management.
Args:
model: Which model to use
working_dir: Optional working directory
Yields:
Connected ClaudeSDKClient
Example:
async with create_session() as client:
await client.query("Hello")
async for msg in client.receive_response():
print(msg)
"""
options = ClaudeCodeOptions(
model=model,
cwd=str(working_dir) if working_dir else None,
permission_mode="bypassPermissions"
)
client = ClaudeSDKClient(options=options)
await client.connect()
try:
yield client
finally:
await client.disconnect()
async def interactive_conversation(prompts: List[str]) -> List[Message]:
"""Have an interactive conversation with Claude.
The SDK way: Bidirectional communication with the client.
Args:
prompts: List of prompts to send in sequence
Returns:
All messages from the conversation
Example:
messages = await interactive_conversation([
"What's the weather like?",
"Tell me more about clouds",
"How do they form?"
])
"""
all_messages = []
async with create_session() as client:
for prompt in prompts:
# Send prompt
await client.query(prompt)
# Collect response
async for msg in client.receive_response():
all_messages.append(msg)
if isinstance(msg, ResultMessage):
break
return all_messages
# ============================================================================
# ERROR HANDLING (The SDK Way)
# ============================================================================
async def safe_query(prompt: str) -> tuple[Optional[str], Optional[str]]:
"""Query with comprehensive error handling.
The SDK way: Handle specific SDK exceptions.
Args:
prompt: What to ask Claude
Returns:
Tuple of (response_text, error_message)
Example:
response, error = await safe_query("Help me debug this")
if error:
print(f"Error: {error}")
else:
print(f"Response: {response}")
"""
try:
response = await simple_query(prompt)
return response, None
except CLINotFoundError:
return None, "Claude Code CLI not found. Install with: npm install -g @anthropic-ai/claude-code"
except CLIConnectionError as e:
return None, f"Connection error: {str(e)}"
except ProcessError as e:
return None, f"Process error (exit code {e.exit_code}): {str(e)}"
except ClaudeSDKError as e:
return None, f"SDK error: {str(e)}"
except Exception as e:
return None, f"Unexpected error: {str(e)}"
# ============================================================================
# ADVANCED PATTERNS (The SDK Way)
# ============================================================================
async def stream_with_progress(
prompt: str,
on_text: Optional[callable] = None,
on_tool: Optional[callable] = None
) -> ResultMessage:
"""Stream query with progress callbacks.
The SDK way: Process messages as they arrive.
Args:
prompt: What to ask Claude
on_text: Callback for text blocks (optional)
on_tool: Callback for tool use blocks (optional)
Returns:
Final ResultMessage
Example:
result = await stream_with_progress(
"Analyze this codebase",
on_text=lambda text: print(f"Claude: {text}"),
on_tool=lambda tool: print(f"Using: {tool.name}")
)
print(f"Cost: ${result.total_cost_usd:.4f}")
"""
result = None
async for message in query(prompt=prompt):
if isinstance(message, AssistantMessage):
for block in message.content:
if isinstance(block, TextBlock) and on_text:
on_text(block.text)
elif isinstance(block, ToolUseBlock) and on_tool:
on_tool(block)
elif isinstance(message, ResultMessage):
result = message
return result
async def query_with_timeout(prompt: str, timeout_seconds: float = 30) -> Optional[str]:
"""Query with timeout protection.
The SDK way: Use asyncio for timeout control.
Args:
prompt: What to ask Claude
timeout_seconds: Maximum time to wait
Returns:
Response text or None if timeout
Example:
response = await query_with_timeout("Complex analysis", timeout_seconds=60)
if response is None:
print("Query timed out")
"""
import asyncio
try:
# Create the query task
async def _query():
return await simple_query(prompt)
# Run with timeout
response = await asyncio.wait_for(_query(), timeout=timeout_seconds)
return response
except asyncio.TimeoutError:
logger.warning(f"Query timed out after {timeout_seconds} seconds")
return None
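# These helpers are async, so synchronous callers drive them with asyncio.
# A minimal sketch (assumes the Claude Code CLI is installed and
# ANTHROPIC_API_KEY is configured):
#
#   import asyncio
#   response, error = asyncio.run(safe_query("What is 2 + 2?"))
#   print(error if error else response)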

View File

@@ -0,0 +1,470 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "pydantic",
# "python-dotenv",
# "click",
# "rich",
# "claude-code-sdk",
# "anyio",
# ]
# ///
"""
Run Claude Code prompts using the official Python SDK.
This ADW demonstrates using the Claude Code Python SDK for both one-shot
and interactive sessions. The SDK provides better type safety, error handling,
and a more Pythonic interface compared to subprocess-based implementations.
Usage:
# One-shot query (default)
./adws/adw_sdk_prompt.py "Hello Claude Code"
# Interactive session
./adws/adw_sdk_prompt.py --interactive
# Resume a previous session
./adws/adw_sdk_prompt.py --interactive --session-id abc123
# With specific model
./adws/adw_sdk_prompt.py "Create a FastAPI app" --model opus
# From different directory
./adws/adw_sdk_prompt.py "List files here" --working-dir /path/to/project
Examples:
# Simple query
./adws/adw_sdk_prompt.py "Explain async/await in Python"
# Interactive debugging session
./adws/adw_sdk_prompt.py --interactive --context "Debugging a memory leak"
# Resume session with context
./adws/adw_sdk_prompt.py --interactive --session-id abc123 --context "Continue debugging"
# Query with tools
./adws/adw_sdk_prompt.py "Create a Python web server" --tools Read,Write,Bash
Key Features:
- Uses official Claude Code Python SDK
- Supports both one-shot and interactive modes
- Better error handling with typed exceptions
- Native async/await support
- Clean message type handling
"""
import os
import sys
import json
import asyncio
from pathlib import Path
from typing import Optional, List
import click
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.live import Live
from rich.spinner import Spinner
from rich.text import Text
from rich.prompt import Prompt
# Add the adw_modules directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules"))
# Import SDK functions from our clean module
from agent_sdk import (
simple_query,
query_with_tools,
collect_query_response,
create_session,
safe_query,
stream_with_progress,
extract_text,
extract_tool_uses,
)
# Import SDK types
from claude_code_sdk import (
ClaudeCodeOptions,
AssistantMessage,
ResultMessage,
TextBlock,
ToolUseBlock,
)
def generate_short_id() -> str:
"""Generate a short ID for tracking."""
import uuid
return str(uuid.uuid4())[:8]
async def run_one_shot_query(
prompt: str,
model: str,
working_dir: str,
allowed_tools: Optional[List[str]] = None,
session_id: Optional[str] = None,
) -> None:
"""Run a one-shot query using the SDK."""
console = Console()
adw_id = generate_short_id()
# Display execution info
info_table = Table(show_header=False, box=None, padding=(0, 1))
info_table.add_column(style="bold cyan")
info_table.add_column()
info_table.add_row("ADW ID", adw_id)
info_table.add_row("Mode", "One-shot Query")
info_table.add_row("Prompt", prompt)
info_table.add_row("Model", model)
info_table.add_row("Working Dir", working_dir)
if allowed_tools:
info_table.add_row("Tools", ", ".join(allowed_tools))
if session_id:
info_table.add_row("Session ID", session_id)
info_table.add_row("[bold green]SDK[/bold green]", "Claude Code Python SDK")
console.print(
Panel(
info_table,
title="[bold blue]🚀 SDK Query Execution[/bold blue]",
border_style="blue",
)
)
console.print()
try:
# Execute query based on whether tools are needed
with console.status("[bold yellow]Executing via SDK...[/bold yellow]"):
if allowed_tools:
# Query with tools
options = ClaudeCodeOptions(
model=model,
allowed_tools=allowed_tools,
cwd=working_dir,
permission_mode="bypassPermissions",
)
if session_id:
options.resume = session_id
messages, result = await collect_query_response(prompt, options=options)
# Extract response text
response_text = ""
tool_uses = []
for msg in messages:
if isinstance(msg, AssistantMessage):
text = extract_text(msg)
if text:
response_text += text + "\n"
for tool in extract_tool_uses(msg):
tool_uses.append(f"{tool.name} ({tool.id[:8]}...)")
success = result is not None and not result.is_error
else:
# Simple query
response_text, error = await safe_query(prompt)
success = error is None
tool_uses = []
if error:
response_text = error
# Display result
if success:
console.print(
Panel(
response_text.strip(),
title="[bold green]✅ SDK Success[/bold green]",
border_style="green",
padding=(1, 2),
)
)
if tool_uses:
console.print(
f"\n[bold cyan]Tools used:[/bold cyan] {', '.join(tool_uses)}"
)
else:
console.print(
Panel(
response_text,
title="[bold red]❌ SDK Error[/bold red]",
border_style="red",
padding=(1, 2),
)
)
# Show cost and session info if available
if "result" in locals() and result:
if result.total_cost_usd:
console.print(
f"\n[bold cyan]Cost:[/bold cyan] ${result.total_cost_usd:.4f}"
)
if hasattr(result, 'session_id') and result.session_id:
console.print(
f"[bold cyan]Session ID:[/bold cyan] {result.session_id}"
)
console.print(
f"[dim]Resume with: --session-id {result.session_id}[/dim]"
)
except Exception as e:
console.print(
Panel(
f"[bold red]{str(e)}[/bold red]",
title="[bold red]❌ Unexpected Error[/bold red]",
border_style="red",
)
)
async def run_interactive_session(
model: str,
working_dir: str,
context: Optional[str] = None,
session_id: Optional[str] = None,
) -> None:
"""Run an interactive session using the SDK."""
console = Console()
adw_id = generate_short_id()
# Display session info
info_table = Table(show_header=False, box=None, padding=(0, 1))
info_table.add_column(style="bold cyan")
info_table.add_column()
info_table.add_row("ADW ID", adw_id)
info_table.add_row("Mode", "Interactive Session")
info_table.add_row("Model", model)
info_table.add_row("Working Dir", working_dir)
if context:
info_table.add_row("Context", context)
if session_id:
info_table.add_row("Session ID", session_id)
info_table.add_row("[bold green]SDK[/bold green]", "Claude Code Python SDK")
console.print(
Panel(
info_table,
title="[bold blue]💬 SDK Interactive Session[/bold blue]",
border_style="blue",
)
)
console.print()
# Instructions
console.print("[bold yellow]Interactive Mode[/bold yellow]")
console.print("Commands: 'exit' or 'quit' to end session")
console.print("Just type your questions or requests\n")
# Start session
options = ClaudeCodeOptions(
model=model,
cwd=working_dir,
permission_mode="bypassPermissions",
)
if session_id:
options.resume = session_id
from claude_code_sdk import ClaudeSDKClient
client = ClaudeSDKClient(options=options)
await client.connect()
# Track session ID from results throughout the session
session_id_from_result = None
try:
# Send initial context if provided
if context:
console.print(f"[dim]Setting context: {context}[/dim]\n")
await client.query(f"Context: {context}")
# Consume the context response
async for msg in client.receive_response():
if isinstance(msg, AssistantMessage):
text = extract_text(msg)
if text:
console.print(f"[dim]Claude: {text}[/dim]\n")
# Interactive loop
while True:
# Get user input
try:
user_input = Prompt.ask("[bold cyan]You[/bold cyan]")
except (EOFError, KeyboardInterrupt):
console.print("\n[yellow]Session interrupted[/yellow]")
break
if user_input.lower() in ["exit", "quit"]:
break
# Send to Claude
await client.query(user_input)
# Show response with progress
console.print()
response_parts = []
tool_uses = []
cost = None
session_id_from_result = None
with Live(
Spinner("dots", text="Thinking..."),
console=console,
refresh_per_second=4,
):
async for msg in client.receive_response():
if isinstance(msg, AssistantMessage):
text = extract_text(msg)
if text:
response_parts.append(text)
for tool in extract_tool_uses(msg):
tool_uses.append(f"{tool.name}")
elif isinstance(msg, ResultMessage):
if msg.total_cost_usd:
cost = msg.total_cost_usd
if hasattr(msg, 'session_id') and msg.session_id:
session_id_from_result = msg.session_id
# Display response
if response_parts:
console.print("[bold green]Claude:[/bold green]")
for part in response_parts:
console.print(part)
if tool_uses:
console.print(f"\n[dim]Tools used: {', '.join(tool_uses)}[/dim]")
if cost:
console.print(f"[dim]Cost: ${cost:.4f}[/dim]")
if session_id_from_result:
console.print(f"[dim]Session ID: {session_id_from_result}[/dim]")
console.print()
finally:
await client.disconnect()
console.print("\n[bold green]Session ended[/bold green]")
console.print(f"[dim]ADW ID: {adw_id}[/dim]")
if 'session_id_from_result' in locals() and session_id_from_result:
console.print(f"[bold cyan]Session ID:[/bold cyan] {session_id_from_result}")
console.print(f"[dim]Resume with: ./adws/adw_sdk_prompt.py --interactive --session-id {session_id_from_result}[/dim]")
@click.command()
@click.argument("prompt", required=False)
@click.option(
"--interactive",
"-i",
is_flag=True,
help="Start an interactive session instead of one-shot query",
)
@click.option(
"--model",
type=click.Choice(["sonnet", "opus", "haiku"]),
default="sonnet",
help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)",
)
@click.option(
"--working-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
help="Working directory (default: current directory)",
)
@click.option(
"--tools",
help="Comma-separated list of allowed tools (e.g., Read,Write,Bash)",
)
@click.option(
"--context",
help="Context for interactive session (e.g., 'Debugging a memory leak')",
)
@click.option(
"--session-id",
help="Resume a previous session by its ID",
)
def main(
prompt: Optional[str],
interactive: bool,
model: str,
working_dir: Optional[str],
tools: Optional[str],
context: Optional[str],
session_id: Optional[str],
):
"""Run Claude Code prompts using the Python SDK.
Examples:
# One-shot query
adw_sdk_prompt.py "What is 2 + 2?"
# Interactive session
adw_sdk_prompt.py --interactive
# Resume session
adw_sdk_prompt.py --interactive --session-id abc123
# Query with tools
adw_sdk_prompt.py "Create hello.py" --tools Write,Read
"""
if not working_dir:
working_dir = os.getcwd()
# Convert model names
model_map = {
"sonnet": "claude-sonnet-4-5-20250929",
"opus": "claude-opus-4-20250514",
"haiku": "claude-haiku-4-5-20251001"
}
full_model = model_map.get(model, model)
# Parse tools if provided
allowed_tools = None
if tools:
allowed_tools = [t.strip() for t in tools.split(",")]
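# e.g. --tools "Read,Write,Bash" -> ["Read", "Write", "Bash"]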
# Run appropriate mode
if interactive:
if prompt:
console = Console()
console.print(
"[yellow]Warning: Prompt ignored in interactive mode[/yellow]\n"
)
asyncio.run(
run_interactive_session(
model=full_model,
working_dir=working_dir,
context=context,
session_id=session_id,
)
)
else:
if not prompt:
console = Console()
console.print("[red]Error: Prompt required for one-shot mode[/red]")
console.print("Use --interactive for interactive session")
sys.exit(1)
asyncio.run(
run_one_shot_query(
prompt=prompt,
model=full_model,
working_dir=working_dir,
allowed_tools=allowed_tools,
session_id=session_id,
)
)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,247 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "pydantic",
# "python-dotenv",
# "click",
# "rich",
# ]
# ///
"""
Run Claude Code slash commands from the command line.
Usage:
# Method 1: Direct execution (requires uv)
./adws/adw_slash_command.py /chore "Update documentation"
# Method 2: Using uv run
uv run adws/adw_slash_command.py /implement specs/<name-of-spec>.md
uv run adws/adw_slash_command.py /start
Examples:
# Run a slash command
./adws/adw_slash_command.py /chore "Add logging to agent.py"
# Run with specific model
./adws/adw_slash_command.py /implement plan.md --model opus
# Run from a different working directory
./adws/adw_slash_command.py /test --working-dir /path/to/project
# Use custom agent name
./adws/adw_slash_command.py /review --agent-name reviewer
"""
import os
import sys
import json
from pathlib import Path
import click
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
# Add the adw_modules directory to the path so we can import agent
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules"))
from agent import (
AgentTemplateRequest,
AgentPromptResponse,
execute_template,
generate_short_id,
)
# Output file name constants
OUTPUT_JSONL = "cc_raw_output.jsonl"
OUTPUT_JSON = "cc_raw_output.json"
FINAL_OBJECT_JSON = "cc_final_object.json"
SUMMARY_JSON = "custom_summary_output.json"
@click.command()
@click.argument("slash_command", required=True)
@click.argument("args", nargs=-1) # Accept multiple optional arguments
@click.option(
"--model",
type=click.Choice(["sonnet", "opus", "haiku"]),
default="sonnet",
help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)",
)
@click.option(
"--working-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
help="Working directory for command execution (default: current directory)",
)
@click.option(
"--agent-name",
default="executor",
help="Agent name for tracking (default: executor)",
)
def main(
slash_command: str,
args: tuple,
model: str,
working_dir: str,
agent_name: str,
):
"""Run Claude Code slash commands from the command line."""
console = Console()
# Generate a unique ID for this execution
adw_id = generate_short_id()
# Use current directory if no working directory specified
if not working_dir:
working_dir = os.getcwd()
# Create the template request
request = AgentTemplateRequest(
agent_name=agent_name,
slash_command=slash_command,
args=list(args), # Convert tuple to list
adw_id=adw_id,
model=model,
working_dir=working_dir,
)
# Create execution info table
info_table = Table(show_header=False, box=None, padding=(0, 1))
info_table.add_column(style="bold cyan")
info_table.add_column()
info_table.add_row("ADW ID", adw_id)
info_table.add_row("ADW Name", "adw_slash_command")
info_table.add_row("Command", slash_command)
info_table.add_row("Args", " ".join(args) if args else "(none)")
info_table.add_row("Model", model)
info_table.add_row("Working Dir", working_dir)
console.print(
Panel(
info_table,
title="[bold blue]🚀 Inputs[/bold blue]",
border_style="blue",
)
)
console.print()
try:
# Execute the slash command
with console.status("[bold yellow]Executing command...[/bold yellow]"):
response = execute_template(request)
# Display the result
if response.success:
# Success panel
result_panel = Panel(
response.output,
title="[bold green]✅ Success[/bold green]",
border_style="green",
padding=(1, 2),
)
console.print(result_panel)
if response.session_id:
console.print(
f"\n[bold cyan]Session ID:[/bold cyan] {response.session_id}"
)
else:
# Error panel
error_panel = Panel(
response.output,
title="[bold red]❌ Failed[/bold red]",
border_style="red",
padding=(1, 2),
)
console.print(error_panel)
if response.retry_code != "none":
console.print(
f"\n[bold yellow]Retry code:[/bold yellow] {response.retry_code}"
)
# Show output file info
console.print()
# Output files are in agents/<adw_id>/<agent_name>/
output_dir = f"./agents/{adw_id}/{agent_name}"
# Create the simple JSON summary file
simple_json_output = f"{output_dir}/{SUMMARY_JSON}"
# Determine the template file path
command_name = slash_command.lstrip("/") # Remove leading slash
path_to_slash_command_prompt = f".claude/commands/{command_name}.md"
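# e.g. "/chore" -> ".claude/commands/chore.md"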
with open(simple_json_output, "w") as f:
json.dump(
{
"adw_id": adw_id,
"slash_command": slash_command,
"args": list(args),
"path_to_slash_command_prompt": path_to_slash_command_prompt,
"model": model,
"working_dir": working_dir,
"success": response.success,
"session_id": response.session_id,
"retry_code": response.retry_code,
"output": response.output,
},
f,
indent=2,
)
# Files saved panel
files_table = Table(show_header=True, box=None)
files_table.add_column("File Type", style="bold cyan")
files_table.add_column("Path", style="dim")
files_table.add_column("Description", style="italic")
files_table.add_row(
"JSONL Stream",
f"{output_dir}/{OUTPUT_JSONL}",
"Raw streaming output from Claude Code",
)
files_table.add_row(
"JSON Array",
f"{output_dir}/{OUTPUT_JSON}",
"All messages as a JSON array",
)
files_table.add_row(
"Final Object",
f"{output_dir}/{FINAL_OBJECT_JSON}",
"Last message entry (final result)",
)
files_table.add_row(
"Summary",
simple_json_output,
"High-level execution summary with metadata",
)
console.print(
Panel(
files_table,
title="[bold blue]📄 Output Files[/bold blue]",
border_style="blue",
)
)
# Exit with appropriate code
sys.exit(0 if response.success else 1)
except Exception as e:
console.print(
Panel(
f"[bold red]{str(e)}[/bold red]",
title="[bold red]❌ Unexpected Error[/bold red]",
border_style="red",
)
)
sys.exit(2)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,109 @@
# Feature Planning
Create a plan to implement the feature using the specified markdown `Plan Format`. Research the codebase and create a thorough plan.
## Variables
adw_id: $1
prompt: $2
## Instructions
- If the adw_id or prompt is not provided, stop and ask the user to provide them.
- Create a plan to implement the feature described in the `prompt`
- The plan should be comprehensive, well-designed, and follow existing patterns
- Create the plan in the `specs/` directory with filename: `feature-{adw_id}-{descriptive-name}.md`
- Replace `{descriptive-name}` with a short, descriptive name based on the feature (e.g., "add-agent-logging", "implement-retry-logic", "create-workflow-api")
- Research the codebase starting with `README.md`
- Replace every <placeholder> in the `Plan Format` with the requested value
- Use your reasoning model: THINK HARD about the feature requirements, design, and implementation approach
- Follow existing patterns and conventions in the codebase
- Design for extensibility and maintainability
## Codebase Structure
- `README.md` - Project overview and instructions (start here)
- `adws/` - AI Developer Workflow scripts and modules
- `apps/` - Application layer you'll be working in
- `.claude/commands/` - Claude command templates
- `specs/` - Specification and plan documents
## Plan Format
```md
# Feature: <feature name>
## Metadata
adw_id: `{adw_id}`
prompt: `{prompt}`
## Feature Description
<describe the feature in detail, including its purpose and value to users>
## User Story
As a <type of user>
I want to <action/goal>
So that <benefit/value>
## Problem Statement
<clearly define the specific problem or opportunity this feature addresses>
## Solution Statement
<describe the proposed solution approach and how it solves the problem>
## Relevant Files
Use these files to implement the feature:
<list files relevant to the feature with bullet points explaining why. Include new files to be created under an h3 'New Files' section if needed>
## Implementation Plan
### Phase 1: Foundation
<describe the foundational work needed before implementing the main feature>
### Phase 2: Core Implementation
<describe the main implementation work for the feature>
### Phase 3: Integration
<describe how the feature will integrate with existing functionality>
## Step by Step Tasks
IMPORTANT: Execute every step in order, top to bottom.
<list step by step tasks as h3 headers with bullet points. Start with foundational changes then move to specific changes. Include creating tests throughout the implementation process>
### 1. <First Task Name>
- <specific action>
- <specific action>
### 2. <Second Task Name>
- <specific action>
- <specific action>
<continue with additional tasks as needed>
## Testing Strategy
### Unit Tests
<describe unit tests needed for the feature>
### Edge Cases
<list edge cases that need to be tested>
## Acceptance Criteria
<list specific, measurable criteria that must be met for the feature to be considered complete>
## Validation Commands
Execute these commands to validate the feature is complete:
<list specific commands to validate the work. Be precise about what to run>
- Example: `uv run python -m py_compile apps/*.py` - Test to ensure the code compiles
## Notes
<optional additional context, future considerations, or dependencies. If new libraries are needed, specify using `uv add`>
```
## Feature
Use the feature description from the `prompt` variable.
## Report
Return the path to the plan file created.

View File

@@ -0,0 +1,12 @@
# Prime
Execute the `Run`, `Read`, and `Report` sections to understand the codebase, then summarize your understanding.
## Run
git ls-files
## Read
README.md
adws/README.md
## Report
Summarize your understanding of the codebase.

View File

@@ -0,0 +1,9 @@
# Start your applications
## Instructions
- Run the commands in the `Run` section below top to bottom.
## Run
uv run apps/main.py
bun run apps/main.ts

View File

@@ -0,0 +1,8 @@
# (REQUIRED) Anthropic Configuration to run Claude Code in programmatic mode
ANTHROPIC_API_KEY=
# (Optional) Claude Code Path - if 'claude' does not work run 'which claude' and paste that value here
CLAUDE_CODE_PATH=claude
# (Optional) Returns Claude Code to the project root directory after every command
CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR=true

View File

@@ -0,0 +1,632 @@
"""Claude Code agent module for executing prompts programmatically."""
import subprocess
import sys
import os
import json
import re
import logging
import time
import uuid
from typing import Optional, List, Dict, Any, Tuple, Final, Literal
from enum import Enum
from pydantic import BaseModel
from dotenv import load_dotenv
# Retry codes for Claude Code execution errors
class RetryCode(str, Enum):
"""Codes indicating different types of errors that may be retryable."""
CLAUDE_CODE_ERROR = "claude_code_error" # General Claude Code CLI error
TIMEOUT_ERROR = "timeout_error" # Command timed out
EXECUTION_ERROR = "execution_error" # Error during execution
ERROR_DURING_EXECUTION = "error_during_execution" # Agent encountered an error
NONE = "none" # No retry needed
class AgentPromptRequest(BaseModel):
"""Claude Code agent prompt configuration."""
prompt: str
adw_id: str
agent_name: str = "ops"
model: Literal["sonnet", "opus", "haiku"] = "sonnet"
dangerously_skip_permissions: bool = False
output_file: str
working_dir: Optional[str] = None
class AgentPromptResponse(BaseModel):
"""Claude Code agent response."""
output: str
success: bool
session_id: Optional[str] = None
retry_code: RetryCode = RetryCode.NONE
class AgentTemplateRequest(BaseModel):
"""Claude Code agent template execution request."""
agent_name: str
slash_command: str
args: List[str]
adw_id: str
model: Literal["sonnet", "opus", "haiku"] = "sonnet"
working_dir: Optional[str] = None
class ClaudeCodeResultMessage(BaseModel):
"""Claude Code JSONL result message (last line)."""
type: str
subtype: str
is_error: bool
duration_ms: int
duration_api_ms: int
num_turns: int
result: str
session_id: str
total_cost_usd: float
def get_safe_subprocess_env() -> Dict[str, str]:
"""Get filtered environment variables safe for subprocess execution.
Returns only the environment variables needed based on .env.sample configuration.
Returns:
Dictionary containing only required environment variables
"""
safe_env_vars = {
# Anthropic Configuration (required)
"ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"),
# Claude Code Configuration
"CLAUDE_CODE_PATH": os.getenv("CLAUDE_CODE_PATH", "claude"),
"CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR": os.getenv(
"CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR", "true"
),
# Essential system environment variables
"HOME": os.getenv("HOME"),
"USER": os.getenv("USER"),
"PATH": os.getenv("PATH"),
"SHELL": os.getenv("SHELL"),
"TERM": os.getenv("TERM"),
"LANG": os.getenv("LANG"),
"LC_ALL": os.getenv("LC_ALL"),
# Python-specific variables that subprocesses might need
"PYTHONPATH": os.getenv("PYTHONPATH"),
"PYTHONUNBUFFERED": "1", # Useful for subprocess output
# Working directory tracking
"PWD": os.getcwd(),
}
# Filter out None values
return {k: v for k, v in safe_env_vars.items() if v is not None}
# Load environment variables
load_dotenv()
# Get Claude Code CLI path from environment
CLAUDE_PATH = os.getenv("CLAUDE_CODE_PATH", "claude")
# Output file name constants (matching adw_prompt.py and adw_slash_command.py)
OUTPUT_JSONL = "cc_raw_output.jsonl"
OUTPUT_JSON = "cc_raw_output.json"
FINAL_OBJECT_JSON = "cc_final_object.json"
SUMMARY_JSON = "custom_summary_output.json"
def generate_short_id() -> str:
"""Generate a short 8-character UUID for tracking."""
return str(uuid.uuid4())[:8]
def truncate_output(
output: str, max_length: int = 500, suffix: str = "... (truncated)"
) -> str:
"""Truncate output to a reasonable length for display.
Special handling for JSONL data - if the output appears to be JSONL,
try to extract just the meaningful part.
Args:
output: The output string to truncate
max_length: Maximum length before truncation (default: 500)
suffix: Suffix to add when truncated (default: "... (truncated)")
Returns:
Truncated string if needed, original if shorter than max_length
"""
# Check if this looks like JSONL data
if output.startswith('{"type":') and '\n{"type":' in output:
# This is likely JSONL output - try to extract the last meaningful message
lines = output.strip().split("\n")
for line in reversed(lines):
try:
data = json.loads(line)
# Look for result message
if data.get("type") == "result":
result = data.get("result", "")
if result:
return truncate_output(result, max_length, suffix)
# Look for assistant message
elif data.get("type") == "assistant" and data.get("message"):
content = data["message"].get("content", [])
if isinstance(content, list) and content:
text = content[0].get("text", "")
if text:
return truncate_output(text, max_length, suffix)
except Exception:
pass
# If we couldn't extract anything meaningful, just show that it's JSONL
return f"[JSONL output with {len(lines)} messages]{suffix}"
# Regular truncation logic
if len(output) <= max_length:
return output
# Try to find a good break point (newline or space)
truncate_at = max_length - len(suffix)
# Look for newline near the truncation point
newline_pos = output.rfind("\n", truncate_at - 50, truncate_at)
if newline_pos > 0:
return output[:newline_pos] + suffix
# Look for space near the truncation point
space_pos = output.rfind(" ", truncate_at - 20, truncate_at)
if space_pos > 0:
return output[:space_pos] + suffix
# Just truncate at the limit
return output[:truncate_at] + suffix
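# Illustrative behavior (a sketch, not exhaustive):
#   truncate_output(long_plain_text) -> roughly the first 500 characters
#   (preferring a nearby newline or space as the break point) plus "... (truncated)"
#   truncate_output(jsonl_blob)      -> the extracted result/assistant text,
#   or "[JSONL output with N messages]... (truncated)" if nothing can be extracted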
def check_claude_installed() -> Optional[str]:
"""Check if Claude Code CLI is installed. Return error message if not."""
try:
result = subprocess.run(
[CLAUDE_PATH, "--version"], capture_output=True, text=True
)
if result.returncode != 0:
return (
f"Error: Claude Code CLI is not installed. Expected at: {CLAUDE_PATH}"
)
except FileNotFoundError:
return f"Error: Claude Code CLI is not installed. Expected at: {CLAUDE_PATH}"
return None
def parse_jsonl_output(
output_file: str,
) -> Tuple[List[Dict[str, Any]], Optional[Dict[str, Any]]]:
"""Parse JSONL output file and return all messages and the result message.
Returns:
Tuple of (all_messages, result_message) where result_message is None if not found
"""
try:
with open(output_file, "r") as f:
# Read all lines and parse each as JSON
messages = [json.loads(line) for line in f if line.strip()]
# Find the result message (should be the last one)
result_message = None
for message in reversed(messages):
if message.get("type") == "result":
result_message = message
break
return messages, result_message
except Exception as e:
return [], None
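# A well-formed stream is expected to end with a "result" line shaped roughly
# like ClaudeCodeResultMessage above, e.g. (illustrative):
#   {"type": "result", "subtype": "success", "is_error": false,
#    "result": "...", "session_id": "...", "total_cost_usd": 0.0123, ...}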
def convert_jsonl_to_json(jsonl_file: str) -> str:
"""Convert JSONL file to JSON array file.
Creates a cc_raw_output.json file in the same directory as the JSONL file,
containing all messages as a JSON array.
Returns:
Path to the created JSON file
"""
# Create JSON filename in the same directory
output_dir = os.path.dirname(jsonl_file)
json_file = os.path.join(output_dir, OUTPUT_JSON)
# Parse the JSONL file
messages, _ = parse_jsonl_output(jsonl_file)
# Write as JSON array
with open(json_file, "w") as f:
json.dump(messages, f, indent=2)
return json_file
def save_last_entry_as_raw_result(json_file: str) -> Optional[str]:
"""Save the last entry from a JSON array file as cc_final_object.json.
Args:
json_file: Path to the JSON array file
Returns:
Path to the created cc_final_object.json file, or None if error
"""
try:
# Read the JSON array
with open(json_file, "r") as f:
messages = json.load(f)
if not messages:
return None
# Get the last entry
last_entry = messages[-1]
# Create cc_final_object.json in the same directory
output_dir = os.path.dirname(json_file)
final_object_file = os.path.join(output_dir, FINAL_OBJECT_JSON)
# Write the last entry
with open(final_object_file, "w") as f:
json.dump(last_entry, f, indent=2)
return final_object_file
except Exception:
# Silently fail - this is a nice-to-have feature
return None
def get_claude_env() -> Dict[str, str]:
"""Get only the required environment variables for Claude Code execution.
This is a wrapper around get_safe_subprocess_env() for
backward compatibility. New code should use get_safe_subprocess_env() directly.
Returns a dictionary containing only the necessary environment variables
based on .env.sample configuration.
"""
# Use the function defined above
return get_safe_subprocess_env()
def save_prompt(prompt: str, adw_id: str, agent_name: str = "ops") -> None:
"""Save a prompt to the appropriate logging directory."""
# Extract slash command from prompt
match = re.match(r"^(/\w+)", prompt)
if not match:
return
slash_command = match.group(1)
# Remove leading slash for filename
command_name = slash_command[1:]
# Create directory structure at project root (parent of adws)
# __file__ is in adws/adw_modules/, so we need to go up 3 levels to get to project root
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
prompt_dir = os.path.join(project_root, "agents", adw_id, agent_name, "prompts")
os.makedirs(prompt_dir, exist_ok=True)
# Save prompt to file
prompt_file = os.path.join(prompt_dir, f"{command_name}.txt")
with open(prompt_file, "w") as f:
f.write(prompt)
def prompt_claude_code_with_retry(
request: AgentPromptRequest,
max_retries: int = 3,
retry_delays: List[int] = None,
) -> AgentPromptResponse:
"""Execute Claude Code with retry logic for certain error types.
Args:
request: The prompt request configuration
max_retries: Maximum number of retry attempts (default: 3)
retry_delays: List of delays in seconds between retries (default: [1, 3, 5])
Returns:
AgentPromptResponse with output and retry code
"""
if retry_delays is None:
retry_delays = [1, 3, 5]
# Ensure we have enough delays for max_retries
while len(retry_delays) < max_retries:
retry_delays.append(retry_delays[-1] + 2) # Add incrementing delays
last_response = None
for attempt in range(max_retries + 1): # +1 for initial attempt
if attempt > 0:
# This is a retry
delay = retry_delays[attempt - 1]
time.sleep(delay)
response = prompt_claude_code(request)
last_response = response
# Check if we should retry based on the retry code
if response.success or response.retry_code == RetryCode.NONE:
# Success or non-retryable error
return response
# Check if this is a retryable error
if response.retry_code in [
RetryCode.CLAUDE_CODE_ERROR,
RetryCode.TIMEOUT_ERROR,
RetryCode.EXECUTION_ERROR,
RetryCode.ERROR_DURING_EXECUTION,
]:
if attempt < max_retries:
continue
else:
return response
# Should not reach here, but return last response just in case
return last_response
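# Usage sketch (values are placeholders):
#   request = AgentPromptRequest(
#       prompt="/chore abc12345 'Update the README'",
#       adw_id="abc12345",
#       output_file="agents/abc12345/ops/cc_raw_output.jsonl",
#   )
#   response = prompt_claude_code_with_retry(request, max_retries=2)
#   if not response.success:
#       print(response.retry_code, response.output)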
def prompt_claude_code(request: AgentPromptRequest) -> AgentPromptResponse:
"""Execute Claude Code with the given prompt configuration."""
# Check if Claude Code CLI is installed
error_msg = check_claude_installed()
if error_msg:
return AgentPromptResponse(
output=error_msg,
success=False,
session_id=None,
retry_code=RetryCode.NONE, # Installation error is not retryable
)
# Save prompt before execution
save_prompt(request.prompt, request.adw_id, request.agent_name)
# Create output directory if needed
output_dir = os.path.dirname(request.output_file)
if output_dir:
os.makedirs(output_dir, exist_ok=True)
# Build command - always use stream-json format and verbose
cmd = [CLAUDE_PATH, "-p", request.prompt]
cmd.extend(["--model", request.model])
cmd.extend(["--output-format", "stream-json"])
cmd.append("--verbose")
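# At this point the invocation looks roughly like (illustrative):
#   claude -p "<prompt>" --model sonnet --output-format stream-json --verbose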
# Check for MCP config in working directory
if request.working_dir:
mcp_config_path = os.path.join(request.working_dir, ".mcp.json")
if os.path.exists(mcp_config_path):
cmd.extend(["--mcp-config", mcp_config_path])
# Add dangerous skip permissions flag if enabled
if request.dangerously_skip_permissions:
cmd.append("--dangerously-skip-permissions")
# Set up environment with only required variables
env = get_claude_env()
try:
# Open output file for streaming
with open(request.output_file, "w") as output_f:
# Execute Claude Code and stream output to file
result = subprocess.run(
cmd,
stdout=output_f, # Stream directly to file
stderr=subprocess.PIPE,
text=True,
env=env,
cwd=request.working_dir, # Use working_dir if provided
)
if result.returncode == 0:
# Parse the JSONL file
messages, result_message = parse_jsonl_output(request.output_file)
# Convert JSONL to JSON array file
json_file = convert_jsonl_to_json(request.output_file)
# Save the last entry as raw_result.json
save_last_entry_as_raw_result(json_file)
if result_message:
# Extract session_id from result message
session_id = result_message.get("session_id")
# Check if there was an error in the result
is_error = result_message.get("is_error", False)
subtype = result_message.get("subtype", "")
# Handle error_during_execution case where there's no result field
if subtype == "error_during_execution":
error_msg = "Error during execution: Agent encountered an error and did not return a result"
return AgentPromptResponse(
output=error_msg,
success=False,
session_id=session_id,
retry_code=RetryCode.ERROR_DURING_EXECUTION,
)
result_text = result_message.get("result", "")
# For error cases, truncate the output to prevent JSONL blobs
if is_error and len(result_text) > 1000:
result_text = truncate_output(result_text, max_length=800)
return AgentPromptResponse(
output=result_text,
success=not is_error,
session_id=session_id,
retry_code=RetryCode.NONE, # No retry needed for successful or non-retryable errors
)
else:
# No result message found, try to extract meaningful error
error_msg = "No result message found in Claude Code output"
# Try to get the last few lines of output for context
try:
with open(request.output_file, "r") as f:
lines = f.readlines()
if lines:
# Get last 5 lines or less
last_lines = lines[-5:] if len(lines) > 5 else lines
# Try to parse each as JSON to find any error messages
for line in reversed(last_lines):
try:
data = json.loads(line.strip())
if data.get("type") == "assistant" and data.get(
"message"
):
# Extract text from assistant message
content = data["message"].get("content", [])
if isinstance(content, list) and content:
text = content[0].get("text", "")
if text:
error_msg = f"Claude Code output: {text[:500]}" # Truncate
break
                                    except Exception:
                                        pass  # Skip lines that cannot be parsed as JSON messages
                    except Exception:
                        pass  # Fall back to the generic error message if the output file cannot be read
return AgentPromptResponse(
output=truncate_output(error_msg, max_length=800),
success=False,
session_id=None,
retry_code=RetryCode.NONE,
)
else:
# Error occurred - stderr is captured, stdout went to file
stderr_msg = result.stderr.strip() if result.stderr else ""
# Try to read the output file to check for errors in stdout
stdout_msg = ""
error_from_jsonl = None
try:
if os.path.exists(request.output_file):
# Parse JSONL to find error message
messages, result_message = parse_jsonl_output(request.output_file)
if result_message and result_message.get("is_error"):
# Found error in result message
error_from_jsonl = result_message.get("result", "Unknown error")
elif messages:
# Look for error in last few messages
for msg in reversed(messages[-5:]):
if msg.get("type") == "assistant" and msg.get(
"message", {}
).get("content"):
content = msg["message"]["content"]
if isinstance(content, list) and content:
text = content[0].get("text", "")
if text and (
"error" in text.lower()
or "failed" in text.lower()
):
error_from_jsonl = text[:500] # Truncate
break
# If no structured error found, get last line only
if not error_from_jsonl:
with open(request.output_file, "r") as f:
lines = f.readlines()
if lines:
# Just get the last line instead of entire file
stdout_msg = lines[-1].strip()[
:200
] # Truncate to 200 chars
                except Exception:
                    pass  # Ignore parse failures; fall through to the generic error handling below
if error_from_jsonl:
error_msg = f"Claude Code error: {error_from_jsonl}"
elif stdout_msg and not stderr_msg:
error_msg = f"Claude Code error: {stdout_msg}"
elif stderr_msg and not stdout_msg:
error_msg = f"Claude Code error: {stderr_msg}"
elif stdout_msg and stderr_msg:
error_msg = f"Claude Code error: {stderr_msg}\nStdout: {stdout_msg}"
else:
error_msg = f"Claude Code error: Command failed with exit code {result.returncode}"
# Always truncate error messages to prevent huge outputs
return AgentPromptResponse(
output=truncate_output(error_msg, max_length=800),
success=False,
session_id=None,
retry_code=RetryCode.CLAUDE_CODE_ERROR,
)
except subprocess.TimeoutExpired:
error_msg = "Error: Claude Code command timed out after 5 minutes"
return AgentPromptResponse(
output=error_msg,
success=False,
session_id=None,
retry_code=RetryCode.TIMEOUT_ERROR,
)
except Exception as e:
error_msg = f"Error executing Claude Code: {e}"
return AgentPromptResponse(
output=error_msg,
success=False,
session_id=None,
retry_code=RetryCode.EXECUTION_ERROR,
)
def execute_template(request: AgentTemplateRequest) -> AgentPromptResponse:
"""Execute a Claude Code template with slash command and arguments.
Example:
request = AgentTemplateRequest(
agent_name="planner",
slash_command="/implement",
args=["plan.md"],
adw_id="abc12345",
model="sonnet" # Explicitly set model
)
response = execute_template(request)
"""
# Construct prompt from slash command and args
prompt = f"{request.slash_command} {' '.join(request.args)}"
# Create output directory with adw_id at project root
# __file__ is in adws/adw_modules/, so we need to go up 3 levels to get to project root
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
output_dir = os.path.join(
project_root, "agents", request.adw_id, request.agent_name
)
os.makedirs(output_dir, exist_ok=True)
# Build output file path
output_file = os.path.join(output_dir, OUTPUT_JSONL)
# Create prompt request with specific parameters
prompt_request = AgentPromptRequest(
prompt=prompt,
adw_id=request.adw_id,
agent_name=request.agent_name,
model=request.model,
dangerously_skip_permissions=True,
output_file=output_file,
working_dir=request.working_dir, # Pass through working_dir
)
# Execute with retry logic and return response (prompt_claude_code now handles all parsing)
return prompt_claude_code_with_retry(prompt_request)

View File

@@ -0,0 +1,283 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "pydantic",
# "python-dotenv",
# "click",
# "rich",
# ]
# ///
"""
Run an adhoc Claude Code prompt from the command line.
Usage:
# Method 1: Direct execution (requires uv)
./adw_prompt.py "Write a hello world Python script"
# Method 2: Using uv run
uv run adw_prompt.py "Write a hello world Python script"
# Method 3: Using Python directly (requires dependencies installed)
python adw_prompt.py "Write a hello world Python script"
Examples:
# Run with specific model
./adw_prompt.py "Explain this code" --model opus
# Run with custom output file
./adw_prompt.py "Create a FastAPI app" --output my_result.jsonl
# Run from a different working directory
./adw_prompt.py "List files here" --working-dir /path/to/project
# Disable retry on failure
./adw_prompt.py "Quick test" --no-retry
# Use custom agent name
./adw_prompt.py "Debug this" --agent-name debugger
"""
import os
import sys
import json
from pathlib import Path
import click
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.syntax import Syntax
from rich.text import Text
# Add the adw_modules directory to the path so we can import agent
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "adw_modules"))
from agent import (
prompt_claude_code,
AgentPromptRequest,
AgentPromptResponse,
prompt_claude_code_with_retry,
generate_short_id,
)
# Output file name constants
OUTPUT_JSONL = "cc_raw_output.jsonl"
OUTPUT_JSON = "cc_raw_output.json"
FINAL_OBJECT_JSON = "cc_final_object.json"
SUMMARY_JSON = "custom_summary_output.json"
@click.command()
@click.argument("prompt", required=True)
@click.option(
"--model",
type=click.Choice(["sonnet", "opus", "haiku"]),
default="sonnet",
help="Claude model to use (sonnet=balanced, opus=max intelligence, haiku=fast & economical)",
)
@click.option(
"--output",
type=click.Path(),
help="Output file path (default: ./output/oneoff_<id>_output.jsonl)",
)
@click.option(
"--working-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
help="Working directory for the prompt execution (default: current directory)",
)
@click.option("--no-retry", is_flag=True, help="Disable automatic retry on failure")
@click.option(
"--agent-name", default="oneoff", help="Agent name for tracking (default: oneoff)"
)
def main(
prompt: str,
model: str,
output: str,
working_dir: str,
no_retry: bool,
agent_name: str,
):
"""Run an adhoc Claude Code prompt from the command line."""
console = Console()
# Validate prompt is not empty
if not prompt or not prompt.strip():
console.print(
Panel(
"[bold red]Error: Prompt cannot be empty[/bold red]\n\n"
"Please provide a valid prompt string.",
title="❌ Invalid Input",
border_style="red"
)
)
sys.exit(1)
# Generate a unique ID for this execution
adw_id = generate_short_id()
# Set up output file path
if not output:
# Default: write to agents/<adw_id>/<agent_name>/
output_dir = Path(f"./agents/{adw_id}/{agent_name}")
output_dir.mkdir(parents=True, exist_ok=True)
output = str(output_dir / OUTPUT_JSONL)
# Use current directory if no working directory specified
if not working_dir:
working_dir = os.getcwd()
# Create the prompt request
request = AgentPromptRequest(
prompt=prompt,
adw_id=adw_id,
agent_name=agent_name,
model=model,
dangerously_skip_permissions=True,
output_file=output,
working_dir=working_dir,
)
# Create execution info table
info_table = Table(show_header=False, box=None, padding=(0, 1))
info_table.add_column(style="bold cyan")
info_table.add_column()
info_table.add_row("ADW ID", adw_id)
info_table.add_row("ADW Name", "adw_prompt")
info_table.add_row("Prompt", prompt)
info_table.add_row("Model", model)
info_table.add_row("Working Dir", working_dir)
info_table.add_row("Output", output)
console.print(
Panel(
info_table,
title="[bold blue]🚀 Inputs[/bold blue]",
border_style="blue",
)
)
console.print()
response: AgentPromptResponse | None = None
try:
# Execute the prompt
with console.status("[bold yellow]Executing prompt...[/bold yellow]"):
if no_retry:
# Direct execution without retry
response = prompt_claude_code(request)
else:
# Execute with retry logic
response = prompt_claude_code_with_retry(request)
# Display the result
if response.success:
# Success panel
result_panel = Panel(
response.output,
title="[bold green]✅ Success[/bold green]",
border_style="green",
padding=(1, 2),
)
console.print(result_panel)
if response.session_id:
console.print(
f"\n[bold cyan]Session ID:[/bold cyan] {response.session_id}"
)
else:
# Error panel
error_panel = Panel(
response.output,
title="[bold red]❌ Failed[/bold red]",
border_style="red",
padding=(1, 2),
)
console.print(error_panel)
if response.retry_code != "none":
console.print(
f"\n[bold yellow]Retry code:[/bold yellow] {response.retry_code}"
)
# Show output file info
console.print()
        # Also create a JSON summary file with explicit error handling
        simple_json_output = output.replace(".jsonl", "_summary.json")  # Fallback so the files table below always has a path
        try:
if output.endswith(f"/{OUTPUT_JSONL}"):
# Default path: save as custom_summary_output.json in same directory
simple_json_output = output.replace(f"/{OUTPUT_JSONL}", f"/{SUMMARY_JSON}")
else:
# Custom path: replace .jsonl with _summary.json
simple_json_output = output.replace(".jsonl", "_summary.json")
# Create summary data
summary_data = {
"adw_id": adw_id,
"prompt": prompt,
"model": model,
"working_dir": working_dir,
"success": response.success,
"session_id": response.session_id,
"retry_code": response.retry_code,
"output": response.output,
}
# Write summary file
with open(simple_json_output, "w") as f:
json.dump(summary_data, f, indent=2)
except Exception as e:
console.print(
f"[yellow]Warning: Could not create summary file: {e}[/yellow]"
)
# Files saved panel with descriptions
files_table = Table(show_header=True, box=None)
files_table.add_column("File Type", style="bold cyan")
files_table.add_column("Path", style="dim")
files_table.add_column("Description", style="italic")
# Determine paths for all files
output_dir = os.path.dirname(output)
json_array_path = os.path.join(output_dir, OUTPUT_JSON)
final_object_path = os.path.join(output_dir, FINAL_OBJECT_JSON)
files_table.add_row(
"JSONL Stream", output, "Raw streaming output from Claude Code"
)
files_table.add_row(
"JSON Array", json_array_path, "All messages as a JSON array"
)
files_table.add_row(
"Final Object", final_object_path, "Last message entry (final result)"
)
files_table.add_row(
"Summary", simple_json_output, "High-level execution summary with metadata"
)
console.print(
Panel(
files_table,
title="[bold blue]📄 Output Files[/bold blue]",
border_style="blue",
)
)
# Exit with appropriate code
sys.exit(0 if response.success else 1)
except Exception as e:
console.print(
Panel(
f"[bold red]{str(e)}[/bold red]",
title="[bold red]❌ Unexpected Error[/bold red]",
border_style="red",
)
)
sys.exit(2)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,72 @@
# Chore Planning
Create a plan to complete the chore using the specified markdown `Plan Format`. Research the codebase and create a thorough plan.
## Variables
adw_id: $1
prompt: $2
## Instructions
- If the adw_id or prompt is not provided, stop and ask the user to provide them.
- Create a plan to complete the chore described in the `prompt`
- The plan should be simple, thorough, and precise
- Create the plan in the `specs/` directory with filename: `chore-{adw_id}-{descriptive-name}.md`
- Replace `{descriptive-name}` with a short, descriptive name based on the chore (e.g., "update-readme", "add-logging", "refactor-agent")
- Research the codebase starting with `README.md`
- Replace every <placeholder> in the `Plan Format` with the requested value
## Codebase Structure
- `README.md` - Project overview and instructions (start here)
- `adws/` - AI Developer Workflow scripts and modules
- `apps/` - Example applications
- `.claude/commands/` - Claude command templates
- `specs/` - Specification and plan documents
## Plan Format
```md
# Chore: <chore name>
## Metadata
adw_id: `{adw_id}`
prompt: `{prompt}`
## Chore Description
<describe the chore in detail based on the prompt>
## Relevant Files
Use these files to complete the chore:
<list files relevant to the chore with bullet points explaining why. Include new files to be created under an h3 'New Files' section if needed>
## Step by Step Tasks
IMPORTANT: Execute every step in order, top to bottom.
<list step by step tasks as h3 headers with bullet points. Start with foundational changes then move to specific changes. Last step should validate the work>
### 1. <First Task Name>
- <specific action>
- <specific action>
### 2. <Second Task Name>
- <specific action>
- <specific action>
## Validation Commands
Execute these commands to validate the chore is complete:
<list specific commands to validate the work. Be precise about what to run>
- Example: `uv run python -m py_compile apps/*.py` - Test to ensure the code compiles
## Notes
<optional additional context or considerations>
```
## Chore
Use the chore description from the `prompt` variable.
## Report
Return the path to the plan file created.
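## Example Invocation
For reference, an ADW workflow script might invoke this command through `execute_template` roughly as follows (a sketch; the import path, ADW ID, and prompt values are illustrative placeholders):
```python
from agent import AgentTemplateRequest, execute_template

# Argument order follows the Variables section above: $1 = adw_id, $2 = prompt
request = AgentTemplateRequest(
    agent_name="planner",
    slash_command="/chore",
    args=["abc12345", "Add logging to agent.py"],
    adw_id="abc12345",
    model="sonnet",
)
response = execute_template(request)
# On success, response.output reports the created plan path,
# e.g. specs/chore-abc12345-add-logging.md
```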

View File

@@ -0,0 +1,12 @@
# Implement the following plan
Follow the `Instructions` to implement the `Plan` then `Report` the completed work.
## Instructions
- Read the plan, think hard about the plan and implement the plan.
## Plan
$ARGUMENTS
## Report
- Summarize the work you've just done in a concise bullet point list.
- Report the files and total lines changed with `git diff --stat`

View File

@@ -0,0 +1,292 @@
"""Beads Integration Module - AI Developer Workflow (ADW)
This module provides beads issue management as an alternative to GitHub issues.
Allows ADW workflows to work with local beads tasks for offline development.
"""
import os
import subprocess
import json
from typing import Tuple, Optional
from adw_modules.data_types import GitHubIssue
from datetime import datetime
def get_workspace_root() -> str:
"""Get workspace root for beads operations."""
# Assume workspace root is the parent of adws directory
return os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
def fetch_beads_issue(issue_id: str) -> Tuple[Optional[GitHubIssue], Optional[str]]:
"""Fetch beads issue and convert to GitHubIssue format.
Args:
issue_id: The beads issue ID
Returns:
Tuple of (GitHubIssue, error_message)
"""
workspace_root = get_workspace_root()
# Use bd show to get issue details
cmd = ["bd", "show", issue_id]
try:
result = subprocess.run(
cmd,
capture_output=True,
text=True,
cwd=workspace_root,
)
if result.returncode != 0:
return None, f"Failed to fetch beads issue: {result.stderr}"
# Parse the output (bd show returns human-readable format)
# Format is:
# poc-fjw: Token Infrastructure & Redis Setup
# Status: in_progress
# Priority: P0
# Type: feature
# ...
# Description:
# <description text>
output = result.stdout
# Extract title, description, status from output
title = None
description = None
status = "open"
issue_type = "task"
in_description = False
description_lines = []
for line in output.split("\n"):
stripped = line.strip()
# Skip empty lines
if not stripped:
continue
# First line has format: "poc-fjw: Token Infrastructure & Redis Setup"
if not title and ":" in line and not line.startswith(" "):
parts = line.split(":", 1)
if len(parts) == 2 and parts[0].strip() == issue_id:
title = parts[1].strip()
continue
# Status line
if stripped.startswith("Status:"):
status = stripped.split(":", 1)[1].strip()
in_description = False
# Type line
elif stripped.startswith("Type:"):
issue_type = stripped.split(":", 1)[1].strip()
in_description = False
# Description section
elif stripped.startswith("Description:"):
in_description = True
# Check if description is on same line
desc_text = stripped.split(":", 1)[1].strip()
if desc_text:
description_lines.append(desc_text)
elif in_description and stripped and not stripped.startswith("Dependents"):
description_lines.append(stripped)
elif stripped.startswith("Dependents") or stripped.startswith("Dependencies"):
in_description = False
# Combine description lines
if description_lines:
description = "\n".join(description_lines)
if not title:
return None, "Could not parse issue title from beads output"
# Convert to GitHubIssue format for compatibility
# Use the issue_id as the number (extract numeric part if present)
try:
# Try to extract number from ID like "poc-123"
number_str = issue_id.split("-")[-1]
if number_str.isdigit():
number = int(number_str)
else:
# Use hash of ID as fallback
number = hash(issue_id) % 10000
        except Exception:
            number = hash(issue_id) % 10000
# Create GitHubIssue-compatible object
issue = GitHubIssue(
number=number,
title=title or "Untitled Task",
body=description or "",
state=status,
author={"login": "beads"},
assignees=[],
labels=[{"name": issue_type}],
milestone=None,
comments=[],
createdAt=datetime.now().isoformat(),
updatedAt=datetime.now().isoformat(),
closedAt=None,
url=f"beads://{issue_id}",
)
return issue, None
except FileNotFoundError:
return None, "bd command not found. Is beads installed?"
except Exception as e:
return None, f"Error fetching beads issue: {str(e)}"
def update_beads_status(issue_id: str, status: str) -> Tuple[bool, Optional[str]]:
"""Update beads issue status.
Args:
issue_id: The beads issue ID
status: New status (open, in_progress, blocked, closed)
Returns:
Tuple of (success, error_message)
"""
workspace_root = get_workspace_root()
cmd = ["bd", "update", issue_id, "--status", status]
try:
result = subprocess.run(
cmd,
capture_output=True,
text=True,
cwd=workspace_root,
)
if result.returncode != 0:
return False, f"Failed to update beads status: {result.stderr}"
return True, None
except FileNotFoundError:
return False, "bd command not found. Is beads installed?"
except Exception as e:
return False, f"Error updating beads status: {str(e)}"
def close_beads_issue(issue_id: str, reason: str = "Completed via ADW workflow") -> Tuple[bool, Optional[str]]:
"""Close a beads issue.
Args:
issue_id: The beads issue ID
reason: Reason for closing
Returns:
Tuple of (success, error_message)
"""
workspace_root = get_workspace_root()
cmd = ["bd", "close", issue_id, "--reason", reason]
try:
result = subprocess.run(
cmd,
capture_output=True,
text=True,
cwd=workspace_root,
)
if result.returncode != 0:
return False, f"Failed to close beads issue: {result.stderr}"
return True, None
except FileNotFoundError:
return False, "bd command not found. Is beads installed?"
except Exception as e:
return False, f"Error closing beads issue: {str(e)}"
def get_ready_beads_tasks(limit: int = 10) -> Tuple[Optional[list], Optional[str]]:
"""Get ready beads tasks (no blockers).
Args:
limit: Maximum number of tasks to return
Returns:
Tuple of (task_list, error_message)
"""
workspace_root = get_workspace_root()
cmd = ["bd", "ready", "--limit", str(limit)]
try:
result = subprocess.run(
cmd,
capture_output=True,
text=True,
cwd=workspace_root,
)
if result.returncode != 0:
return None, f"Failed to get ready tasks: {result.stderr}"
# Parse output to extract task IDs
# bd ready returns format like:
# 📋 Ready work (1 issues with no blockers):
#
# 1. [P0] poc-pw3: Credit Consumption & Atomicity
# Assignee: La Boeuf
tasks = []
# Check if there are no ready tasks
if "No ready work found" in result.stdout or "(0 issues" in result.stdout:
return [], None
for line in result.stdout.split("\n"):
line = line.strip()
# Skip empty lines, headers, and assignee lines
if not line or line.startswith("📋") or line.startswith("Assignee:"):
continue
# Look for lines with format: "1. [P0] poc-pw3: Title"
# Extract the task ID (poc-pw3 in this case)
if ". [P" in line or ". [" in line:
# Split on ": " to get the ID part
parts = line.split(":")
if len(parts) >= 2:
# Get the part before the colon, then extract the ID
# Format: "1. [P0] poc-pw3"
id_part = parts[0].strip()
# Split by spaces and get the last token (the ID)
tokens = id_part.split()
if tokens:
task_id = tokens[-1]
# Verify it looks like a beads ID (has hyphen)
if "-" in task_id:
tasks.append(task_id)
return tasks, None
except FileNotFoundError:
return None, "bd command not found. Is beads installed?"
except Exception as e:
return None, f"Error getting ready tasks: {str(e)}"
def is_beads_issue(issue_identifier: str) -> bool:
"""Check if an issue identifier is a beads issue.
Beads issues have format like: poc-abc, feat-123, etc.
GitHub issues are just numbers.
Args:
issue_identifier: The issue identifier
Returns:
True if it's a beads issue, False otherwise
"""
# Beads issues contain a hyphen
return "-" in issue_identifier and not issue_identifier.isdigit()

View File

@@ -0,0 +1,316 @@
"""Git operations for ADW composable architecture.
Provides centralized git operations that build on top of the github.py module.
"""
import subprocess
import json
import logging
from typing import Optional, Tuple
# Import GitHub functions from existing module
from adw_modules.github import get_repo_url, extract_repo_path, make_issue_comment
def get_current_branch(cwd: Optional[str] = None) -> str:
"""Get current git branch name."""
result = subprocess.run(
["git", "rev-parse", "--abbrev-ref", "HEAD"],
capture_output=True,
text=True,
cwd=cwd,
)
return result.stdout.strip()
def push_branch(
branch_name: str, cwd: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
"""Push current branch to remote. Returns (success, error_message)."""
result = subprocess.run(
["git", "push", "-u", "origin", branch_name],
capture_output=True,
text=True,
cwd=cwd,
)
if result.returncode != 0:
return False, result.stderr
return True, None
def check_pr_exists(branch_name: str) -> Optional[str]:
"""Check if PR exists for branch. Returns PR URL if exists."""
# Use github.py functions to get repo info
try:
repo_url = get_repo_url()
repo_path = extract_repo_path(repo_url)
except Exception as e:
return None
result = subprocess.run(
[
"gh",
"pr",
"list",
"--repo",
repo_path,
"--head",
branch_name,
"--json",
"url",
],
capture_output=True,
text=True,
)
if result.returncode == 0:
prs = json.loads(result.stdout)
if prs:
return prs[0]["url"]
return None
def create_branch(
branch_name: str, cwd: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
"""Create and checkout a new branch. Returns (success, error_message)."""
# Create branch
result = subprocess.run(
["git", "checkout", "-b", branch_name], capture_output=True, text=True, cwd=cwd
)
if result.returncode != 0:
# Check if error is because branch already exists
if "already exists" in result.stderr:
# Try to checkout existing branch
result = subprocess.run(
["git", "checkout", branch_name],
capture_output=True,
text=True,
cwd=cwd,
)
if result.returncode != 0:
return False, result.stderr
return True, None
return False, result.stderr
return True, None
def commit_changes(
message: str, cwd: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
"""Stage all changes and commit. Returns (success, error_message)."""
# Check if there are changes to commit
result = subprocess.run(
["git", "status", "--porcelain"], capture_output=True, text=True, cwd=cwd
)
if not result.stdout.strip():
return True, None # No changes to commit
# Stage all changes
result = subprocess.run(
["git", "add", "-A"], capture_output=True, text=True, cwd=cwd
)
if result.returncode != 0:
return False, result.stderr
# Commit
result = subprocess.run(
["git", "commit", "-m", message], capture_output=True, text=True, cwd=cwd
)
if result.returncode != 0:
return False, result.stderr
return True, None
def get_pr_number(branch_name: str) -> Optional[str]:
"""Get PR number for a branch. Returns PR number if exists."""
# Use github.py functions to get repo info
try:
repo_url = get_repo_url()
repo_path = extract_repo_path(repo_url)
except Exception as e:
return None
result = subprocess.run(
[
"gh",
"pr",
"list",
"--repo",
repo_path,
"--head",
branch_name,
"--json",
"number",
"--limit",
"1",
],
capture_output=True,
text=True,
)
if result.returncode == 0:
prs = json.loads(result.stdout)
if prs:
return str(prs[0]["number"])
return None
def approve_pr(pr_number: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]:
"""Approve a PR. Returns (success, error_message)."""
try:
repo_url = get_repo_url()
repo_path = extract_repo_path(repo_url)
except Exception as e:
return False, f"Failed to get repo info: {e}"
result = subprocess.run(
[
"gh",
"pr",
"review",
pr_number,
"--repo",
repo_path,
"--approve",
"--body",
"ADW Ship workflow approved this PR after validating all state fields.",
],
capture_output=True,
text=True,
)
if result.returncode != 0:
return False, result.stderr
logger.info(f"Approved PR #{pr_number}")
return True, None
def merge_pr(
pr_number: str, logger: logging.Logger, merge_method: str = "squash"
) -> Tuple[bool, Optional[str]]:
"""Merge a PR. Returns (success, error_message).
Args:
pr_number: The PR number to merge
logger: Logger instance
merge_method: One of 'merge', 'squash', 'rebase' (default: 'squash')
"""
try:
repo_url = get_repo_url()
repo_path = extract_repo_path(repo_url)
except Exception as e:
return False, f"Failed to get repo info: {e}"
# First check if PR is mergeable
result = subprocess.run(
[
"gh",
"pr",
"view",
pr_number,
"--repo",
repo_path,
"--json",
"mergeable,mergeStateStatus",
],
capture_output=True,
text=True,
)
if result.returncode != 0:
return False, f"Failed to check PR status: {result.stderr}"
pr_status = json.loads(result.stdout)
if pr_status.get("mergeable") != "MERGEABLE":
return (
False,
f"PR is not mergeable. Status: {pr_status.get('mergeStateStatus', 'unknown')}",
)
# Merge the PR
merge_cmd = [
"gh",
"pr",
"merge",
pr_number,
"--repo",
repo_path,
f"--{merge_method}",
]
# Add auto-merge body
merge_cmd.extend(
["--body", "Merged by ADW Ship workflow after successful validation."]
)
result = subprocess.run(merge_cmd, capture_output=True, text=True)
if result.returncode != 0:
return False, result.stderr
logger.info(f"Merged PR #{pr_number} using {merge_method} method")
return True, None
def finalize_git_operations(
state: "ADWState", logger: logging.Logger, cwd: Optional[str] = None
) -> None:
"""Standard git finalization: push branch and create/update PR."""
branch_name = state.get("branch_name")
if not branch_name:
# Fallback: use current git branch if not main
current_branch = get_current_branch(cwd=cwd)
if current_branch and current_branch != "main":
logger.warning(
f"No branch name in state, using current branch: {current_branch}"
)
branch_name = current_branch
else:
logger.error(
"No branch name in state and current branch is main, skipping git operations"
)
return
# Always push
success, error = push_branch(branch_name, cwd=cwd)
if not success:
logger.error(f"Failed to push branch: {error}")
return
logger.info(f"Pushed branch: {branch_name}")
# Handle PR
pr_url = check_pr_exists(branch_name)
issue_number = state.get("issue_number")
adw_id = state.get("adw_id")
if pr_url:
logger.info(f"Found existing PR: {pr_url}")
# Post PR link for easy reference
if issue_number and adw_id:
make_issue_comment(issue_number, f"{adw_id}_ops: ✅ Pull request: {pr_url}")
else:
# Create new PR - fetch issue data first
if issue_number:
try:
repo_url = get_repo_url()
repo_path = extract_repo_path(repo_url)
from adw_modules.github import fetch_issue
issue = fetch_issue(issue_number, repo_path)
from adw_modules.workflow_ops import create_pull_request
pr_url, error = create_pull_request(branch_name, issue, state, logger, cwd)
except Exception as e:
logger.error(f"Failed to fetch issue for PR creation: {e}")
pr_url, error = None, str(e)
else:
pr_url, error = None, "No issue number in state"
if pr_url:
logger.info(f"Created PR: {pr_url}")
# Post new PR link
if issue_number and adw_id:
make_issue_comment(
issue_number, f"{adw_id}_ops: ✅ Pull request created: {pr_url}"
)
else:
logger.error(f"Failed to create PR: {error}")

View File

@@ -0,0 +1,312 @@
#!/usr/bin/env -S uv run
# /// script
# dependencies = ["python-dotenv", "pydantic"]
# ///
"""
GitHub Operations Module - AI Developer Workflow (ADW)
This module contains all GitHub-related operations including:
- Issue fetching and manipulation
- Comment posting
- Repository path extraction
- Issue status management
"""
import subprocess
import sys
import os
import json
from typing import Dict, List, Optional
from .data_types import GitHubIssue, GitHubIssueListItem, GitHubComment
# Bot identifier to prevent webhook loops and filter bot comments
ADW_BOT_IDENTIFIER = "[ADW-AGENTS]"
def get_github_env() -> Optional[dict]:
"""Get environment with GitHub token set up. Returns None if no GITHUB_PAT.
Subprocess env behavior:
- env=None → Inherits parent's environment (default)
- env={} → Empty environment (no variables)
- env=custom_dict → Only uses specified variables
So this will work with gh authentication:
# These are equivalent:
result = subprocess.run(cmd, capture_output=True, text=True)
result = subprocess.run(cmd, capture_output=True, text=True, env=None)
But this will NOT work (no PATH, no auth):
result = subprocess.run(cmd, capture_output=True, text=True, env={})
"""
github_pat = os.getenv("GITHUB_PAT")
if not github_pat:
return None
# Only create minimal env with GitHub token
env = {
"GH_TOKEN": github_pat,
"PATH": os.environ.get("PATH", ""),
}
return env
def get_repo_url() -> str:
"""Get GitHub repository URL from git remote."""
try:
result = subprocess.run(
["git", "remote", "get-url", "origin"],
capture_output=True,
text=True,
check=True,
)
return result.stdout.strip()
except subprocess.CalledProcessError:
raise ValueError(
"No git remote 'origin' found. Please ensure you're in a git repository with a remote."
)
except FileNotFoundError:
raise ValueError("git command not found. Please ensure git is installed.")
def extract_repo_path(github_url: str) -> str:
"""Extract owner/repo from GitHub URL."""
# Handle both https://github.com/owner/repo and https://github.com/owner/repo.git
return github_url.replace("https://github.com/", "").replace(".git", "")
def fetch_issue(issue_number: str, repo_path: str) -> GitHubIssue:
"""Fetch GitHub issue using gh CLI and return typed model."""
# Use JSON output for structured data
cmd = [
"gh",
"issue",
"view",
issue_number,
"-R",
repo_path,
"--json",
"number,title,body,state,author,assignees,labels,milestone,comments,createdAt,updatedAt,closedAt,url",
]
# Set up environment with GitHub token if available
env = get_github_env()
try:
result = subprocess.run(cmd, capture_output=True, text=True, env=env)
if result.returncode == 0:
# Parse JSON response into Pydantic model
issue_data = json.loads(result.stdout)
issue = GitHubIssue(**issue_data)
return issue
else:
print(result.stderr, file=sys.stderr)
sys.exit(result.returncode)
except FileNotFoundError:
print("Error: GitHub CLI (gh) is not installed.", file=sys.stderr)
print("\nTo install gh:", file=sys.stderr)
print(" - macOS: brew install gh", file=sys.stderr)
print(
" - Linux: See https://github.com/cli/cli#installation",
file=sys.stderr,
)
print(
" - Windows: See https://github.com/cli/cli#installation", file=sys.stderr
)
print("\nAfter installation, authenticate with: gh auth login", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error parsing issue data: {e}", file=sys.stderr)
sys.exit(1)
def make_issue_comment(issue_id: str, comment: str) -> None:
"""Post a comment to a GitHub issue using gh CLI."""
# Get repo information from git remote
github_repo_url = get_repo_url()
repo_path = extract_repo_path(github_repo_url)
# Ensure comment has ADW_BOT_IDENTIFIER to prevent webhook loops
if not comment.startswith(ADW_BOT_IDENTIFIER):
comment = f"{ADW_BOT_IDENTIFIER} {comment}"
# Build command
cmd = [
"gh",
"issue",
"comment",
issue_id,
"-R",
repo_path,
"--body",
comment,
]
# Set up environment with GitHub token if available
env = get_github_env()
try:
result = subprocess.run(cmd, capture_output=True, text=True, env=env)
if result.returncode == 0:
print(f"Successfully posted comment to issue #{issue_id}")
else:
print(f"Error posting comment: {result.stderr}", file=sys.stderr)
raise RuntimeError(f"Failed to post comment: {result.stderr}")
except Exception as e:
print(f"Error posting comment: {e}", file=sys.stderr)
raise
def mark_issue_in_progress(issue_id: str) -> None:
"""Mark issue as in progress by adding label and comment."""
# Get repo information from git remote
github_repo_url = get_repo_url()
repo_path = extract_repo_path(github_repo_url)
# Add "in_progress" label
cmd = [
"gh",
"issue",
"edit",
issue_id,
"-R",
repo_path,
"--add-label",
"in_progress",
]
# Set up environment with GitHub token if available
env = get_github_env()
# Try to add label (may fail if label doesn't exist)
result = subprocess.run(cmd, capture_output=True, text=True, env=env)
if result.returncode != 0:
print(f"Note: Could not add 'in_progress' label: {result.stderr}")
    # Posting a progress comment is currently disabled; uncomment to re-enable it
    # make_issue_comment(issue_id, "🚧 ADW is working on this issue...")
# Assign to self (optional)
cmd = [
"gh",
"issue",
"edit",
issue_id,
"-R",
repo_path,
"--add-assignee",
"@me",
]
result = subprocess.run(cmd, capture_output=True, text=True, env=env)
if result.returncode == 0:
print(f"Assigned issue #{issue_id} to self")
def fetch_open_issues(repo_path: str) -> List[GitHubIssueListItem]:
"""Fetch all open issues from the GitHub repository."""
try:
cmd = [
"gh",
"issue",
"list",
"--repo",
repo_path,
"--state",
"open",
"--json",
"number,title,body,labels,createdAt,updatedAt",
"--limit",
"1000",
]
# Set up environment with GitHub token if available
env = get_github_env()
# DEBUG level - not printing command
result = subprocess.run(
cmd, capture_output=True, text=True, check=True, env=env
)
issues_data = json.loads(result.stdout)
issues = [GitHubIssueListItem(**issue_data) for issue_data in issues_data]
print(f"Fetched {len(issues)} open issues")
return issues
except subprocess.CalledProcessError as e:
print(f"ERROR: Failed to fetch issues: {e.stderr}", file=sys.stderr)
return []
except json.JSONDecodeError as e:
print(f"ERROR: Failed to parse issues JSON: {e}", file=sys.stderr)
return []
def fetch_issue_comments(repo_path: str, issue_number: int) -> List[Dict]:
"""Fetch all comments for a specific issue."""
try:
cmd = [
"gh",
"issue",
"view",
str(issue_number),
"--repo",
repo_path,
"--json",
"comments",
]
# Set up environment with GitHub token if available
env = get_github_env()
result = subprocess.run(
cmd, capture_output=True, text=True, check=True, env=env
)
data = json.loads(result.stdout)
comments = data.get("comments", [])
# Sort comments by creation time
comments.sort(key=lambda c: c.get("createdAt", ""))
# DEBUG level - not printing
return comments
except subprocess.CalledProcessError as e:
print(
f"ERROR: Failed to fetch comments for issue #{issue_number}: {e.stderr}",
file=sys.stderr,
)
return []
except json.JSONDecodeError as e:
print(
f"ERROR: Failed to parse comments JSON for issue #{issue_number}: {e}",
file=sys.stderr,
)
return []
def find_keyword_from_comment(keyword: str, issue: GitHubIssue) -> Optional[GitHubComment]:
"""Find the latest comment containing a specific keyword.
Args:
keyword: The keyword to search for in comments
issue: The GitHub issue containing comments
Returns:
The latest GitHubComment containing the keyword, or None if not found
"""
# Sort comments by created_at date (newest first)
sorted_comments = sorted(issue.comments, key=lambda c: c.created_at, reverse=True)
# Search through sorted comments (newest first)
for comment in sorted_comments:
# Skip ADW bot comments to prevent loops
if ADW_BOT_IDENTIFIER in comment.body:
continue
if keyword in comment.body:
return comment
return None
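# Minimal usage sketch (not part of the workflow itself): fetch an issue and post
# a comment. The issue number "1" is a placeholder; this assumes the gh CLI is
# installed and authenticated and the current directory is a clone with a GitHub
# `origin` remote. Because of the relative import above, run it as a module
# (e.g. `python -m adw_modules.github`) rather than as a standalone script.
if __name__ == "__main__":
    repo_path = extract_repo_path(get_repo_url())
    issue = fetch_issue("1", repo_path)
    print(f"#{issue.number}: {issue.title} ({issue.state})")
    # make_issue_comment prefixes the body with ADW_BOT_IDENTIFIER automatically
    make_issue_comment("1", "Example comment from the github module sketch")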

View File

@@ -0,0 +1,172 @@
"""State management for ADW composable architecture.
Provides persistent state management via file storage and
transient state passing between scripts via stdin/stdout.
"""
import json
import os
import sys
import logging
from typing import Dict, Any, Optional
from adw_modules.data_types import ADWStateData
class ADWState:
"""Container for ADW workflow state with file persistence."""
STATE_FILENAME = "adw_state.json"
def __init__(self, adw_id: str):
"""Initialize ADWState with a required ADW ID.
Args:
adw_id: The ADW ID for this state (required)
"""
if not adw_id:
raise ValueError("adw_id is required for ADWState")
self.adw_id = adw_id
# Start with minimal state
self.data: Dict[str, Any] = {"adw_id": self.adw_id}
self.logger = logging.getLogger(__name__)
def update(self, **kwargs):
"""Update state with new key-value pairs."""
# Filter to only our core fields
core_fields = {"adw_id", "issue_number", "branch_name", "plan_file", "issue_class", "worktree_path", "backend_port", "frontend_port", "model_set", "all_adws"}
for key, value in kwargs.items():
if key in core_fields:
self.data[key] = value
def get(self, key: str, default=None):
"""Get value from state by key."""
return self.data.get(key, default)
def append_adw_id(self, adw_id: str):
"""Append an ADW ID to the all_adws list if not already present."""
all_adws = self.data.get("all_adws", [])
if adw_id not in all_adws:
all_adws.append(adw_id)
self.data["all_adws"] = all_adws
def get_working_directory(self) -> str:
"""Get the working directory for this ADW instance.
Returns worktree_path if set (for isolated workflows),
otherwise returns the main repo path.
"""
worktree_path = self.data.get("worktree_path")
if worktree_path:
return worktree_path
# Return main repo path (parent of adws directory)
return os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
def get_state_path(self) -> str:
"""Get path to state file."""
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
return os.path.join(project_root, "agents", self.adw_id, self.STATE_FILENAME)
def save(self, workflow_step: Optional[str] = None) -> None:
"""Save state to file in agents/{adw_id}/adw_state.json."""
state_path = self.get_state_path()
os.makedirs(os.path.dirname(state_path), exist_ok=True)
# Create ADWStateData for validation
state_data = ADWStateData(
adw_id=self.data.get("adw_id"),
issue_number=self.data.get("issue_number"),
branch_name=self.data.get("branch_name"),
plan_file=self.data.get("plan_file"),
issue_class=self.data.get("issue_class"),
worktree_path=self.data.get("worktree_path"),
backend_port=self.data.get("backend_port"),
frontend_port=self.data.get("frontend_port"),
model_set=self.data.get("model_set", "base"),
all_adws=self.data.get("all_adws", []),
)
# Save as JSON
with open(state_path, "w") as f:
json.dump(state_data.model_dump(), f, indent=2)
self.logger.info(f"Saved state to {state_path}")
if workflow_step:
self.logger.info(f"State updated by: {workflow_step}")
@classmethod
def load(
cls, adw_id: str, logger: Optional[logging.Logger] = None
) -> Optional["ADWState"]:
"""Load state from file if it exists."""
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
state_path = os.path.join(project_root, "agents", adw_id, cls.STATE_FILENAME)
if not os.path.exists(state_path):
return None
try:
with open(state_path, "r") as f:
data = json.load(f)
# Validate with ADWStateData
state_data = ADWStateData(**data)
# Create ADWState instance
state = cls(state_data.adw_id)
state.data = state_data.model_dump()
if logger:
logger.info(f"🔍 Found existing state from {state_path}")
logger.info(f"State: {json.dumps(state_data.model_dump(), indent=2)}")
return state
except Exception as e:
if logger:
logger.error(f"Failed to load state from {state_path}: {e}")
return None
@classmethod
def from_stdin(cls) -> Optional["ADWState"]:
"""Read state from stdin if available (for piped input).
Returns None if no piped input is available (stdin is a tty).
"""
if sys.stdin.isatty():
return None
try:
input_data = sys.stdin.read()
if not input_data.strip():
return None
data = json.loads(input_data)
adw_id = data.get("adw_id")
if not adw_id:
return None # No valid state without adw_id
state = cls(adw_id)
state.data = data
return state
except (json.JSONDecodeError, EOFError):
return None
def to_stdout(self):
"""Write state to stdout as JSON (for piping to next script)."""
# Only output core fields
output_data = {
"adw_id": self.data.get("adw_id"),
"issue_number": self.data.get("issue_number"),
"branch_name": self.data.get("branch_name"),
"plan_file": self.data.get("plan_file"),
"issue_class": self.data.get("issue_class"),
"worktree_path": self.data.get("worktree_path"),
"backend_port": self.data.get("backend_port"),
"frontend_port": self.data.get("frontend_port"),
"all_adws": self.data.get("all_adws", []),
}
print(json.dumps(output_data, indent=2))
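# Minimal usage sketch (not part of the workflow itself): the piping pattern used
# by composable ADW scripts. The ADW ID "abc12345" and the field values are
# placeholders; this assumes the adw_modules package is importable.
if __name__ == "__main__":
    state = ADWState.from_stdin() or ADWState.load("abc12345") or ADWState("abc12345")
    state.update(issue_number="42", branch_name="chore-issue-42-adw-abc12345-example")
    state.save("state_sketch")
    # Emit the state as JSON so the next script in a pipeline can read it from stdin
    state.to_stdout()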

View File

@@ -0,0 +1,714 @@
"""Shared AI Developer Workflow (ADW) operations."""
import glob
import json
import logging
import os
import subprocess
import re
from typing import Tuple, Optional
from adw_modules.data_types import (
AgentTemplateRequest,
GitHubIssue,
AgentPromptResponse,
IssueClassSlashCommand,
ADWExtractionResult,
)
from adw_modules.agent import execute_template
from adw_modules.github import get_repo_url, extract_repo_path, ADW_BOT_IDENTIFIER
from adw_modules.state import ADWState
from adw_modules.utils import parse_json
# Agent name constants
AGENT_PLANNER = "sdlc_planner"
AGENT_IMPLEMENTOR = "sdlc_implementor"
AGENT_CLASSIFIER = "issue_classifier"
AGENT_BRANCH_GENERATOR = "branch_generator"
AGENT_PR_CREATOR = "pr_creator"
# Available ADW workflows for runtime validation
AVAILABLE_ADW_WORKFLOWS = [
# Isolated workflows (all workflows are now iso-based)
"adw_plan_iso",
"adw_patch_iso",
"adw_build_iso",
"adw_test_iso",
"adw_review_iso",
"adw_document_iso",
"adw_ship_iso",
"adw_sdlc_ZTE_iso", # Zero Touch Execution workflow
"adw_plan_build_iso",
"adw_plan_build_test_iso",
"adw_plan_build_test_review_iso",
"adw_plan_build_document_iso",
"adw_plan_build_review_iso",
"adw_sdlc_iso",
]
def format_issue_message(
adw_id: str, agent_name: str, message: str, session_id: Optional[str] = None
) -> str:
"""Format a message for issue comments with ADW tracking and bot identifier."""
# Always include ADW_BOT_IDENTIFIER to prevent webhook loops
if session_id:
return f"{ADW_BOT_IDENTIFIER} {adw_id}_{agent_name}_{session_id}: {message}"
return f"{ADW_BOT_IDENTIFIER} {adw_id}_{agent_name}: {message}"
def extract_adw_info(text: str, temp_adw_id: str) -> ADWExtractionResult:
"""Extract ADW workflow, ID, and model_set from text using classify_adw agent.
Returns ADWExtractionResult with workflow_command, adw_id, and model_set."""
# Use classify_adw to extract structured info
request = AgentTemplateRequest(
agent_name="adw_classifier",
slash_command="/classify_adw",
args=[text],
adw_id=temp_adw_id,
)
try:
response = execute_template(request) # No logger available in this function
if not response.success:
print(f"Failed to classify ADW: {response.output}")
return ADWExtractionResult() # Empty result
# Parse JSON response using utility that handles markdown
try:
data = parse_json(response.output, dict)
adw_command = data.get("adw_slash_command", "").replace(
"/", ""
) # Remove slash
adw_id = data.get("adw_id")
model_set = data.get("model_set", "base") # Default to "base"
# Validate command
if adw_command and adw_command in AVAILABLE_ADW_WORKFLOWS:
return ADWExtractionResult(
workflow_command=adw_command,
adw_id=adw_id,
model_set=model_set
)
return ADWExtractionResult() # Empty result
except ValueError as e:
print(f"Failed to parse classify_adw response: {e}")
return ADWExtractionResult() # Empty result
except Exception as e:
print(f"Error calling classify_adw: {e}")
return ADWExtractionResult() # Empty result
def classify_issue(
issue: GitHubIssue, adw_id: str, logger: logging.Logger
) -> Tuple[Optional[IssueClassSlashCommand], Optional[str]]:
"""Classify GitHub issue and return appropriate slash command.
Returns (command, error_message) tuple."""
# Use the classify_issue slash command template with minimal payload
# Only include the essential fields: number, title, body
minimal_issue_json = issue.model_dump_json(
by_alias=True, include={"number", "title", "body"}
)
request = AgentTemplateRequest(
agent_name=AGENT_CLASSIFIER,
slash_command="/classify_issue",
args=[minimal_issue_json],
adw_id=adw_id,
)
logger.debug(f"Classifying issue: {issue.title}")
response = execute_template(request)
logger.debug(
f"Classification response: {response.model_dump_json(indent=2, by_alias=True)}"
)
if not response.success:
return None, response.output
# Extract the classification from the response
output = response.output.strip()
# Look for the classification pattern in the output
# Claude might add explanation, so we need to extract just the command
classification_match = re.search(r"(/chore|/bug|/feature|0)", output)
if classification_match:
issue_command = classification_match.group(1)
else:
issue_command = output
if issue_command == "0":
return None, f"No command selected: {response.output}"
if issue_command not in ["/chore", "/bug", "/feature"]:
return None, f"Invalid command selected: {response.output}"
return issue_command, None # type: ignore
def build_plan(
issue: GitHubIssue,
command: str,
adw_id: str,
logger: logging.Logger,
working_dir: Optional[str] = None,
) -> AgentPromptResponse:
"""Build implementation plan for the issue using the specified command."""
# Use minimal payload like classify_issue does
minimal_issue_json = issue.model_dump_json(
by_alias=True, include={"number", "title", "body"}
)
issue_plan_template_request = AgentTemplateRequest(
agent_name=AGENT_PLANNER,
slash_command=command,
args=[str(issue.number), adw_id, minimal_issue_json],
adw_id=adw_id,
working_dir=working_dir,
)
logger.debug(
f"issue_plan_template_request: {issue_plan_template_request.model_dump_json(indent=2, by_alias=True)}"
)
issue_plan_response = execute_template(issue_plan_template_request)
logger.debug(
f"issue_plan_response: {issue_plan_response.model_dump_json(indent=2, by_alias=True)}"
)
return issue_plan_response
def implement_plan(
plan_file: str,
adw_id: str,
logger: logging.Logger,
agent_name: Optional[str] = None,
working_dir: Optional[str] = None,
) -> AgentPromptResponse:
"""Implement the plan using the /implement command."""
# Use provided agent_name or default to AGENT_IMPLEMENTOR
implementor_name = agent_name or AGENT_IMPLEMENTOR
implement_template_request = AgentTemplateRequest(
agent_name=implementor_name,
slash_command="/implement",
args=[plan_file],
adw_id=adw_id,
working_dir=working_dir,
)
logger.debug(
f"implement_template_request: {implement_template_request.model_dump_json(indent=2, by_alias=True)}"
)
implement_response = execute_template(implement_template_request)
logger.debug(
f"implement_response: {implement_response.model_dump_json(indent=2, by_alias=True)}"
)
return implement_response
def generate_branch_name(
issue: GitHubIssue,
issue_class: IssueClassSlashCommand,
adw_id: str,
logger: logging.Logger,
) -> Tuple[Optional[str], Optional[str]]:
"""Generate a git branch name for the issue.
Returns (branch_name, error_message) tuple."""
# Remove the leading slash from issue_class for the branch name
issue_type = issue_class.replace("/", "")
# Use minimal payload like classify_issue does
minimal_issue_json = issue.model_dump_json(
by_alias=True, include={"number", "title", "body"}
)
request = AgentTemplateRequest(
agent_name=AGENT_BRANCH_GENERATOR,
slash_command="/generate_branch_name",
args=[issue_type, adw_id, minimal_issue_json],
adw_id=adw_id,
)
response = execute_template(request)
if not response.success:
return None, response.output
branch_name = response.output.strip()
logger.info(f"Generated branch name: {branch_name}")
return branch_name, None
def create_commit(
agent_name: str,
issue: GitHubIssue,
issue_class: IssueClassSlashCommand,
adw_id: str,
logger: logging.Logger,
working_dir: str,
) -> Tuple[Optional[str], Optional[str]]:
"""Create a git commit with a properly formatted message.
Returns (commit_message, error_message) tuple."""
# Remove the leading slash from issue_class
issue_type = issue_class.replace("/", "")
# Create unique committer agent name by suffixing '_committer'
unique_agent_name = f"{agent_name}_committer"
# Use minimal payload like classify_issue does
minimal_issue_json = issue.model_dump_json(
by_alias=True, include={"number", "title", "body"}
)
request = AgentTemplateRequest(
agent_name=unique_agent_name,
slash_command="/commit",
args=[agent_name, issue_type, minimal_issue_json],
adw_id=adw_id,
working_dir=working_dir,
)
response = execute_template(request)
if not response.success:
return None, response.output
commit_message = response.output.strip()
logger.info(f"Created commit message: {commit_message}")
return commit_message, None
def create_pull_request(
branch_name: str,
issue: Optional[GitHubIssue],
state: ADWState,
logger: logging.Logger,
working_dir: str,
) -> Tuple[Optional[str], Optional[str]]:
"""Create a pull request for the implemented changes.
Returns (pr_url, error_message) tuple."""
# Get plan file from state (may be None for test runs)
plan_file = state.get("plan_file") or "No plan file (test run)"
adw_id = state.get("adw_id")
# If we don't have issue data, try to construct minimal data
if not issue:
issue_data = state.get("issue", {})
issue_json = json.dumps(issue_data) if issue_data else "{}"
elif isinstance(issue, dict):
# Try to reconstruct as GitHubIssue model which handles datetime serialization
from adw_modules.data_types import GitHubIssue
try:
issue_model = GitHubIssue(**issue)
# Use minimal payload like classify_issue does
issue_json = issue_model.model_dump_json(
by_alias=True, include={"number", "title", "body"}
)
except Exception:
# Fallback: use json.dumps with default str converter for datetime
issue_json = json.dumps(issue, default=str)
else:
# Use minimal payload like classify_issue does
issue_json = issue.model_dump_json(
by_alias=True, include={"number", "title", "body"}
)
request = AgentTemplateRequest(
agent_name=AGENT_PR_CREATOR,
slash_command="/pull_request",
args=[branch_name, issue_json, plan_file, adw_id],
adw_id=adw_id,
working_dir=working_dir,
)
response = execute_template(request)
if not response.success:
return None, response.output
pr_url = response.output.strip()
logger.info(f"Created pull request: {pr_url}")
return pr_url, None
def ensure_plan_exists(state: ADWState, issue_number: str) -> str:
"""Find or error if no plan exists for issue.
Used by isolated build workflows in standalone mode."""
# Check if plan file is in state
if state.get("plan_file"):
return state.get("plan_file")
# Check current branch
from adw_modules.git_ops import get_current_branch
branch = get_current_branch()
# Look for plan in branch name
if f"-{issue_number}-" in branch:
# Look for plan file
plans = glob.glob(f"specs/*{issue_number}*.md")
if plans:
return plans[0]
# No plan found
raise ValueError(
f"No plan found for issue {issue_number}. Run adw_plan_iso.py first."
)
def ensure_adw_id(
issue_number: str,
adw_id: Optional[str] = None,
logger: Optional[logging.Logger] = None,
) -> str:
"""Get ADW ID or create a new one and initialize state.
Args:
issue_number: The issue number to find/create ADW ID for
adw_id: Optional existing ADW ID to use
logger: Optional logger instance
Returns:
The ADW ID (existing or newly created)
"""
# If ADW ID provided, check if state exists
if adw_id:
state = ADWState.load(adw_id, logger)
if state:
if logger:
logger.info(f"Found existing ADW state for ID: {adw_id}")
else:
print(f"Found existing ADW state for ID: {adw_id}")
return adw_id
# ADW ID provided but no state exists, create state
state = ADWState(adw_id)
state.update(adw_id=adw_id, issue_number=issue_number)
state.save("ensure_adw_id")
if logger:
logger.info(f"Created new ADW state for provided ID: {adw_id}")
else:
print(f"Created new ADW state for provided ID: {adw_id}")
return adw_id
# No ADW ID provided, create new one with state
from adw_modules.utils import make_adw_id
new_adw_id = make_adw_id()
state = ADWState(new_adw_id)
state.update(adw_id=new_adw_id, issue_number=issue_number)
state.save("ensure_adw_id")
if logger:
logger.info(f"Created new ADW ID and state: {new_adw_id}")
else:
print(f"Created new ADW ID and state: {new_adw_id}")
return new_adw_id
def find_existing_branch_for_issue(
issue_number: str, adw_id: Optional[str] = None, cwd: Optional[str] = None
) -> Optional[str]:
"""Find an existing branch for the given issue number.
Returns branch name if found, None otherwise."""
# List all branches
result = subprocess.run(
["git", "branch", "-a"], capture_output=True, text=True, cwd=cwd
)
if result.returncode != 0:
return None
branches = result.stdout.strip().split("\n")
# Look for branch with standardized pattern: *-issue-{issue_number}-adw-{adw_id}-*
for branch in branches:
branch = branch.strip().replace("* ", "").replace("remotes/origin/", "")
# Check for the standardized pattern
if f"-issue-{issue_number}-" in branch:
if adw_id and f"-adw-{adw_id}-" in branch:
return branch
elif not adw_id:
# Return first match if no adw_id specified
return branch
return None
def find_plan_for_issue(
issue_number: str, adw_id: Optional[str] = None
) -> Optional[str]:
"""Find plan file for the given issue number and optional adw_id.
Returns path to plan file if found, None otherwise."""
import os
# Get project root
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
agents_dir = os.path.join(project_root, "agents")
if not os.path.exists(agents_dir):
return None
# If adw_id is provided, check specific directory first
if adw_id:
plan_path = os.path.join(agents_dir, adw_id, AGENT_PLANNER, "plan.md")
if os.path.exists(plan_path):
return plan_path
# Otherwise, search all agent directories
for agent_id in os.listdir(agents_dir):
agent_path = os.path.join(agents_dir, agent_id)
if os.path.isdir(agent_path):
plan_path = os.path.join(agent_path, AGENT_PLANNER, "plan.md")
if os.path.exists(plan_path):
# Check if this plan is for our issue by reading branch info or checking commits
# For now, return the first plan found (can be improved)
return plan_path
return None
def create_or_find_branch(
issue_number: str,
issue: GitHubIssue,
state: ADWState,
logger: logging.Logger,
cwd: Optional[str] = None,
) -> Tuple[str, Optional[str]]:
"""Create or find a branch for the given issue.
1. First checks state for existing branch name
2. Then looks for existing branches matching the issue
3. If none found, classifies the issue and creates a new branch
Returns (branch_name, error_message) tuple.
"""
# 1. Check state for branch name
branch_name = state.get("branch_name") or state.get("branch", {}).get("name")
if branch_name:
logger.info(f"Found branch in state: {branch_name}")
# Check if we need to checkout
from adw_modules.git_ops import get_current_branch
current = get_current_branch(cwd=cwd)
if current != branch_name:
result = subprocess.run(
["git", "checkout", branch_name],
capture_output=True,
text=True,
cwd=cwd,
)
if result.returncode != 0:
# Branch might not exist locally, try to create from remote
result = subprocess.run(
["git", "checkout", "-b", branch_name, f"origin/{branch_name}"],
capture_output=True,
text=True,
cwd=cwd,
)
if result.returncode != 0:
return "", f"Failed to checkout branch: {result.stderr}"
return branch_name, None
# 2. Look for existing branch
adw_id = state.get("adw_id")
existing_branch = find_existing_branch_for_issue(issue_number, adw_id, cwd=cwd)
if existing_branch:
logger.info(f"Found existing branch: {existing_branch}")
# Checkout the branch
result = subprocess.run(
["git", "checkout", existing_branch],
capture_output=True,
text=True,
cwd=cwd,
)
if result.returncode != 0:
return "", f"Failed to checkout branch: {result.stderr}"
state.update(branch_name=existing_branch)
return existing_branch, None
# 3. Create new branch - classify issue first
logger.info("No existing branch found, creating new one")
# Classify the issue
issue_command, error = classify_issue(issue, adw_id, logger)
if error:
return "", f"Failed to classify issue: {error}"
state.update(issue_class=issue_command)
# Generate branch name
branch_name, error = generate_branch_name(issue, issue_command, adw_id, logger)
if error:
return "", f"Failed to generate branch name: {error}"
# Create the branch
from adw_modules.git_ops import create_branch
success, error = create_branch(branch_name, cwd=cwd)
if not success:
return "", f"Failed to create branch: {error}"
state.update(branch_name=branch_name)
logger.info(f"Created and checked out new branch: {branch_name}")
return branch_name, None
def find_spec_file(state: ADWState, logger: logging.Logger) -> Optional[str]:
"""Find the spec file from state or by examining git diff.
For isolated workflows, automatically uses worktree_path from state.
"""
# Get worktree path if in isolated workflow
worktree_path = state.get("worktree_path")
# Check if spec file is already in state (from plan phase)
spec_file = state.get("plan_file")
if spec_file:
# If worktree_path exists and spec_file is relative, make it absolute
if worktree_path and not os.path.isabs(spec_file):
spec_file = os.path.join(worktree_path, spec_file)
if os.path.exists(spec_file):
logger.info(f"Using spec file from state: {spec_file}")
return spec_file
# Otherwise, try to find it from git diff
logger.info("Looking for spec file in git diff")
result = subprocess.run(
["git", "diff", "origin/main", "--name-only"],
capture_output=True,
text=True,
cwd=worktree_path,
)
if result.returncode == 0:
files = result.stdout.strip().split("\n")
spec_files = [f for f in files if f.startswith("specs/") and f.endswith(".md")]
if spec_files:
# Use the first spec file found
spec_file = spec_files[0]
if worktree_path:
spec_file = os.path.join(worktree_path, spec_file)
logger.info(f"Found spec file: {spec_file}")
return spec_file
# If still not found, try to derive from branch name
branch_name = state.get("branch_name")
if branch_name:
# Extract issue number from branch name
import re
match = re.search(r"issue-(\d+)", branch_name)
if match:
issue_num = match.group(1)
adw_id = state.get("adw_id")
# Look for spec files matching the pattern
import glob
# Use worktree_path if provided, otherwise current directory
search_dir = worktree_path if worktree_path else os.getcwd()
pattern = os.path.join(
search_dir, f"specs/issue-{issue_num}-adw-{adw_id}*.md"
)
spec_files = glob.glob(pattern)
if spec_files:
spec_file = spec_files[0]
logger.info(f"Found spec file by pattern: {spec_file}")
return spec_file
logger.warning("No spec file found")
return None
def create_and_implement_patch(
adw_id: str,
review_change_request: str,
logger: logging.Logger,
agent_name_planner: str,
agent_name_implementor: str,
spec_path: Optional[str] = None,
issue_screenshots: Optional[str] = None,
working_dir: Optional[str] = None,
) -> Tuple[Optional[str], AgentPromptResponse]:
"""Create a patch plan and implement it.
Returns (patch_file_path, implement_response) tuple."""
# Create patch plan using /patch command
args = [adw_id, review_change_request]
# Add optional arguments in the correct order
if spec_path:
args.append(spec_path)
else:
args.append("") # Empty string for optional spec_path
args.append(agent_name_planner)
if issue_screenshots:
args.append(issue_screenshots)
request = AgentTemplateRequest(
agent_name=agent_name_planner,
slash_command="/patch",
args=args,
adw_id=adw_id,
working_dir=working_dir,
)
logger.debug(
f"Patch plan request: {request.model_dump_json(indent=2, by_alias=True)}"
)
response = execute_template(request)
logger.debug(
f"Patch plan response: {response.model_dump_json(indent=2, by_alias=True)}"
)
if not response.success:
logger.error(f"Error creating patch plan: {response.output}")
# Return None and a failed response
return None, AgentPromptResponse(
output=f"Failed to create patch plan: {response.output}", success=False
)
# Extract the patch plan file path from the response
patch_file_path = response.output.strip()
# Validate that it looks like a file path
if "specs/patch/" not in patch_file_path or not patch_file_path.endswith(".md"):
logger.error(f"Invalid patch plan path returned: {patch_file_path}")
return None, AgentPromptResponse(
output=f"Invalid patch plan path: {patch_file_path}", success=False
)
logger.info(f"Created patch plan: {patch_file_path}")
# Now implement the patch plan using the provided implementor agent name
implement_response = implement_plan(
patch_file_path, adw_id, logger, agent_name_implementor, working_dir=working_dir
)
return patch_file_path, implement_response
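
For context, a minimal usage sketch of `create_and_implement_patch` (assuming this module's helpers are already in scope; the ADW id, change request, and agent names below are illustrative, not taken from a real run):

```python
import logging

logger = logging.getLogger("adw")

# Hypothetical invocation; in the real workflows these values come from
# ADWState and the review step's output.
patch_path, implement_response = create_and_implement_patch(
    adw_id="abc12345",
    review_change_request="Fix the submit button color flagged in review",
    logger=logger,
    agent_name_planner="patch_planner",
    agent_name_implementor="patch_implementor",
)
if implement_response.success:
    logger.info(f"Patch implemented from plan: {patch_path}")
else:
    logger.error(f"Patch failed: {implement_response.output}")
```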

View File

@@ -0,0 +1,243 @@
"""Worktree and port management operations for isolated ADW workflows.
Provides utilities for creating and managing git worktrees under trees/<adw_id>/
and allocating unique ports for each isolated instance.
"""
import os
import shutil  # used by remove_worktree's manual cleanup fallback
import subprocess
import logging
import socket
from typing import Tuple, Optional
from adw_modules.state import ADWState
def create_worktree(adw_id: str, branch_name: str, logger: logging.Logger) -> Tuple[Optional[str], Optional[str]]:
"""Create a git worktree for isolated ADW execution.
Args:
adw_id: The ADW ID for this worktree
branch_name: The branch name to create the worktree from
logger: Logger instance
Returns:
Tuple of (worktree_path, error_message)
worktree_path is the absolute path if successful, None if error
"""
# Get project root (parent of adws directory)
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
# Create trees directory if it doesn't exist
trees_dir = os.path.join(project_root, "trees")
os.makedirs(trees_dir, exist_ok=True)
# Construct worktree path
worktree_path = os.path.join(trees_dir, adw_id)
# Check if worktree already exists
if os.path.exists(worktree_path):
logger.warning(f"Worktree already exists at {worktree_path}")
return worktree_path, None
# First, fetch latest changes from origin
logger.info("Fetching latest changes from origin")
fetch_result = subprocess.run(
["git", "fetch", "origin"],
capture_output=True,
text=True,
cwd=project_root
)
if fetch_result.returncode != 0:
logger.warning(f"Failed to fetch from origin: {fetch_result.stderr}")
# Create the worktree using git, branching from origin/main
# Use -b to create the branch as part of worktree creation
cmd = ["git", "worktree", "add", "-b", branch_name, worktree_path, "origin/main"]
result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root)
if result.returncode != 0:
# If branch already exists, try without -b
if "already exists" in result.stderr:
cmd = ["git", "worktree", "add", worktree_path, branch_name]
result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root)
if result.returncode != 0:
error_msg = f"Failed to create worktree: {result.stderr}"
logger.error(error_msg)
return None, error_msg
logger.info(f"Created worktree at {worktree_path} for branch {branch_name}")
return worktree_path, None
def validate_worktree(adw_id: str, state: ADWState) -> Tuple[bool, Optional[str]]:
"""Validate worktree exists in state, filesystem, and git.
Performs three-way validation to ensure consistency:
1. State has worktree_path
2. Directory exists on filesystem
3. Git knows about the worktree
Args:
adw_id: The ADW ID to validate
state: The ADW state object
Returns:
Tuple of (is_valid, error_message)
"""
# Check state has worktree_path
worktree_path = state.get("worktree_path")
if not worktree_path:
return False, "No worktree_path in state"
# Check directory exists
if not os.path.exists(worktree_path):
return False, f"Worktree directory not found: {worktree_path}"
# Check git knows about it
result = subprocess.run(["git", "worktree", "list"], capture_output=True, text=True)
if worktree_path not in result.stdout:
return False, "Worktree not registered with git"
return True, None
def get_worktree_path(adw_id: str) -> str:
"""Get absolute path to worktree.
Args:
adw_id: The ADW ID
Returns:
Absolute path to worktree directory
"""
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
return os.path.join(project_root, "trees", adw_id)
def remove_worktree(adw_id: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]:
"""Remove a worktree and clean up.
Args:
adw_id: The ADW ID for the worktree to remove
logger: Logger instance
Returns:
Tuple of (success, error_message)
"""
worktree_path = get_worktree_path(adw_id)
# First remove via git
cmd = ["git", "worktree", "remove", worktree_path, "--force"]
result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode != 0:
# Try to clean up manually if git command failed
if os.path.exists(worktree_path):
try:
shutil.rmtree(worktree_path)
logger.warning(f"Manually removed worktree directory: {worktree_path}")
except Exception as e:
return False, f"Failed to remove worktree: {result.stderr}, manual cleanup failed: {e}"
logger.info(f"Removed worktree at {worktree_path}")
return True, None
def setup_worktree_environment(worktree_path: str, backend_port: int, frontend_port: int, logger: logging.Logger) -> None:
"""Set up worktree environment by creating .ports.env file.
The actual environment setup (copying .env files, installing dependencies) is handled
by the install_worktree.md command which runs inside the worktree.
Args:
worktree_path: Path to the worktree
backend_port: Backend port number
frontend_port: Frontend port number
logger: Logger instance
"""
# Create .ports.env file with port configuration
ports_env_path = os.path.join(worktree_path, ".ports.env")
with open(ports_env_path, "w") as f:
f.write(f"BACKEND_PORT={backend_port}\n")
f.write(f"FRONTEND_PORT={frontend_port}\n")
f.write(f"VITE_BACKEND_URL=http://localhost:{backend_port}\n")
logger.info(f"Created .ports.env with Backend: {backend_port}, Frontend: {frontend_port}")
# Port management functions
def get_ports_for_adw(adw_id: str) -> Tuple[int, int]:
"""Deterministically assign ports based on ADW ID.
Args:
adw_id: The ADW ID
Returns:
Tuple of (backend_port, frontend_port)
"""
# Convert first 8 chars of ADW ID to index (0-14)
# Using base 36 conversion and modulo to get consistent mapping
try:
# Take first 8 alphanumeric chars and convert from base 36
id_chars = ''.join(c for c in adw_id[:8] if c.isalnum())
index = int(id_chars, 36) % 15
except ValueError:
# Fallback to simple hash if conversion fails
index = hash(adw_id) % 15
backend_port = 9100 + index
frontend_port = 9200 + index
return backend_port, frontend_port
def is_port_available(port: int) -> bool:
"""Check if a port is available for binding.
Args:
port: Port number to check
Returns:
True if port is available, False otherwise
"""
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
s.bind(('localhost', port))
return True
except (socket.error, OSError):
return False
def find_next_available_ports(adw_id: str, max_attempts: int = 15) -> Tuple[int, int]:
"""Find available ports starting from deterministic assignment.
Args:
adw_id: The ADW ID
max_attempts: Maximum number of attempts (default 15)
Returns:
Tuple of (backend_port, frontend_port)
Raises:
RuntimeError: If no available ports found
"""
base_backend, base_frontend = get_ports_for_adw(adw_id)
base_index = base_backend - 9100
for offset in range(max_attempts):
index = (base_index + offset) % 15
backend_port = 9100 + index
frontend_port = 9200 + index
if is_port_available(backend_port) and is_port_available(frontend_port):
return backend_port, frontend_port
raise RuntimeError("No available ports in the allocated range")
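
Taken together, these helpers compose as in the sketch below (the ADW id and branch name are illustrative). The port mapping is deterministic: the first eight alphanumeric characters of the ADW id are parsed as a base-36 integer, and `value % 15` gives the index, so ports land in 9100-9114 and 9200-9214.

```python
import logging

# Module path confirmed by other ADW scripts that import from adw_modules.worktree_ops
from adw_modules.worktree_ops import (
    create_worktree,
    find_next_available_ports,
    setup_worktree_environment,
)

logger = logging.getLogger("adw")
adw_id = "abc12345"  # illustrative 8-character ADW id

# Create the isolated worktree under trees/<adw_id>/
worktree_path, error = create_worktree(
    adw_id, f"feat-issue-42-adw-{adw_id}-example", logger
)
if error is None:
    # Deterministic ports first; falls back to the next free pair if taken
    backend_port, frontend_port = find_next_available_ports(adw_id)
    setup_worktree_environment(worktree_path, backend_port, frontend_port, logger)
```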

View File

@@ -0,0 +1,106 @@
# Bug Planning
Create a new plan to resolve the `Bug` using the exact specified markdown `Plan Format`. Follow the `Instructions` to create the plan and use the `Relevant Files` to focus on the right files.
## Variables
issue_number: $1
adw_id: $2
issue_json: $3
## Instructions
- IMPORTANT: You're writing a plan to resolve a bug based on the `Bug` that will add value to the application.
- IMPORTANT: The `Bug` describes the bug that will be resolved but remember we're not resolving the bug, we're creating the plan that will be used to resolve the bug based on the `Plan Format` below.
- You're writing a plan to resolve a bug, it should be thorough and precise so we fix the root cause and prevent regressions.
- Create the plan in the `specs/` directory with filename: `issue-{issue_number}-adw-{adw_id}-sdlc_planner-{descriptive-name}.md`
- Replace `{descriptive-name}` with a short, descriptive name based on the bug (e.g., "fix-login-error", "resolve-timeout", "patch-memory-leak")
- Use the plan format below to create the plan.
- Research the codebase to understand the bug, reproduce it, and put together a plan to fix it.
- IMPORTANT: Replace every <placeholder> in the `Plan Format` with the requested value. Add as much detail as needed to fix the bug.
- Use your reasoning model: THINK HARD about the bug, its root cause, and the steps to fix it properly.
- IMPORTANT: Be surgical with your bug fix, solve the bug at hand and don't fall off track.
- IMPORTANT: We want the minimal number of changes that will fix and address the bug.
- Don't use decorators. Keep it simple.
- If you need a new library, use `uv add` and be sure to report it in the `Notes` section of the `Plan Format`.
- IMPORTANT: If the bug affects the UI or user interactions:
- Add a task in the `Step by Step Tasks` section to create a separate E2E test file in `.claude/commands/e2e/test_<descriptive_name>.md` based on examples in that directory
- Add E2E test validation to your Validation Commands section
- IMPORTANT: When you fill out the `Plan Format: Relevant Files` section, add an instruction to read `.claude/commands/test_e2e.md`, and `.claude/commands/e2e/test_basic_query.md` to understand how to create an E2E test file. List your new E2E test file to the `Plan Format: New Files` section.
- To be clear, we're not creating a new E2E test file, we're creating a task to create a new E2E test file in the `Plan Format` below
- Respect requested files in the `Relevant Files` section.
- Start your research by reading the `README.md` file.
## Relevant Files
Focus on the following files:
- `README.md` - Contains the project overview and instructions.
- `app/server/**` - Contains the codebase server.
- `app/client/**` - Contains the codebase client.
- `scripts/**` - Contains the scripts to start and stop the server + client.
- `adws/**` - Contains the AI Developer Workflow (ADW) scripts.
- Read `.claude/commands/conditional_docs.md` to check if your task requires additional documentation
- If your task matches any of the conditions listed, include those documentation files in the `Plan Format: Relevant Files` section of your plan
Ignore all other files in the codebase.
## Plan Format
```md
# Bug: <bug name>
## Metadata
issue_number: `{issue_number}`
adw_id: `{adw_id}`
issue_json: `{issue_json}`
## Bug Description
<describe the bug in detail, including symptoms and expected vs actual behavior>
## Problem Statement
<clearly define the specific problem that needs to be solved>
## Solution Statement
<describe the proposed solution approach to fix the bug>
## Steps to Reproduce
<list exact steps to reproduce the bug>
## Root Cause Analysis
<analyze and explain the root cause of the bug>
## Relevant Files
Use these files to fix the bug:
<find and list the files that are relevant to the bug describe why they are relevant in bullet points. If there are new files that need to be created to fix the bug, list them in an h3 'New Files' section.>
## Step by Step Tasks
IMPORTANT: Execute every step in order, top to bottom.
<list step by step tasks as h3 headers plus bullet points. use as many h3 headers as needed to fix the bug. Order matters, start with the foundational shared changes required to fix the bug then move on to the specific changes required to fix the bug. Include tests that will validate the bug is fixed with zero regressions.>
<If the bug affects UI, include a task to create a E2E test file. Your task should look like: "Read `.claude/commands/e2e/test_basic_query.md` and `.claude/commands/e2e/test_complex_query.md` and create a new E2E test file in `.claude/commands/e2e/test_<descriptive_name>.md` that validates the bug is fixed, be specific with the steps to prove the bug is fixed. We want the minimal set of steps to validate the bug is fixed and screen shots to prove it if possible.">
<Your last step should be running the `Validation Commands` to validate the bug is fixed with zero regressions.>
## Validation Commands
Execute every command to validate the bug is fixed with zero regressions.
<list commands you'll use to validate with 100% confidence the bug is fixed with zero regressions. every command must execute without errors so be specific about what you want to run to validate the bug is fixed with zero regressions. Include commands to reproduce the bug before and after the fix.>
<If you created an E2E test, include the following validation step: "Read .claude/commands/test_e2e.md`, then read and execute your new E2E `.claude/commands/e2e/test_<descriptive_name>.md` test file to validate this functionality works.">
- `cd app/server && uv run pytest` - Run server tests to validate the bug is fixed with zero regressions
- `cd app/client && bun tsc --noEmit` - Run frontend tests to validate the bug is fixed with zero regressions
- `cd app/client && bun run build` - Run frontend build to validate the bug is fixed with zero regressions
## Notes
<optionally list any additional notes or context that are relevant to the bug that will be helpful to the developer>
```
## Bug
Extract the bug details from the `issue_json` variable (parse the JSON and use the title and body fields).
## Report
- IMPORTANT: Return exclusively the path to the plan file created and nothing else.

View File

@@ -0,0 +1,57 @@
# ADW Workflow Extraction
Extract ADW workflow information from the text below and return a JSON response.
## Instructions
- Look for ADW workflow commands in the text (e.g., `/adw_plan_iso`, `/adw_build_iso`, `/adw_test_iso`, `/adw_review_iso`, `/adw_document_iso`, `/adw_patch_iso`, `/adw_plan_build_iso`, `/adw_plan_build_test_iso`, `/adw_plan_build_test_review_iso`, `/adw_sdlc_iso`, `/adw_sdlc_ZTE_iso`)
- Also recognize commands without the `_iso` suffix and automatically add it (e.g., `/adw_plan` → `/adw_plan_iso`)
- Also recognize variations like `adw_plan_build`, `adw plan build`, `/adw plan then build`, etc. and map to the correct command
- Look for ADW IDs (8-character alphanumeric strings, often after "adw_id:" or "ADW ID:" or similar)
- Look for model set specification: "model_set base" or "model_set heavy" (case insensitive)
- Default to "base" if no model_set is specified
- Also recognize variations like "model set: heavy", "modelset heavy", etc.
- Return a JSON object with the extracted information
- If no ADW workflow is found, return empty JSON: `{}`
- IMPORTANT: DO NOT RUN the `adw_sdlc_ZTE_iso` workflows unless `ZTE` is EXPLICITLY uppercased. This is a dangerous workflow and it needs to be absolutely clear when we're running it. If zte is not capitalized, then run the non zte version `/adw_sdlc_iso`.
## Valid ADW Commands
- `/adw_plan_iso` - Planning only
- `/adw_build_iso` - Building only (requires adw_id)
- `/adw_test_iso` - Testing only (requires adw_id)
- `/adw_review_iso` - Review only (requires adw_id)
- `/adw_document_iso` - Documentation only (requires adw_id)
- `/adw_ship_iso` - Ship/approve and merge PR (requires adw_id)
- `/adw_patch_iso` - Direct patch from issue
- `/adw_plan_build_iso` - Plan + Build
- `/adw_plan_build_test_iso` - Plan + Build + Test
- `/adw_plan_build_review_iso` - Plan + Build + Review (skips test)
- `/adw_plan_build_document_iso` - Plan + Build + Document (skips test and review)
- `/adw_plan_build_test_review_iso` - Plan + Build + Test + Review
- `/adw_sdlc_iso` - Complete SDLC: Plan + Build + Test + Review + Document
- `/adw_sdlc_zte_iso` - Zero Touch Execution: Complete SDLC + auto-merge to production. Note: as per instructions, 'ZTE' must be capitalized. Do not run this if 'zte' is not capitalized.
## Response Format
Respond ONLY with a JSON object in this format:
```json
{
"adw_slash_command": "/adw_plan",
"adw_id": "abc12345",
"model_set": "base"
}
```
Fields:
- `adw_slash_command`: The ADW command found (include the slash)
- `adw_id`: The 8-character ADW ID if found
- `model_set`: The model set to use ("base" or "heavy"), defaults to "base" if not specified
If only some fields are found, include only those fields.
If nothing is found, return: `{}`
IMPORTANT: Always include `model_set` with value "base" if no model_set is explicitly mentioned in the text.
## Text to Analyze
$ARGUMENTS

View File

@@ -0,0 +1,22 @@
# Github Issue Command Selection
Based on the `Github Issue` below, follow the `Instructions` to select the appropriate command to execute based on the `Command Mapping`.
## Instructions
- Based on the details in the `Github Issue`, select the appropriate command to execute.
- IMPORTANT: Respond exclusively with '/' followed by the command to execute based on the `Command Mapping` below.
- Use the command mapping to help you decide which command to respond with.
- Don't examine the codebase just focus on the `Github Issue` and the `Command Mapping` below to determine the appropriate command to execute.
## Command Mapping
- Respond with `/chore` if the issue is a chore.
- Respond with `/bug` if the issue is a bug.
- Respond with `/feature` if the issue is a feature.
- Respond with `/patch` if the issue is a patch.
- Respond with `0` if the issue isn't any of the above.
## Github Issue
$ARGUMENTS

View File

@@ -0,0 +1,44 @@
# Cleanup ADW Worktrees
Clean up isolated ADW worktrees and their associated resources.
## Variables
action: $1 (all|specific|list)
adw_id: $2 (optional, required if action is "specific")
## Instructions
Manage git worktrees created by isolated ADW workflows:
- If action is "list": Show all worktrees under trees/ directory
- If action is "specific": Remove the specific worktree for the given adw_id
- If action is "all": Remove all worktrees under trees/ directory
## Run
Based on the action:
### List worktrees
If action is "list":
- Run `git worktree list | grep "trees/"` to show isolated worktrees
- List the contents of the trees/ directory with sizes
### Remove specific worktree
If action is "specific" and adw_id is provided:
- Check if trees/{adw_id} exists
- Run `git worktree remove trees/{adw_id}` to remove it
- Report success or any errors
### Remove all worktrees
If action is "all":
- First list all worktrees that will be removed
- For each worktree under trees/, run `git worktree remove`
- Clean up any remaining directories under trees/
- Run `git worktree prune` to clean up any stale entries
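A minimal Python sketch of the bulk removal (assumes it runs from the repository root; the command itself may simply run the git commands above directly):

```python
import os
import subprocess

trees_dir = "trees"
removed = 0
if os.path.isdir(trees_dir):
    for adw_id in sorted(os.listdir(trees_dir)):
        worktree_path = os.path.join(trees_dir, adw_id)
        # --force mirrors the cleanup behaviour used elsewhere in the ADW tooling
        result = subprocess.run(
            ["git", "worktree", "remove", worktree_path, "--force"],
            capture_output=True,
            text=True,
        )
        if result.returncode == 0:
            removed += 1
# Clear any stale worktree entries git still tracks
subprocess.run(["git", "worktree", "prune"])
print(f"Removed {removed} worktree(s)")
```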
## Report
Report the results of the cleanup operation:
- Number of worktrees removed
- Any errors encountered
- Current status after cleanup

View File

@@ -0,0 +1,129 @@
# Document Feature
Generate concise markdown documentation for implemented features by analyzing code changes and specifications. This command creates documentation in the `app_docs/` directory based on git diff analysis against the main branch and the original feature specification.
## Variables
adw_id: $1
spec_path: $2 if provided, otherwise leave it blank
documentation_screenshots_dir: $3 if provided, otherwise leave it blank
## Instructions
### 1. Analyze Changes
- Run `git diff origin/main --stat` to see files changed and lines modified
- Run `git diff origin/main --name-only` to get the list of changed files
- For significant changes (>50 lines), run `git diff origin/main <file>` on specific files to understand the implementation details
### 2. Read Specification (if provided)
- If `spec_path` is provided, read the specification file to understand:
- Original requirements and goals
- Expected functionality
- Success criteria
- Use this to frame the documentation around what was requested vs what was built
### 3. Analyze and Copy Screenshots (if provided)
- If `documentation_screenshots_dir` is provided, list and examine screenshots
- Create `app_docs/assets/` directory if it doesn't exist
- Copy all screenshot files (*.png) from `documentation_screenshots_dir` to `app_docs/assets/`
- Preserve original filenames
- Use `cp` command to copy files
- Use visual context to better describe UI changes or visual features
- Reference screenshots in documentation using relative paths (e.g., `assets/screenshot-name.png`)
### 4. Generate Documentation
- Create a new documentation file in `app_docs/` directory
- Filename format: `feature-{adw_id}-{descriptive-name}.md`
- Replace `{descriptive-name}` with a short feature name (e.g., "user-auth", "data-export", "search-ui")
- Follow the Documentation Format below
- Focus on:
- What was built (based on git diff)
- How it works (technical implementation)
- How to use it (user perspective)
- Any configuration or setup required
### 5. Update Conditional Documentation
- After creating the documentation file, read `.claude/commands/conditional_docs.md`
- Add an entry for the new documentation file with appropriate conditions
- The entry should help future developers know when to read this documentation
- Format the entry following the existing pattern in the file
### 6. Final Output
- When you finish writing the documentation and updating conditional_docs.md, return exclusively the path to the documentation file created and nothing else
## Documentation Format
```md
# <Feature Title>
**ADW ID:** <adw_id>
**Date:** <current date>
**Specification:** <spec_path or "N/A">
## Overview
<2-3 sentence summary of what was built and why>
## Screenshots
<If documentation_screenshots_dir was provided and screenshots were copied>
![<Description>](assets/<screenshot-filename.png>)
## What Was Built
<List the main components/features implemented based on the git diff analysis>
- <Component/feature 1>
- <Component/feature 2>
- <etc>
## Technical Implementation
### Files Modified
<List key files changed with brief description of changes>
- `<file_path>`: <what was changed/added>
- `<file_path>`: <what was changed/added>
### Key Changes
<Describe the most important technical changes in 3-5 bullet points>
## How to Use
<Step-by-step instructions for using the new feature>
1. <Step 1>
2. <Step 2>
3. <etc>
## Configuration
<Any configuration options, environment variables, or settings>
## Testing
<Brief description of how to test the feature>
## Notes
<Any additional context, limitations, or future considerations>
```
## Conditional Docs Entry Format
After creating the documentation, add this entry to `.claude/commands/conditional_docs.md`:
```md
- app_docs/<your_documentation_file>.md
- Conditions:
- When working with <feature area>
- When implementing <related functionality>
- When troubleshooting <specific issues>
```
## Report
- IMPORTANT: Return exclusively the path to the documentation file created and nothing else.

View File

@@ -0,0 +1,34 @@
# Generate Git Branch Name
Based on the `Instructions` below, take the `Variables` and follow the `Run` section to generate a concise Git branch name following the specified format. Then follow the `Report` section to report the results of your work.
## Variables
issue_class: $1
adw_id: $2
issue: $3
## Instructions
- Generate a branch name in the format: `<issue_class>-issue-<issue_number>-adw-<adw_id>-<concise_name>`
- The `<concise_name>` should be:
- 3-6 words maximum
- All lowercase
- Words separated by hyphens
- Descriptive of the main task/feature
- No special characters except hyphens
- Examples:
- `feat-issue-123-adw-a1b2c3d4-add-user-auth`
- `bug-issue-456-adw-e5f6g7h8-fix-login-error`
- `chore-issue-789-adw-i9j0k1l2-update-dependencies`
- `test-issue-323-adw-m3n4o5p6-fix-failing-tests`
- Extract the issue number, title, and body from the issue JSON
## Run
Generate the branch name based on the instructions above.
Do NOT create or checkout any branches - just generate the name.
## Report
Return ONLY the generated branch name (no other text)

View File

@@ -0,0 +1,82 @@
# Install Worktree
This command sets up an isolated worktree environment with custom port configuration.
## Parameters
- Worktree path: {0}
- Backend port: {1}
- Frontend port: {2}
## Read
- .env.sample (from parent repo)
- ./app/server/.env.sample (from parent repo)
- .mcp.json (from parent repo)
- playwright-mcp-config.json (from parent repo)
## Steps
1. **Navigate to worktree directory**
```bash
cd {0}
```
2. **Create port configuration file**
Create `.ports.env` with:
```
BACKEND_PORT={1}
FRONTEND_PORT={2}
VITE_BACKEND_URL=http://localhost:{1}
```
3. **Copy and update .env files**
- Copy `.env` from parent repo if it exists
- Append `.ports.env` contents to `.env`
- Copy `app/server/.env` from parent repo if it exists
- Append `.ports.env` contents to `app/server/.env`
4. **Copy and configure MCP files**
- Copy `.mcp.json` from parent repo if it exists
- Copy `playwright-mcp-config.json` from parent repo if it exists
- These files are needed for Model Context Protocol and Playwright automation
After copying, update paths to use absolute paths:
- Get the absolute worktree path: `WORKTREE_PATH=$(pwd)`
- Update `.mcp.json`:
- Find the line containing `"./playwright-mcp-config.json"`
- Replace it with `"${WORKTREE_PATH}/playwright-mcp-config.json"`
- Use a JSON-aware tool or careful string replacement to maintain valid JSON
- Update `playwright-mcp-config.json`:
- Find the line containing `"dir": "./videos"`
- Replace it with `"dir": "${WORKTREE_PATH}/videos"`
- Create the videos directory: `mkdir -p ${WORKTREE_PATH}/videos`
- This ensures MCP configuration works correctly regardless of execution context
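   One JSON-aware way to do the rewrite, sketched in Python (assumes the exact relative strings shown above and is not part of the command itself; run it from the worktree root):
   ```python
   import json
   import os
   from pathlib import Path

   worktree_path = os.getcwd()

   def rewrite_value(node, old, new):
       # Recursively replace a matching string value anywhere in the parsed JSON
       if isinstance(node, dict):
           return {k: rewrite_value(v, old, new) for k, v in node.items()}
       if isinstance(node, list):
           return [rewrite_value(v, old, new) for v in node]
       return new if node == old else node

   # Point .mcp.json at the absolute Playwright config path
   mcp = Path(".mcp.json")
   mcp.write_text(json.dumps(
       rewrite_value(json.loads(mcp.read_text()),
                     "./playwright-mcp-config.json",
                     f"{worktree_path}/playwright-mcp-config.json"),
       indent=2,
   ))

   # Point the Playwright config's video directory at the worktree
   pw = Path("playwright-mcp-config.json")
   pw.write_text(json.dumps(
       rewrite_value(json.loads(pw.read_text()),
                     "./videos",
                     f"{worktree_path}/videos"),
       indent=2,
   ))

   os.makedirs(f"{worktree_path}/videos", exist_ok=True)
   ```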
5. **Install backend dependencies**
```bash
cd app/server && uv sync --all-extras
```
6. **Install frontend dependencies**
```bash
cd ../client && bun install
```
7. **Setup database**
```bash
cd ../.. && ./scripts/reset_db.sh
```
## Error Handling
- If parent .env files don't exist, create minimal versions from .env.sample files
- Ensure all paths are absolute to avoid confusion
## Report
- List all files created/modified (including MCP configuration files)
- Show port assignments
- Confirm dependencies installed
- Note any missing parent .env files that need user attention
- Note any missing MCP configuration files
- Show the updated absolute paths in:
- `.mcp.json` (should show full path to playwright-mcp-config.json)
- `playwright-mcp-config.json` (should show full path to videos directory)
- Confirm videos directory was created

View File

@@ -0,0 +1,92 @@
# Patch Plan
Create a **focused patch plan** to resolve a specific issue based on the `review_change_request`. Follow the `Instructions` to create a concise plan that addresses the issue with minimal, targeted changes.
## Variables
adw_id: $1
review_change_request: $2
spec_path: $3 if provided, otherwise leave it blank
agent_name: $4 if provided, otherwise use 'patch_agent'
issue_screenshots: $ARGUMENT (optional) - comma-separated list of screenshot paths if provided
## Instructions
- IMPORTANT: You're creating a patch plan to fix a specific review issue. Keep changes small, focused, and targeted
- Read the original specification (spec) file at `spec_path` if provided to understand the context and requirements
- IMPORTANT: Use the `review_change_request` to understand exactly what needs to change and use it as the basis for your patch plan
- If `issue_screenshots` are provided, examine them to better understand the visual context of the issue
- Create the patch plan in `specs/patch/` directory with filename: `patch-adw-{adw_id}-{descriptive-name}.md`
- Replace `{descriptive-name}` with a short name based on the issue (e.g., "fix-button-color", "update-validation", "correct-layout")
- IMPORTANT: This is a PATCH - keep the scope minimal. Only fix what's described in the `review_change_request` and nothing more. Address only the `review_change_request`.
- Run `git diff --stat`. If changes are available, use them to understand what has already been done in the codebase so you can detail the exact changes needed in the patch plan.
- Ultra think about the most efficient way to implement the solution with minimal code changes
- Base your `Plan Format: Validation` on the validation steps from `spec_path` if provided
- If any tests fail in the validation steps, you must fix them.
- If not provided, READ `.claude/commands/test.md: ## Test Execution Sequence` and execute the tests to understand the tests that need to be run to validate the patch.
- Replace every <placeholder> in the `Plan Format` with specific implementation details
- IMPORTANT: When you finish writing the patch plan, return exclusively the path to the patch plan file created and nothing else.
## Relevant Files
Focus on the following files:
- `README.md` - Contains the project overview and instructions.
- `app/server/**` - Contains the codebase server.
- `app/client/**` - Contains the codebase client.
- `scripts/**` - Contains the scripts to start and stop the server + client.
- `adws/**` - Contains the AI Developer Workflow (ADW) scripts.
- Read `.claude/commands/conditional_docs.md` to check if your task requires additional documentation
- If your task matches any of the conditions listed, reference those documentation files to understand the context better when creating your patch plan
Ignore all other files in the codebase.
## Plan Format
```md
# Patch: <concise patch title>
## Metadata
adw_id: `{adw_id}`
review_change_request: `{review_change_request}`
## Issue Summary
**Original Spec:** <spec_path>
**Issue:** <brief description of the review issue based on the `review_change_request`>
**Solution:** <brief description of the solution approach based on the `review_change_request`>
## Files to Modify
Use these files to implement the patch:
<list only the files that need changes - be specific and minimal>
## Implementation Steps
IMPORTANT: Execute every step in order, top to bottom.
<list 2-5 focused steps to implement the patch. Each step should be a concrete action.>
### Step 1: <specific action>
- <implementation detail>
- <implementation detail>
### Step 2: <specific action>
- <implementation detail>
- <implementation detail>
<continue as needed, but keep it minimal>
## Validation
Execute every command to validate the patch is complete with zero regressions.
<list 1-5 specific commands or checks to verify the patch works correctly>
## Patch Scope
**Lines of code to change:** <estimate>
**Risk level:** <low|medium|high>
**Testing required:** <brief description>
```
## Report
- IMPORTANT: Return exclusively the path to the patch plan file created and nothing else.

View File

@@ -0,0 +1,41 @@
# Create Pull Request
Based on the `Instructions` below, take the `Variables` and follow the `Run` section to create a pull request. Then follow the `Report` section to report the results of your work.
## Variables
branch_name: $1
issue: $2
plan_file: $3
adw_id: $4
## Instructions
- Generate a pull request title in the format: `<issue_type>: #<issue_number> - <issue_title>`
- The PR body should include:
- A summary section with the issue context
- Link to the implementation `plan_file` if it exists
- Reference to the issue (Closes #<issue_number>)
- ADW tracking ID
- A checklist of what was done
- A summary of key changes made
- Extract issue number, type, and title from the issue JSON
- Examples of PR titles:
- `feat: #123 - Add user authentication`
- `bug: #456 - Fix login validation error`
- `chore: #789 - Update dependencies`
- `test: #1011 - Test xyz`
- Don't mention Claude Code in the PR body - let the author get credit for this.
## Run
1. Run `git diff origin/main...HEAD --stat` to see a summary of changed files
2. Run `git log origin/main..HEAD --oneline` to see the commits that will be included
3. Run `git diff origin/main...HEAD --name-only` to get a list of changed files
4. Run `git push -u origin <branch_name>` to push the branch
5. Set GH_TOKEN environment variable from GITHUB_PAT if available, then run `gh pr create --title "<pr_title>" --body "<pr_body>" --base main` to create the PR
6. Capture the PR URL from the output
## Report
Return ONLY the PR URL that was created (no other text)

View File

@@ -0,0 +1,85 @@
# Review
Follow the `Instructions` below to **review work done against a specification file** (specs/*.md) to ensure implemented features match requirements. Use the spec file to understand the requirements and then use the git diff if available to understand the changes made. Capture screenshots of critical functionality paths as documented in the `Instructions` section. If there are issues, report them; if not, report success.
## Variables
adw_id: $1
spec_file: $2
agent_name: $3 if provided, otherwise use 'review_agent'
review_image_dir: `<absolute path to codebase>/agents/<adw_id>/<agent_name>/review_img/`
## Instructions
- Check current git branch using `git branch` to understand context
- Run `git diff origin/main` to see all changes made in current branch. Continue even if there are no changes related to the spec file.
- Find the spec file by looking for specs/*.md files in the diff that match the current branch name
- Read the identified spec file to understand requirements
- IMPORTANT: If the work can be validated by UI validation then (if not skip the section):
- Use the playwright mcp server commands to validate the work.
  - Look for corresponding e2e test files in `.claude/commands/e2e/test_*.md` that mirror the feature name
- Use e2e test files only as navigation guides for screenshot locations, not for other purposes
- IMPORTANT: To be clear, we're not testing. We know the functionality works. We're reviewing the implementation against the spec to make sure it matches what was requested.
- IMPORTANT: Take screen shots along the way to showcase the new functionality and any issues you find
- Capture visual proof of working features through targeted screenshots
- Navigate to the application and capture screenshots of only the critical paths based on the spec
- Compare implemented changes with spec requirements to verify correctness
- Do not take screenshots of the entire process, only the critical points.
- IMPORTANT: Aim for `1-5` screenshots to showcase that the new functionality works as specified.
- If there is a review issue, take a screenshot of the issue and add it to the `review_issues` array. Describe the issue, resolution, and severity.
- Number your screenshots in the order they are taken like `01_<descriptive name>.png`, `02_<descriptive name>.png`, etc.
- IMPORTANT: Be absolutely sure to take a screen shot of the critical point of the new functionality
- IMPORTANT: Copy all screenshots to the provided `review_image_dir`
- IMPORTANT: Store the screenshots in the `review_image_dir` and be sure to use full absolute paths.
- Focus only on critical functionality paths - avoid unnecessary screenshots
- Ensure screenshots clearly demonstrate that features work as specified
- Use descriptive filenames that indicate what part of the change is being verified
- IMPORTANT: Issue Severity Guidelines
- Think hard about the impact of the issue on the feature and the user
- Guidelines:
- `skippable` - the issue is non-blocker for the work to be released but is still a problem
- `tech_debt` - the issue is non-blocker for the work to be released but will create technical debt that should be addressed in the future
- `blocker` - the issue is a blocker for the work to be released and should be addressed immediately. It will harm the user experience or will not function as expected.
- IMPORTANT: Return ONLY the JSON array with test results
- IMPORTANT: Output your result in JSON format based on the `Report` section below.
- IMPORTANT: Do not include any additional text, explanations, or markdown formatting
- We'll immediately run JSON.parse() on the output, so make sure it's valid JSON
- Ultra think as you work through the review process. Focus on the critical functionality paths and the user experience. Don't report issues if they are not critical to the feature.
## Setup
IMPORTANT: Read and **Execute** `.claude/commands/prepare_app.md` now to prepare the application for the review.
- Note: prepare_app.md will automatically detect and use ports from `.ports.env` if running in a worktree environment
- The application URL will be http://localhost:PORT where PORT is from `.ports.env` (FRONTEND_PORT) or default 5173
## Report
- IMPORTANT: Return results exclusively as a JSON array based on the `Output Structure` section below.
- `success` should be `true` if there are NO BLOCKING issues (implementation matches spec for critical functionality)
- `success` should be `false` ONLY if there are BLOCKING issues that prevent the work from being released
- `review_issues` can contain issues of any severity (skippable, tech_debt, or blocker)
- `screenshots` should ALWAYS contain paths to screenshots showcasing the new functionality, regardless of success status. Use full absolute paths.
- This allows subsequent agents to quickly identify and resolve blocking errors while documenting all issues
### Output Structure
```json
{
  "success": "boolean - true if there are NO BLOCKING issues (can have skippable/tech_debt issues), false if there are BLOCKING issues",
  "review_summary": "string - 2-4 sentences describing what was built and whether it matches the spec. Written as if reporting during a standup meeting. Example: 'The natural language query feature has been implemented with drag-and-drop file upload and interactive table display. The implementation matches the spec requirements for SQL injection protection and supports both CSV and JSON formats. Minor UI improvements could be made but all core functionality is working as specified.'",
  "review_issues": [
    {
      "review_issue_number": "number - the issue number based on the index of this issue",
      "screenshot_path": "string - /absolute/path/to/screenshot_that_shows_review_issue.png",
      "issue_description": "string - description of the issue",
      "issue_resolution": "string - description of the resolution",
      "issue_severity": "string - severity of the issue between 'skippable', 'tech_debt', 'blocker'"
    },
    ...
  ],
  "screenshots": [
    "string - /absolute/path/to/screenshot_showcasing_functionality.png",
    "string - /absolute/path/to/screenshot_showcasing_functionality.png",
    "..."
  ]
}
```

View File

@@ -0,0 +1,115 @@
# Application Validation Test Suite
Execute comprehensive validation tests for both frontend and backend components, returning results in a standardized JSON format for automated processing.
## Purpose
Proactively identify and fix issues in the application before they impact users or developers. By running this comprehensive test suite, you can:
- Detect syntax errors, type mismatches, and import failures
- Identify broken tests or security vulnerabilities
- Verify build processes and dependencies
- Ensure the application is in a healthy state
## Variables
TEST_COMMAND_TIMEOUT: 5 minutes
## Instructions
- Execute each test in the sequence provided below
- Capture the result (passed/failed) and any error messages
- IMPORTANT: Return ONLY the JSON array with test results
- IMPORTANT: Do not include any additional text, explanations, or markdown formatting
- We'll immediately run JSON.parse() on the output, so make sure it's valid JSON
- If a test passes, omit the error field
- If a test fails, include the error message in the error field
- Execute all tests even if some fail
- Error Handling:
- If a command returns non-zero exit code, mark as failed and immediately stop processing tests
- Capture stderr output for error field
- Timeout commands after `TEST_COMMAND_TIMEOUT`
- IMPORTANT: If a test fails, stop processing tests and return the results thus far
- Some tests may have dependencies (e.g., server must be stopped for port availability)
- API health check is required
- Test execution order is important - dependencies should be validated first
- All file paths are relative to the project root
- Always run `pwd` and `cd` before each test to ensure you're operating in the correct directory for the given test
## Test Execution Sequence
### Backend Tests
1. **Python Syntax Check**
- Preparation Command: None
- Command: `cd app/server && uv run python -m py_compile server.py main.py core/*.py`
- test_name: "python_syntax_check"
- test_purpose: "Validates Python syntax by compiling source files to bytecode, catching syntax errors like missing colons, invalid indentation, or malformed statements"
2. **Backend Code Quality Check**
- Preparation Command: None
- Command: `cd app/server && uv run ruff check .`
- test_name: "backend_linting"
- test_purpose: "Validates Python code quality, identifies unused imports, style violations, and potential bugs"
3. **All Backend Tests**
- Preparation Command: None
- Command: `cd app/server && uv run pytest tests/ -v --tb=short`
- test_name: "all_backend_tests"
- test_purpose: "Validates all backend functionality including file processing, SQL security, LLM integration, and API endpoints"
### Frontend Tests
4. **TypeScript Type Check**
- Preparation Command: None
- Command: `cd app/client && bun tsc --noEmit`
- test_name: "typescript_check"
- test_purpose: "Validates TypeScript type correctness without generating output files, catching type errors, missing imports, and incorrect function signatures"
5. **Frontend Build**
- Preparation Command: None
- Command: `cd app/client && bun run build`
- test_name: "frontend_build"
- test_purpose: "Validates the complete frontend build process including bundling, asset optimization, and production compilation"
## Report
- IMPORTANT: Return results exclusively as a JSON array based on the `Output Structure` section below.
- Sort the JSON array with failed tests (passed: false) at the top
- Include all tests in the output, both passed and failed
- The execution_command field should contain the exact command that can be run to reproduce the test
- This allows subsequent agents to quickly identify and resolve errors
### Output Structure
```json
[
{
"test_name": "string",
"passed": boolean,
"execution_command": "string",
"test_purpose": "string",
"error": "optional string"
},
...
]
```
### Example Output
```json
[
{
"test_name": "frontend_build",
"passed": false,
"execution_command": "cd app/client && bun run build",
"test_purpose": "Validates TypeScript compilation, module resolution, and production build process for the frontend application",
"error": "TS2345: Argument of type 'string' is not assignable to parameter of type 'number'"
},
{
"test_name": "all_backend_tests",
"passed": true,
"execution_command": "cd app/server && uv run pytest tests/ -v --tb=short",
"test_purpose": "Validates all backend functionality including file processing, SQL security, LLM integration, and API endpoints"
}
]
```

View File

@@ -0,0 +1,132 @@
#!/usr/bin/env -S uv run
# /// script
# dependencies = ["python-dotenv", "pydantic"]
# ///
"""
ADW Plan Build Test Review Iso - Compositional workflow for isolated planning, building, testing, and reviewing
Usage: uv run adw_plan_build_test_review_iso.py <issue-number> [adw-id] [--skip-e2e] [--skip-resolution]
This script runs:
1. adw_plan_iso.py - Planning phase (isolated)
2. adw_build_iso.py - Implementation phase (isolated)
3. adw_test_iso.py - Testing phase (isolated)
4. adw_review_iso.py - Review phase (isolated)
The scripts are chained together via persistent state (adw_state.json).
"""
import subprocess
import sys
import os
# Add the parent directory to Python path to import modules
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from adw_modules.workflow_ops import ensure_adw_id
def main():
"""Main entry point."""
# Check for flags
skip_e2e = "--skip-e2e" in sys.argv
skip_resolution = "--skip-resolution" in sys.argv
# Remove flags from argv
if skip_e2e:
sys.argv.remove("--skip-e2e")
if skip_resolution:
sys.argv.remove("--skip-resolution")
if len(sys.argv) < 2:
print("Usage: uv run adw_plan_build_test_review_iso.py <issue-number> [adw-id] [--skip-e2e] [--skip-resolution]")
print("\nThis runs the isolated plan, build, test, and review workflow:")
print(" 1. Plan (isolated)")
print(" 2. Build (isolated)")
print(" 3. Test (isolated)")
print(" 4. Review (isolated)")
sys.exit(1)
issue_number = sys.argv[1]
adw_id = sys.argv[2] if len(sys.argv) > 2 else None
# Ensure ADW ID exists with initialized state
adw_id = ensure_adw_id(issue_number, adw_id)
print(f"Using ADW ID: {adw_id}")
# Get the directory where this script is located
script_dir = os.path.dirname(os.path.abspath(__file__))
# Run isolated plan with the ADW ID
plan_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_plan_iso.py"),
issue_number,
adw_id,
]
print(f"\n=== ISOLATED PLAN PHASE ===")
print(f"Running: {' '.join(plan_cmd)}")
plan = subprocess.run(plan_cmd)
if plan.returncode != 0:
print("Isolated plan phase failed")
sys.exit(1)
# Run isolated build with the ADW ID
build_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_build_iso.py"),
issue_number,
adw_id,
]
print(f"\n=== ISOLATED BUILD PHASE ===")
print(f"Running: {' '.join(build_cmd)}")
build = subprocess.run(build_cmd)
if build.returncode != 0:
print("Isolated build phase failed")
sys.exit(1)
# Run isolated test with the ADW ID
test_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_test_iso.py"),
issue_number,
adw_id,
]
if skip_e2e:
test_cmd.append("--skip-e2e")
print(f"\n=== ISOLATED TEST PHASE ===")
print(f"Running: {' '.join(test_cmd)}")
test = subprocess.run(test_cmd)
if test.returncode != 0:
print("Isolated test phase failed")
sys.exit(1)
# Run isolated review with the ADW ID
review_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_review_iso.py"),
issue_number,
adw_id,
]
if skip_resolution:
review_cmd.append("--skip-resolution")
print(f"\n=== ISOLATED REVIEW PHASE ===")
print(f"Running: {' '.join(review_cmd)}")
review = subprocess.run(review_cmd)
if review.returncode != 0:
print("Isolated review phase failed")
sys.exit(1)
print(f"\n=== ISOLATED WORKFLOW COMPLETED ===")
print(f"ADW ID: {adw_id}")
print(f"All phases completed successfully!")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,152 @@
#!/usr/bin/env -S uv run
# /// script
# dependencies = ["python-dotenv", "pydantic"]
# ///
"""
ADW SDLC Iso - Complete Software Development Life Cycle workflow with isolation
Usage: uv run adw_sdlc_iso.py <issue-number> [adw-id] [--skip-e2e] [--skip-resolution]
This script runs the complete ADW SDLC pipeline in isolation:
1. adw_plan_iso.py - Planning phase (isolated)
2. adw_build_iso.py - Implementation phase (isolated)
3. adw_test_iso.py - Testing phase (isolated)
4. adw_review_iso.py - Review phase (isolated)
5. adw_document_iso.py - Documentation phase (isolated)
The scripts are chained together via persistent state (adw_state.json).
Each phase runs in its own git worktree with dedicated ports.
"""
import subprocess
import sys
import os
# Add the parent directory to Python path to import modules
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from adw_modules.workflow_ops import ensure_adw_id
def main():
"""Main entry point."""
# Check for flags
skip_e2e = "--skip-e2e" in sys.argv
skip_resolution = "--skip-resolution" in sys.argv
# Remove flags from argv
if skip_e2e:
sys.argv.remove("--skip-e2e")
if skip_resolution:
sys.argv.remove("--skip-resolution")
if len(sys.argv) < 2:
print("Usage: uv run adw_sdlc_iso.py <issue-number> [adw-id] [--skip-e2e] [--skip-resolution]")
print("\nThis runs the complete isolated Software Development Life Cycle:")
print(" 1. Plan (isolated)")
print(" 2. Build (isolated)")
print(" 3. Test (isolated)")
print(" 4. Review (isolated)")
print(" 5. Document (isolated)")
sys.exit(1)
issue_number = sys.argv[1]
adw_id = sys.argv[2] if len(sys.argv) > 2 else None
# Ensure ADW ID exists with initialized state
adw_id = ensure_adw_id(issue_number, adw_id)
print(f"Using ADW ID: {adw_id}")
# Get the directory where this script is located
script_dir = os.path.dirname(os.path.abspath(__file__))
# Run isolated plan with the ADW ID
plan_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_plan_iso.py"),
issue_number,
adw_id,
]
print(f"\n=== ISOLATED PLAN PHASE ===")
print(f"Running: {' '.join(plan_cmd)}")
plan = subprocess.run(plan_cmd)
if plan.returncode != 0:
print("Isolated plan phase failed")
sys.exit(1)
# Run isolated build with the ADW ID
build_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_build_iso.py"),
issue_number,
adw_id,
]
print(f"\n=== ISOLATED BUILD PHASE ===")
print(f"Running: {' '.join(build_cmd)}")
build = subprocess.run(build_cmd)
if build.returncode != 0:
print("Isolated build phase failed")
sys.exit(1)
# Run isolated test with the ADW ID
test_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_test_iso.py"),
issue_number,
adw_id,
"--skip-e2e", # Always skip E2E tests in SDLC workflows
]
print(f"\n=== ISOLATED TEST PHASE ===")
print(f"Running: {' '.join(test_cmd)}")
test = subprocess.run(test_cmd)
if test.returncode != 0:
print("Isolated test phase failed")
# Note: Continue anyway as some tests might be flaky
print("WARNING: Test phase failed but continuing with review")
# Run isolated review with the ADW ID
review_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_review_iso.py"),
issue_number,
adw_id,
]
if skip_resolution:
review_cmd.append("--skip-resolution")
print(f"\n=== ISOLATED REVIEW PHASE ===")
print(f"Running: {' '.join(review_cmd)}")
review = subprocess.run(review_cmd)
if review.returncode != 0:
print("Isolated review phase failed")
sys.exit(1)
# Run isolated documentation with the ADW ID
document_cmd = [
"uv",
"run",
os.path.join(script_dir, "adw_document_iso.py"),
issue_number,
adw_id,
]
print(f"\n=== ISOLATED DOCUMENTATION PHASE ===")
print(f"Running: {' '.join(document_cmd)}")
document = subprocess.run(document_cmd)
if document.returncode != 0:
print("Isolated documentation phase failed")
sys.exit(1)
print(f"\n=== ISOLATED SDLC COMPLETED ===")
print(f"ADW ID: {adw_id}")
print(f"All phases completed successfully!")
print(f"\nWorktree location: trees/{adw_id}/")
print(f"To clean up: ./scripts/purge_tree.sh {adw_id}")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,340 @@
#!/usr/bin/env -S uv run
# /// script
# dependencies = ["python-dotenv", "pydantic"]
# ///
"""
ADW Ship Iso - AI Developer Workflow for shipping (merging) to main
Usage:
uv run adw_ship_iso.py <issue-number> <adw-id>
Workflow:
1. Load state and validate worktree exists
2. Validate ALL state fields are populated (not None)
3. Perform manual git merge in main repository:
- Fetch latest from origin
- Checkout main
- Merge feature branch
- Push to origin/main
4. Post success message to issue
This workflow REQUIRES that all previous workflows have been run and that
every field in ADWState has a value. This is our final approval step.
Note: Merge operations happen in the main repository root, not in the worktree,
to preserve the worktree's state.
"""
import sys
import os
import logging
import json
import subprocess
from typing import Optional, Dict, Any, Tuple
from dotenv import load_dotenv
from adw_modules.state import ADWState
from adw_modules.github import (
make_issue_comment,
get_repo_url,
extract_repo_path,
)
from adw_modules.beads_integration import is_beads_issue, close_beads_issue
from adw_modules.workflow_ops import format_issue_message
from adw_modules.worktree_ops import validate_worktree
from adw_modules.data_types import ADWStateData
# Setup logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
# Agent name constant
AGENT_SHIPPER = "shipper"
def get_main_repo_root() -> str:
"""Get the main repository root directory (parent of adws)."""
# This script is in adws/, so go up one level to get repo root
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def manual_merge_to_main(branch_name: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]:
"""Manually merge a branch to main using git commands.
This runs in the main repository root, not in a worktree.
Args:
branch_name: The feature branch to merge
logger: Logger instance
Returns:
Tuple of (success, error_message)
"""
repo_root = get_main_repo_root()
logger.info(f"Performing manual merge in main repository: {repo_root}")
try:
# Save current branch to restore later
result = subprocess.run(
["git", "rev-parse", "--abbrev-ref", "HEAD"],
capture_output=True, text=True, cwd=repo_root
)
original_branch = result.stdout.strip()
logger.debug(f"Original branch: {original_branch}")
# Step 1: Fetch latest from origin
logger.info("Fetching latest from origin...")
result = subprocess.run(
["git", "fetch", "origin"],
capture_output=True, text=True, cwd=repo_root
)
if result.returncode != 0:
return False, f"Failed to fetch from origin: {result.stderr}"
# Step 2: Checkout main
logger.info("Checking out main branch...")
result = subprocess.run(
["git", "checkout", "main"],
capture_output=True, text=True, cwd=repo_root
)
if result.returncode != 0:
return False, f"Failed to checkout main: {result.stderr}"
# Step 3: Pull latest main
logger.info("Pulling latest main...")
result = subprocess.run(
["git", "pull", "origin", "main"],
capture_output=True, text=True, cwd=repo_root
)
if result.returncode != 0:
# Try to restore original branch
subprocess.run(["git", "checkout", original_branch], cwd=repo_root)
return False, f"Failed to pull latest main: {result.stderr}"
# Step 4: Merge the feature branch (no-ff to preserve all commits)
logger.info(f"Merging branch {branch_name} (no-ff to preserve all commits)...")
result = subprocess.run(
["git", "merge", branch_name, "--no-ff", "-m", f"Merge branch '{branch_name}' via ADW Ship workflow"],
capture_output=True, text=True, cwd=repo_root
)
if result.returncode != 0:
# Try to restore original branch
subprocess.run(["git", "checkout", original_branch], cwd=repo_root)
return False, f"Failed to merge {branch_name}: {result.stderr}"
# Step 5: Push to origin/main
logger.info("Pushing to origin/main...")
result = subprocess.run(
["git", "push", "origin", "main"],
capture_output=True, text=True, cwd=repo_root
)
if result.returncode != 0:
# Try to restore original branch
subprocess.run(["git", "checkout", original_branch], cwd=repo_root)
return False, f"Failed to push to origin/main: {result.stderr}"
# Step 6: Restore original branch
logger.info(f"Restoring original branch: {original_branch}")
subprocess.run(["git", "checkout", original_branch], cwd=repo_root)
logger.info("✅ Successfully merged and pushed to main!")
return True, None
except Exception as e:
logger.error(f"Unexpected error during merge: {e}")
        # Best-effort restore of the original branch; ignore any cleanup failure
        try:
            subprocess.run(["git", "checkout", original_branch], cwd=repo_root)
        except Exception:
            pass
return False, str(e)
def validate_state_completeness(state: ADWState, logger: logging.Logger) -> tuple[bool, list[str]]:
"""Validate that all fields in ADWState have values (not None).
Returns:
tuple of (is_valid, missing_fields)
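    Example (illustrative): a state missing only "plan_file" returns
    (False, ["plan_file"]); a fully populated state returns (True, []).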
"""
    # Expected state fields (kept in sync manually with the ADWStateData model)
expected_fields = {
"adw_id",
"issue_number",
"branch_name",
"plan_file",
"issue_class",
"worktree_path",
"backend_port",
"frontend_port",
}
missing_fields = []
for field in expected_fields:
value = state.get(field)
if value is None:
missing_fields.append(field)
logger.warning(f"Missing required field: {field}")
else:
logger.debug(f"{field}: {value}")
return len(missing_fields) == 0, missing_fields
def main():
"""Main entry point."""
# Load environment variables
load_dotenv()
# Parse command line args
# INTENTIONAL: adw-id is REQUIRED - we need it to find the worktree and state
if len(sys.argv) < 3:
print("Usage: uv run adw_ship_iso.py <issue-number> <adw-id>")
print("\nError: Both issue-number and adw-id are required")
print("Run the complete SDLC workflow before shipping")
sys.exit(1)
issue_number = sys.argv[1]
adw_id = sys.argv[2]
# Try to load existing state
state = ADWState.load(adw_id, logger)
if not state:
# No existing state found
logger.error(f"No state found for ADW ID: {adw_id}")
logger.error("Run the complete SDLC workflow before shipping")
print(f"\nError: No state found for ADW ID: {adw_id}")
print("Run the complete SDLC workflow before shipping")
sys.exit(1)
# Update issue number from state if available
issue_number = state.get("issue_number", issue_number)
# Track that this ADW workflow has run
state.append_adw_id("adw_ship_iso")
logger.info(f"ADW Ship Iso starting - ID: {adw_id}, Issue: {issue_number}")
# Check if this is a beads issue
is_beads = is_beads_issue(issue_number)
logger.info(f"Issue type: {'beads' if is_beads else 'GitHub'}")
# Post initial status (only for GitHub issues)
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, "ops", f"🚢 Starting ship workflow\n"
f"📋 Validating state completeness...")
)
# Step 1: Validate state completeness
logger.info("Validating state completeness...")
is_valid, missing_fields = validate_state_completeness(state, logger)
if not is_valid:
error_msg = f"State validation failed. Missing fields: {', '.join(missing_fields)}"
logger.error(error_msg)
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, AGENT_SHIPPER, f"{error_msg}\n\n"
"Please ensure all workflows have been run:\n"
"- adw_plan_iso.py (creates plan_file, branch_name, issue_class)\n"
"- adw_build_iso.py (implements the plan)\n"
"- adw_test_iso.py (runs tests)\n"
"- adw_review_iso.py (reviews implementation)\n"
"- adw_document_iso.py (generates docs)")
)
sys.exit(1)
logger.info("✅ State validation passed - all fields have values")
# Step 2: Validate worktree exists
valid, error = validate_worktree(adw_id, state)
if not valid:
logger.error(f"Worktree validation failed: {error}")
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, AGENT_SHIPPER, f"❌ Worktree validation failed: {error}")
)
sys.exit(1)
worktree_path = state.get("worktree_path")
logger.info(f"✅ Worktree validated at: {worktree_path}")
# Step 3: Get branch name
branch_name = state.get("branch_name")
logger.info(f"Preparing to merge branch: {branch_name}")
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, AGENT_SHIPPER, f"📋 State validation complete\n"
f"🔍 Preparing to merge branch: {branch_name}")
)
# Step 4: Perform manual merge
logger.info(f"Starting manual merge of {branch_name} to main...")
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, AGENT_SHIPPER, f"🔀 Merging {branch_name} to main...\n"
"Using manual git operations in main repository")
)
success, error = manual_merge_to_main(branch_name, logger)
if not success:
logger.error(f"Failed to merge: {error}")
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, AGENT_SHIPPER, f"❌ Failed to merge: {error}")
)
sys.exit(1)
logger.info(f"✅ Successfully merged {branch_name} to main")
# Step 5: Close beads issue if applicable
if is_beads:
logger.info(f"Closing beads issue: {issue_number}")
success, error = close_beads_issue(
issue_number,
f"Completed via ADW {adw_id} - merged to main"
)
if not success:
logger.warning(f"Failed to close beads issue: {error}")
else:
logger.info(f"✅ Closed beads issue: {issue_number}")
# Step 6: Post success message (only for GitHub issues)
if not is_beads:
make_issue_comment(
issue_number,
format_issue_message(adw_id, AGENT_SHIPPER,
f"🎉 **Successfully shipped!**\n\n"
f"✅ Validated all state fields\n"
f"✅ Merged branch `{branch_name}` to main\n"
f"✅ Pushed to origin/main\n\n"
f"🚢 Code has been deployed to production!")
)
# Save final state
state.save("adw_ship_iso")
# Post final state summary (only for GitHub issues)
if not is_beads:
make_issue_comment(
issue_number,
f"{adw_id}_ops: 📋 Final ship state:\n```json\n{json.dumps(state.data, indent=2)}\n```"
)
logger.info("Ship workflow completed successfully")
if __name__ == "__main__":
main()