Initial commit
skills/adw-bootstrap/reference/scaled/adw_modules/beads_integration.py (executable file, 292 lines added)
@@ -0,0 +1,292 @@
"""Beads Integration Module - AI Developer Workflow (ADW)

This module provides beads issue management as an alternative to GitHub issues.
Allows ADW workflows to work with local beads tasks for offline development.
"""

import os
import subprocess
import json
from typing import Tuple, Optional
from adw_modules.data_types import GitHubIssue
from datetime import datetime


def get_workspace_root() -> str:
    """Get workspace root for beads operations."""
    # Assume workspace root is the parent of adws directory
    return os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )


def fetch_beads_issue(issue_id: str) -> Tuple[Optional[GitHubIssue], Optional[str]]:
    """Fetch beads issue and convert to GitHubIssue format.

    Args:
        issue_id: The beads issue ID

    Returns:
        Tuple of (GitHubIssue, error_message)
    """
    workspace_root = get_workspace_root()

    # Use bd show to get issue details
    cmd = ["bd", "show", issue_id]

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            cwd=workspace_root,
        )

        if result.returncode != 0:
            return None, f"Failed to fetch beads issue: {result.stderr}"

        # Parse the output (bd show returns human-readable format)
        # Format is:
        # poc-fjw: Token Infrastructure & Redis Setup
        # Status: in_progress
        # Priority: P0
        # Type: feature
        # ...
        # Description:
        #   <description text>
        output = result.stdout

        # Extract title, description, status from output
        title = None
        description = None
        status = "open"
        issue_type = "task"
        in_description = False
        description_lines = []

        for line in output.split("\n"):
            stripped = line.strip()

            # Skip empty lines
            if not stripped:
                continue

            # First line has format: "poc-fjw: Token Infrastructure & Redis Setup"
            if not title and ":" in line and not line.startswith(" "):
                parts = line.split(":", 1)
                if len(parts) == 2 and parts[0].strip() == issue_id:
                    title = parts[1].strip()
                    continue

            # Status line
            if stripped.startswith("Status:"):
                status = stripped.split(":", 1)[1].strip()
                in_description = False
            # Type line
            elif stripped.startswith("Type:"):
                issue_type = stripped.split(":", 1)[1].strip()
                in_description = False
            # Description section
            elif stripped.startswith("Description:"):
                in_description = True
                # Check if description is on same line
                desc_text = stripped.split(":", 1)[1].strip()
                if desc_text:
                    description_lines.append(desc_text)
            elif in_description and stripped and not stripped.startswith("Dependents"):
                description_lines.append(stripped)
            elif stripped.startswith("Dependents") or stripped.startswith("Dependencies"):
                in_description = False

        # Combine description lines
        if description_lines:
            description = "\n".join(description_lines)

        if not title:
            return None, "Could not parse issue title from beads output"

        # Convert to GitHubIssue format for compatibility
        # Use the issue_id as the number (extract numeric part if present)
        try:
            # Try to extract number from ID like "poc-123"
            number_str = issue_id.split("-")[-1]
            if number_str.isdigit():
                number = int(number_str)
            else:
                # Use hash of ID as fallback
                number = hash(issue_id) % 10000
        except Exception:
            number = hash(issue_id) % 10000

        # Create GitHubIssue-compatible object
        issue = GitHubIssue(
            number=number,
            title=title or "Untitled Task",
            body=description or "",
            state=status,
            author={"login": "beads"},
            assignees=[],
            labels=[{"name": issue_type}],
            milestone=None,
            comments=[],
            createdAt=datetime.now().isoformat(),
            updatedAt=datetime.now().isoformat(),
            closedAt=None,
            url=f"beads://{issue_id}",
        )

        return issue, None

    except FileNotFoundError:
        return None, "bd command not found. Is beads installed?"
    except Exception as e:
        return None, f"Error fetching beads issue: {str(e)}"


def update_beads_status(issue_id: str, status: str) -> Tuple[bool, Optional[str]]:
    """Update beads issue status.

    Args:
        issue_id: The beads issue ID
        status: New status (open, in_progress, blocked, closed)

    Returns:
        Tuple of (success, error_message)
    """
    workspace_root = get_workspace_root()

    cmd = ["bd", "update", issue_id, "--status", status]

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            cwd=workspace_root,
        )

        if result.returncode != 0:
            return False, f"Failed to update beads status: {result.stderr}"

        return True, None

    except FileNotFoundError:
        return False, "bd command not found. Is beads installed?"
    except Exception as e:
        return False, f"Error updating beads status: {str(e)}"


def close_beads_issue(issue_id: str, reason: str = "Completed via ADW workflow") -> Tuple[bool, Optional[str]]:
    """Close a beads issue.

    Args:
        issue_id: The beads issue ID
        reason: Reason for closing

    Returns:
        Tuple of (success, error_message)
    """
    workspace_root = get_workspace_root()

    cmd = ["bd", "close", issue_id, "--reason", reason]

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            cwd=workspace_root,
        )

        if result.returncode != 0:
            return False, f"Failed to close beads issue: {result.stderr}"

        return True, None

    except FileNotFoundError:
        return False, "bd command not found. Is beads installed?"
    except Exception as e:
        return False, f"Error closing beads issue: {str(e)}"


def get_ready_beads_tasks(limit: int = 10) -> Tuple[Optional[list], Optional[str]]:
    """Get ready beads tasks (no blockers).

    Args:
        limit: Maximum number of tasks to return

    Returns:
        Tuple of (task_list, error_message)
    """
    workspace_root = get_workspace_root()

    cmd = ["bd", "ready", "--limit", str(limit)]

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            cwd=workspace_root,
        )

        if result.returncode != 0:
            return None, f"Failed to get ready tasks: {result.stderr}"

        # Parse output to extract task IDs
        # bd ready returns format like:
        # 📋 Ready work (1 issues with no blockers):
        #
        # 1. [P0] poc-pw3: Credit Consumption & Atomicity
        #    Assignee: La Boeuf
        tasks = []

        # Check if there are no ready tasks
        if "No ready work found" in result.stdout or "(0 issues" in result.stdout:
            return [], None

        for line in result.stdout.split("\n"):
            line = line.strip()
            # Skip empty lines, headers, and assignee lines
            if not line or line.startswith("📋") or line.startswith("Assignee:"):
                continue

            # Look for lines with format: "1. [P0] poc-pw3: Title"
            # Extract the task ID (poc-pw3 in this case)
            if ". [P" in line or ". [" in line:
                # Split on ": " to get the ID part
                parts = line.split(":")
                if len(parts) >= 2:
                    # Get the part before the colon, then extract the ID
                    # Format: "1. [P0] poc-pw3"
                    id_part = parts[0].strip()
                    # Split by spaces and get the last token (the ID)
                    tokens = id_part.split()
                    if tokens:
                        task_id = tokens[-1]
                        # Verify it looks like a beads ID (has hyphen)
                        if "-" in task_id:
                            tasks.append(task_id)

        return tasks, None

    except FileNotFoundError:
        return None, "bd command not found. Is beads installed?"
    except Exception as e:
        return None, f"Error getting ready tasks: {str(e)}"


def is_beads_issue(issue_identifier: str) -> bool:
    """Check if an issue identifier is a beads issue.

    Beads issues have format like: poc-abc, feat-123, etc.
    GitHub issues are just numbers.

    Args:
        issue_identifier: The issue identifier

    Returns:
        True if it's a beads issue, False otherwise
    """
    # Beads issues contain a hyphen
    return "-" in issue_identifier and not issue_identifier.isdigit()
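A minimal usage sketch for this module (illustrative only, not part of the commit; assumes the `bd` CLI is installed and an issue such as `poc-fjw` exists):

```python
from adw_modules.beads_integration import (
    close_beads_issue,
    fetch_beads_issue,
    is_beads_issue,
    update_beads_status,
)

issue_id = "poc-fjw"  # hypothetical beads issue ID
if is_beads_issue(issue_id):  # contains a hyphen, so treated as beads, not GitHub
    issue, err = fetch_beads_issue(issue_id)
    if err:
        raise SystemExit(err)
    # The converted object is GitHubIssue-compatible, so downstream ADW code is unchanged.
    print(f"#{issue.number}: {issue.title} [{issue.state}]")
    update_beads_status(issue_id, "in_progress")
    close_beads_issue(issue_id)  # default reason: "Completed via ADW workflow"
```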
skills/adw-bootstrap/reference/scaled/adw_modules/git_ops.py (new file, 316 lines added)
@@ -0,0 +1,316 @@
"""Git operations for ADW composable architecture.

Provides centralized git operations that build on top of github.py module.
"""

import subprocess
import json
import logging
from typing import Optional, Tuple

# Import GitHub functions from existing module
from adw_modules.github import get_repo_url, extract_repo_path, make_issue_comment


def get_current_branch(cwd: Optional[str] = None) -> str:
    """Get current git branch name."""
    result = subprocess.run(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"],
        capture_output=True,
        text=True,
        cwd=cwd,
    )
    return result.stdout.strip()


def push_branch(
    branch_name: str, cwd: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
    """Push current branch to remote. Returns (success, error_message)."""
    result = subprocess.run(
        ["git", "push", "-u", "origin", branch_name],
        capture_output=True,
        text=True,
        cwd=cwd,
    )
    if result.returncode != 0:
        return False, result.stderr
    return True, None


def check_pr_exists(branch_name: str) -> Optional[str]:
    """Check if PR exists for branch. Returns PR URL if exists."""
    # Use github.py functions to get repo info
    try:
        repo_url = get_repo_url()
        repo_path = extract_repo_path(repo_url)
    except Exception:
        return None

    result = subprocess.run(
        [
            "gh",
            "pr",
            "list",
            "--repo",
            repo_path,
            "--head",
            branch_name,
            "--json",
            "url",
        ],
        capture_output=True,
        text=True,
    )
    if result.returncode == 0:
        prs = json.loads(result.stdout)
        if prs:
            return prs[0]["url"]
    return None


def create_branch(
    branch_name: str, cwd: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
    """Create and checkout a new branch. Returns (success, error_message)."""
    # Create branch
    result = subprocess.run(
        ["git", "checkout", "-b", branch_name], capture_output=True, text=True, cwd=cwd
    )
    if result.returncode != 0:
        # Check if error is because branch already exists
        if "already exists" in result.stderr:
            # Try to checkout existing branch
            result = subprocess.run(
                ["git", "checkout", branch_name],
                capture_output=True,
                text=True,
                cwd=cwd,
            )
            if result.returncode != 0:
                return False, result.stderr
            return True, None
        return False, result.stderr
    return True, None


def commit_changes(
    message: str, cwd: Optional[str] = None
) -> Tuple[bool, Optional[str]]:
    """Stage all changes and commit. Returns (success, error_message)."""
    # Check if there are changes to commit
    result = subprocess.run(
        ["git", "status", "--porcelain"], capture_output=True, text=True, cwd=cwd
    )
    if not result.stdout.strip():
        return True, None  # No changes to commit

    # Stage all changes
    result = subprocess.run(
        ["git", "add", "-A"], capture_output=True, text=True, cwd=cwd
    )
    if result.returncode != 0:
        return False, result.stderr

    # Commit
    result = subprocess.run(
        ["git", "commit", "-m", message], capture_output=True, text=True, cwd=cwd
    )
    if result.returncode != 0:
        return False, result.stderr
    return True, None


def get_pr_number(branch_name: str) -> Optional[str]:
    """Get PR number for a branch. Returns PR number if exists."""
    # Use github.py functions to get repo info
    try:
        repo_url = get_repo_url()
        repo_path = extract_repo_path(repo_url)
    except Exception:
        return None

    result = subprocess.run(
        [
            "gh",
            "pr",
            "list",
            "--repo",
            repo_path,
            "--head",
            branch_name,
            "--json",
            "number",
            "--limit",
            "1",
        ],
        capture_output=True,
        text=True,
    )
    if result.returncode == 0:
        prs = json.loads(result.stdout)
        if prs:
            return str(prs[0]["number"])
    return None


def approve_pr(pr_number: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]:
    """Approve a PR. Returns (success, error_message)."""
    try:
        repo_url = get_repo_url()
        repo_path = extract_repo_path(repo_url)
    except Exception as e:
        return False, f"Failed to get repo info: {e}"

    result = subprocess.run(
        [
            "gh",
            "pr",
            "review",
            pr_number,
            "--repo",
            repo_path,
            "--approve",
            "--body",
            "ADW Ship workflow approved this PR after validating all state fields.",
        ],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        return False, result.stderr

    logger.info(f"Approved PR #{pr_number}")
    return True, None


def merge_pr(
    pr_number: str, logger: logging.Logger, merge_method: str = "squash"
) -> Tuple[bool, Optional[str]]:
    """Merge a PR. Returns (success, error_message).

    Args:
        pr_number: The PR number to merge
        logger: Logger instance
        merge_method: One of 'merge', 'squash', 'rebase' (default: 'squash')
    """
    try:
        repo_url = get_repo_url()
        repo_path = extract_repo_path(repo_url)
    except Exception as e:
        return False, f"Failed to get repo info: {e}"

    # First check if PR is mergeable
    result = subprocess.run(
        [
            "gh",
            "pr",
            "view",
            pr_number,
            "--repo",
            repo_path,
            "--json",
            "mergeable,mergeStateStatus",
        ],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        return False, f"Failed to check PR status: {result.stderr}"

    pr_status = json.loads(result.stdout)
    if pr_status.get("mergeable") != "MERGEABLE":
        return (
            False,
            f"PR is not mergeable. Status: {pr_status.get('mergeStateStatus', 'unknown')}",
        )

    # Merge the PR
    merge_cmd = [
        "gh",
        "pr",
        "merge",
        pr_number,
        "--repo",
        repo_path,
        f"--{merge_method}",
    ]

    # Add auto-merge body
    merge_cmd.extend(
        ["--body", "Merged by ADW Ship workflow after successful validation."]
    )

    result = subprocess.run(merge_cmd, capture_output=True, text=True)
    if result.returncode != 0:
        return False, result.stderr

    logger.info(f"Merged PR #{pr_number} using {merge_method} method")
    return True, None


def finalize_git_operations(
    state: "ADWState", logger: logging.Logger, cwd: Optional[str] = None
) -> None:
    """Standard git finalization: push branch and create/update PR."""
    branch_name = state.get("branch_name")
    if not branch_name:
        # Fallback: use current git branch if not main
        current_branch = get_current_branch(cwd=cwd)
        if current_branch and current_branch != "main":
            logger.warning(
                f"No branch name in state, using current branch: {current_branch}"
            )
            branch_name = current_branch
        else:
            logger.error(
                "No branch name in state and current branch is main, skipping git operations"
            )
            return

    # Always push
    success, error = push_branch(branch_name, cwd=cwd)
    if not success:
        logger.error(f"Failed to push branch: {error}")
        return

    logger.info(f"Pushed branch: {branch_name}")

    # Handle PR
    pr_url = check_pr_exists(branch_name)
    issue_number = state.get("issue_number")
    adw_id = state.get("adw_id")

    if pr_url:
        logger.info(f"Found existing PR: {pr_url}")
        # Post PR link for easy reference
        if issue_number and adw_id:
            make_issue_comment(issue_number, f"{adw_id}_ops: ✅ Pull request: {pr_url}")
    else:
        # Create new PR - fetch issue data first
        if issue_number:
            try:
                repo_url = get_repo_url()
                repo_path = extract_repo_path(repo_url)
                from adw_modules.github import fetch_issue

                issue = fetch_issue(issue_number, repo_path)

                from adw_modules.workflow_ops import create_pull_request

                pr_url, error = create_pull_request(branch_name, issue, state, logger, cwd)
            except Exception as e:
                logger.error(f"Failed to fetch issue for PR creation: {e}")
                pr_url, error = None, str(e)
        else:
            pr_url, error = None, "No issue number in state"

        if pr_url:
            logger.info(f"Created PR: {pr_url}")
            # Post new PR link
            if issue_number and adw_id:
                make_issue_comment(
                    issue_number, f"{adw_id}_ops: ✅ Pull request created: {pr_url}"
                )
        else:
            logger.error(f"Failed to create PR: {error}")
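A sketch of how these primitives compose into the usual branch, commit, push, PR-check flow (hypothetical branch name and commit message; error handling abbreviated):

```python
import logging
from adw_modules.git_ops import (
    check_pr_exists,
    commit_changes,
    create_branch,
    push_branch,
)

logging.basicConfig(level=logging.INFO)
branch = "feature-issue-42-adw-a1b2c3d4-example"  # hypothetical

ok, err = create_branch(branch)            # creates, or checks out if it already exists
if ok:
    ok, err = commit_changes("feat: example change (#42)")  # no-op when tree is clean
if ok:
    ok, err = push_branch(branch)
if ok:
    pr_url = check_pr_exists(branch)       # None when no open PR targets this branch
    print(pr_url or "no PR yet")
```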
skills/adw-bootstrap/reference/scaled/adw_modules/github.py (new file, 312 lines added)
@@ -0,0 +1,312 @@
#!/usr/bin/env -S uv run
# /// script
# dependencies = ["python-dotenv", "pydantic"]
# ///

"""
GitHub Operations Module - AI Developer Workflow (ADW)

This module contains all GitHub-related operations including:
- Issue fetching and manipulation
- Comment posting
- Repository path extraction
- Issue status management
"""

import subprocess
import sys
import os
import json
from typing import Dict, List, Optional
from .data_types import GitHubIssue, GitHubIssueListItem, GitHubComment

# Bot identifier to prevent webhook loops and filter bot comments
ADW_BOT_IDENTIFIER = "[ADW-AGENTS]"


def get_github_env() -> Optional[dict]:
    """Get environment with GitHub token set up. Returns None if no GITHUB_PAT.

    Subprocess env behavior:
    - env=None → Inherits parent's environment (default)
    - env={} → Empty environment (no variables)
    - env=custom_dict → Only uses specified variables

    So this will work with gh authentication:
        # These are equivalent:
        result = subprocess.run(cmd, capture_output=True, text=True)
        result = subprocess.run(cmd, capture_output=True, text=True, env=None)

    But this will NOT work (no PATH, no auth):
        result = subprocess.run(cmd, capture_output=True, text=True, env={})
    """
    github_pat = os.getenv("GITHUB_PAT")
    if not github_pat:
        return None

    # Only create minimal env with GitHub token
    env = {
        "GH_TOKEN": github_pat,
        "PATH": os.environ.get("PATH", ""),
    }
    return env


def get_repo_url() -> str:
    """Get GitHub repository URL from git remote."""
    try:
        result = subprocess.run(
            ["git", "remote", "get-url", "origin"],
            capture_output=True,
            text=True,
            check=True,
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError:
        raise ValueError(
            "No git remote 'origin' found. Please ensure you're in a git repository with a remote."
        )
    except FileNotFoundError:
        raise ValueError("git command not found. Please ensure git is installed.")


def extract_repo_path(github_url: str) -> str:
    """Extract owner/repo from GitHub URL."""
    # Handle both https://github.com/owner/repo and https://github.com/owner/repo.git
    return github_url.replace("https://github.com/", "").removesuffix(".git")


def fetch_issue(issue_number: str, repo_path: str) -> GitHubIssue:
    """Fetch GitHub issue using gh CLI and return typed model."""
    # Use JSON output for structured data
    cmd = [
        "gh",
        "issue",
        "view",
        issue_number,
        "-R",
        repo_path,
        "--json",
        "number,title,body,state,author,assignees,labels,milestone,comments,createdAt,updatedAt,closedAt,url",
    ]

    # Set up environment with GitHub token if available
    env = get_github_env()

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, env=env)

        if result.returncode == 0:
            # Parse JSON response into Pydantic model
            issue_data = json.loads(result.stdout)
            issue = GitHubIssue(**issue_data)

            return issue
        else:
            print(result.stderr, file=sys.stderr)
            sys.exit(result.returncode)
    except FileNotFoundError:
        print("Error: GitHub CLI (gh) is not installed.", file=sys.stderr)
        print("\nTo install gh:", file=sys.stderr)
        print("  - macOS: brew install gh", file=sys.stderr)
        print(
            "  - Linux: See https://github.com/cli/cli#installation",
            file=sys.stderr,
        )
        print(
            "  - Windows: See https://github.com/cli/cli#installation", file=sys.stderr
        )
        print("\nAfter installation, authenticate with: gh auth login", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error parsing issue data: {e}", file=sys.stderr)
        sys.exit(1)


def make_issue_comment(issue_id: str, comment: str) -> None:
    """Post a comment to a GitHub issue using gh CLI."""
    # Get repo information from git remote
    github_repo_url = get_repo_url()
    repo_path = extract_repo_path(github_repo_url)

    # Ensure comment has ADW_BOT_IDENTIFIER to prevent webhook loops
    if not comment.startswith(ADW_BOT_IDENTIFIER):
        comment = f"{ADW_BOT_IDENTIFIER} {comment}"

    # Build command
    cmd = [
        "gh",
        "issue",
        "comment",
        issue_id,
        "-R",
        repo_path,
        "--body",
        comment,
    ]

    # Set up environment with GitHub token if available
    env = get_github_env()

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, env=env)

        if result.returncode == 0:
            print(f"Successfully posted comment to issue #{issue_id}")
        else:
            print(f"Error posting comment: {result.stderr}", file=sys.stderr)
            raise RuntimeError(f"Failed to post comment: {result.stderr}")
    except Exception as e:
        print(f"Error posting comment: {e}", file=sys.stderr)
        raise


def mark_issue_in_progress(issue_id: str) -> None:
    """Mark issue as in progress by adding label and comment."""
    # Get repo information from git remote
    github_repo_url = get_repo_url()
    repo_path = extract_repo_path(github_repo_url)

    # Add "in_progress" label
    cmd = [
        "gh",
        "issue",
        "edit",
        issue_id,
        "-R",
        repo_path,
        "--add-label",
        "in_progress",
    ]

    # Set up environment with GitHub token if available
    env = get_github_env()

    # Try to add label (may fail if label doesn't exist)
    result = subprocess.run(cmd, capture_output=True, text=True, env=env)
    if result.returncode != 0:
        print(f"Note: Could not add 'in_progress' label: {result.stderr}")

    # Post comment indicating work has started
    # make_issue_comment(issue_id, "🚧 ADW is working on this issue...")

    # Assign to self (optional)
    cmd = [
        "gh",
        "issue",
        "edit",
        issue_id,
        "-R",
        repo_path,
        "--add-assignee",
        "@me",
    ]
    result = subprocess.run(cmd, capture_output=True, text=True, env=env)
    if result.returncode == 0:
        print(f"Assigned issue #{issue_id} to self")


def fetch_open_issues(repo_path: str) -> List[GitHubIssueListItem]:
    """Fetch all open issues from the GitHub repository."""
    try:
        cmd = [
            "gh",
            "issue",
            "list",
            "--repo",
            repo_path,
            "--state",
            "open",
            "--json",
            "number,title,body,labels,createdAt,updatedAt",
            "--limit",
            "1000",
        ]

        # Set up environment with GitHub token if available
        env = get_github_env()

        # DEBUG level - not printing command
        result = subprocess.run(
            cmd, capture_output=True, text=True, check=True, env=env
        )

        issues_data = json.loads(result.stdout)
        issues = [GitHubIssueListItem(**issue_data) for issue_data in issues_data]
        print(f"Fetched {len(issues)} open issues")
        return issues

    except subprocess.CalledProcessError as e:
        print(f"ERROR: Failed to fetch issues: {e.stderr}", file=sys.stderr)
        return []
    except json.JSONDecodeError as e:
        print(f"ERROR: Failed to parse issues JSON: {e}", file=sys.stderr)
        return []


def fetch_issue_comments(repo_path: str, issue_number: int) -> List[Dict]:
    """Fetch all comments for a specific issue."""
    try:
        cmd = [
            "gh",
            "issue",
            "view",
            str(issue_number),
            "--repo",
            repo_path,
            "--json",
            "comments",
        ]

        # Set up environment with GitHub token if available
        env = get_github_env()

        result = subprocess.run(
            cmd, capture_output=True, text=True, check=True, env=env
        )
        data = json.loads(result.stdout)
        comments = data.get("comments", [])

        # Sort comments by creation time
        comments.sort(key=lambda c: c.get("createdAt", ""))

        # DEBUG level - not printing
        return comments

    except subprocess.CalledProcessError as e:
        print(
            f"ERROR: Failed to fetch comments for issue #{issue_number}: {e.stderr}",
            file=sys.stderr,
        )
        return []
    except json.JSONDecodeError as e:
        print(
            f"ERROR: Failed to parse comments JSON for issue #{issue_number}: {e}",
            file=sys.stderr,
        )
        return []


def find_keyword_from_comment(keyword: str, issue: GitHubIssue) -> Optional[GitHubComment]:
    """Find the latest comment containing a specific keyword.

    Args:
        keyword: The keyword to search for in comments
        issue: The GitHub issue containing comments

    Returns:
        The latest GitHubComment containing the keyword, or None if not found
    """
    # Sort comments by created_at date (newest first)
    sorted_comments = sorted(issue.comments, key=lambda c: c.created_at, reverse=True)

    # Search through sorted comments (newest first)
    for comment in sorted_comments:
        # Skip ADW bot comments to prevent loops
        if ADW_BOT_IDENTIFIER in comment.body:
            continue

        if keyword in comment.body:
            return comment

    return None
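For orientation, a small sketch of the typical call sequence (assumes an authenticated `gh` CLI; note that `fetch_issue` exits the process on failure instead of returning an error):

```python
from adw_modules.github import (
    extract_repo_path,
    fetch_issue,
    get_repo_url,
    make_issue_comment,
)

repo_path = extract_repo_path(get_repo_url())  # e.g. "owner/repo"
issue = fetch_issue("42", repo_path)           # hypothetical issue number
print(issue.title)
# The bot identifier is prepended automatically, which prevents webhook loops.
make_issue_comment("42", "Status update from an ADW run")
```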
skills/adw-bootstrap/reference/scaled/adw_modules/state.py (new file, 172 lines added)
@@ -0,0 +1,172 @@
"""State management for ADW composable architecture.

Provides persistent state management via file storage and
transient state passing between scripts via stdin/stdout.
"""

import json
import os
import sys
import logging
from typing import Dict, Any, Optional
from adw_modules.data_types import ADWStateData


class ADWState:
    """Container for ADW workflow state with file persistence."""

    STATE_FILENAME = "adw_state.json"

    def __init__(self, adw_id: str):
        """Initialize ADWState with a required ADW ID.

        Args:
            adw_id: The ADW ID for this state (required)
        """
        if not adw_id:
            raise ValueError("adw_id is required for ADWState")

        self.adw_id = adw_id
        # Start with minimal state
        self.data: Dict[str, Any] = {"adw_id": self.adw_id}
        self.logger = logging.getLogger(__name__)

    def update(self, **kwargs):
        """Update state with new key-value pairs."""
        # Filter to only our core fields
        core_fields = {
            "adw_id",
            "issue_number",
            "branch_name",
            "plan_file",
            "issue_class",
            "worktree_path",
            "backend_port",
            "frontend_port",
            "model_set",
            "all_adws",
        }
        for key, value in kwargs.items():
            if key in core_fields:
                self.data[key] = value

    def get(self, key: str, default=None):
        """Get value from state by key."""
        return self.data.get(key, default)

    def append_adw_id(self, adw_id: str):
        """Append an ADW ID to the all_adws list if not already present."""
        all_adws = self.data.get("all_adws", [])
        if adw_id not in all_adws:
            all_adws.append(adw_id)
            self.data["all_adws"] = all_adws

    def get_working_directory(self) -> str:
        """Get the working directory for this ADW instance.

        Returns worktree_path if set (for isolated workflows),
        otherwise returns the main repo path.
        """
        worktree_path = self.data.get("worktree_path")
        if worktree_path:
            return worktree_path

        # Return main repo path (parent of adws directory)
        return os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        )

    def get_state_path(self) -> str:
        """Get path to state file."""
        project_root = os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        )
        return os.path.join(project_root, "agents", self.adw_id, self.STATE_FILENAME)

    def save(self, workflow_step: Optional[str] = None) -> None:
        """Save state to file in agents/{adw_id}/adw_state.json."""
        state_path = self.get_state_path()
        os.makedirs(os.path.dirname(state_path), exist_ok=True)

        # Create ADWStateData for validation
        state_data = ADWStateData(
            adw_id=self.data.get("adw_id"),
            issue_number=self.data.get("issue_number"),
            branch_name=self.data.get("branch_name"),
            plan_file=self.data.get("plan_file"),
            issue_class=self.data.get("issue_class"),
            worktree_path=self.data.get("worktree_path"),
            backend_port=self.data.get("backend_port"),
            frontend_port=self.data.get("frontend_port"),
            model_set=self.data.get("model_set", "base"),
            all_adws=self.data.get("all_adws", []),
        )

        # Save as JSON
        with open(state_path, "w") as f:
            json.dump(state_data.model_dump(), f, indent=2)

        self.logger.info(f"Saved state to {state_path}")
        if workflow_step:
            self.logger.info(f"State updated by: {workflow_step}")

    @classmethod
    def load(
        cls, adw_id: str, logger: Optional[logging.Logger] = None
    ) -> Optional["ADWState"]:
        """Load state from file if it exists."""
        project_root = os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        )
        state_path = os.path.join(project_root, "agents", adw_id, cls.STATE_FILENAME)

        if not os.path.exists(state_path):
            return None

        try:
            with open(state_path, "r") as f:
                data = json.load(f)

            # Validate with ADWStateData
            state_data = ADWStateData(**data)

            # Create ADWState instance
            state = cls(state_data.adw_id)
            state.data = state_data.model_dump()

            if logger:
                logger.info(f"🔍 Found existing state from {state_path}")
                logger.info(f"State: {json.dumps(state_data.model_dump(), indent=2)}")

            return state
        except Exception as e:
            if logger:
                logger.error(f"Failed to load state from {state_path}: {e}")
            return None

    @classmethod
    def from_stdin(cls) -> Optional["ADWState"]:
        """Read state from stdin if available (for piped input).

        Returns None if no piped input is available (stdin is a tty).
        """
        if sys.stdin.isatty():
            return None
        try:
            input_data = sys.stdin.read()
            if not input_data.strip():
                return None
            data = json.loads(input_data)
            adw_id = data.get("adw_id")
            if not adw_id:
                return None  # No valid state without adw_id
            state = cls(adw_id)
            state.data = data
            return state
        except (json.JSONDecodeError, EOFError):
            return None

    def to_stdout(self):
        """Write state to stdout as JSON (for piping to next script)."""
        # Only output core fields
        output_data = {
            "adw_id": self.data.get("adw_id"),
            "issue_number": self.data.get("issue_number"),
            "branch_name": self.data.get("branch_name"),
            "plan_file": self.data.get("plan_file"),
            "issue_class": self.data.get("issue_class"),
            "worktree_path": self.data.get("worktree_path"),
            "backend_port": self.data.get("backend_port"),
            "frontend_port": self.data.get("frontend_port"),
            "all_adws": self.data.get("all_adws", []),
        }
        print(json.dumps(output_data, indent=2))
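The save/load and stdin/stdout pairing above is what lets workflow scripts be chained with shell pipes. A sketch under assumed script roles (the real entry points are the adw_*_iso scripts referenced elsewhere in this commit):

```python
from adw_modules.state import ADWState

# End of an upstream script (e.g. a planning step):
state = ADWState("a1b2c3d4")  # hypothetical ADW ID
state.update(issue_number="42", branch_name="feature-issue-42-adw-a1b2c3d4-example")
state.save(workflow_step="plan")  # persists to agents/a1b2c3d4/adw_state.json
state.to_stdout()                 # emits core-field JSON for the next script

# Start of a downstream script:
state = ADWState.from_stdin() or ADWState.load("a1b2c3d4")
```

Shell usage would then look like `uv run adw_plan_iso.py 42 | uv run adw_build_iso.py`.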
@@ -0,0 +1,714 @@
"""Shared AI Developer Workflow (ADW) operations."""

import glob
import json
import logging
import os
import subprocess
import re
from typing import Tuple, Optional
from adw_modules.data_types import (
    AgentTemplateRequest,
    GitHubIssue,
    AgentPromptResponse,
    IssueClassSlashCommand,
    ADWExtractionResult,
)
from adw_modules.agent import execute_template
from adw_modules.github import get_repo_url, extract_repo_path, ADW_BOT_IDENTIFIER
from adw_modules.state import ADWState
from adw_modules.utils import parse_json


# Agent name constants
AGENT_PLANNER = "sdlc_planner"
AGENT_IMPLEMENTOR = "sdlc_implementor"
AGENT_CLASSIFIER = "issue_classifier"
AGENT_BRANCH_GENERATOR = "branch_generator"
AGENT_PR_CREATOR = "pr_creator"

# Available ADW workflows for runtime validation
AVAILABLE_ADW_WORKFLOWS = [
    # Isolated workflows (all workflows are now iso-based)
    "adw_plan_iso",
    "adw_patch_iso",
    "adw_build_iso",
    "adw_test_iso",
    "adw_review_iso",
    "adw_document_iso",
    "adw_ship_iso",
    "adw_sdlc_ZTE_iso",  # Zero Touch Execution workflow
    "adw_plan_build_iso",
    "adw_plan_build_test_iso",
    "adw_plan_build_test_review_iso",
    "adw_plan_build_document_iso",
    "adw_plan_build_review_iso",
    "adw_sdlc_iso",
]


def format_issue_message(
    adw_id: str, agent_name: str, message: str, session_id: Optional[str] = None
) -> str:
    """Format a message for issue comments with ADW tracking and bot identifier."""
    # Always include ADW_BOT_IDENTIFIER to prevent webhook loops
    if session_id:
        return f"{ADW_BOT_IDENTIFIER} {adw_id}_{agent_name}_{session_id}: {message}"
    return f"{ADW_BOT_IDENTIFIER} {adw_id}_{agent_name}: {message}"
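Given the constants above, the formatted comment strings look like this (values hypothetical):

```python
format_issue_message("a1b2c3d4", "sdlc_planner", "Planning complete")
# -> '[ADW-AGENTS] a1b2c3d4_sdlc_planner: Planning complete'

format_issue_message("a1b2c3d4", "sdlc_planner", "Planning complete", session_id="s1")
# -> '[ADW-AGENTS] a1b2c3d4_sdlc_planner_s1: Planning complete'
```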
|
||||
|
||||
def extract_adw_info(text: str, temp_adw_id: str) -> ADWExtractionResult:
|
||||
"""Extract ADW workflow, ID, and model_set from text using classify_adw agent.
|
||||
Returns ADWExtractionResult with workflow_command, adw_id, and model_set."""
|
||||
|
||||
# Use classify_adw to extract structured info
|
||||
request = AgentTemplateRequest(
|
||||
agent_name="adw_classifier",
|
||||
slash_command="/classify_adw",
|
||||
args=[text],
|
||||
adw_id=temp_adw_id,
|
||||
)
|
||||
|
||||
try:
|
||||
response = execute_template(request) # No logger available in this function
|
||||
|
||||
if not response.success:
|
||||
print(f"Failed to classify ADW: {response.output}")
|
||||
return ADWExtractionResult() # Empty result
|
||||
|
||||
# Parse JSON response using utility that handles markdown
|
||||
try:
|
||||
data = parse_json(response.output, dict)
|
||||
adw_command = data.get("adw_slash_command", "").replace(
|
||||
"/", ""
|
||||
) # Remove slash
|
||||
adw_id = data.get("adw_id")
|
||||
model_set = data.get("model_set", "base") # Default to "base"
|
||||
|
||||
# Validate command
|
||||
if adw_command and adw_command in AVAILABLE_ADW_WORKFLOWS:
|
||||
return ADWExtractionResult(
|
||||
workflow_command=adw_command,
|
||||
adw_id=adw_id,
|
||||
model_set=model_set
|
||||
)
|
||||
|
||||
return ADWExtractionResult() # Empty result
|
||||
|
||||
except ValueError as e:
|
||||
print(f"Failed to parse classify_adw response: {e}")
|
||||
return ADWExtractionResult() # Empty result
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error calling classify_adw: {e}")
|
||||
return ADWExtractionResult() # Empty result
|
||||
|
||||
|
||||
def classify_issue(
|
||||
issue: GitHubIssue, adw_id: str, logger: logging.Logger
|
||||
) -> Tuple[Optional[IssueClassSlashCommand], Optional[str]]:
|
||||
"""Classify GitHub issue and return appropriate slash command.
|
||||
Returns (command, error_message) tuple."""
|
||||
|
||||
# Use the classify_issue slash command template with minimal payload
|
||||
# Only include the essential fields: number, title, body
|
||||
minimal_issue_json = issue.model_dump_json(
|
||||
by_alias=True, include={"number", "title", "body"}
|
||||
)
|
||||
|
||||
request = AgentTemplateRequest(
|
||||
agent_name=AGENT_CLASSIFIER,
|
||||
slash_command="/classify_issue",
|
||||
args=[minimal_issue_json],
|
||||
adw_id=adw_id,
|
||||
)
|
||||
|
||||
logger.debug(f"Classifying issue: {issue.title}")
|
||||
|
||||
response = execute_template(request)
|
||||
|
||||
logger.debug(
|
||||
f"Classification response: {response.model_dump_json(indent=2, by_alias=True)}"
|
||||
)
|
||||
|
||||
if not response.success:
|
||||
return None, response.output
|
||||
|
||||
# Extract the classification from the response
|
||||
output = response.output.strip()
|
||||
|
||||
# Look for the classification pattern in the output
|
||||
# Claude might add explanation, so we need to extract just the command
|
||||
classification_match = re.search(r"(/chore|/bug|/feature|0)", output)
|
||||
|
||||
if classification_match:
|
||||
issue_command = classification_match.group(1)
|
||||
else:
|
||||
issue_command = output
|
||||
|
||||
if issue_command == "0":
|
||||
return None, f"No command selected: {response.output}"
|
||||
|
||||
if issue_command not in ["/chore", "/bug", "/feature"]:
|
||||
return None, f"Invalid command selected: {response.output}"
|
||||
|
||||
return issue_command, None # type: ignore
|
||||
|
||||
|
||||
def build_plan(
|
||||
issue: GitHubIssue,
|
||||
command: str,
|
||||
adw_id: str,
|
||||
logger: logging.Logger,
|
||||
working_dir: Optional[str] = None,
|
||||
) -> AgentPromptResponse:
|
||||
"""Build implementation plan for the issue using the specified command."""
|
||||
# Use minimal payload like classify_issue does
|
||||
minimal_issue_json = issue.model_dump_json(
|
||||
by_alias=True, include={"number", "title", "body"}
|
||||
)
|
||||
|
||||
issue_plan_template_request = AgentTemplateRequest(
|
||||
agent_name=AGENT_PLANNER,
|
||||
slash_command=command,
|
||||
args=[str(issue.number), adw_id, minimal_issue_json],
|
||||
adw_id=adw_id,
|
||||
working_dir=working_dir,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"issue_plan_template_request: {issue_plan_template_request.model_dump_json(indent=2, by_alias=True)}"
|
||||
)
|
||||
|
||||
issue_plan_response = execute_template(issue_plan_template_request)
|
||||
|
||||
logger.debug(
|
||||
f"issue_plan_response: {issue_plan_response.model_dump_json(indent=2, by_alias=True)}"
|
||||
)
|
||||
|
||||
return issue_plan_response
|
||||
|
||||
|
||||
def implement_plan(
|
||||
plan_file: str,
|
||||
adw_id: str,
|
||||
logger: logging.Logger,
|
||||
agent_name: Optional[str] = None,
|
||||
working_dir: Optional[str] = None,
|
||||
) -> AgentPromptResponse:
|
||||
"""Implement the plan using the /implement command."""
|
||||
# Use provided agent_name or default to AGENT_IMPLEMENTOR
|
||||
implementor_name = agent_name or AGENT_IMPLEMENTOR
|
||||
|
||||
implement_template_request = AgentTemplateRequest(
|
||||
agent_name=implementor_name,
|
||||
slash_command="/implement",
|
||||
args=[plan_file],
|
||||
adw_id=adw_id,
|
||||
working_dir=working_dir,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"implement_template_request: {implement_template_request.model_dump_json(indent=2, by_alias=True)}"
|
||||
)
|
||||
|
||||
implement_response = execute_template(implement_template_request)
|
||||
|
||||
logger.debug(
|
||||
f"implement_response: {implement_response.model_dump_json(indent=2, by_alias=True)}"
|
||||
)
|
||||
|
||||
return implement_response
|
||||
|
||||
|
||||
def generate_branch_name(
|
||||
issue: GitHubIssue,
|
||||
issue_class: IssueClassSlashCommand,
|
||||
adw_id: str,
|
||||
logger: logging.Logger,
|
||||
) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""Generate a git branch name for the issue.
|
||||
Returns (branch_name, error_message) tuple."""
|
||||
# Remove the leading slash from issue_class for the branch name
|
||||
issue_type = issue_class.replace("/", "")
|
||||
|
||||
# Use minimal payload like classify_issue does
|
||||
minimal_issue_json = issue.model_dump_json(
|
||||
by_alias=True, include={"number", "title", "body"}
|
||||
)
|
||||
|
||||
request = AgentTemplateRequest(
|
||||
agent_name=AGENT_BRANCH_GENERATOR,
|
||||
slash_command="/generate_branch_name",
|
||||
args=[issue_type, adw_id, minimal_issue_json],
|
||||
adw_id=adw_id,
|
||||
)
|
||||
|
||||
response = execute_template(request)
|
||||
|
||||
if not response.success:
|
||||
return None, response.output
|
||||
|
||||
branch_name = response.output.strip()
|
||||
logger.info(f"Generated branch name: {branch_name}")
|
||||
return branch_name, None
|
||||
|
||||
|
||||
def create_commit(
|
||||
agent_name: str,
|
||||
issue: GitHubIssue,
|
||||
issue_class: IssueClassSlashCommand,
|
||||
adw_id: str,
|
||||
logger: logging.Logger,
|
||||
working_dir: str,
|
||||
) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""Create a git commit with a properly formatted message.
|
||||
Returns (commit_message, error_message) tuple."""
|
||||
# Remove the leading slash from issue_class
|
||||
issue_type = issue_class.replace("/", "")
|
||||
|
||||
# Create unique committer agent name by suffixing '_committer'
|
||||
unique_agent_name = f"{agent_name}_committer"
|
||||
|
||||
# Use minimal payload like classify_issue does
|
||||
minimal_issue_json = issue.model_dump_json(
|
||||
by_alias=True, include={"number", "title", "body"}
|
||||
)
|
||||
|
||||
request = AgentTemplateRequest(
|
||||
agent_name=unique_agent_name,
|
||||
slash_command="/commit",
|
||||
args=[agent_name, issue_type, minimal_issue_json],
|
||||
adw_id=adw_id,
|
||||
working_dir=working_dir,
|
||||
)
|
||||
|
||||
response = execute_template(request)
|
||||
|
||||
if not response.success:
|
||||
return None, response.output
|
||||
|
||||
commit_message = response.output.strip()
|
||||
logger.info(f"Created commit message: {commit_message}")
|
||||
return commit_message, None
|
||||
|
||||
|
||||
def create_pull_request(
|
||||
branch_name: str,
|
||||
issue: Optional[GitHubIssue],
|
||||
state: ADWState,
|
||||
logger: logging.Logger,
|
||||
working_dir: str,
|
||||
) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""Create a pull request for the implemented changes.
|
||||
Returns (pr_url, error_message) tuple."""
|
||||
|
||||
# Get plan file from state (may be None for test runs)
|
||||
plan_file = state.get("plan_file") or "No plan file (test run)"
|
||||
adw_id = state.get("adw_id")
|
||||
|
||||
# If we don't have issue data, try to construct minimal data
|
||||
if not issue:
|
||||
issue_data = state.get("issue", {})
|
||||
issue_json = json.dumps(issue_data) if issue_data else "{}"
|
||||
elif isinstance(issue, dict):
|
||||
# Try to reconstruct as GitHubIssue model which handles datetime serialization
|
||||
from adw_modules.data_types import GitHubIssue
|
||||
|
||||
try:
|
||||
issue_model = GitHubIssue(**issue)
|
||||
# Use minimal payload like classify_issue does
|
||||
issue_json = issue_model.model_dump_json(
|
||||
by_alias=True, include={"number", "title", "body"}
|
||||
)
|
||||
except Exception:
|
||||
# Fallback: use json.dumps with default str converter for datetime
|
||||
issue_json = json.dumps(issue, default=str)
|
||||
else:
|
||||
# Use minimal payload like classify_issue does
|
||||
issue_json = issue.model_dump_json(
|
||||
by_alias=True, include={"number", "title", "body"}
|
||||
)
|
||||
|
||||
request = AgentTemplateRequest(
|
||||
agent_name=AGENT_PR_CREATOR,
|
||||
slash_command="/pull_request",
|
||||
args=[branch_name, issue_json, plan_file, adw_id],
|
||||
adw_id=adw_id,
|
||||
working_dir=working_dir,
|
||||
)
|
||||
|
||||
response = execute_template(request)
|
||||
|
||||
if not response.success:
|
||||
return None, response.output
|
||||
|
||||
pr_url = response.output.strip()
|
||||
logger.info(f"Created pull request: {pr_url}")
|
||||
return pr_url, None
|
||||
|
||||
|
||||
def ensure_plan_exists(state: ADWState, issue_number: str) -> str:
|
||||
"""Find or error if no plan exists for issue.
|
||||
Used by isolated build workflows in standalone mode."""
|
||||
# Check if plan file is in state
|
||||
if state.get("plan_file"):
|
||||
return state.get("plan_file")
|
||||
|
||||
# Check current branch
|
||||
from adw_modules.git_ops import get_current_branch
|
||||
|
||||
branch = get_current_branch()
|
||||
|
||||
# Look for plan in branch name
|
||||
if f"-{issue_number}-" in branch:
|
||||
# Look for plan file
|
||||
plans = glob.glob(f"specs/*{issue_number}*.md")
|
||||
if plans:
|
||||
return plans[0]
|
||||
|
||||
# No plan found
|
||||
raise ValueError(
|
||||
f"No plan found for issue {issue_number}. Run adw_plan_iso.py first."
|
||||
)
|
||||
|
||||
|
||||
def ensure_adw_id(
|
||||
issue_number: str,
|
||||
adw_id: Optional[str] = None,
|
||||
logger: Optional[logging.Logger] = None,
|
||||
) -> str:
|
||||
"""Get ADW ID or create a new one and initialize state.
|
||||
|
||||
Args:
|
||||
issue_number: The issue number to find/create ADW ID for
|
||||
adw_id: Optional existing ADW ID to use
|
||||
logger: Optional logger instance
|
||||
|
||||
Returns:
|
||||
The ADW ID (existing or newly created)
|
||||
"""
|
||||
# If ADW ID provided, check if state exists
|
||||
if adw_id:
|
||||
state = ADWState.load(adw_id, logger)
|
||||
if state:
|
||||
if logger:
|
||||
logger.info(f"Found existing ADW state for ID: {adw_id}")
|
||||
else:
|
||||
print(f"Found existing ADW state for ID: {adw_id}")
|
||||
return adw_id
|
||||
# ADW ID provided but no state exists, create state
|
||||
state = ADWState(adw_id)
|
||||
state.update(adw_id=adw_id, issue_number=issue_number)
|
||||
state.save("ensure_adw_id")
|
||||
if logger:
|
||||
logger.info(f"Created new ADW state for provided ID: {adw_id}")
|
||||
else:
|
||||
print(f"Created new ADW state for provided ID: {adw_id}")
|
||||
return adw_id
|
||||
|
||||
# No ADW ID provided, create new one with state
|
||||
from adw_modules.utils import make_adw_id
|
||||
|
||||
new_adw_id = make_adw_id()
|
||||
state = ADWState(new_adw_id)
|
||||
state.update(adw_id=new_adw_id, issue_number=issue_number)
|
||||
state.save("ensure_adw_id")
|
||||
if logger:
|
||||
logger.info(f"Created new ADW ID and state: {new_adw_id}")
|
||||
else:
|
||||
print(f"Created new ADW ID and state: {new_adw_id}")
|
||||
return new_adw_id
|
||||
|
||||
|
||||
def find_existing_branch_for_issue(
|
||||
issue_number: str, adw_id: Optional[str] = None, cwd: Optional[str] = None
|
||||
) -> Optional[str]:
|
||||
"""Find an existing branch for the given issue number.
|
||||
Returns branch name if found, None otherwise."""
|
||||
# List all branches
|
||||
result = subprocess.run(
|
||||
["git", "branch", "-a"], capture_output=True, text=True, cwd=cwd
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
return None
|
||||
|
||||
branches = result.stdout.strip().split("\n")
|
||||
|
||||
# Look for branch with standardized pattern: *-issue-{issue_number}-adw-{adw_id}-*
|
||||
for branch in branches:
|
||||
branch = branch.strip().replace("* ", "").replace("remotes/origin/", "")
|
||||
# Check for the standardized pattern
|
||||
if f"-issue-{issue_number}-" in branch:
|
||||
if adw_id and f"-adw-{adw_id}-" in branch:
|
||||
return branch
|
||||
elif not adw_id:
|
||||
# Return first match if no adw_id specified
|
||||
return branch
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def find_plan_for_issue(
|
||||
issue_number: str, adw_id: Optional[str] = None
|
||||
) -> Optional[str]:
|
||||
"""Find plan file for the given issue number and optional adw_id.
|
||||
Returns path to plan file if found, None otherwise."""
|
||||
import os
|
||||
|
||||
# Get project root
|
||||
project_root = os.path.dirname(
|
||||
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
)
|
||||
agents_dir = os.path.join(project_root, "agents")
|
||||
|
||||
if not os.path.exists(agents_dir):
|
||||
return None
|
||||
|
||||
# If adw_id is provided, check specific directory first
|
||||
if adw_id:
|
||||
plan_path = os.path.join(agents_dir, adw_id, AGENT_PLANNER, "plan.md")
|
||||
if os.path.exists(plan_path):
|
||||
return plan_path
|
||||
|
||||
# Otherwise, search all agent directories
|
||||
for agent_id in os.listdir(agents_dir):
|
||||
agent_path = os.path.join(agents_dir, agent_id)
|
||||
if os.path.isdir(agent_path):
|
||||
plan_path = os.path.join(agent_path, AGENT_PLANNER, "plan.md")
|
||||
if os.path.exists(plan_path):
|
||||
# Check if this plan is for our issue by reading branch info or checking commits
|
||||
# For now, return the first plan found (can be improved)
|
||||
return plan_path
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def create_or_find_branch(
|
||||
issue_number: str,
|
||||
issue: GitHubIssue,
|
||||
state: ADWState,
|
||||
logger: logging.Logger,
|
||||
cwd: Optional[str] = None,
|
||||
) -> Tuple[str, Optional[str]]:
|
||||
"""Create or find a branch for the given issue.
|
||||
|
||||
1. First checks state for existing branch name
|
||||
2. Then looks for existing branches matching the issue
|
||||
3. If none found, classifies the issue and creates a new branch
|
||||
|
||||
Returns (branch_name, error_message) tuple.
|
||||
"""
|
||||
# 1. Check state for branch name
|
||||
branch_name = state.get("branch_name") or state.get("branch", {}).get("name")
|
||||
if branch_name:
|
||||
logger.info(f"Found branch in state: {branch_name}")
|
||||
# Check if we need to checkout
|
||||
from adw_modules.git_ops import get_current_branch
|
||||
|
||||
current = get_current_branch(cwd=cwd)
|
||||
if current != branch_name:
|
||||
result = subprocess.run(
|
||||
["git", "checkout", branch_name],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=cwd,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
# Branch might not exist locally, try to create from remote
|
||||
result = subprocess.run(
|
||||
["git", "checkout", "-b", branch_name, f"origin/{branch_name}"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=cwd,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
return "", f"Failed to checkout branch: {result.stderr}"
|
||||
return branch_name, None
|
||||
|
||||
# 2. Look for existing branch
|
||||
adw_id = state.get("adw_id")
|
||||
existing_branch = find_existing_branch_for_issue(issue_number, adw_id, cwd=cwd)
|
||||
if existing_branch:
|
||||
logger.info(f"Found existing branch: {existing_branch}")
|
||||
# Checkout the branch
|
||||
result = subprocess.run(
|
||||
["git", "checkout", existing_branch],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=cwd,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
return "", f"Failed to checkout branch: {result.stderr}"
|
||||
state.update(branch_name=existing_branch)
|
||||
return existing_branch, None
|
||||
|
||||
# 3. Create new branch - classify issue first
|
||||
logger.info("No existing branch found, creating new one")
|
||||
|
||||
# Classify the issue
|
||||
issue_command, error = classify_issue(issue, adw_id, logger)
|
||||
if error:
|
||||
return "", f"Failed to classify issue: {error}"
|
||||
|
||||
state.update(issue_class=issue_command)
|
||||
|
||||
# Generate branch name
|
||||
branch_name, error = generate_branch_name(issue, issue_command, adw_id, logger)
|
||||
if error:
|
||||
return "", f"Failed to generate branch name: {error}"
|
||||
|
||||
# Create the branch
|
||||
from adw_modules.git_ops import create_branch
|
||||
|
||||
success, error = create_branch(branch_name, cwd=cwd)
|
||||
if not success:
|
||||
return "", f"Failed to create branch: {error}"
|
||||
|
||||
state.update(branch_name=branch_name)
|
||||
logger.info(f"Created and checked out new branch: {branch_name}")
|
||||
|
||||
return branch_name, None
|
||||
|
||||
|
||||
def find_spec_file(state: ADWState, logger: logging.Logger) -> Optional[str]:
    """Find the spec file from state or by examining git diff.

    For isolated workflows, automatically uses worktree_path from state.
    """
    # Get worktree path if in isolated workflow
    worktree_path = state.get("worktree_path")

    # Check if spec file is already in state (from plan phase)
    spec_file = state.get("plan_file")
    if spec_file:
        # If worktree_path exists and spec_file is relative, make it absolute
        if worktree_path and not os.path.isabs(spec_file):
            spec_file = os.path.join(worktree_path, spec_file)

        if os.path.exists(spec_file):
            logger.info(f"Using spec file from state: {spec_file}")
            return spec_file

    # Otherwise, try to find it from git diff
    logger.info("Looking for spec file in git diff")
    result = subprocess.run(
        ["git", "diff", "origin/main", "--name-only"],
        capture_output=True,
        text=True,
        cwd=worktree_path,
    )

    if result.returncode == 0:
        files = result.stdout.strip().split("\n")
        spec_files = [f for f in files if f.startswith("specs/") and f.endswith(".md")]

        if spec_files:
            # Use the first spec file found
            spec_file = spec_files[0]
            if worktree_path:
                spec_file = os.path.join(worktree_path, spec_file)
            logger.info(f"Found spec file: {spec_file}")
            return spec_file

    # If still not found, try to derive from branch name
    branch_name = state.get("branch_name")
    if branch_name:
        # Extract issue number from branch name
        import re

        match = re.search(r"issue-(\d+)", branch_name)
        if match:
            issue_num = match.group(1)
            adw_id = state.get("adw_id")

            # Look for spec files matching the pattern
            import glob

            # Use worktree_path if provided, otherwise current directory
            search_dir = worktree_path if worktree_path else os.getcwd()
            pattern = os.path.join(
                search_dir, f"specs/issue-{issue_num}-adw-{adw_id}*.md"
            )
            spec_files = glob.glob(pattern)

            if spec_files:
                spec_file = spec_files[0]
                logger.info(f"Found spec file by pattern: {spec_file}")
                return spec_file

    logger.warning("No spec file found")
    return None

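# find_spec_file example (illustrative values, not from a real run): with
# state = {"branch_name": "feat-issue-42-adw-abc12345", "adw_id": "abc12345"},
# no plan_file, and no specs/*.md in the diff, the glob fallback searches for
# specs/issue-42-adw-abc12345*.md and returns the first match, or None.
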
def create_and_implement_patch(
    adw_id: str,
    review_change_request: str,
    logger: logging.Logger,
    agent_name_planner: str,
    agent_name_implementor: str,
    spec_path: Optional[str] = None,
    issue_screenshots: Optional[str] = None,
    working_dir: Optional[str] = None,
) -> Tuple[Optional[str], AgentPromptResponse]:
    """Create a patch plan and implement it.
    Returns (patch_file_path, implement_response) tuple."""

    # Create patch plan using /patch command
    args = [adw_id, review_change_request]

    # Add optional arguments in the correct order
    if spec_path:
        args.append(spec_path)
    else:
        args.append("")  # Empty string for optional spec_path

    args.append(agent_name_planner)

    if issue_screenshots:
        args.append(issue_screenshots)

    request = AgentTemplateRequest(
        agent_name=agent_name_planner,
        slash_command="/patch",
        args=args,
        adw_id=adw_id,
        working_dir=working_dir,
    )

    logger.debug(
        f"Patch plan request: {request.model_dump_json(indent=2, by_alias=True)}"
    )

    response = execute_template(request)

    logger.debug(
        f"Patch plan response: {response.model_dump_json(indent=2, by_alias=True)}"
    )

    if not response.success:
        logger.error(f"Error creating patch plan: {response.output}")
        # Return None and a failed response
        return None, AgentPromptResponse(
            output=f"Failed to create patch plan: {response.output}", success=False
        )

    # Extract the patch plan file path from the response
    patch_file_path = response.output.strip()

    # Validate that it looks like a file path
    if "specs/patch/" not in patch_file_path or not patch_file_path.endswith(".md"):
        logger.error(f"Invalid patch plan path returned: {patch_file_path}")
        return None, AgentPromptResponse(
            output=f"Invalid patch plan path: {patch_file_path}", success=False
        )

    logger.info(f"Created patch plan: {patch_file_path}")

    # Now implement the patch plan using the provided implementor agent name
    implement_response = implement_plan(
        patch_file_path, adw_id, logger, agent_name_implementor, working_dir=working_dir
    )

    return patch_file_path, implement_response

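# Usage sketch (hypothetical agent names and request text): create and apply a
# patch for a single review comment.
#
#     patch_path, resp = create_and_implement_patch(
#         adw_id="abc12345",
#         review_change_request="Fix the failing date formatting test",
#         logger=logger,
#         agent_name_planner="patch_planner",
#         agent_name_implementor="patch_implementor",
#     )
#     if not resp.success:
#         logger.error(resp.output)
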
@@ -0,0 +1,243 @@
"""Worktree and port management operations for isolated ADW workflows.

Provides utilities for creating and managing git worktrees under trees/<adw_id>/
and allocating unique ports for each isolated instance.
"""

import os
import subprocess
import logging
import socket
import shutil  # needed by remove_worktree() for manual directory cleanup
from typing import Tuple, Optional
from adw_modules.state import ADWState


def create_worktree(adw_id: str, branch_name: str, logger: logging.Logger) -> Tuple[Optional[str], Optional[str]]:
    """Create a git worktree for isolated ADW execution.

    Args:
        adw_id: The ADW ID for this worktree
        branch_name: The branch name to create the worktree from
        logger: Logger instance

    Returns:
        Tuple of (worktree_path, error_message)
        worktree_path is the absolute path if successful, None on error
    """
    # Get project root (parent of adws directory)
    project_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )

    # Create trees directory if it doesn't exist
    trees_dir = os.path.join(project_root, "trees")
    os.makedirs(trees_dir, exist_ok=True)

    # Construct worktree path
    worktree_path = os.path.join(trees_dir, adw_id)

    # Check if worktree already exists
    if os.path.exists(worktree_path):
        logger.warning(f"Worktree already exists at {worktree_path}")
        return worktree_path, None

    # First, fetch latest changes from origin
    logger.info("Fetching latest changes from origin")
    fetch_result = subprocess.run(
        ["git", "fetch", "origin"],
        capture_output=True,
        text=True,
        cwd=project_root
    )
    if fetch_result.returncode != 0:
        logger.warning(f"Failed to fetch from origin: {fetch_result.stderr}")

    # Create the worktree using git, branching from origin/main
    # Use -b to create the branch as part of worktree creation
    cmd = ["git", "worktree", "add", "-b", branch_name, worktree_path, "origin/main"]
    result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root)

    if result.returncode != 0:
        # If branch already exists, try without -b
        if "already exists" in result.stderr:
            cmd = ["git", "worktree", "add", worktree_path, branch_name]
            result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root)

        if result.returncode != 0:
            error_msg = f"Failed to create worktree: {result.stderr}"
            logger.error(error_msg)
            return None, error_msg

    logger.info(f"Created worktree at {worktree_path} for branch {branch_name}")
    return worktree_path, None

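# Example (illustrative): create_worktree("abc12345", "feat-issue-42-adw-abc12345", logger)
# creates <project_root>/trees/abc12345 checked out on a new
# feat-issue-42-adw-abc12345 branch cut from origin/main.
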
def validate_worktree(adw_id: str, state: ADWState) -> Tuple[bool, Optional[str]]:
    """Validate worktree exists in state, filesystem, and git.

    Performs three-way validation to ensure consistency:
    1. State has worktree_path
    2. Directory exists on filesystem
    3. Git knows about the worktree

    Args:
        adw_id: The ADW ID to validate
        state: The ADW state object

    Returns:
        Tuple of (is_valid, error_message)
    """
    # Check state has worktree_path
    worktree_path = state.get("worktree_path")
    if not worktree_path:
        return False, "No worktree_path in state"

    # Check directory exists
    if not os.path.exists(worktree_path):
        return False, f"Worktree directory not found: {worktree_path}"

    # Check git knows about it
    result = subprocess.run(["git", "worktree", "list"], capture_output=True, text=True)
    if worktree_path not in result.stdout:
        return False, "Worktree not registered with git"

    return True, None

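# Usage sketch (hypothetical caller): gate a workflow phase on a valid worktree.
#
#     ok, err = validate_worktree(adw_id, state)
#     if not ok:
#         logger.error(f"Worktree validation failed: {err}")
#         sys.exit(1)
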
def get_worktree_path(adw_id: str) -> str:
    """Get absolute path to worktree.

    Args:
        adw_id: The ADW ID

    Returns:
        Absolute path to worktree directory
    """
    project_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )
    return os.path.join(project_root, "trees", adw_id)

def remove_worktree(adw_id: str, logger: logging.Logger) -> Tuple[bool, Optional[str]]:
    """Remove a worktree and clean up.

    Args:
        adw_id: The ADW ID for the worktree to remove
        logger: Logger instance

    Returns:
        Tuple of (success, error_message)
    """
    worktree_path = get_worktree_path(adw_id)

    # First remove via git
    cmd = ["git", "worktree", "remove", worktree_path, "--force"]
    result = subprocess.run(cmd, capture_output=True, text=True)

    if result.returncode != 0:
        # Try to clean up manually if git command failed
        if os.path.exists(worktree_path):
            try:
                shutil.rmtree(worktree_path)
                logger.warning(f"Manually removed worktree directory: {worktree_path}")
            except Exception as e:
                return False, f"Failed to remove worktree: {result.stderr}, manual cleanup failed: {e}"

    logger.info(f"Removed worktree at {worktree_path}")
    return True, None

def setup_worktree_environment(worktree_path: str, backend_port: int, frontend_port: int, logger: logging.Logger) -> None:
    """Set up worktree environment by creating .ports.env file.

    The actual environment setup (copying .env files, installing dependencies) is handled
    by the install_worktree.md command, which runs inside the worktree.

    Args:
        worktree_path: Path to the worktree
        backend_port: Backend port number
        frontend_port: Frontend port number
        logger: Logger instance
    """
    # Create .ports.env file with port configuration
    ports_env_path = os.path.join(worktree_path, ".ports.env")

    with open(ports_env_path, "w") as f:
        f.write(f"BACKEND_PORT={backend_port}\n")
        f.write(f"FRONTEND_PORT={frontend_port}\n")
        f.write(f"VITE_BACKEND_URL=http://localhost:{backend_port}\n")

    logger.info(f"Created .ports.env with Backend: {backend_port}, Frontend: {frontend_port}")

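# For example, setup_worktree_environment(path, 9103, 9203, logger) writes a
# .ports.env containing:
#
#     BACKEND_PORT=9103
#     FRONTEND_PORT=9203
#     VITE_BACKEND_URL=http://localhost:9103
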
# Port management functions

def get_ports_for_adw(adw_id: str) -> Tuple[int, int]:
    """Deterministically assign ports based on ADW ID.

    Args:
        adw_id: The ADW ID

    Returns:
        Tuple of (backend_port, frontend_port)
    """
    # Convert first 8 chars of ADW ID to index (0-14)
    # Using base 36 conversion and modulo to get consistent mapping
    try:
        # Take first 8 alphanumeric chars and convert from base 36
        id_chars = ''.join(c for c in adw_id[:8] if c.isalnum())
        index = int(id_chars, 36) % 15
    except ValueError:
        # Fallback to simple hash if conversion fails
        index = hash(adw_id) % 15

    backend_port = 9100 + index
    frontend_port = 9200 + index

    return backend_port, frontend_port

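# Worked example (illustrative): adw_id "00000001" gives id_chars "00000001",
# int("00000001", 36) == 1 and 1 % 15 == 1, so the ports are (9101, 9201).
# Note the hash() fallback is only stable within one process unless
# PYTHONHASHSEED is fixed; the base-36 path is the deterministic one.
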
def is_port_available(port: int) -> bool:
    """Check if a port is available for binding.

    Args:
        port: Port number to check

    Returns:
        True if port is available, False otherwise
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(1)
            s.bind(('localhost', port))
            return True
    except (socket.error, OSError):
        return False

def find_next_available_ports(adw_id: str, max_attempts: int = 15) -> Tuple[int, int]:
    """Find available ports starting from deterministic assignment.

    Args:
        adw_id: The ADW ID
        max_attempts: Maximum number of attempts (default 15)

    Returns:
        Tuple of (backend_port, frontend_port)

    Raises:
        RuntimeError: If no available ports found
    """
    base_backend, base_frontend = get_ports_for_adw(adw_id)
    base_index = base_backend - 9100

    for offset in range(max_attempts):
        index = (base_index + offset) % 15
        backend_port = 9100 + index
        frontend_port = 9200 + index

        if is_port_available(backend_port) and is_port_available(frontend_port):
            return backend_port, frontend_port

    raise RuntimeError("No available ports in the allocated range")
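
# Usage sketch (hypothetical adw_id): allocate ports for an isolated instance,
# walking forward through the 15-slot range if the deterministic pair is busy,
# then record them for the worktree's servers.
#
#     backend_port, frontend_port = find_next_available_ports("abc12345")
#     setup_worktree_environment(worktree_path, backend_port, frontend_port, logger)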