Initial commit

Author: Zhongwei Li
Date: 2025-11-30 09:05:52 +08:00
Commit: db12a906d2
62 changed files with 27669 additions and 0 deletions

hooks/utils/bmad/bmad_generator.py (executable file, 1,494 lines; diff suppressed because it is too large)

@@ -0,0 +1,501 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "python-dotenv",
# ]
# ///
"""
BMAD Document Validator Utility
Validates BMAD documents match required structure and completeness.
Commands:
brief <file_path> Validate product brief
prd <file_path> Validate PRD
architecture <file_path> Validate architecture
epic <file_path> Validate epic
all <bmad_dir> Validate all documents in backlog
Examples:
uv run bmad_validator.py prd bmad-backlog/prd/prd.md
uv run bmad_validator.py all bmad-backlog/
"""
import json
import sys
import re
from pathlib import Path
from typing import Dict
def validate_brief(file_path: str) -> Dict:
"""
Validate Product Brief has all required sections.
Args:
file_path: Path to product-brief.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_sections = [
"Executive Summary",
"Problem Statement",
"Proposed Solution",
"Target Users",
"Goals & Success Metrics",
"MVP Scope",
"Post-MVP Vision",
"Technical Considerations",
"Constraints & Assumptions",
"Risks & Open Questions",
"Next Steps"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": []
}
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for header
if not re.search(r'#\s+Product Brief:', content):
results["errors"].append("Missing main header: # Product Brief: {Name}")
# Check for version info
if "**Version:**" not in content:
results["warnings"].append("Missing version field")
if "**Date:**" not in content:
results["warnings"].append("Missing date field")
return results
def validate_prd(file_path: str) -> Dict:
"""
Validate PRD has all required sections.
Args:
file_path: Path to prd.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_sections = [
"Executive Summary",
"Product Overview",
"Success Metrics",
"Feature Requirements",
"User Stories",
"Technical Requirements",
"Data Requirements",
"AI/ML Requirements",
"Design Requirements",
"Go-to-Market Strategy",
"Risks & Mitigation",
"Open Questions",
"Appendix"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": []
}
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for header
if not re.search(r'#\s+Product Requirements Document', content):
results["errors"].append("Missing main header")
# Check for metadata
if "**Document Version:**" not in content:
results["warnings"].append("Missing document version")
if "**Last Updated:**" not in content:
results["warnings"].append("Missing last updated date")
# Check for user stories format
if "User Stories" in content:
# Should have "As a" pattern
if "As a" not in content:
results["warnings"].append("User stories missing 'As a... I want... so that' format")
# Check for acceptance criteria
if "Feature Requirements" in content or "User Stories" in content:
if "Acceptance Criteria:" not in content and "- [ ]" not in content:
results["warnings"].append("Missing acceptance criteria checkboxes")
return results
def validate_architecture(file_path: str) -> Dict:
"""
Validate Architecture document completeness.
Args:
file_path: Path to architecture.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_sections = [
"System Overview",
"Architecture Principles",
"High-Level Architecture",
"Component Details",
"Data Architecture",
"Infrastructure",
"Security Architecture",
"Deployment Strategy",
"Monitoring & Observability",
"Appendix"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": []
}
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for code examples
if "```sql" not in content and "```python" not in content and "```typescript" not in content:
results["warnings"].append("Missing code examples (SQL, Python, or TypeScript)")
# Check for cost estimates
if "Cost" not in content:
results["warnings"].append("Missing cost estimates")
# Check for technology decisions
if "Technology Decisions" not in content:
results["warnings"].append("Missing technology decisions table")
return results
def validate_epic(file_path: str) -> Dict:
"""
Validate Epic file structure.
Args:
file_path: Path to EPIC-*.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_fields = [
"**Epic Owner:**",
"**Priority:**",
"**Status:**",
"**Estimated Effort:**"
]
required_sections = [
"Epic Description",
"Business Value",
"Success Criteria",
"User Stories",
"Dependencies",
"Definition of Done"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": [],
"missing_fields": []
}
# Check for required fields
for field in required_fields:
if field not in content:
results["valid"] = False
results["missing_fields"].append(field)
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for story format
story_matches = re.findall(r'### STORY-(\d+)-(\d+):', content)
if not story_matches:
results["errors"].append("No stories found (expecting STORY-XXX-YY format)")
# Check stories have acceptance criteria
if story_matches:
has_criteria = "Acceptance Criteria:" in content or "**Acceptance Criteria:**" in content
if not has_criteria:
results["warnings"].append("Stories missing acceptance criteria")
# Check for "As a... I want... so that" format
has_user_story_format = "As a" in content and "I want" in content and "so that" in content
if not has_user_story_format:
results["warnings"].append("Stories missing user story format (As a... I want... so that...)")
return results
def validate_all(bmad_dir: str) -> Dict:
"""
Validate all documents in BMAD backlog.
Args:
bmad_dir: Path to bmad-backlog directory
Returns:
Combined validation results
"""
bmad_path = Path(bmad_dir)
results = {
"brief": None,
"prd": None,
"architecture": None,
"epics": [],
"overall_valid": True
}
# Validate brief (optional)
brief_path = bmad_path / "product-brief.md"
if brief_path.exists():
results["brief"] = validate_brief(str(brief_path))
if not results["brief"]["valid"]:
results["overall_valid"] = False
# Validate PRD (required)
prd_path = bmad_path / "prd" / "prd.md"
if prd_path.exists():
results["prd"] = validate_prd(str(prd_path))
if not results["prd"]["valid"]:
results["overall_valid"] = False
else:
results["overall_valid"] = False
results["prd"] = {"valid": False, "errors": ["PRD not found"]}
# Validate architecture (required)
arch_path = bmad_path / "architecture" / "architecture.md"
if arch_path.exists():
results["architecture"] = validate_architecture(str(arch_path))
if not results["architecture"]["valid"]:
results["overall_valid"] = False
else:
results["overall_valid"] = False
results["architecture"] = {"valid": False, "errors": ["Architecture not found"]}
# Validate epics (required)
epics_dir = bmad_path / "epics"
if epics_dir.exists():
epic_files = sorted(epics_dir.glob("EPIC-*.md"))
for epic_file in epic_files:
epic_result = validate_epic(str(epic_file))
epic_result["file"] = epic_file.name
results["epics"].append(epic_result)
if not epic_result["valid"]:
results["overall_valid"] = False
else:
results["overall_valid"] = False
return results
def print_validation_results(results: Dict, document_type: str):
"""Print validation results in readable format."""
print(f"\n{'='*60}")
print(f"Validation Results: {document_type}")
print(f"{'='*60}\n")
if results["valid"]:
print("✅ VALID - All required sections present")
else:
print("❌ INVALID - Missing required content")
if results.get("missing_sections"):
print("\n❌ Missing Required Sections:")
for section in results["missing_sections"]:
print(f" - {section}")
if results.get("missing_fields"):
print("\n❌ Missing Required Fields:")
for field in results["missing_fields"]:
print(f" - {field}")
if results.get("errors"):
print("\n❌ Errors:")
for error in results["errors"]:
print(f" - {error}")
if results.get("warnings"):
print("\n⚠️ Warnings:")
for warning in results["warnings"]:
print(f" - {warning}")
print()
def main():
"""CLI interface for validation."""
if len(sys.argv) < 3:
print("Usage: bmad_validator.py <command> <file_path>", file=sys.stderr)
print("\nCommands:", file=sys.stderr)
print(" brief <file_path>", file=sys.stderr)
print(" prd <file_path>", file=sys.stderr)
print(" architecture <file_path>", file=sys.stderr)
print(" epic <file_path>", file=sys.stderr)
print(" all <bmad_dir>", file=sys.stderr)
sys.exit(1)
command = sys.argv[1]
path = sys.argv[2]
try:
if command == "brief":
results = validate_brief(path)
print_validation_results(results, "Product Brief")
sys.exit(0 if results["valid"] else 1)
elif command == "prd":
results = validate_prd(path)
print_validation_results(results, "PRD")
sys.exit(0 if results["valid"] else 1)
elif command == "architecture":
results = validate_architecture(path)
print_validation_results(results, "Architecture")
sys.exit(0 if results["valid"] else 1)
elif command == "epic":
results = validate_epic(path)
print_validation_results(results, f"Epic ({Path(path).name})")
sys.exit(0 if results["valid"] else 1)
elif command == "all":
results = validate_all(path)
print(f"\n{'='*60}")
print(f"Complete Backlog Validation: {path}")
print(f"{'='*60}\n")
if results["overall_valid"]:
print("✅ ALL DOCUMENTS VALID\n")
else:
print("❌ VALIDATION FAILED\n")
# Print individual results
if results["brief"]:
print("Product Brief:", "✅ Valid" if results["brief"]["valid"] else "❌ Invalid")
else:
print("Product Brief: (not found - optional)")
if results["prd"]:
print("PRD:", "✅ Valid" if results["prd"]["valid"] else "❌ Invalid")
else:
print("PRD: ❌ Not found (required)")
if results["architecture"]:
print("Architecture:", "✅ Valid" if results["architecture"]["valid"] else "❌ Invalid")
else:
print("Architecture: ❌ Not found (required)")
print(f"Epics: {len(results['epics'])} found")
for epic in results["epics"]:
status = "" if epic["valid"] else ""
print(f" {status} {epic['file']}")
print(f"\n{'='*60}\n")
# Print details if invalid
if not results["overall_valid"]:
if results["prd"] and not results["prd"]["valid"]:
print_validation_results(results["prd"], "PRD")
if results["architecture"] and not results["architecture"]["valid"]:
print_validation_results(results["architecture"], "Architecture")
for epic in results["epics"]:
if not epic["valid"]:
print_validation_results(epic, f"Epic {epic['file']}")
sys.exit(0 if results["overall_valid"] else 1)
else:
print(f"Error: Unknown command: {command}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error: {e!s}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
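For reviewers skimming this diff: a minimal sketch of how the validators above compose programmatically, assuming the file is importable as `bmad_validator` (name taken from its own usage strings) and that a `bmad-backlog/` directory exists.

```python
# Hedged usage sketch; module name and paths assumed as noted above.
from bmad_validator import validate_all, print_validation_results

results = validate_all("bmad-backlog/")
print("overall:", "valid" if results["overall_valid"] else "invalid")

# Drill into failing epics, mirroring what the CLI's "all" command prints.
for epic in results["epics"]:
    if not epic["valid"]:
        print_validation_results(epic, f"Epic {epic['file']}")
```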


@@ -0,0 +1,682 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "python-dotenv",
# ]
# ///
"""
BMAD Research Prompt Generator
Generates research prompts and findings templates for technical decisions.
No GPT-4 calls - just template generation (Cost: $0).
Commands:
prompt <topic> <project_path> [prd_path] Generate research prompt
template <topic> <project_path> Generate findings template
Examples:
uv run research_generator.py prompt "data vendors" "$(pwd)" "bmad-backlog/prd/prd.md"
uv run research_generator.py template "data vendors" "$(pwd)"
"""
import sys
import re
from pathlib import Path
from datetime import datetime
def generate_research_prompt(topic: str, project_path: str, prd_path: str | None = None) -> str:
"""
Generate research prompt for web AI (ChatGPT/Claude).
Args:
topic: Research topic
project_path: Project directory
prd_path: Optional path to PRD for context
Returns:
Research prompt content
"""
current_date = datetime.now().strftime("%B %d, %Y")
topic_slug = topic.lower().replace(' ', '-').replace('/', '-')
# Read PRD for context if provided
project_context = ""
project_name = "New Project"
requirements_context = ""
if prd_path and Path(prd_path).exists():
try:
with open(prd_path, 'r', encoding='utf-8') as f:
prd_content = f.read()
# Extract project name
match = re.search(r'##\s+(.+?)(?:\s+-|$)', prd_content, re.MULTILINE)
if match:
project_name = match.group(1).strip()
# Extract relevant requirements
if "data" in topic.lower() or "api" in topic.lower():
data_section = extract_section(prd_content, "Data Requirements")
if data_section:
requirements_context = f"\n**Project Requirements**:\n{data_section[:500]}"
if "auth" in topic.lower():
security_section = extract_section(prd_content, "Security")
if security_section:
requirements_context = f"\n**Security Requirements**:\n{security_section[:500]}"
project_context = f"\n**Project**: {project_name}\n"
except Exception:
pass
prompt_content = f"""# Research Prompt: {topic}
**Date**: {current_date}
**For**: {project_name}
---
## Instructions
**COPY THIS ENTIRE PROMPT** and paste into:
- ChatGPT (https://chat.openai.com) with GPT-4
- Claude (https://claude.ai) web version
They have web search capabilities for current, accurate information.
---
## Research Request
{project_context}
**Research Topic**: {topic}
{requirements_context}
Please research and provide comprehensive analysis:
---
### 1. Overview
- What options exist for {topic}?
- What are the top 5-7 solutions/vendors/APIs?
- Current market leaders?
- Recent changes in this space? (2024-2025)
---
### 2. Detailed Comparison Table
Create a comprehensive comparison:
| Option | Pricing | Key Features | Pros | Cons | Best For |
|--------|---------|--------------|------|------|----------|
| Option 1: [Name] | [Tiers] | [Top 3-5 features] | [2-3 pros] | [2-3 cons] | [Use case] |
| Option 2: [Name] | | | | | |
| Option 3: [Name] | | | | | |
| Option 4: [Name] | | | | | |
| Option 5: [Name] | | | | | |
---
### 3. Technical Details
For EACH option, provide:
#### [Option Name]
**API Documentation**: [Link to official docs]
**Authentication**:
- Method: API Key | OAuth | JWT | Other
- Security: HTTPS required? Token rotation?
**Rate Limits**:
- Free tier: X requests per minute/hour/day
- Paid tiers: Rate limit increases
**Data Format**:
- Response format: JSON | XML | GraphQL | CSV
- Webhook support: Yes/No
- Streaming: Yes/No
**SDK Availability**:
- Python: [pip package name] - [GitHub link]
- Node.js: [npm package name] - [GitHub link]
- Other languages: [List]
**Code Example**:
```python
# Basic usage example (if available from docs)
```
**Community**:
- GitHub stars: X
- Last updated: Date
- Issues: Open/closed ratio
- Stack Overflow: Questions count
---
### 4. Integration Complexity
For each option, estimate:
**Setup Time**:
- Account creation: X minutes
- API key generation: X minutes
- SDK integration: X hours
- Testing: X hours
**Total**: X hours/days
**Dependencies**:
- Libraries required
- Platform requirements
- Other services needed
**Learning Curve**:
- Documentation quality: Excellent | Good | Fair | Poor
- Tutorials available: Yes/No
- Community support: Active | Moderate | Limited
---
### 5. Recommendations
Based on the project requirements, provide specific recommendations:
**For MVP** (budget-conscious, speed):
- **Recommended**: [Option]
- **Why**: [Rationale]
- **Tradeoffs**: [What you give up]
**For Production** (quality-focused, scalable):
- **Recommended**: [Option]
- **Why**: [Rationale]
- **Cost**: $X/month at scale
**For Enterprise** (feature-complete):
- **Recommended**: [Option]
- **Why**: [Rationale]
- **Cost**: $Y/month
---
### 6. Detailed Cost Analysis
For each option:
#### [Option Name]
**Free Tier**:
- What's included: [Limits]
- Restrictions: [What's missing]
- Good for MVP? Yes/No - [Why]
**Starter/Basic Tier**:
- Price: $X/month
- Includes: [Features and limits]
- Rate limits: X requests/min
**Professional Tier**:
- Price: $Y/month
- Includes: [Features and limits]
- Rate limits: Y requests/min
**Enterprise Tier**:
- Price: $Z/month or Custom
- Includes: [Features]
- SLA: X% uptime
**Estimated Monthly Cost**:
- MVP (low volume): $X-Y
- Production (medium volume): $X-Y
- Scale (high volume): $X-Y
**Hidden Costs**:
- [Overage charges, add-ons, etc.]
---
### 7. Risks & Considerations
For each option, analyze:
**Vendor Lock-in**:
- How easy to migrate away? (Easy/Medium/Hard)
- Data export capabilities
- API compatibility with alternatives
**Data Quality/Reliability**:
- Uptime history (if available)
- Published SLAs
- Known outages or issues
- Data accuracy/freshness
**Compliance & Security**:
- Data residency (US/EU/Global)
- Compliance certifications (SOC 2, GDPR, etc.)
- Security features (encryption, access controls)
- Privacy policy concerns
**Support & Maintenance**:
- Support channels (email, chat, phone)
- Response time SLAs
- Documentation updates
- Release cadence
- Deprecation policy
**Scalability**:
- Auto-scaling capabilities
- Performance at high volume
- Regional availability
- CDN/edge locations
---
### 8. Source Links
Provide current, working links to:
**Official Resources**:
- Homepage: [URL]
- Pricing page: [URL]
- API documentation: [URL]
- Getting started guide: [URL]
- Status page: [URL]
**Developer Resources**:
- GitHub repository: [URL]
- SDK documentation: [URL]
- API reference: [URL]
- Code examples: [URL]
**Community**:
- Community forum: [URL]
- Discord/Slack: [URL]
- Stack Overflow tag: [URL]
- Twitter/X: [Handle]
**Reviews & Comparisons**:
- G2/Capterra reviews: [URL]
- Comparison articles: [URL]
- User testimonials: [URL]
- Case studies: [URL]
---
## Deliverable
Please structure your response with clear sections matching the template above.
This research will inform our architecture decisions and be documented for future reference.
Thank you!
---
**After completing research**:
1. Copy findings into template: bmad-backlog/research/RESEARCH-{topic_slug}-findings.md
2. Return to Claude Code
3. Continue with /bmad:architecture (will use your research)
"""
# Save prompt
prompt_path = Path(project_path) / "bmad-backlog" / "research" / f"RESEARCH-{topic_slug}-prompt.md"
prompt_path.parent.mkdir(parents=True, exist_ok=True)
with open(prompt_path, 'w', encoding='utf-8') as f:
f.write(prompt_content)
return prompt_content
def generate_findings_template(topic: str, project_path: str) -> str:
"""
Generate findings template for documenting research.
Args:
topic: Research topic
project_path: Project directory
Returns:
Template content
"""
current_date = datetime.now().strftime("%B %d, %Y")
topic_slug = topic.lower().replace(' ', '-').replace('/', '-')
template_content = f"""# Research Findings: {topic}
**Date**: {current_date}
**Researcher**: [Your Name]
**Status**: Draft
---
## Research Summary
**Question**: What {topic} should we use?
**Recommendation**: [Chosen option and brief rationale]
**Confidence**: High | Medium | Low
**Decision Date**: [When decision was made]
---
## Options Evaluated
### Option 1: [Name]
**Overview**:
[1-2 sentence description of what this is]
**Pricing**:
- Free tier: [Details or N/A]
- Starter tier: $X/month - [What's included]
- Pro tier: $Y/month - [What's included]
- Enterprise: $Z/month or Custom
- **Estimated cost for our MVP**: $X/month
**Key Features**:
- [Feature 1]
- [Feature 2]
- [Feature 3]
- [Feature 4]
**Pros**:
- [Pro 1]
- [Pro 2]
- [Pro 3]
**Cons**:
- [Con 1]
- [Con 2]
- [Con 3]
**Technical Details**:
- API Type: REST | GraphQL | WebSocket | Other
- Authentication: API Key | OAuth | JWT | Other
- Rate Limits: X requests per minute/hour
- Data Format: JSON | XML | CSV | Other
- SDKs: Python ([package]), Node.js ([package]), Other
- Latency: Typical response time
- Uptime SLA: X%
**Documentation**: [Link]
**Community**:
- GitHub Stars: X
- Last Update: [Date]
- Active Development: Yes/No
---
### Option 2: [Name]
[Same structure as Option 1]
---
### Option 3: [Name]
[Same structure as Option 1]
---
### Option 4: [Name]
[Same structure as Option 1 - if evaluated]
---
## Comparison Matrix
| Criteria | Option 1 | Option 2 | Option 3 | Winner |
|----------|----------|----------|----------|--------|
| **Cost (MVP)** | $X/mo | $Y/mo | $Z/mo | [Option] |
| **Cost (Production)** | $X/mo | $Y/mo | $Z/mo | [Option] |
| **Features** | X/10 | Y/10 | Z/10 | [Option] |
| **API Quality** | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐ | ⭐⭐⭐ | [Option] |
| **Documentation** | Excellent | Good | Fair | [Option] |
| **Community** | Large | Medium | Small | [Option] |
| **Ease of Use** | Easy | Medium | Complex | [Option] |
| **Scalability** | High | Medium | High | [Option] |
| **Vendor Lock-in Risk** | Low | Medium | High | [Option] |
| **Overall Score** | X/10 | Y/10 | Z/10 | **[Winner]** |
---
## Final Recommendation
**Chosen**: [Option X]
**Rationale**:
1. [Primary reason - e.g., best balance of cost and features]
2. [Secondary reason - e.g., excellent documentation]
3. [Tertiary reason - e.g., active community]
**For MVP**:
- [Why this works for MVP]
- Cost: $X/month
- Timeline: [Can start immediately / Need 1 week setup]
**For Production**:
- [Scalability considerations]
- Cost at scale: $Y/month
- Migration path: [If we outgrow this]
**Implementation Priority**: MVP | Phase 2 | Future
---
## Implementation Plan
### Setup Steps
1. [Step 1 - e.g., Create account at vendor.com]
2. [Step 2 - e.g., Generate API key]
3. [Step 3 - e.g., Install SDK: pip install package]
4. [Step 4 - e.g., Test connection]
5. [Step 5 - e.g., Implement in production code]
**Estimated Setup Time**: X hours
### Configuration Required
**Environment Variables**:
```bash
# Add to .env.example
{{VENDOR}}_API_KEY=your_key_here
{{VENDOR}}_BASE_URL=https://api.vendor.com
```
**Code Configuration**:
```python
# Example configuration
from {{package}} import Client
client = Client(api_key=os.getenv('{{VENDOR}}_API_KEY'))
```
### Basic Usage Example
```python
# Example usage from documentation
{{code example if available}}
```
---
## Cost Projection
**Monthly Cost Breakdown**:
**MVP** (estimated volume):
- Base fee: $X
- Usage costs: $Y
- **Total**: $Z/month
**Production** (estimated volume):
- Base fee: $X
- Usage costs: $Y
- **Total**: $Z/month
**At Scale** (estimated volume):
- Base fee: $X
- Usage costs: $Y
- **Total**: $Z/month
**Cost Optimization**:
- [Strategy 1 to reduce costs]
- [Strategy 2]
---
## Risks & Mitigations
| Risk | Impact | Likelihood | Mitigation |
|------|--------|-----------|------------|
| Vendor increases pricing | Medium | Medium | [Monitor pricing, have backup option] |
| Service downtime | High | Low | [Implement fallback, cache data] |
| Rate limit hit | Medium | Medium | [Implement rate limiting, queue requests] |
| Data quality issues | High | Low | [Validation layer, monitoring] |
| Vendor shutdown | High | Low | [Data export plan, alternative ready] |
---
## Testing Checklist
- [ ] Create account and obtain credentials
- [ ] Test API in development
- [ ] Verify rate limits and error handling
- [ ] Test with production-like volume
- [ ] Set up monitoring and alerts
- [ ] Document API integration in code
- [ ] Add to .env.example
- [ ] Create fallback/error handling
- [ ] Test cost with real usage
- [ ] Review security and compliance
---
## References
**Official Documentation**:
- Website: [URL]
- Pricing: [URL]
- API Docs: [URL]
- Getting Started: [URL]
- Status Page: [URL]
**Community Resources**:
- GitHub: [URL]
- Discord/Slack: [URL]
- Stack Overflow: [URL with tag]
**Comparison Articles**:
- [Article 1 title]: [URL]
- [Article 2 title]: [URL]
**User Reviews**:
- G2: [URL]
- Reddit discussions: [URLs]
---
## Next Steps
1. ✅ Research complete
2. Review findings with team (if applicable)
3. Make final decision on [chosen option]
4. Update bmad-backlog/prd/prd.md Technical Assumptions
5. Reference in bmad-backlog/architecture/architecture.md
6. Add to implementation backlog
---
**Status**: ✅ Research Complete | ⏳ Awaiting Decision | ❌ Needs More Research
**Recommendation**: [Final recommendation]
---
*This document was generated from research conducted using web-based AI.*
*Fill in all sections with findings from your research.*
*Save this file when complete - it will be referenced during architecture generation.*
"""
# Save template
template_path = Path(project_path) / "bmad-backlog" / "research" / f"RESEARCH-{topic_slug}-findings.md"
template_path.parent.mkdir(parents=True, exist_ok=True)
with open(template_path, 'w', encoding='utf-8') as f:
f.write(template_content)
return template_content
def extract_section(content: str, section_header: str) -> str:
"""Extract section from markdown document."""
lines = content.split('\n')
section_lines = []
in_section = False
for line in lines:
if section_header.lower() in line.lower() and line.startswith('#'):
in_section = True
continue
elif in_section and line.startswith('#') and len(line.split()) > 1:
# New section started
break
elif in_section:
section_lines.append(line)
return '\n'.join(section_lines).strip()
def main():
"""CLI interface for research prompt generation."""
if len(sys.argv) < 4:
print("Usage: research_generator.py <command> <topic> <project_path> [prd_path]", file=sys.stderr)
print("\nCommands:", file=sys.stderr)
print(" prompt <topic> <project_path> [prd_path] Generate research prompt", file=sys.stderr)
print(" template <topic> <project_path> Generate findings template", file=sys.stderr)
print("\nExamples:", file=sys.stderr)
print(' uv run research_generator.py prompt "data vendors" "$(pwd)" "bmad-backlog/prd/prd.md"', file=sys.stderr)
print(' uv run research_generator.py template "hosting platforms" "$(pwd)"', file=sys.stderr)
sys.exit(1)
command = sys.argv[1]
topic = sys.argv[2]
project_path = sys.argv[3]
prd_path = sys.argv[4] if len(sys.argv) > 4 else None
topic_slug = topic.lower().replace(' ', '-').replace('/', '-')
try:
if command == "prompt":
content = generate_research_prompt(topic, project_path, prd_path)
print(f"✅ Research prompt generated: bmad-backlog/research/RESEARCH-{topic_slug}-prompt.md")
elif command == "template":
content = generate_findings_template(topic, project_path)
print(f"✅ Findings template generated: bmad-backlog/research/RESEARCH-{topic_slug}-findings.md")
else:
print(f"Error: Unknown command: {command}", file=sys.stderr)
print("Valid commands: prompt, template", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error: {str(e)}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
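The `extract_section` helper above drives the PRD-context injection; its contract is easiest to see on a toy document. A minimal sketch, assuming the file is importable as `research_generator`:

```python
from research_generator import extract_section

doc = """# PRD
## Data Requirements
- Daily OHLCV bars
- 5 years of history
## Security
- API keys in env vars
"""

# Returns the body between the matched '#' header and the next header.
print(extract_section(doc, "Data Requirements"))
# -> "- Daily OHLCV bars\n- 5 years of history"
```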

hooks/utils/llm/anth.py (executable file)

@@ -0,0 +1,114 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "anthropic",
# "python-dotenv",
# ]
# ///
import os
import sys
from dotenv import load_dotenv
def prompt_llm(prompt_text):
"""
Base Anthropic LLM prompting method using fastest model.
Args:
prompt_text (str): The prompt to send to the model
Returns:
str: The model's response text, or None if error
"""
load_dotenv()
api_key = os.getenv("ANTHROPIC_API_KEY")
if not api_key:
return None
try:
import anthropic
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-3-5-haiku-20241022", # Fastest Anthropic model
max_tokens=100,
temperature=0.7,
messages=[{"role": "user", "content": prompt_text}],
)
return message.content[0].text.strip()
except Exception:
return None
def generate_completion_message():
"""
Generate a completion message using Anthropic LLM.
Returns:
str: A natural language completion message, or None if error
"""
engineer_name = os.getenv("ENGINEER_NAME", "").strip()
if engineer_name:
name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
else:
name_instruction = ""
examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """
prompt = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.
Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}
{examples}
Generate ONE completion message:"""
response = prompt_llm(prompt)
# Clean up response - remove quotes and extra formatting
if response:
response = response.strip().strip('"').strip("'").strip()
# Take first line if multiple lines
response = response.split("\n")[0].strip()
return response
def main():
"""Command line interface for testing."""
if len(sys.argv) > 1:
if sys.argv[1] == "--completion":
message = generate_completion_message()
if message:
print(message)
else:
print("Error generating completion message", file=sys.stderr)
sys.exit(1)
else:
prompt_text = " ".join(sys.argv[1:])
response = prompt_llm(prompt_text)
if response:
print(response)
else:
print("Error calling Anthropic API", file=sys.stderr)
sys.exit(1)
else:
print("Usage: ./anth.py 'your prompt here' or ./anth.py --completion", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

hooks/utils/llm/oai.py (executable file)

@@ -0,0 +1,117 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "openai",
# "python-dotenv",
# ]
# ///
import os
import sys
from dotenv import load_dotenv
def prompt_llm(prompt_text):
"""
Base OpenAI LLM prompting method using fastest model.
Args:
prompt_text (str): The prompt to send to the model
Returns:
str: The model's response text, or None if error
"""
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
return None
try:
from openai import OpenAI
client = OpenAI(api_key=api_key)
response = client.chat.completions.create(
model="gpt-4o-mini", # Fast OpenAI model
messages=[{"role": "user", "content": prompt_text}],
max_tokens=100,
temperature=0.7,
)
return response.choices[0].message.content.strip()
except Exception:
return None
def generate_completion_message():
"""
Generate a completion message using OpenAI LLM.
Returns:
str: A natural language completion message, or None if error
"""
engineer_name = os.getenv("ENGINEER_NAME", "").strip()
if engineer_name:
name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
else:
name_instruction = ""
examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """
prompt = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.
Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}
{examples}
Generate ONE completion message:"""
response = prompt_llm(prompt)
# Clean up response - remove quotes and extra formatting
if response:
response = response.strip().strip('"').strip("'").strip()
# Take first line if multiple lines
response = response.split("\n")[0].strip()
return response
def main():
"""Command line interface for testing."""
if len(sys.argv) > 1:
if sys.argv[1] == "--completion":
message = generate_completion_message()
if message:
print(message)
else:
print("Error generating completion message", file=sys.stderr)
sys.exit(1)
else:
prompt_text = " ".join(sys.argv[1:])
response = prompt_llm(prompt_text)
if response:
print(response)
else:
print("Error calling OpenAI API", file=sys.stderr)
sys.exit(1)
else:
print("Usage: ./oai.py 'your prompt here' or ./oai.py --completion", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
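anth.py and oai.py deliberately share one contract: return the response text on success and `None` on any failure, which makes provider fallback trivial in a hook. A hedged sketch of that chain, assuming both files are importable as modules:

```python
# Hypothetical fallback chain across the two helpers above.
import anth
import oai

def completion_message() -> str:
    return (
        anth.generate_completion_message()
        or oai.generate_completion_message()
        or "All done!"  # static default if both providers fail
    )

print(completion_message())
```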

hooks/utils/tts/elevenlabs_mcp.py (executable file)

@@ -0,0 +1,114 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "python-dotenv",
# ]
# ///
import os
import sys
import json
import subprocess
from pathlib import Path
from dotenv import load_dotenv
def main():
"""
ElevenLabs MCP TTS Script
Uses ElevenLabs MCP server for high-quality text-to-speech via Claude Code.
Accepts optional text prompt as command-line argument.
Usage:
- ./elevenlabs_mcp.py # Uses default text
- ./elevenlabs_mcp.py "Your custom text" # Uses provided text
Features:
- Integration with Claude Code MCP
- Automatic voice selection
- High-quality voice synthesis via ElevenLabs API
- Optimized for hook usage (quick, reliable)
"""
# Load environment variables
load_dotenv()
try:
print("🎙️ ElevenLabs MCP TTS")
print("=" * 25)
# Get text from command line argument or use default
if len(sys.argv) > 1:
text = " ".join(sys.argv[1:]) # Join all arguments as text
else:
text = "Task completed successfully!"
print(f"🎯 Text: {text}")
print("🔊 Generating and playing via MCP...")
try:
# Use Claude Code CLI to invoke ElevenLabs MCP
# This assumes the ElevenLabs MCP server is configured in Claude Code
claude_cmd = [
"claude", "mcp", "call", "ElevenLabs", "text_to_speech",
"--text", text,
"--voice_name", "Adam", # Default voice
"--model_id", "eleven_turbo_v2_5", # Fast model
"--output_directory", str(Path.home() / "Desktop"),
"--speed", "1.0",
"--stability", "0.5",
"--similarity_boost", "0.75"
]
# Try to run the Claude MCP command
result = subprocess.run(
claude_cmd,
capture_output=True,
text=True,
timeout=15 # 15-second timeout for TTS generation
)
if result.returncode == 0:
print("✅ TTS generated and played via MCP!")
# Try to play the generated audio file
# Look for recently created audio files on Desktop
desktop = Path.home() / "Desktop"
audio_files = list(desktop.glob("*.mp3"))
if audio_files:
# Find the most recent audio file
latest_audio = max(audio_files, key=lambda f: f.stat().st_mtime)
# Try to play with system default audio player
if sys.platform == "darwin": # macOS
subprocess.run(["afplay", str(latest_audio)], capture_output=True)
elif sys.platform == "linux": # Linux
subprocess.run(["aplay", str(latest_audio)], capture_output=True)
elif sys.platform == "win32": # Windows
subprocess.run(["start", str(latest_audio)], shell=True, capture_output=True)
print("🎵 Audio playback attempted")
else:
print("⚠️ Audio file not found on Desktop")
else:
print(f"❌ MCP Error: {result.stderr}")
# Fall back to simple notification
print("🔔 TTS via MCP failed - task completion noted")
except subprocess.TimeoutExpired:
print("⏰ MCP TTS timed out - continuing...")
except FileNotFoundError:
print("❌ Claude CLI not found - MCP TTS unavailable")
except Exception as e:
print(f"❌ MCP Error: {e}")
except Exception as e:
print(f"❌ Unexpected error: {e}")
sys.exit(1)
if __name__ == "__main__":
main()
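The platform playback dispatch above is duplicated in openai_tts.py below; if it grows, a shared helper keeps the quirks in one place. A sketch under stated assumptions (`play_file` is a hypothetical helper, not part of this commit, and `mpg123` must be installed for mp3 playback on Linux):

```python
import os
import subprocess
import sys
from pathlib import Path

def play_file(path: Path) -> None:
    """Best-effort playback with each platform's stock tooling."""
    if sys.platform == "darwin":
        subprocess.run(["afplay", str(path)], capture_output=True)
    elif sys.platform.startswith("linux"):
        # aplay only decodes WAV; mp3 playback needs mpg123 (or ffplay).
        subprocess.run(["mpg123", "-q", str(path)], capture_output=True)
    elif sys.platform == "win32":
        os.startfile(str(path))  # opens with the default audio handler
```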


@@ -0,0 +1,83 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "elevenlabs",
# "python-dotenv",
# ]
# ///
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
def main():
"""
ElevenLabs Turbo v2.5 TTS Script
Uses ElevenLabs' Turbo v2.5 model for fast, high-quality text-to-speech.
Accepts optional text prompt as command-line argument.
Usage:
- ./elevenlabs_tts.py # Uses default text
- ./elevenlabs_tts.py "Your custom text" # Uses provided text
Features:
- Fast generation (optimized for real-time use)
- High-quality voice synthesis
- Stable production model
- Cost-effective for high-volume usage
"""
# Load environment variables
load_dotenv()
# Get API key from environment
api_key = os.getenv('ELEVENLABS_API_KEY')
if not api_key:
print("❌ Error: ELEVENLABS_API_KEY not found in environment variables", file=sys.stderr)
print("Please add your ElevenLabs API key to .env file:", file=sys.stderr)
print("ELEVENLABS_API_KEY=your_api_key_here", file=sys.stderr)
sys.exit(1)
try:
from elevenlabs.client import ElevenLabs
from elevenlabs.play import play
# Initialize client
elevenlabs = ElevenLabs(api_key=api_key)
# Get text from command line argument or use default
if len(sys.argv) > 1:
text = " ".join(sys.argv[1:]) # Join all arguments as text
else:
text = "Task completed successfully."
try:
# Generate and play audio directly
audio = elevenlabs.text_to_speech.convert(
text=text,
voice_id="EXAVITQu4vr4xnSDxMaL", # Sarah voice
model_id="eleven_turbo_v2_5",
output_format="mp3_44100_128",
)
play(audio)
except Exception as e:
print(f"❌ Error: {e}", file=sys.stderr)
sys.exit(1)
except ImportError:
print("❌ Error: elevenlabs package not installed", file=sys.stderr)
print("This script uses UV to auto-install dependencies.", file=sys.stderr)
print("Make sure UV is installed: https://docs.astral.sh/uv/", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"❌ Unexpected error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
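`play()` shells out to a local audio utility, which headless hook environments may lack; writing the bytes to disk is the usual alternative. A hedged sketch using the same SDK surface as the script above (in the v1 SDK, `convert()` yields mp3 byte chunks):

```python
import os
from dotenv import load_dotenv
from elevenlabs.client import ElevenLabs

load_dotenv()
client = ElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))

audio = client.text_to_speech.convert(
    text="Task completed successfully.",
    voice_id="EXAVITQu4vr4xnSDxMaL",  # Sarah, as above
    model_id="eleven_turbo_v2_5",
    output_format="mp3_44100_128",
)
with open("completion.mp3", "wb") as f:
    for chunk in audio:  # iterate the returned byte chunks
        f.write(chunk)
```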

hooks/utils/tts/local_tts.py (executable file)

@@ -0,0 +1,93 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "pyttsx3",
# ]
# ///
import sys
import random
import os
def main():
"""
Local TTS Script (pyttsx3)
Uses pyttsx3 for offline text-to-speech synthesis.
Accepts optional text prompt as command-line argument.
Usage:
- ./local_tts.py # Uses default text
- ./local_tts.py "Your custom text" # Uses provided text
Features:
- Offline TTS (no API key required)
- Cross-platform compatibility
- Configurable voice settings
- Immediate audio playback
- Engineer name personalization support
"""
try:
import pyttsx3
# Initialize TTS engine
engine = pyttsx3.init()
# Configure engine settings
engine.setProperty('rate', 180) # Speech rate (words per minute)
engine.setProperty('volume', 0.9) # Volume (0.0 to 1.0)
print("🎙️ Local TTS")
print("=" * 12)
# Get text from command line argument or use default
if len(sys.argv) > 1:
text = " ".join(sys.argv[1:]) # Join all arguments as text
else:
# Default completion messages with engineer name support
engineer_name = os.getenv("ENGINEER_NAME", "").strip()
if engineer_name and random.random() < 0.3: # 30% chance to use name
personalized_messages = [
f"{engineer_name}, all set!",
f"Ready for you, {engineer_name}!",
f"Complete, {engineer_name}!",
f"{engineer_name}, we're done!",
f"Task finished, {engineer_name}!"
]
text = random.choice(personalized_messages)
else:
completion_messages = [
"Work complete!",
"All done!",
"Task finished!",
"Job complete!",
"Ready for next task!",
"Ready for your next move!",
"All set!"
]
text = random.choice(completion_messages)
print(f"🎯 Text: {text}")
print("🔊 Speaking...")
# Speak the text
engine.say(text)
engine.runAndWait()
print("✅ Playback complete!")
except ImportError:
print("❌ Error: pyttsx3 package not installed")
print("This script uses UV to auto-install dependencies.")
sys.exit(1)
except Exception as e:
print(f"❌ Error: {e}")
sys.exit(1)
if __name__ == "__main__":
main()
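pyttsx3 also exposes the installed system voices, which this script leaves at the platform default; a short sketch for inspecting and pinning one:

```python
import pyttsx3

engine = pyttsx3.init()

# List the voices the OS provides (ids differ per platform/install).
for voice in engine.getProperty("voices"):
    print(voice.id, voice.name)

# Pin the first voice, then speak as the script above does.
engine.setProperty("voice", engine.getProperty("voices")[0].id)
engine.say("Voice check")
engine.runAndWait()
```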

hooks/utils/tts/openai_tts.py (executable file)

@@ -0,0 +1,109 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "openai",
# "python-dotenv",
# ]
# ///
import os
import sys
import asyncio
from pathlib import Path
from dotenv import load_dotenv
async def main():
"""
OpenAI TTS Script
Uses OpenAI's TTS model for high-quality text-to-speech.
Accepts optional text prompt as command-line argument.
Usage:
- ./openai_tts.py # Uses default text
- ./openai_tts.py "Your custom text" # Uses provided text
Features:
- OpenAI TTS-1 model (fast and reliable)
- Nova voice (engaging and warm)
- Direct audio streaming and playback
- Optimized for hook usage
"""
# Load environment variables
load_dotenv()
# Get API key from environment
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
print("❌ Error: OPENAI_API_KEY not found in environment variables")
sys.exit(1)
try:
from openai import AsyncOpenAI
# Initialize OpenAI client
openai = AsyncOpenAI(api_key=api_key)
print("🎙️ OpenAI TTS")
print("=" * 15)
# Get text from command line argument or use default
if len(sys.argv) > 1:
text = " ".join(sys.argv[1:]) # Join all arguments as text
else:
text = "Task completed successfully!"
print(f"🎯 Text: {text}")
print("🔊 Generating audio...")
try:
# Generate audio using OpenAI TTS
response = await openai.audio.speech.create(
model="tts-1",
voice="nova",
input=text,
response_format="mp3",
)
# Save to temporary file
audio_file = Path.home() / "Desktop" / "tts_completion.mp3"
with open(audio_file, "wb") as f:
async for chunk in response.aiter_bytes():
f.write(chunk)
print("🎵 Playing audio...")
# Play the audio file
import subprocess
if sys.platform == "darwin": # macOS
subprocess.run(["afplay", str(audio_file)], capture_output=True)
elif sys.platform == "linux": # Linux
subprocess.run(["aplay", str(audio_file)], capture_output=True)
elif sys.platform == "win32": # Windows
subprocess.run(["start", str(audio_file)], shell=True, capture_output=True)
print("✅ Playback complete!")
# Clean up the temporary file
try:
audio_file.unlink()
except OSError:
pass
except Exception as e:
print(f"❌ Error: {e}")
except ImportError:
print("❌ Error: Required package not installed")
print("This script uses UV to auto-install dependencies.")
sys.exit(1)
except Exception as e:
print(f"❌ Unexpected error: {e}")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())
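The call above buffers the whole clip before writing; newer openai SDKs also offer a streaming variant that writes to disk as bytes arrive. A hedged sketch (API surface as of openai 1.x; verify against your installed version):

```python
import asyncio
import os
from openai import AsyncOpenAI

async def speak(text: str, out_path: str = "tts_completion.mp3") -> None:
    client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    # Stream the mp3 straight to a file as it is generated.
    async with client.audio.speech.with_streaming_response.create(
        model="tts-1",
        voice="nova",
        input=text,
    ) as response:
        await response.stream_to_file(out_path)

asyncio.run(speak("Task completed successfully!"))
```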


@@ -0,0 +1,238 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "python-dotenv",
# "anthropic",
# ]
# ///
"""
Plan Parser Utility
Uses a Claude model (Sonnet by default for plan generation; configurable via ANTHROPIC_LARGE_MODEL / ANTHROPIC_SMALL_MODEL) to break down requirements into structured implementation plans.
Creates .titanium/plan.json with epics, stories, tasks, and agent assignments.
Usage:
uv run plan_parser.py <requirements_file> <project_path>
Example:
uv run plan_parser.py .titanium/requirements.md "$(pwd)"
Output:
- Creates .titanium/plan.json with structured plan
- Prints JSON to stdout
"""
import json
import sys
import os
from pathlib import Path
from dotenv import load_dotenv
def get_claude_model(task_type: str = "default") -> str:
"""
Get Claude model based on task complexity.
Args:
task_type: "complex" for large model, "default" for small model
Returns:
Model name string
"""
load_dotenv()
if task_type == "complex":
# Use large model (Sonnet) for complex tasks
return os.getenv("ANTHROPIC_LARGE_MODEL", "claude-sonnet-4-5-20250929")
else:
# Use small model (Haiku) for faster tasks
return os.getenv("ANTHROPIC_SMALL_MODEL", "claude-haiku-4-5-20251001")
def parse_requirements_to_plan(requirements_text: str, project_path: str) -> dict:
"""
Use a Claude model (the configured large model by default) to break down requirements into a structured plan.
Args:
requirements_text: Requirements document text
project_path: Absolute path to project directory
Returns:
Structured plan dictionary with epics, stories, tasks
"""
# Load environment variables
load_dotenv()
api_key = os.getenv("ANTHROPIC_API_KEY")
if not api_key:
print("Error: ANTHROPIC_API_KEY not found in environment variables", file=sys.stderr)
print("Please add your Anthropic API key to ~/.env file:", file=sys.stderr)
print("ANTHROPIC_API_KEY=sk-ant-your-key-here", file=sys.stderr)
sys.exit(1)
try:
from anthropic import Anthropic
client = Anthropic(api_key=api_key)
except ImportError:
print("Error: anthropic package not installed", file=sys.stderr)
print("This should be handled by uv automatically.", file=sys.stderr)
sys.exit(1)
# Build Claude prompt
prompt = f"""Analyze these requirements and create a structured implementation plan.
Requirements:
{requirements_text}
Create a JSON plan with this exact structure:
{{
"epics": [
{{
"name": "Epic name",
"description": "Epic description",
"stories": [
{{
"name": "Story name",
"description": "User story or technical description",
"tasks": [
{{
"name": "Task name",
"agent": "@agent-name",
"estimated_time": "30m",
"dependencies": []
}}
]
}}
]
}}
],
"agents_needed": ["@api-developer", "@frontend-developer"],
"estimated_total_time": "4h"
}}
Available agents to use:
- @product-manager: Requirements validation, clarification, acceptance criteria
- @api-developer: Backend APIs (REST/GraphQL), database, authentication
- @frontend-developer: UI/UX, React/Vue/etc, responsive design
- @devops-engineer: CI/CD, deployment, infrastructure, Docker/K8s
- @test-runner: Running tests, test execution, test reporting
- @tdd-specialist: Writing tests, test-driven development, test design
- @code-reviewer: Code review, best practices, code quality
- @security-scanner: Security vulnerabilities, security best practices
- @doc-writer: Technical documentation, API docs, README files
- @api-documenter: OpenAPI/Swagger specs, API documentation
- @debugger: Debugging, error analysis, troubleshooting
- @refactor: Code refactoring, code improvement, tech debt
- @project-planner: Project breakdown, task planning, estimation
- @shadcn-ui-builder: UI components using shadcn/ui library
- @meta-agent: Creating new custom agents
Guidelines:
1. Break down into logical epics (major features)
2. Each epic should have 1-5 stories
3. Each story should have 2-10 tasks
4. Assign the most appropriate agent to each task
5. Estimate time realistically (15m, 30m, 1h, 2h, etc.)
6. List dependencies between tasks (use task names)
7. Start with @product-manager for requirements validation
8. Always include @test-runner or @tdd-specialist for testing
9. Consider @security-scanner for auth/payment/sensitive features
10. End with @doc-writer for documentation
Return ONLY valid JSON, no markdown code blocks, no explanations."""
try:
# Get model (configurable via env var, defaults to Sonnet for complex epics)
model = get_claude_model("complex") # Use large model for complex epics
# Call Claude
response = client.messages.create(
model=model,
max_tokens=8192, # Increased for large epics with many stories
temperature=0.3, # Lower temperature for deterministic planning
messages=[{"role": "user", "content": prompt}]
)
plan_json = response.content[0].text.strip()
# Clean up markdown code blocks if present
if plan_json.startswith("```json"):
plan_json = plan_json[7:]
if plan_json.startswith("```"):
plan_json = plan_json[3:]
if plan_json.endswith("```"):
plan_json = plan_json[:-3]
plan_json = plan_json.strip()
# Parse and validate JSON
plan = json.loads(plan_json)
# Validate structure
if "epics" not in plan:
raise ValueError("Plan missing 'epics' field")
if "agents_needed" not in plan:
raise ValueError("Plan missing 'agents_needed' field")
if "estimated_total_time" not in plan:
raise ValueError("Plan missing 'estimated_total_time' field")
# Save plan to file
plan_path = Path(project_path) / ".titanium" / "plan.json"
plan_path.parent.mkdir(parents=True, exist_ok=True)
# Atomic write
temp_path = plan_path.with_suffix('.tmp')
with open(temp_path, 'w') as f:
json.dump(plan, f, indent=2)
temp_path.replace(plan_path)
return plan
except json.JSONDecodeError as e:
print(f"Error: Claude returned invalid JSON: {e}", file=sys.stderr)
print(f"Response was: {plan_json[:200]}...", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error calling Claude API: {e}", file=sys.stderr)
sys.exit(1)
def main():
"""CLI interface for plan parsing."""
if len(sys.argv) < 3:
print("Usage: plan_parser.py <requirements_file> <project_path>", file=sys.stderr)
print("\nExample:", file=sys.stderr)
print(" uv run plan_parser.py .titanium/requirements.md \"$(pwd)\"", file=sys.stderr)
sys.exit(1)
requirements_file = sys.argv[1]
project_path = sys.argv[2]
# Validate requirements file exists
if not Path(requirements_file).exists():
print(f"Error: Requirements file not found: {requirements_file}", file=sys.stderr)
sys.exit(1)
# Read requirements
try:
with open(requirements_file, 'r') as f:
requirements_text = f.read()
except Exception as e:
print(f"Error reading requirements file: {e}", file=sys.stderr)
sys.exit(1)
if not requirements_text.strip():
print("Error: Requirements file is empty", file=sys.stderr)
sys.exit(1)
# Parse requirements to plan
plan = parse_requirements_to_plan(requirements_text, project_path)
# Output plan to stdout
print(json.dumps(plan, indent=2))
if __name__ == "__main__":
main()
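Consumers of the validated plan only need to walk its three levels; a minimal sketch that tallies tasks per assigned agent from a generated `.titanium/plan.json`:

```python
import json
from collections import defaultdict
from pathlib import Path

plan = json.loads(Path(".titanium/plan.json").read_text())

# Group task names under their "@agent-name" assignment.
by_agent: dict[str, list[str]] = defaultdict(list)
for epic in plan["epics"]:
    for story in epic["stories"]:
        for task in story["tasks"]:
            by_agent[task["agent"]].append(task["name"])

for agent, tasks in sorted(by_agent.items()):
    print(f"{agent}: {len(tasks)} task(s)")
```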


@@ -0,0 +1,253 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "python-dotenv",
# ]
# ///
"""
Workflow State Management Utility
Manages workflow state via file-based JSON storage in .titanium/workflow-state.json
Commands:
init <project_path> <workflow_type> <goal> Initialize new workflow
update_phase <project_path> <phase> <status> Update current phase
get <project_path> Get current state
complete <project_path> Mark workflow complete
Examples:
uv run workflow_state.py init "$(pwd)" "development" "Implement user auth"
uv run workflow_state.py update_phase "$(pwd)" "implementation" "in_progress"
uv run workflow_state.py get "$(pwd)"
uv run workflow_state.py complete "$(pwd)"
"""
import json
import sys
from pathlib import Path
from datetime import datetime
# Constants
STATE_FILE = ".titanium/workflow-state.json"
def init_workflow(project_path: str, workflow_type: str, goal: str) -> dict:
"""
Initialize a new workflow state file.
Args:
project_path: Absolute path to project directory
workflow_type: Type of workflow (development, bug-fix, refactor, review)
goal: User's stated goal for this workflow
Returns:
Initial state dictionary
"""
state_path = Path(project_path) / STATE_FILE
state_path.parent.mkdir(parents=True, exist_ok=True)
state = {
"workflow_type": workflow_type,
"goal": goal,
"status": "planning",
"started_at": datetime.now().isoformat(),
"current_phase": "planning",
"phases": [],
"completed_tasks": [],
"pending_tasks": []
}
# Atomic write
temp_path = state_path.with_suffix('.tmp')
with open(temp_path, 'w') as f:
json.dump(state, f, indent=2)
temp_path.replace(state_path)
return state
def update_phase(project_path: str, phase_name: str, status: str = "in_progress") -> dict:
"""
Update current workflow phase.
Args:
project_path: Absolute path to project directory
phase_name: Name of phase (planning, implementation, review, completed)
status: Status of phase (in_progress, completed, failed)
Returns:
Updated state dictionary or None if state doesn't exist
"""
state_path = Path(project_path) / STATE_FILE
if not state_path.exists():
print(f"Error: No workflow state found at {state_path}", file=sys.stderr)
return None
# Read current state
with open(state_path, 'r') as f:
state = json.load(f)
# Update current phase and status
state["current_phase"] = phase_name
state["status"] = status
# Update or add phase
phase_exists = False
for i, p in enumerate(state["phases"]):
if p["name"] == phase_name:
# Preserve original started_at when updating existing phase
state["phases"][i]["status"] = status
# Only add completed_at if completing and doesn't already exist
if status == "completed" and "completed_at" not in state["phases"][i]:
state["phases"][i]["completed_at"] = datetime.now().isoformat()
phase_exists = True
break
if not phase_exists:
# Create new phase entry with current timestamp
phase_entry = {
"name": phase_name,
"status": status,
"started_at": datetime.now().isoformat()
}
if status == "completed":
phase_entry["completed_at"] = datetime.now().isoformat()
state["phases"].append(phase_entry)
# Atomic write
temp_path = state_path.with_suffix('.tmp')
with open(temp_path, 'w') as f:
json.dump(state, f, indent=2)
temp_path.replace(state_path)
return state
def get_state(project_path: str) -> dict:
"""
Get current workflow state.
Args:
project_path: Absolute path to project directory
Returns:
State dictionary or None if state doesn't exist
"""
state_path = Path(project_path) / STATE_FILE
if not state_path.exists():
return None
with open(state_path, 'r') as f:
return json.load(f)
def complete_workflow(project_path: str) -> dict:
"""
Mark workflow as complete.
Args:
project_path: Absolute path to project directory
Returns:
Updated state dictionary or None if state doesn't exist
"""
state_path = Path(project_path) / STATE_FILE
if not state_path.exists():
print(f"Error: No workflow state found at {state_path}", file=sys.stderr)
return None
# Read current state
with open(state_path, 'r') as f:
state = json.load(f)
# Update to completed
state["status"] = "completed"
state["current_phase"] = "completed"
state["completed_at"] = datetime.now().isoformat()
# Mark current phase as completed if it exists
if state["phases"]:
for phase in state["phases"]:
if phase["status"] == "in_progress":
phase["status"] = "completed"
phase["completed_at"] = datetime.now().isoformat()
# Atomic write
temp_path = state_path.with_suffix('.tmp')
with open(temp_path, 'w') as f:
json.dump(state, f, indent=2)
temp_path.replace(state_path)
return state
def main():
"""CLI interface for workflow state management."""
if len(sys.argv) < 3:
print("Usage: workflow_state.py <command> <project_path> [args...]", file=sys.stderr)
print("\nCommands:", file=sys.stderr)
print(" init <project_path> <workflow_type> <goal>", file=sys.stderr)
print(" update_phase <project_path> <phase> [status]", file=sys.stderr)
print(" get <project_path>", file=sys.stderr)
print(" complete <project_path>", file=sys.stderr)
sys.exit(1)
command = sys.argv[1]
project_path = sys.argv[2]
try:
if command == "init":
if len(sys.argv) < 5:
print("Error: init requires workflow_type and goal", file=sys.stderr)
sys.exit(1)
workflow_type = sys.argv[3]
goal = sys.argv[4]
state = init_workflow(project_path, workflow_type, goal)
print(json.dumps(state, indent=2))
elif command == "update_phase":
if len(sys.argv) < 4:
print("Error: update_phase requires phase_name", file=sys.stderr)
sys.exit(1)
phase_name = sys.argv[3]
status = sys.argv[4] if len(sys.argv) > 4 else "in_progress"
state = update_phase(project_path, phase_name, status)
if state:
print(json.dumps(state, indent=2))
else:
sys.exit(1)
elif command == "get":
state = get_state(project_path)
if state:
print(json.dumps(state, indent=2))
else:
print("No workflow found", file=sys.stderr)
sys.exit(1)
elif command == "complete":
state = complete_workflow(project_path)
if state:
print(json.dumps(state, indent=2))
else:
sys.exit(1)
else:
print(f"Error: Unknown command: {command}", file=sys.stderr)
print("\nValid commands: init, update_phase, get, complete", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error: {str(e)}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
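End to end, the commands model a simple linear lifecycle; the same flow from Python, assuming the file is importable as `workflow_state`:

```python
from workflow_state import init_workflow, update_phase, complete_workflow

project = "/tmp/demo-project"
init_workflow(project, "development", "Implement user auth")
update_phase(project, "implementation", "in_progress")
update_phase(project, "implementation", "completed")
state = complete_workflow(project)
print(state["status"], state["completed_at"])
```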