Initial commit

Author: Zhongwei Li
Date: 2025-11-30 09:05:52 +08:00
Commit: db12a906d2
62 changed files with 27669 additions and 0 deletions

hooks/utils/bmad/bmad_generator.py (executable file, 1494 lines)

File diff suppressed because it is too large.

@@ -0,0 +1,501 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "python-dotenv",
# ]
# ///
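# The shebang runs this file via `uv run --script`; uv reads the inline script
# metadata block above (PEP 723) to provision Python >= 3.11 and install the
# declared dependencies before executing.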
"""
BMAD Document Validator Utility
Validates BMAD documents match required structure and completeness.
Commands:
brief <file_path> Validate product brief
prd <file_path> Validate PRD
architecture <file_path> Validate architecture
epic <file_path> Validate epic
all <bmad_dir> Validate all documents in backlog
Examples:
uv run bmad_validator.py prd bmad-backlog/prd/prd.md
uv run bmad_validator.py all bmad-backlog/
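Exit status: 0 if the document(s) validate, 1 otherwise.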
"""
import json
import sys
import re
from pathlib import Path
from typing import Dict, List
def validate_brief(file_path: str) -> Dict:
"""
Validate Product Brief has all required sections.
Args:
file_path: Path to product-brief.md
Returns:
Validation results dict
"""
try:
        with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_sections = [
"Executive Summary",
"Problem Statement",
"Proposed Solution",
"Target Users",
"Goals & Success Metrics",
"MVP Scope",
"Post-MVP Vision",
"Technical Considerations",
"Constraints & Assumptions",
"Risks & Open Questions",
"Next Steps"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": []
}
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for header
if not re.search(r'#\s+Product Brief:', content):
results["errors"].append("Missing main header: # Product Brief: {Name}")
# Check for version info
if "**Version:**" not in content:
results["warnings"].append("Missing version field")
if "**Date:**" not in content:
results["warnings"].append("Missing date field")
return results
def validate_prd(file_path: str) -> Dict:
"""
Validate PRD has all required sections.
Args:
file_path: Path to prd.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_sections = [
"Executive Summary",
"Product Overview",
"Success Metrics",
"Feature Requirements",
"User Stories",
"Technical Requirements",
"Data Requirements",
"AI/ML Requirements",
"Design Requirements",
"Go-to-Market Strategy",
"Risks & Mitigation",
"Open Questions",
"Appendix"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": []
}
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for header
if not re.search(r'#\s+Product Requirements Document', content):
results["errors"].append("Missing main header")
# Check for metadata
if "**Document Version:**" not in content:
results["warnings"].append("Missing document version")
if "**Last Updated:**" not in content:
results["warnings"].append("Missing last updated date")
# Check for user stories format
if "User Stories" in content:
# Should have "As a" pattern
if "As a" not in content:
results["warnings"].append("User stories missing 'As a... I want... so that' format")
# Check for acceptance criteria
if "Feature Requirements" in content or "User Stories" in content:
if "Acceptance Criteria:" not in content and "- [ ]" not in content:
results["warnings"].append("Missing acceptance criteria checkboxes")
return results
def validate_architecture(file_path: str) -> Dict:
"""
Validate Architecture document completeness.
Args:
file_path: Path to architecture.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_sections = [
"System Overview",
"Architecture Principles",
"High-Level Architecture",
"Component Details",
"Data Architecture",
"Infrastructure",
"Security Architecture",
"Deployment Strategy",
"Monitoring & Observability",
"Appendix"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": []
}
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for code examples
if "```sql" not in content and "```python" not in content and "```typescript" not in content:
results["warnings"].append("Missing code examples (SQL, Python, or TypeScript)")
# Check for cost estimates
if "Cost" not in content:
results["warnings"].append("Missing cost estimates")
# Check for technology decisions
if "Technology Decisions" not in content:
results["warnings"].append("Missing technology decisions table")
return results
def validate_epic(file_path: str) -> Dict:
"""
Validate Epic file structure.
Args:
file_path: Path to EPIC-*.md
Returns:
Validation results dict
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
return {
"valid": False,
"errors": [f"Cannot read file: {e}"],
"warnings": [],
"missing_sections": []
}
required_fields = [
"**Epic Owner:**",
"**Priority:**",
"**Status:**",
"**Estimated Effort:**"
]
required_sections = [
"Epic Description",
"Business Value",
"Success Criteria",
"User Stories",
"Dependencies",
"Definition of Done"
]
results = {
"valid": True,
"errors": [],
"warnings": [],
"missing_sections": [],
"missing_fields": []
}
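    # By the time this function returns, a failing epic produces a dict like
    # (hypothetical values):
    #   {"valid": False, "errors": [], "warnings": ["Stories missing acceptance criteria"],
    #    "missing_sections": ["Dependencies"], "missing_fields": ["**Priority:**"]}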
# Check for required fields
for field in required_fields:
if field not in content:
results["valid"] = False
results["missing_fields"].append(field)
# Check for required sections
for section in required_sections:
if section not in content:
results["valid"] = False
results["missing_sections"].append(section)
# Check for story format
story_matches = re.findall(r'### STORY-(\d+)-(\d+):', content)
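    # e.g. a heading like "### STORY-001-02: Configure CI pipeline" yields ("001", "02")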
if not story_matches:
results["errors"].append("No stories found (expecting STORY-XXX-YY format)")
# Check stories have acceptance criteria
if story_matches:
has_criteria = "Acceptance Criteria:" in content or "**Acceptance Criteria:**" in content
if not has_criteria:
results["warnings"].append("Stories missing acceptance criteria")
# Check for "As a... I want... so that" format
has_user_story_format = "As a" in content and "I want" in content and "so that" in content
if not has_user_story_format:
results["warnings"].append("Stories missing user story format (As a... I want... so that...)")
return results
def validate_all(bmad_dir: str) -> Dict:
"""
Validate all documents in BMAD backlog.
Args:
bmad_dir: Path to bmad-backlog directory
Returns:
Combined validation results
"""
bmad_path = Path(bmad_dir)
results = {
"brief": None,
"prd": None,
"architecture": None,
"epics": [],
"overall_valid": True
}
# Validate brief (optional)
brief_path = bmad_path / "product-brief.md"
if brief_path.exists():
results["brief"] = validate_brief(str(brief_path))
if not results["brief"]["valid"]:
results["overall_valid"] = False
# Validate PRD (required)
prd_path = bmad_path / "prd" / "prd.md"
if prd_path.exists():
results["prd"] = validate_prd(str(prd_path))
if not results["prd"]["valid"]:
results["overall_valid"] = False
else:
results["overall_valid"] = False
results["prd"] = {"valid": False, "errors": ["PRD not found"]}
# Validate architecture (required)
arch_path = bmad_path / "architecture" / "architecture.md"
if arch_path.exists():
results["architecture"] = validate_architecture(str(arch_path))
if not results["architecture"]["valid"]:
results["overall_valid"] = False
else:
results["overall_valid"] = False
results["architecture"] = {"valid": False, "errors": ["Architecture not found"]}
# Validate epics (required)
epics_dir = bmad_path / "epics"
if epics_dir.exists():
epic_files = sorted(epics_dir.glob("EPIC-*.md"))
for epic_file in epic_files:
epic_result = validate_epic(str(epic_file))
epic_result["file"] = epic_file.name
results["epics"].append(epic_result)
if not epic_result["valid"]:
results["overall_valid"] = False
else:
results["overall_valid"] = False
return results
def print_validation_results(results: Dict, document_type: str):
"""Print validation results in readable format."""
print(f"\n{'='*60}")
print(f"Validation Results: {document_type}")
print(f"{'='*60}\n")
if results["valid"]:
print("✅ VALID - All required sections present")
else:
print("❌ INVALID - Missing required content")
if results.get("missing_sections"):
print("\n❌ Missing Required Sections:")
for section in results["missing_sections"]:
print(f" - {section}")
if results.get("missing_fields"):
print("\n❌ Missing Required Fields:")
for field in results["missing_fields"]:
print(f" - {field}")
if results.get("errors"):
print("\n❌ Errors:")
for error in results["errors"]:
print(f" - {error}")
if results.get("warnings"):
print("\n⚠️ Warnings:")
for warning in results["warnings"]:
print(f" - {warning}")
print()
def main():
"""CLI interface for validation."""
if len(sys.argv) < 3:
print("Usage: bmad_validator.py <command> <file_path>", file=sys.stderr)
print("\nCommands:", file=sys.stderr)
print(" brief <file_path>", file=sys.stderr)
print(" prd <file_path>", file=sys.stderr)
print(" architecture <file_path>", file=sys.stderr)
print(" epic <file_path>", file=sys.stderr)
print(" all <bmad_dir>", file=sys.stderr)
sys.exit(1)
command = sys.argv[1]
path = sys.argv[2]
try:
if command == "brief":
results = validate_brief(path)
print_validation_results(results, "Product Brief")
sys.exit(0 if results["valid"] else 1)
elif command == "prd":
results = validate_prd(path)
print_validation_results(results, "PRD")
sys.exit(0 if results["valid"] else 1)
elif command == "architecture":
results = validate_architecture(path)
print_validation_results(results, "Architecture")
sys.exit(0 if results["valid"] else 1)
elif command == "epic":
results = validate_epic(path)
print_validation_results(results, f"Epic ({Path(path).name})")
sys.exit(0 if results["valid"] else 1)
elif command == "all":
results = validate_all(path)
print(f"\n{'='*60}")
print(f"Complete Backlog Validation: {path}")
print(f"{'='*60}\n")
if results["overall_valid"]:
print("✅ ALL DOCUMENTS VALID\n")
else:
print("❌ VALIDATION FAILED\n")
# Print individual results
if results["brief"]:
print("Product Brief:", "✅ Valid" if results["brief"]["valid"] else "❌ Invalid")
else:
print("Product Brief: (not found - optional)")
if results["prd"]:
print("PRD:", "✅ Valid" if results["prd"]["valid"] else "❌ Invalid")
else:
print("PRD: ❌ Not found (required)")
if results["architecture"]:
print("Architecture:", "✅ Valid" if results["architecture"]["valid"] else "❌ Invalid")
else:
print("Architecture: ❌ Not found (required)")
print(f"Epics: {len(results['epics'])} found")
for epic in results["epics"]:
status = "" if epic["valid"] else ""
print(f" {status} {epic['file']}")
print(f"\n{'='*60}\n")
# Print details if invalid
if not results["overall_valid"]:
if results["prd"] and not results["prd"]["valid"]:
print_validation_results(results["prd"], "PRD")
if results["architecture"] and not results["architecture"]["valid"]:
print_validation_results(results["architecture"], "Architecture")
for epic in results["epics"]:
if not epic["valid"]:
print_validation_results(epic, f"Epic {epic['file']}")
sys.exit(0 if results["overall_valid"] else 1)
else:
print(f"Error: Unknown command: {command}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error: {e!s}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()


@@ -0,0 +1,682 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "python-dotenv",
# ]
# ///
"""
BMAD Research Prompt Generator
Generates research prompts and findings templates for technical decisions.
No GPT-4 calls - just template generation (Cost: $0).
Commands:
prompt <topic> <project_path> [prd_path] Generate research prompt
template <topic> <project_path> Generate findings template
Examples:
uv run research_generator.py prompt "data vendors" "$(pwd)" "bmad-backlog/prd/prd.md"
uv run research_generator.py template "data vendors" "$(pwd)"
"""
import sys
import re
from pathlib import Path
from datetime import datetime
def generate_research_prompt(topic: str, project_path: str, prd_path: str | None = None) -> str:
"""
Generate research prompt for web AI (ChatGPT/Claude).
Args:
topic: Research topic
project_path: Project directory
prd_path: Optional path to PRD for context
Returns:
Research prompt content
"""
current_date = datetime.now().strftime("%B %d, %Y")
topic_slug = topic.lower().replace(' ', '-').replace('/', '-')
# Read PRD for context if provided
project_context = ""
project_name = "New Project"
requirements_context = ""
if prd_path and Path(prd_path).exists():
try:
            with open(prd_path, 'r', encoding='utf-8') as f:
prd_content = f.read()
# Extract project name
match = re.search(r'##\s+(.+?)(?:\s+-|$)', prd_content, re.MULTILINE)
if match:
project_name = match.group(1).strip()
# Extract relevant requirements
if "data" in topic.lower() or "api" in topic.lower():
data_section = extract_section(prd_content, "Data Requirements")
if data_section:
requirements_context = f"\n**Project Requirements**:\n{data_section[:500]}"
if "auth" in topic.lower():
security_section = extract_section(prd_content, "Security")
if security_section:
requirements_context = f"\n**Security Requirements**:\n{security_section[:500]}"
project_context = f"\n**Project**: {project_name}\n"
except Exception:
pass
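    # PRD context is optional: if the PRD cannot be read or parsed above, the
    # prompt is still generated, just without project-specific details.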
prompt_content = f"""# Research Prompt: {topic}
**Date**: {current_date}
**For**: {project_name}
---
## Instructions
**COPY THIS ENTIRE PROMPT** and paste into:
- ChatGPT (https://chat.openai.com) with GPT-4
- Claude (https://claude.ai) web version
They have web search capabilities for current, accurate information.
---
## Research Request
{project_context}
**Research Topic**: {topic}
{requirements_context}
Please research and provide comprehensive analysis:
---
### 1. Overview
- What options exist for {topic}?
- What are the top 5-7 solutions/vendors/APIs?
- Current market leaders?
- Recent changes in this space? (2024-2025)
---
### 2. Detailed Comparison Table
Create a comprehensive comparison:
| Option | Pricing | Key Features | Pros | Cons | Best For |
|--------|---------|--------------|------|------|----------|
| Option 1: [Name] | [Tiers] | [Top 3-5 features] | [2-3 pros] | [2-3 cons] | [Use case] |
| Option 2: [Name] | | | | | |
| Option 3: [Name] | | | | | |
| Option 4: [Name] | | | | | |
| Option 5: [Name] | | | | | |
---
### 3. Technical Details
For EACH option, provide:
#### [Option Name]
**API Documentation**: [Link to official docs]
**Authentication**:
- Method: API Key | OAuth | JWT | Other
- Security: HTTPS required? Token rotation?
**Rate Limits**:
- Free tier: X requests per minute/hour/day
- Paid tiers: Rate limit increases
**Data Format**:
- Response format: JSON | XML | GraphQL | CSV
- Webhook support: Yes/No
- Streaming: Yes/No
**SDK Availability**:
- Python: [pip package name] - [GitHub link]
- Node.js: [npm package name] - [GitHub link]
- Other languages: [List]
**Code Example**:
```python
# Basic usage example (if available from docs)
```
**Community**:
- GitHub stars: X
- Last updated: Date
- Issues: Open/closed ratio
- Stack Overflow: Questions count
---
### 4. Integration Complexity
For each option, estimate:
**Setup Time**:
- Account creation: X minutes
- API key generation: X minutes
- SDK integration: X hours
- Testing: X hours
**Total**: X hours/days
**Dependencies**:
- Libraries required
- Platform requirements
- Other services needed
**Learning Curve**:
- Documentation quality: Excellent | Good | Fair | Poor
- Tutorials available: Yes/No
- Community support: Active | Moderate | Limited
---
### 5. Recommendations
Based on the project requirements, provide specific recommendations:
**For MVP** (budget-conscious, speed):
- **Recommended**: [Option]
- **Why**: [Rationale]
- **Tradeoffs**: [What you give up]
**For Production** (quality-focused, scalable):
- **Recommended**: [Option]
- **Why**: [Rationale]
- **Cost**: $X/month at scale
**For Enterprise** (feature-complete):
- **Recommended**: [Option]
- **Why**: [Rationale]
- **Cost**: $Y/month
---
### 6. Detailed Cost Analysis
For each option:
#### [Option Name]
**Free Tier**:
- What's included: [Limits]
- Restrictions: [What's missing]
- Good for MVP? Yes/No - [Why]
**Starter/Basic Tier**:
- Price: $X/month
- Includes: [Features and limits]
- Rate limits: X requests/min
**Professional Tier**:
- Price: $Y/month
- Includes: [Features and limits]
- Rate limits: Y requests/min
**Enterprise Tier**:
- Price: $Z/month or Custom
- Includes: [Features]
- SLA: X% uptime
**Estimated Monthly Cost**:
- MVP (low volume): $X-Y
- Production (medium volume): $X-Y
- Scale (high volume): $X-Y
**Hidden Costs**:
- [Overage charges, add-ons, etc.]
---
### 7. Risks & Considerations
For each option, analyze:
**Vendor Lock-in**:
- How easy to migrate away? (Easy/Medium/Hard)
- Data export capabilities
- API compatibility with alternatives
**Data Quality/Reliability**:
- Uptime history (if available)
- Published SLAs
- Known outages or issues
- Data accuracy/freshness
**Compliance & Security**:
- Data residency (US/EU/Global)
- Compliance certifications (SOC 2, GDPR, etc.)
- Security features (encryption, access controls)
- Privacy policy concerns
**Support & Maintenance**:
- Support channels (email, chat, phone)
- Response time SLAs
- Documentation updates
- Release cadence
- Deprecation policy
**Scalability**:
- Auto-scaling capabilities
- Performance at high volume
- Regional availability
- CDN/edge locations
---
### 8. Source Links
Provide current, working links to:
**Official Resources**:
- Homepage: [URL]
- Pricing page: [URL]
- API documentation: [URL]
- Getting started guide: [URL]
- Status page: [URL]
**Developer Resources**:
- GitHub repository: [URL]
- SDK documentation: [URL]
- API reference: [URL]
- Code examples: [URL]
**Community**:
- Community forum: [URL]
- Discord/Slack: [URL]
- Stack Overflow tag: [URL]
- Twitter/X: [Handle]
**Reviews & Comparisons**:
- G2/Capterra reviews: [URL]
- Comparison articles: [URL]
- User testimonials: [URL]
- Case studies: [URL]
---
## Deliverable
Please structure your response with clear sections matching the template above.
This research will inform our architecture decisions and be documented for future reference.
Thank you!
---
**After completing research**:
1. Copy findings into template: bmad-backlog/research/RESEARCH-{topic_slug}-findings.md
2. Return to Claude Code
3. Continue with /bmad:architecture (will use your research)
"""
# Save prompt
prompt_path = Path(project_path) / "bmad-backlog" / "research" / f"RESEARCH-{topic_slug}-prompt.md"
prompt_path.parent.mkdir(parents=True, exist_ok=True)
    with open(prompt_path, 'w', encoding='utf-8') as f:
f.write(prompt_content)
return prompt_content
def generate_findings_template(topic: str, project_path: str) -> str:
"""
Generate findings template for documenting research.
Args:
topic: Research topic
project_path: Project directory
Returns:
Template content
"""
current_date = datetime.now().strftime("%B %d, %Y")
topic_slug = topic.lower().replace(' ', '-').replace('/', '-')
template_content = f"""# Research Findings: {topic}
**Date**: {current_date}
**Researcher**: [Your Name]
**Status**: Draft
---
## Research Summary
**Question**: What {topic} should we use?
**Recommendation**: [Chosen option and brief rationale]
**Confidence**: High | Medium | Low
**Decision Date**: [When decision was made]
---
## Options Evaluated
### Option 1: [Name]
**Overview**:
[1-2 sentence description of what this is]
**Pricing**:
- Free tier: [Details or N/A]
- Starter tier: $X/month - [What's included]
- Pro tier: $Y/month - [What's included]
- Enterprise: $Z/month or Custom
- **Estimated cost for our MVP**: $X/month
**Key Features**:
- [Feature 1]
- [Feature 2]
- [Feature 3]
- [Feature 4]
**Pros**:
- [Pro 1]
- [Pro 2]
- [Pro 3]
**Cons**:
- [Con 1]
- [Con 2]
- [Con 3]
**Technical Details**:
- API Type: REST | GraphQL | WebSocket | Other
- Authentication: API Key | OAuth | JWT | Other
- Rate Limits: X requests per minute/hour
- Data Format: JSON | XML | CSV | Other
- SDKs: Python ([package]), Node.js ([package]), Other
- Latency: Typical response time
- Uptime SLA: X%
**Documentation**: [Link]
**Community**:
- GitHub Stars: X
- Last Update: [Date]
- Active Development: Yes/No
---
### Option 2: [Name]
[Same structure as Option 1]
---
### Option 3: [Name]
[Same structure as Option 1]
---
### Option 4: [Name]
[Same structure as Option 1 - if evaluated]
---
## Comparison Matrix
| Criteria | Option 1 | Option 2 | Option 3 | Winner |
|----------|----------|----------|----------|--------|
| **Cost (MVP)** | $X/mo | $Y/mo | $Z/mo | [Option] |
| **Cost (Production)** | $X/mo | $Y/mo | $Z/mo | [Option] |
| **Features** | X/10 | Y/10 | Z/10 | [Option] |
| **API Quality** | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐ | ⭐⭐⭐ | [Option] |
| **Documentation** | Excellent | Good | Fair | [Option] |
| **Community** | Large | Medium | Small | [Option] |
| **Ease of Use** | Easy | Medium | Complex | [Option] |
| **Scalability** | High | Medium | High | [Option] |
| **Vendor Lock-in Risk** | Low | Medium | High | [Option] |
| **Overall Score** | X/10 | Y/10 | Z/10 | **[Winner]** |
---
## Final Recommendation
**Chosen**: [Option X]
**Rationale**:
1. [Primary reason - e.g., best balance of cost and features]
2. [Secondary reason - e.g., excellent documentation]
3. [Tertiary reason - e.g., active community]
**For MVP**:
- [Why this works for MVP]
- Cost: $X/month
- Timeline: [Can start immediately / Need 1 week setup]
**For Production**:
- [Scalability considerations]
- Cost at scale: $Y/month
- Migration path: [If we outgrow this]
**Implementation Priority**: MVP | Phase 2 | Future
---
## Implementation Plan
### Setup Steps
1. [Step 1 - e.g., Create account at vendor.com]
2. [Step 2 - e.g., Generate API key]
3. [Step 3 - e.g., Install SDK: pip install package]
4. [Step 4 - e.g., Test connection]
5. [Step 5 - e.g., Implement in production code]
**Estimated Setup Time**: X hours
### Configuration Required
**Environment Variables**:
```bash
# Add to .env.example
{{VENDOR}}_API_KEY=your_key_here
{{VENDOR}}_BASE_URL=https://api.vendor.com
```
**Code Configuration**:
```python
# Example configuration
from {{package}} import Client
client = Client(api_key=os.getenv('{{VENDOR}}_API_KEY'))
```
### Basic Usage Example
```python
# Example usage from documentation
{{code example if available}}
```
---
## Cost Projection
**Monthly Cost Breakdown**:
**MVP** (estimated volume):
- Base fee: $X
- Usage costs: $Y
- **Total**: $Z/month
**Production** (estimated volume):
- Base fee: $X
- Usage costs: $Y
- **Total**: $Z/month
**At Scale** (estimated volume):
- Base fee: $X
- Usage costs: $Y
- **Total**: $Z/month
**Cost Optimization**:
- [Strategy 1 to reduce costs]
- [Strategy 2]
---
## Risks & Mitigations
| Risk | Impact | Likelihood | Mitigation |
|------|--------|-----------|------------|
| Vendor increases pricing | Medium | Medium | [Monitor pricing, have backup option] |
| Service downtime | High | Low | [Implement fallback, cache data] |
| Rate limit hit | Medium | Medium | [Implement rate limiting, queue requests] |
| Data quality issues | High | Low | [Validation layer, monitoring] |
| Vendor shutdown | High | Low | [Data export plan, alternative ready] |
---
## Testing Checklist
- [ ] Create account and obtain credentials
- [ ] Test API in development
- [ ] Verify rate limits and error handling
- [ ] Test with production-like volume
- [ ] Set up monitoring and alerts
- [ ] Document API integration in code
- [ ] Add to .env.example
- [ ] Create fallback/error handling
- [ ] Test cost with real usage
- [ ] Review security and compliance
---
## References
**Official Documentation**:
- Website: [URL]
- Pricing: [URL]
- API Docs: [URL]
- Getting Started: [URL]
- Status Page: [URL]
**Community Resources**:
- GitHub: [URL]
- Discord/Slack: [URL]
- Stack Overflow: [URL with tag]
**Comparison Articles**:
- [Article 1 title]: [URL]
- [Article 2 title]: [URL]
**User Reviews**:
- G2: [URL]
- Reddit discussions: [URLs]
---
## Next Steps
1. ✅ Research complete
2. Review findings with team (if applicable)
3. Make final decision on [chosen option]
4. Update bmad-backlog/prd/prd.md Technical Assumptions
5. Reference in bmad-backlog/architecture/architecture.md
6. Add to implementation backlog
---
**Status**: ✅ Research Complete | ⏳ Awaiting Decision | ❌ Needs More Research
**Recommendation**: [Final recommendation]
---
*This document was generated from research conducted using web-based AI.*
*Fill in all sections with findings from your research.*
*Save this file when complete - it will be referenced during architecture generation.*
"""
# Save template
template_path = Path(project_path) / "bmad-backlog" / "research" / f"RESEARCH-{topic_slug}-findings.md"
template_path.parent.mkdir(parents=True, exist_ok=True)
    with open(template_path, 'w', encoding='utf-8') as f:
f.write(template_content)
return template_content
def extract_section(content: str, section_header: str) -> str:
"""Extract section from markdown document."""
lines = content.split('\n')
section_lines = []
in_section = False
for line in lines:
if section_header.lower() in line.lower() and line.startswith('#'):
in_section = True
continue
elif in_section and line.startswith('#') and len(line.split()) > 1:
# New section started
break
elif in_section:
section_lines.append(line)
return '\n'.join(section_lines).strip()
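# Example: extract_section("# PRD\n## Data Requirements\nDaily batch loads.\n## Security\n...", "Data Requirements")
# returns "Daily batch loads." (everything up to, but not including, the next heading).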
def main():
"""CLI interface for research prompt generation."""
if len(sys.argv) < 4:
print("Usage: research_generator.py <command> <topic> <project_path> [prd_path]", file=sys.stderr)
print("\nCommands:", file=sys.stderr)
print(" prompt <topic> <project_path> [prd_path] Generate research prompt", file=sys.stderr)
print(" template <topic> <project_path> Generate findings template", file=sys.stderr)
print("\nExamples:", file=sys.stderr)
print(' uv run research_generator.py prompt "data vendors" "$(pwd)" "bmad-backlog/prd/prd.md"', file=sys.stderr)
print(' uv run research_generator.py template "hosting platforms" "$(pwd)"', file=sys.stderr)
sys.exit(1)
command = sys.argv[1]
topic = sys.argv[2]
project_path = sys.argv[3]
prd_path = sys.argv[4] if len(sys.argv) > 4 else None
topic_slug = topic.lower().replace(' ', '-').replace('/', '-')
try:
if command == "prompt":
content = generate_research_prompt(topic, project_path, prd_path)
print(f"✅ Research prompt generated: bmad-backlog/research/RESEARCH-{topic_slug}-prompt.md")
elif command == "template":
content = generate_findings_template(topic, project_path)
print(f"✅ Findings template generated: bmad-backlog/research/RESEARCH-{topic_slug}-findings.md")
else:
print(f"Error: Unknown command: {command}", file=sys.stderr)
print("Valid commands: prompt, template", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error: {str(e)}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()