gh-epieczko-betty/skills/story.write/story_write.py
#!/usr/bin/env python3
"""
story.write - Convert decomposed items from epic.decompose into fully formatted user stories.
Generates individual Markdown files for each story following standard user story format.

Generated by meta.skill with Betty Framework certification
"""
import sys
import json
import yaml
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime
import hashlib

from betty.config import BASE_DIR
from betty.logging_utils import setup_logger
from betty.certification import certified_skill

logger = setup_logger(__name__)
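
# Expected shape of the --stories-file input (illustrative sketch; only the keys
# read below -- title, persona, goal, benefit, acceptance_criteria -- are used,
# and the example values are hypothetical):
#
#   [
#     {
#       "title": "Reset password via email",
#       "persona": "As a registered user",
#       "goal": "reset my password from the login page",
#       "benefit": "So that I can regain access without contacting support",
#       "acceptance_criteria": [
#         "A reset link is emailed within 5 minutes",
#         "The link expires after 24 hours"
#       ]
#     }
#   ]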


class StoryWrite:
    """
    Convert decomposed items from epic.decompose into fully formatted user stories.
    Generates individual Markdown files for each story following standard user story format.
    """

    def __init__(self, base_dir: str = BASE_DIR):
        """Initialize skill"""
        self.base_dir = Path(base_dir)

    @certified_skill("story.write")
    def execute(self, stories_file: Optional[str] = None, epic_reference: Optional[str] = None,
                output_dir: Optional[str] = None) -> Dict[str, Any]:
        """
        Execute the skill

        Args:
            stories_file: Path to the stories.json file from epic.decompose
            epic_reference: Reference to the source Epic for traceability
            output_dir: Directory to save story files (default: ./stories/)

        Returns:
            Dict with execution results
        """
        try:
            logger.info("Executing story.write...")

            # Validate required inputs
            if not stories_file:
                raise ValueError("stories_file is required")

            # Set defaults
            if not output_dir:
                output_dir = "./stories/"

            # Read stories JSON file
            stories_path = Path(stories_file)
            if not stories_path.exists():
                raise FileNotFoundError(f"Stories file not found: {stories_file}")
            with open(stories_path, 'r', encoding="utf-8") as f:
                stories = json.load(f)
            if not isinstance(stories, list):
                raise ValueError("Stories file must contain a JSON array")

            # Create output directory
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            # Generate story files
            created_files = []
            story_index = []
            for i, story_data in enumerate(stories, start=1):
                story_id = self._generate_story_id(story_data, i)
                filename = f"story_{i:03d}.md"
                filepath = output_path / filename

                # Generate story content
                story_content = self._generate_story_markdown(
                    story_data,
                    story_id,
                    i,
                    epic_reference
                )

                # Write story file
                filepath.write_text(story_content, encoding="utf-8")
                created_files.append(str(filepath))

                # Add to index
                story_index.append({
                    "id": story_id,
                    "number": i,
                    "title": story_data.get("title", f"Story {i}"),
                    "file": filename
                })
                logger.info(f"Created story {i}: {filename}")

            # Create index file
            index_file = output_path / "stories_index.md"
            index_content = self._generate_index(story_index, epic_reference)
            index_file.write_text(index_content, encoding="utf-8")
            created_files.append(str(index_file))

            logger.info(f"Created {len(stories)} story files in {output_dir}")

            result = {
                "ok": True,
                "status": "success",
                "message": f"Generated {len(stories)} user story documents",
                "output_dir": str(output_dir),
                "created_files": created_files,
                "story_count": len(stories),
                "artifact_type": "user-story",
                "index_file": str(index_file)
            }
            logger.info("Skill completed successfully")
            return result

        except Exception as e:
            logger.error(f"Error executing skill: {e}")
            return {
                "ok": False,
                "status": "failed",
                "error": str(e)
            }
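    # Illustrative programmatic use (paths and the epic reference are hypothetical):
    #
    #   result = StoryWrite().execute(
    #       stories_file="artifacts/stories.json",
    #       epic_reference="EPIC-042",
    #       output_dir="./stories/",
    #   )
    #
    # On success, result["ok"] is True and result["created_files"] lists the
    # generated story files plus stories_index.md.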

    def _generate_story_id(self, story_data: Dict[str, Any], number: int) -> str:
        """
        Generate unique story ID

        Args:
            story_data: Story data dictionary
            number: Story number

        Returns:
            Story ID string
        """
        # Generate ID from story title hash + number
        title = story_data.get("title", f"Story {number}")
        title_hash = hashlib.md5(title.encode()).hexdigest()[:8]
        return f"STORY-{number:03d}-{title_hash.upper()}"
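    # Example of the resulting ID format (the hash portion is illustrative):
    #   STORY-001-9F86D081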

    def _generate_story_markdown(self, story_data: Dict[str, Any], story_id: str,
                                 number: int, epic_reference: Optional[str]) -> str:
        """
        Generate markdown content for a user story

        Args:
            story_data: Story data dictionary
            story_id: Unique story ID
            number: Story number
            epic_reference: Reference to source Epic

        Returns:
            Formatted markdown content
        """
        title = story_data.get("title", f"Story {number}")
        persona = story_data.get("persona", "As a User")
        goal = story_data.get("goal", "achieve a goal")
        benefit = story_data.get("benefit", "So that value is delivered")
        acceptance_criteria = story_data.get("acceptance_criteria", [])

        # Format acceptance criteria as checklist
        ac_list = '\n'.join(f"- [ ] {ac}" for ac in acceptance_criteria)

        # Build markdown content
        markdown = f"""# User Story: {title}

## Story ID
{story_id}

## User Story
{persona}
I want to {goal}
{benefit}

## Acceptance Criteria
{ac_list}
"""

        # Add Epic reference if provided
        if epic_reference:
            markdown += f"""
## Linked Epic
{epic_reference}
"""

        # Add metadata section
        markdown += f"""
## Metadata
- **Story ID**: {story_id}
- **Story Number**: {number}
- **Created**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Status**: Draft
- **Priority**: TBD
- **Estimate**: TBD

## INVEST Criteria Check
- [ ] **Independent**: Can this story be developed independently?
- [ ] **Negotiable**: Is there room for discussion on implementation?
- [ ] **Valuable**: Does this deliver value to users/stakeholders?
- [ ] **Estimable**: Can the team estimate the effort required?
- [ ] **Small**: Is this small enough to fit in a sprint?
- [ ] **Testable**: Can we define clear tests for this story?

## Notes
_Add implementation notes, technical considerations, or dependencies here._

---
**Generated by**: Betty Framework - epic-to-story skill chain
**Artifact Type**: user-story
**Version**: 1.0
"""
        return markdown
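    # Sketch of the top of a generated story file (all values are hypothetical):
    #
    #   # User Story: Reset password via email
    #
    #   ## Story ID
    #   STORY-001-9F86D081
    #
    #   ## User Story
    #   As a registered user
    #   I want to reset my password from the login page
    #   So that I can regain access without contacting support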

    def _generate_index(self, story_index: List[Dict[str, Any]],
                        epic_reference: Optional[str]) -> str:
        """
        Generate index/summary file for all stories

        Args:
            story_index: List of story metadata
            epic_reference: Reference to source Epic

        Returns:
            Index markdown content
        """
        markdown = f"""# User Stories Index

**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
**Total Stories**: {len(story_index)}
"""

        if epic_reference:
            markdown += f"**Source Epic**: {epic_reference}\n"

        markdown += """
## Story List

"""

        # Add table of stories
        markdown += "| # | Story ID | Title | File |\n"
        markdown += "|---|----------|-------|------|\n"
        for story in story_index:
            markdown += f"| {story['number']} | {story['id']} | {story['title']} | [{story['file']}](./{story['file']}) |\n"

        markdown += """
## Progress Tracking
- [ ] All stories reviewed and refined
- [ ] Story priorities assigned
- [ ] Story estimates completed
- [ ] Stories added to backlog
- [ ] Sprint planning completed

## Notes
This index was automatically generated from the Epic decomposition process. Review each story to ensure it meets INVEST criteria and refine as needed before adding to the sprint backlog.

---
**Generated by**: Betty Framework - epic-to-story skill chain
"""
        return markdown
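    # Illustrative row from the generated stories_index.md table (values are hypothetical):
    #   | 1 | STORY-001-9F86D081 | Reset password via email | [story_001.md](./story_001.md) |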


def main():
    """CLI entry point"""
    import argparse

    parser = argparse.ArgumentParser(
        description="Convert decomposed items from epic.decompose into fully formatted user stories. Generates individual Markdown files for each story following standard user story format."
    )
    parser.add_argument(
        "--stories-file",
        required=True,
        help="Path to the stories.json file from epic.decompose"
    )
    parser.add_argument(
        "--epic-reference",
        help="Reference to the source Epic for traceability"
    )
    parser.add_argument(
        "--output-dir",
        default="./stories/",
        help="Directory to save story files (default: ./stories/)"
    )
    parser.add_argument(
        "--output-format",
        choices=["json", "yaml"],
        default="json",
        help="Output format"
    )
    args = parser.parse_args()

    # Create skill instance
    skill = StoryWrite()

    # Execute skill
    result = skill.execute(
        stories_file=args.stories_file,
        epic_reference=args.epic_reference,
        output_dir=args.output_dir,
    )

    # Output result
    if args.output_format == "json":
        print(json.dumps(result, indent=2))
    else:
        print(yaml.dump(result, default_flow_style=False))

    # Exit with appropriate code
    sys.exit(0 if result.get("ok") else 1)


if __name__ == "__main__":
    main()
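
# Example invocation (paths and the epic reference are hypothetical):
#   python story_write.py --stories-file ./artifacts/stories.json \
#       --epic-reference EPIC-042 --output-dir ./stories/ --output-format json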