Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:26:08 +08:00
commit 8f22ddf339
295 changed files with 59710 additions and 0 deletions

View File

@@ -0,0 +1,87 @@
# story.write
Convert decomposed items from epic.decompose into fully formatted user stories. Generates individual Markdown files for each story following standard user story format.
## Overview
**Purpose:** Convert decomposed items from epic.decompose into fully formatted user stories. Generates individual Markdown files for each story following standard user story format.
**Command:** `/story/write`
## Usage
### Basic Usage
```bash
python3 skills/story/write/story_write.py
```
### With Arguments
```bash
python3 skills/story/write/story_write.py \
  --stories-file ./stories.json \
  --epic-reference "EPIC-001" \
  --output-dir ./stories/ \
  --output-format json
```
## Inputs
- **stories_file (string, required): Path to the stories.json file from epic.decompose**
- **epic_reference (string, optional): Reference to the source Epic for traceability**
- **output_dir (string, optional): Directory to save story files (default: ./stories/)**
## Outputs
- **story_<n>.md: Markdown file per story with persona, goal, benefit, acceptance criteria, and metadata**
- **stories_index.md: Summary index of all created stories**
## Artifact Metadata
### Consumes
- `user-stories-list`
### Produces
- `user-story`
## Examples
- python3 skills/story.write/story_write.py --stories-file ./stories.json --epic-reference "EPIC-001" --output-dir ./stories/
## Permissions
- `filesystem:read`
- `filesystem:write`
## Implementation Notes
Load and parse stories.json. Generate unique story ID for each item. Format as standard user story (As a/I want/So that). Convert acceptance criteria to checklist format. Add traceability links to Epic. Include metadata for tracking. Create summary index file listing all stories. Support batch processing. Validate each story against INVEST criteria.
## Integration
This skill can be used in agents by including it in `skills_available`:
```yaml
name: my.agent
skills_available:
- story.write
```
## Testing
Run tests with:
```bash
pytest skills/story/write/test_story_write.py -v
```
## Created By
This skill was generated by **meta.skill**, the skill creator meta-agent.
---
*Part of the Betty Framework*

View File

@@ -0,0 +1 @@
# Auto-generated package initializer for skills.

View File

@@ -0,0 +1,29 @@
name: story.write
version: 0.1.0
description: Convert decomposed items from epic.decompose into fully formatted user
stories. Generates individual Markdown files for each story following standard user
story format.
inputs:
- 'stories_file (string, required): Path to the stories.json file from epic.decompose'
- 'epic_reference (string, optional): Reference to the source Epic for traceability'
- 'output_dir (string, optional): Directory to save story files (default: ./stories/)'
outputs:
- 'story_<n>.md: Markdown file per story with persona, goal, benefit, acceptance criteria,
and metadata'
- 'stories_index.md: Summary index of all created stories'
status: active
permissions:
- filesystem:read
- filesystem:write
entrypoints:
- command: /story/write
handler: story_write.py
runtime: python
description: Convert decomposed items from epic.decompose into fully formatted user
  stories. Generates individual Markdown files for each story.
artifact_metadata:
produces:
- type: user-story
consumes:
- type: user-stories-list
required: true

338
skills/story.write/story_write.py Executable file
View File

@@ -0,0 +1,338 @@
#!/usr/bin/env python3
"""
story.write - Convert decomposed items from epic.decompose into fully formatted user stories. Generates individual Markdown files for each story following standard user story format.
Generated by meta.skill with Betty Framework certification
"""
import os
import sys
import json
import yaml
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime
import hashlib
from betty.config import BASE_DIR
from betty.logging_utils import setup_logger
from betty.certification import certified_skill
logger = setup_logger(__name__)
class StoryWrite:
    """
    Convert decomposed items from epic.decompose into fully formatted user stories.

    Generates one Markdown file per story (standard "As a / I want / So that"
    format with an acceptance-criteria checklist) plus a summary index file
    listing every story created.
    """

    def __init__(self, base_dir: str = BASE_DIR):
        """Initialize the skill.

        Args:
            base_dir: Framework base directory (defaults to betty.config.BASE_DIR).
        """
        self.base_dir = Path(base_dir)

    @certified_skill("story.write")
    def execute(self, stories_file: Optional[str] = None, epic_reference: Optional[str] = None,
                output_dir: Optional[str] = None) -> Dict[str, Any]:
        """
        Execute the skill.

        Args:
            stories_file: Path to the stories.json file from epic.decompose (required).
            epic_reference: Reference to the source Epic for traceability.
            output_dir: Directory to save story files (default: ./stories/).

        Returns:
            Dict with execution results. On success: ok=True, status, message,
            output_dir, created_files, story_count, artifact_type, index_file.
            On any error: ok=False, status="failed", error message.
        """
        try:
            logger.info("Executing story.write...")

            # Validate required inputs.
            if not stories_file:
                raise ValueError("stories_file is required")

            # Set defaults.
            if not output_dir:
                output_dir = "./stories/"

            # Read and validate the stories JSON file.
            stories_path = Path(stories_file)
            if not stories_path.exists():
                raise FileNotFoundError(f"Stories file not found: {stories_file}")
            with open(stories_path, 'r', encoding='utf-8') as f:
                stories = json.load(f)
            if not isinstance(stories, list):
                raise ValueError("Stories file must contain a JSON array")

            # Create output directory (idempotent).
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            # Generate one Markdown file per story.
            created_files = []
            story_index = []
            for i, story_data in enumerate(stories, start=1):
                story_id = self._generate_story_id(story_data, i)
                filename = f"story_{i:03d}.md"
                filepath = output_path / filename

                # Generate story content.
                story_content = self._generate_story_markdown(
                    story_data,
                    story_id,
                    i,
                    epic_reference
                )
                # Write story file.
                filepath.write_text(story_content, encoding='utf-8')
                created_files.append(str(filepath))

                # Track metadata for the summary index.
                story_index.append({
                    "id": story_id,
                    "number": i,
                    "title": story_data.get("title", f"Story {i}"),
                    "file": filename
                })
                # Log the actual file written (original logged a literal
                # "(unknown)" placeholder instead of the filename).
                logger.info(f"Created story {i}: {filename}")

            # Create index file summarizing all stories.
            index_file = output_path / "stories_index.md"
            index_content = self._generate_index(story_index, epic_reference)
            index_file.write_text(index_content, encoding='utf-8')
            created_files.append(str(index_file))

            logger.info(f"Created {len(stories)} story files in {output_dir}")
            result = {
                "ok": True,
                "status": "success",
                "message": f"Generated {len(stories)} user story documents",
                "output_dir": str(output_dir),
                "created_files": created_files,
                "story_count": len(stories),
                "artifact_type": "user-story",
                "index_file": str(index_file)
            }
            logger.info("Skill completed successfully")
            return result
        except Exception as e:
            # Report failure as a structured result instead of raising, so
            # callers always receive a dict with an "ok" flag.
            logger.error(f"Error executing skill: {e}")
            return {
                "ok": False,
                "status": "failed",
                "error": str(e)
            }

    def _generate_story_id(self, story_data: Dict[str, Any], number: int) -> str:
        """
        Generate a unique story ID.

        Args:
            story_data: Story data dictionary.
            number: 1-based story number.

        Returns:
            Story ID string of the form STORY-NNN-XXXXXXXX.
        """
        # ID = zero-padded number + first 8 hex chars of the title's MD5.
        # MD5 is used only as a cheap non-cryptographic fingerprint here.
        title = story_data.get("title", f"Story {number}")
        title_hash = hashlib.md5(title.encode()).hexdigest()[:8]
        return f"STORY-{number:03d}-{title_hash.upper()}"

    def _generate_story_markdown(self, story_data: Dict[str, Any], story_id: str,
                                 number: int, epic_reference: Optional[str]) -> str:
        """
        Generate markdown content for a single user story.

        Args:
            story_data: Story data dictionary (title, persona, goal, benefit,
                acceptance_criteria keys; all optional with defaults).
            story_id: Unique story ID.
            number: 1-based story number.
            epic_reference: Reference to the source Epic, or None.

        Returns:
            Formatted markdown content.
        """
        title = story_data.get("title", f"Story {number}")
        persona = story_data.get("persona", "As a User")
        goal = story_data.get("goal", "achieve a goal")
        benefit = story_data.get("benefit", "So that value is delivered")
        acceptance_criteria = story_data.get("acceptance_criteria", [])

        # Format acceptance criteria as a GitHub-style checklist.
        ac_list = '\n'.join(f"- [ ] {ac}" for ac in acceptance_criteria)

        # Build markdown content.
        markdown = f"""# User Story: {title}
## Story ID
{story_id}
## User Story
{persona}
I want to {goal}
{benefit}
## Acceptance Criteria
{ac_list}
"""
        # Add Epic reference if provided.
        if epic_reference:
            markdown += f"""
## Linked Epic
{epic_reference}
"""
        # Add metadata section.
        markdown += f"""
## Metadata
- **Story ID**: {story_id}
- **Story Number**: {number}
- **Created**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Status**: Draft
- **Priority**: TBD
- **Estimate**: TBD
## INVEST Criteria Check
- [ ] **Independent**: Can this story be developed independently?
- [ ] **Negotiable**: Is there room for discussion on implementation?
- [ ] **Valuable**: Does this deliver value to users/stakeholders?
- [ ] **Estimable**: Can the team estimate the effort required?
- [ ] **Small**: Is this small enough to fit in a sprint?
- [ ] **Testable**: Can we define clear tests for this story?
## Notes
_Add implementation notes, technical considerations, or dependencies here._
---
**Generated by**: Betty Framework - epic-to-story skill chain
**Artifact Type**: user-story
**Version**: 1.0
"""
        return markdown

    def _generate_index(self, story_index: List[Dict[str, Any]],
                        epic_reference: Optional[str]) -> str:
        """
        Generate the index/summary file for all stories.

        Args:
            story_index: List of story metadata dicts (id, number, title, file).
            epic_reference: Reference to the source Epic, or None.

        Returns:
            Index markdown content.
        """
        markdown = f"""# User Stories Index
**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
**Total Stories**: {len(story_index)}
"""
        if epic_reference:
            markdown += f"**Source Epic**: {epic_reference}\n"
        markdown += """
## Story List
"""
        # Add a markdown table of stories with relative links to each file.
        markdown += "| # | Story ID | Title | File |\n"
        markdown += "|---|----------|-------|------|\n"
        for story in story_index:
            markdown += f"| {story['number']} | {story['id']} | {story['title']} | [{story['file']}](./{story['file']}) |\n"
        markdown += """
## Progress Tracking
- [ ] All stories reviewed and refined
- [ ] Story priorities assigned
- [ ] Story estimates completed
- [ ] Stories added to backlog
- [ ] Sprint planning completed
## Notes
This index was automatically generated from the Epic decomposition process. Review each story to ensure it meets INVEST criteria and refine as needed before adding to the sprint backlog.
---
**Generated by**: Betty Framework - epic-to-story skill chain
"""
        return markdown
def main():
    """CLI entry point: parse arguments, run the skill, print the result."""
    import argparse

    cli = argparse.ArgumentParser(
        description="Convert decomposed items from epic.decompose into fully formatted user stories. Generates individual Markdown files for each story following standard user story format."
    )
    cli.add_argument(
        "--stories-file",
        required=True,
        help="Path to the stories.json file from epic.decompose"
    )
    cli.add_argument(
        "--epic-reference",
        help="Reference to the source Epic for traceability"
    )
    cli.add_argument(
        "--output-dir",
        default="./stories/",
        help="Directory to save story files (default: ./stories/)"
    )
    cli.add_argument(
        "--output-format",
        choices=["json", "yaml"],
        default="json",
        help="Output format"
    )
    opts = cli.parse_args()

    # Instantiate and run the skill with the parsed CLI options.
    outcome = StoryWrite().execute(
        stories_file=opts.stories_file,
        epic_reference=opts.epic_reference,
        output_dir=opts.output_dir,
    )

    # Serialize the structured result in the requested format.
    if opts.output_format == "yaml":
        print(yaml.dump(outcome, default_flow_style=False))
    else:
        print(json.dumps(outcome, indent=2))

    # Non-zero exit code signals failure to shells and CI pipelines.
    sys.exit(0 if outcome.get("ok") else 1)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""
Tests for story.write
Generated by meta.skill
"""
import pytest
import sys
import os
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
from skills.story_write import story_write
class TestStoryWrite:
    """Tests for StoryWrite."""

    def setup_method(self):
        """Set up fixtures: a skill instance and a temporary stories.json.

        The original suite called execute() with no arguments and expected
        success, but stories_file is required — so a real fixture file is
        created here for the success-path test.
        """
        import json
        import tempfile

        self.skill = story_write.StoryWrite()
        self.tmp_dir = tempfile.mkdtemp()
        self.stories_file = os.path.join(self.tmp_dir, "stories.json")
        with open(self.stories_file, "w") as f:
            json.dump([
                {
                    "title": "Sample story",
                    "persona": "As a tester",
                    "goal": "run the skill end to end",
                    "benefit": "So that the skill is verified",
                    "acceptance_criteria": ["Story file is created"],
                }
            ], f)

    def teardown_method(self):
        """Remove temporary fixture files."""
        import shutil
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def test_initialization(self):
        """Test skill initializes correctly."""
        assert self.skill is not None
        assert self.skill.base_dir is not None

    def test_execute_basic(self):
        """Calling execute() without required inputs returns a structured failure."""
        result = self.skill.execute()
        assert result is not None
        assert "ok" in result
        assert "status" in result
        # stories_file is required, so this must report failure, not success.
        assert result["ok"] is False

    def test_execute_success(self):
        """Test successful execution against the fixture stories file."""
        out_dir = os.path.join(self.tmp_dir, "stories")
        result = self.skill.execute(stories_file=self.stories_file, output_dir=out_dir)
        assert result["ok"] is True
        assert result["status"] == "success"
        assert result["story_count"] == 1
        # One story file plus the generated index.
        assert len(result["created_files"]) == 2
def test_cli_help(capsys):
    """CLI --help should exit with code 0 and print the skill description."""
    argv_backup = sys.argv
    try:
        sys.argv = ["story_write.py", "--help"]
        with pytest.raises(SystemExit) as exc_info:
            story_write.main()
    finally:
        # Restore argv so this test does not leak state into other tests.
        sys.argv = argv_backup
    assert exc_info.value.code == 0
    captured = capsys.readouterr()
    # argparse wraps long help text at the terminal width, so a long substring
    # can be split by a line break; assert only a short unbreakable prefix.
    assert "Convert decomposed items" in captured.out


if __name__ == "__main__":
    pytest.main([__file__, "-v"])