From 9c0b92f0259ddeb79d572128519f0d31a1bc7043 Mon Sep 17 00:00:00 2001 From: Zhongwei Li Date: Sat, 29 Nov 2025 18:01:30 +0800 Subject: [PATCH] Initial commit --- .claude-plugin/plugin.json | 20 + README.md | 3 + agent-integrator/.gitkeep | 0 agent-integrator/SKILL.md | 150 ++ .../scripts/update-agents-file.sh | 78 + change-integrator/.gitkeep | 0 change-integrator/SKILL.md | 271 +++ change-integrator/scripts/integrate-change.sh | 150 ++ .../scripts/summarize_retrospective_llm.py | 89 + doc-indexer/.gitkeep | 0 doc-indexer/SKILL.md | 185 ++ doc-indexer/scripts/scan-docs.sh | 80 + issue-executor/SKILL.md | 123 + issue-executor/references/work-on-issue.md | 609 +++++ issue-executor/scripts/work-on-issue.sh | 108 + plugin.lock.json | 185 ++ prd-authoring/.gitkeep | 0 prd-authoring/SKILL.md | 210 ++ .../examples/01-product-brief-example.md | 127 + prd-authoring/examples/02-research-example.md | 315 +++ .../examples/03-prd-example-abbreviated.md | 419 ++++ .../examples/IMPLEMENTATION_SUMMARY.md | 301 +++ prd-authoring/examples/QUICK_START.md | 260 ++ prd-authoring/examples/README.md | 165 ++ prd-authoring/examples/workflow-test-log.md | 428 ++++ prd-authoring/scripts/prd-authoring.sh | 1731 ++++++++++++++ prd-authoring/scripts/validate_prd_llm.py | 117 + project-init/.gitkeep | 0 project-init/SKILL.md | 216 ++ project-init/scripts/init-project.sh | 70 + project-migrate/SKILL.md | 105 + project-migrate/scripts/correct_links_llm.py | 309 +++ project-migrate/scripts/project-migrate.sh | 2091 +++++++++++++++++ spec-authoring/.gitkeep | 0 spec-authoring/SKILL.md | 125 + spec-authoring/scripts/spec-authoring.sh | 180 ++ sprint-planner/.gitkeep | 0 sprint-planner/SKILL.md | 104 + .../scripts/create-sprint-issues.sh | 188 ++ 39 files changed, 9512 insertions(+) create mode 100644 .claude-plugin/plugin.json create mode 100644 README.md create mode 100644 agent-integrator/.gitkeep create mode 100644 agent-integrator/SKILL.md create mode 100755 agent-integrator/scripts/update-agents-file.sh create mode 100644 change-integrator/.gitkeep create mode 100644 change-integrator/SKILL.md create mode 100755 change-integrator/scripts/integrate-change.sh create mode 100755 change-integrator/scripts/summarize_retrospective_llm.py create mode 100644 doc-indexer/.gitkeep create mode 100644 doc-indexer/SKILL.md create mode 100755 doc-indexer/scripts/scan-docs.sh create mode 100644 issue-executor/SKILL.md create mode 100644 issue-executor/references/work-on-issue.md create mode 100755 issue-executor/scripts/work-on-issue.sh create mode 100644 plugin.lock.json create mode 100644 prd-authoring/.gitkeep create mode 100644 prd-authoring/SKILL.md create mode 100644 prd-authoring/examples/01-product-brief-example.md create mode 100644 prd-authoring/examples/02-research-example.md create mode 100644 prd-authoring/examples/03-prd-example-abbreviated.md create mode 100644 prd-authoring/examples/IMPLEMENTATION_SUMMARY.md create mode 100644 prd-authoring/examples/QUICK_START.md create mode 100644 prd-authoring/examples/README.md create mode 100644 prd-authoring/examples/workflow-test-log.md create mode 100755 prd-authoring/scripts/prd-authoring.sh create mode 100755 prd-authoring/scripts/validate_prd_llm.py create mode 100644 project-init/.gitkeep create mode 100644 project-init/SKILL.md create mode 100755 project-init/scripts/init-project.sh create mode 100644 project-migrate/SKILL.md create mode 100755 project-migrate/scripts/correct_links_llm.py create mode 100755 project-migrate/scripts/project-migrate.sh create mode 100644 
spec-authoring/.gitkeep create mode 100644 spec-authoring/SKILL.md create mode 100755 spec-authoring/scripts/spec-authoring.sh create mode 100644 sprint-planner/.gitkeep create mode 100644 sprint-planner/SKILL.md create mode 100755 sprint-planner/scripts/create-sprint-issues.sh diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..1fe3a5c --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,20 @@ +{ + "name": "synthesisflow-skills", + "description": "SynthesisFlow: Modular skills for spec-driven development with hybrid LLM-guided + helper-script architecture", + "version": "0.0.0-2025.11.28", + "author": { + "name": "Daniel Bo", + "email": "bodangren@gmail.com" + }, + "skills": [ + "./project-init", + "./project-migrate", + "./prd-authoring", + "./doc-indexer", + "./spec-authoring", + "./sprint-planner", + "./issue-executor", + "./change-integrator", + "./agent-integrator" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..f786b1b --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# synthesisflow-skills + +SynthesisFlow: Modular skills for spec-driven development with hybrid LLM-guided + helper-script architecture diff --git a/agent-integrator/.gitkeep b/agent-integrator/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/agent-integrator/SKILL.md b/agent-integrator/SKILL.md new file mode 100644 index 0000000..abad81f --- /dev/null +++ b/agent-integrator/SKILL.md @@ -0,0 +1,150 @@ +--- +name: agent-integrator +description: Use this skill to create or update the root AGENTS.md file to register SynthesisFlow skills for AI agent discovery. Triggers include "register SynthesisFlow", "update AGENTS.md", "setup agent guide", or initializing a new project. +--- + +# Agent Integrator Skill + +## Purpose + +Idempotently create or update the AGENTS.md file in a project to register SynthesisFlow skills for discovery by AI agents. This skill ensures that any compatible AI agent working in the repository can discover and use the SynthesisFlow methodology and available skills. 
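+
+The registration is marker-based: the helper script owns only the region of AGENTS.md between an HTML-comment start marker and end marker, and rewrites nothing else. To inspect the managed region, a range match over those markers works (a minimal sketch; the marker names follow the helper script shown later in this skill):
+
+```bash
+# Print only the SynthesisFlow-managed block of AGENTS.md
+awk '/<!-- SYNTHESIS_FLOW_START -->/,/<!-- SYNTHESIS_FLOW_END -->/' AGENTS.md
+```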
+ +## When to Use + +Use this skill in the following situations: + +- After running project-init to initialize SynthesisFlow in a new project +- When installing SynthesisFlow skills in an existing project +- After adding new skills to the `.claude/skills/` directory +- Updating the agent guide with new workflow information +- Ensuring AI agents can discover available SynthesisFlow capabilities + +## Prerequisites + +- Project has `.claude/skills/` directory with SynthesisFlow skills installed +- Write permissions to project root directory +- Optional: Existing AGENTS.md file (script creates if missing) + +## AGENTS.md Purpose + +The AGENTS.md file serves as a discovery mechanism for AI agents: + +- **Agent Discovery**: AI agents read this file to learn about available workflows +- **Methodology Documentation**: Explains SynthesisFlow philosophy and core principles +- **Skill Catalog**: Lists all available skills and their purposes +- **Getting Started**: Provides entry point for new agents working in the project + +## Workflow + +### Step 1: Determine If Update Is Needed + +Check if AGENTS.md needs to be created or updated: + +```bash +# Check if file exists +ls -la AGENTS.md + +# Check if SynthesisFlow section exists +grep "SYNTHESIS_FLOW" AGENTS.md +``` + +### Step 2: Run the Helper Script + +Execute the script to update AGENTS.md: + +```bash +# Use default location (AGENTS.md in project root) +bash scripts/update-agents-file.sh + +# Or specify custom location +bash scripts/update-agents-file.sh -f path/to/custom-agents.md +``` + +### Step 3: Understand What the Script Does + +The helper script uses an idempotent update strategy: + +1. **Creates file if missing**: + - Uses `touch` to ensure target file exists + - Safe to run even if file doesn't exist yet + +2. **Checks for existing content**: + - Looks for the `<!-- SYNTHESIS_FLOW_START -->` marker + - Determines if this is an update or initial creation + +3. **Updates existing content**: + - If markers found, replaces content between markers + - Preserves any other content in the file + - Uses awk to safely replace marked section + +4. **Adds new content**: + - If markers not found, appends SynthesisFlow guide to end of file + - Adds both start and end markers for future updates + +5.
**Preserves other content**: + - Only modifies content between markers + - Safe to run multiple times (idempotent) + - Won't overwrite other project documentation + +### Step 4: Verify the Update + +Check that AGENTS.md was updated correctly: + +```bash +# View the file +cat AGENTS.md + +# Verify markers are present +grep -A 5 "SYNTHESIS_FLOW_START" AGENTS.md +``` + +### Step 5: Commit the Changes + +If the update looks correct, commit to the repository: + +```bash +git add AGENTS.md +git commit -m "docs: Update AGENTS.md with SynthesisFlow guide" +git push +``` + +## Error Handling + +### Permission Denied + +**Symptom**: Script cannot write to AGENTS.md + +**Solution**: +- Check file permissions: `ls -la AGENTS.md` +- Ensure you have write access to project root +- Run with appropriate permissions + +### Marker Corruption + +**Symptom**: Content between markers is malformed + +**Solution**: +- Manually edit AGENTS.md to fix markers +- Ensure both `<!-- SYNTHESIS_FLOW_START -->` and `<!-- SYNTHESIS_FLOW_END -->` are present +- Re-run script to regenerate content + +### Custom File Path Issues + +**Symptom**: Script creates file in wrong location + +**Solution**: +- Use `-f` flag with full path: `bash scripts/update-agents-file.sh -f /full/path/to/file.md` +- Verify path exists: `mkdir -p /path/to/directory` +- Check current working directory + +## Notes + +- **Idempotent design**: Safe to run multiple times without side effects +- **Preserves other content**: Only updates content between markers +- **Marker-based**: Uses HTML comments as markers (invisible in rendered markdown) +- **Default location**: AGENTS.md in project root (standard convention) +- **Custom locations**: Use `-f` flag for alternative file paths +- **Run after setup**: Typically run once after project-init, then rarely +- **Update when skills change**: Re-run if new skills are added or removed +- **AI agent discovery**: Helps agents understand available SynthesisFlow capabilities +- **Version control**: Commit AGENTS.md so all contributors see the guide diff --git a/agent-integrator/scripts/update-agents-file.sh b/agent-integrator/scripts/update-agents-file.sh new file mode 100755 index 0000000..02ab4c7 --- /dev/null +++ b/agent-integrator/scripts/update-agents-file.sh @@ -0,0 +1,78 @@ +#!/bin/bash +# This script idempotently creates or updates a SynthesisFlow agent guide in a markdown file. + +set -e + +usage() { + echo "Usage: $0 [-f <file>]" + echo " -f <file>: The path to the markdown file to update. Defaults to AGENTS.md in the project root." + exit 1 +} + +TARGET_FILE="AGENTS.md" + +while getopts ":f:" opt; do + case ${opt} in + f ) + TARGET_FILE=$OPTARG + ;; + \? ) + echo "Invalid option: $OPTARG" 1>&2 + usage + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + usage + ;; + esac +done + +# Define the content block to be inserted/updated +# (quoted delimiter keeps backticks literal; read -d '' returns non-zero at end of input, hence || true) +read -r -d '' AGENT_CONTENT << 'EOM' || true +<!-- SYNTHESIS_FLOW_START --> +# SynthesisFlow Agent Guide + +This project uses SynthesisFlow, a modular, spec-driven development methodology. The workflow is broken down into several discrete skills located in the `.claude/skills/` directory. + +## Core Philosophy +1. **Specs as Code:** All specification changes are proposed and approved via Pull Requests. +2. **Just-in-Time Context:** Use the `doc-indexer` skill to get a real-time map of all project documentation. +3. **Sprint-Based:** Work is organized into GitHub Milestones and planned via the `sprint-planner` skill. +4. **Atomic Issues:** Implementation is done via atomic GitHub Issues, which are executed by the `issue-executor` skill.
+ +## Available Skillsets +- **`.claude/skills/skill-lister/`**: For listing all available skills and their descriptions. +- **`.claude/skills/project-init/`**: For initial project scaffolding. +- **`.claude/skills/doc-indexer/`**: For real-time documentation discovery. +- **`.claude/skills/spec-authoring/`**: For proposing and refining new specifications. +- **`.claude/skills/sprint-planner/`**: For creating GitHub issues from approved specs. +- **`.claude/skills/issue-executor/`**: For implementing code for a single issue. +- **`.claude/skills/change-integrator/`**: For finalizing and archiving a completed change. +- **`.claude/skills/agent-integrator/`**: For creating or updating this guide in `AGENTS.md`. + +## Getting Started + +To begin, always assess the current state by checking the git branch and running the `doc-indexer`. +1. Run `skill-lister` to see the list of available tools and their descriptions. +<!-- SYNTHESIS_FLOW_END --> +EOM + +# Ensure the target file exists +touch "$TARGET_FILE" + +# Check if the markers exist in the file +if grep -q "<!-- SYNTHESIS_FLOW_START -->" "$TARGET_FILE"; then + echo "Updating existing SynthesisFlow guide in $TARGET_FILE..." + # Use awk to replace the content between the markers + awk -v content="$AGENT_CONTENT" ' + /<!-- SYNTHESIS_FLOW_START -->/ { print content; in_block=1 } + /<!-- SYNTHESIS_FLOW_END -->/ { in_block=0; next } + !in_block { print } + ' "$TARGET_FILE" > "${TARGET_FILE}.tmp" && mv "${TARGET_FILE}.tmp" "$TARGET_FILE" +else + echo "Adding SynthesisFlow guide to $TARGET_FILE..." + # Append the content to the end of the file + echo -e "\n$AGENT_CONTENT" >> "$TARGET_FILE" +fi + +echo "$TARGET_FILE has been updated successfully." \ No newline at end of file diff --git a/change-integrator/.gitkeep b/change-integrator/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/change-integrator/SKILL.md b/change-integrator/SKILL.md new file mode 100644 index 0000000..2985ead --- /dev/null +++ b/change-integrator/SKILL.md @@ -0,0 +1,271 @@ +--- +name: change-integrator +description: Use this skill after a code PR is merged to integrate approved specs, update the retrospective with learnings, and clean up branches. It now automatically summarizes the retrospective file to keep it concise. Triggers include "integrate change", "post-merge cleanup", or completing a feature implementation. +--- + +# Change Integrator Skill + +## Purpose + +Perform post-merge integration tasks after a code PR is successfully merged. This skill completes the development cycle by moving approved specs from `docs/changes/` to `docs/specs/`, updating the retrospective with learnings, cleaning up feature branches, and updating project board status. It ensures the repository remains clean and the documentation reflects the current state. + +A key feature of this skill is the **automated maintenance of `RETROSPECTIVE.md`**. When the file grows too large, the script automatically uses the Gemini CLI to summarize older entries, keeping the document concise and readable while preserving key historical learnings.
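+
+The summarization trigger is a plain line-count check, roughly equivalent to the sketch below (the 150-line threshold mirrors the `RETROSPECTIVE_MAX_LINES` value in the helper script):
+
+```bash
+# Decide whether older retrospective entries need to be summarized
+if [ "$(wc -l < RETROSPECTIVE.md)" -gt 150 ]; then
+  echo "RETROSPECTIVE.md exceeds 150 lines; older sprint entries will be summarized via the gemini CLI."
+fi
+```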
+ +## When to Use + +Use this skill in the following situations: + +- After a code PR is merged to main +- Completing a feature that had a spec proposal +- Finalizing a task and cleaning up branches +- Updating retrospective with completed work +- Moving approved specs to source-of-truth location + +## Prerequisites + +- Code PR has been merged to main branch +- Feature branch name is known +- PR number is known +- Project board item ID is known (if using project boards) +- `gh` CLI tool installed and authenticated +- `gemini` CLI tool installed and authenticated +- Currently on main branch with latest changes + +## Workflow + +### Step 1: Verify PR is Merged + +Before running integration, confirm the PR was successfully merged: + +```bash +gh pr view PR_NUMBER --json state,mergedAt +``` + +Ensure the state is "MERGED" and mergedAt timestamp is populated. + +### Step 2: Identify Integration Needs + +Determine what needs to be integrated: +- **Spec files**: Was this a feature with a spec proposal in `docs/changes/`? +- **Branch cleanup**: What is the feature branch name? +- **Project board**: What is the item ID to mark as done? +- **Retrospective**: What were the key learnings from this task? + +### Step 3: Run the Helper Script (Optional) + +If using the automated script for integration: + +```bash +bash scripts/integrate-change.sh -p PR_NUMBER -b BRANCH_NAME -i ITEM_ID -w "WENT_WELL" -l "LESSON" [-c CHANGE_DIR] +``` + +**Parameters**: +- `-p`: PR number that was merged +- `-b`: Feature branch name (e.g., `feat/45-restructure-doc-indexer`) +- `-i`: Project board item ID +- `-w`: A quote about what went well. +- `-l`: A quote about the key lesson learned. +- `-c`: Optional path to change proposal directory (e.g., `docs/changes/my-feature`) + +### Step 4: Understand What the Script Does + +The helper script automates these steps: + +1. **Verifies PR is merged**: Queries GitHub API and aborts if PR is not in MERGED state. +2. **Switches to main and pulls**: Ensures work is on the latest main branch. +3. **Deletes feature branch**: Removes both remote and local branches. +4. **Integrates spec files (if applicable)**: Moves `spec-delta.md` to `docs/specs/` and commits the change. +5. **Updates project board**: Sets the task status to "Done". +6. **Updates retrospective**: Appends a new entry. If `RETROSPECTIVE.md` exceeds a line limit (e.g., 150 lines), it **automatically summarizes older sprint entries** using the Gemini CLI to keep the file manageable. +7. **Pushes all changes**: Pushes integration commits to main. + +### Step 5: Manual Integration (Alternative) + +If not using the script, perform these steps manually: + +#### 5a. Switch to Main and Update + +```bash +git switch main +git pull +``` + +#### 5b. Delete Feature Branch + +```bash +# Delete remote branch +git push origin --delete feat/45-restructure-doc-indexer + +# Delete local branch +git branch -D feat/45-restructure-doc-indexer +``` + +#### 5c. Integrate Spec Files (If Applicable) + +If the feature had a spec proposal: + +```bash +# Identify the change directory +ls docs/changes/ + +# Move spec-delta to specs +SPEC_NAME="my-feature" +cp docs/changes/$SPEC_NAME/spec-delta.md docs/specs/$SPEC_NAME.md + +# Remove change directory +rm -r docs/changes/$SPEC_NAME + +# Commit integration +git add docs/ +git commit -m "docs: Integrate approved spec from feat/45-my-feature" +``` + +#### 5d.
Update Project Board + +```bash +gh project item-edit \ + --project-id PROJECT_ID \ + --id ITEM_ID \ + --field-id FIELD_ID \ + --single-select-option-id DONE_OPTION_ID +``` + +#### 5e. Update Retrospective + +Add learnings to RETROSPECTIVE.md following the established format. See the retrospective for examples. + +```bash +# Edit RETROSPECTIVE.md to add entry +git add RETROSPECTIVE.md +git commit -m "docs: Add retrospective for PR #45" +``` + +#### 5f. Push Changes + +```bash +git push +``` + +### Step 6: Verify Integration + +After integration completes: + +```bash +# Verify branch deleted +git branch -a | grep feat/45 + +# Verify spec integrated (if applicable) +ls docs/specs/ + +# Verify retrospective updated +tail -20 RETROSPECTIVE.md + +# Verify project board updated +gh project item-list PROJECT_NUMBER --owner @me +``` + +## Automated Retrospective Summarization + +To prevent `RETROSPECTIVE.md` from becoming unmanageably long, the `integrate-change.sh` script includes an automated summarization feature powered by the Gemini CLI. + +**How it works:** +1. **Threshold Check**: Before adding a new entry, the script checks the line count of `RETROSPECTIVE.md`. +2. **Trigger**: If the line count exceeds a defined threshold (e.g., 150 lines), the summarization process is triggered. +3. **Preservation**: The script preserves the initial sections of the file, including the introduction and the "Historical Learnings" section. +4. **Summarization**: It takes the older sprint entries, sends them to the `gemini` CLI, and requests a concise summary that preserves key learnings and markdown structure. +5. **Reconstruction**: The script then overwrites `RETROSPECTIVE.md` with the preserved header, a new "Summarized Sprints (via Gemini)" section, and the newly generated summary. +6. **New Entry**: Finally, it appends the new retrospective entry to the freshly summarized file. + +This ensures that the file remains a valuable and readable source of information without requiring manual pruning. + +## Error Handling + +### PR Not Merged + +**Symptom**: Script reports PR is not in MERGED state + +**Solution**: +- Verify PR number is correct +- Wait for PR to be merged +- Check auto-merge status if enabled +- Manually merge PR if needed + +### Branch Already Deleted + +**Symptom**: Git reports branch doesn't exist + +**Solution**: +- This is normal if auto-merge deleted the branch +- Continue with remaining integration steps +- Script handles this gracefully with `|| echo "..."` + +### Spec Directory Not Found + +**Symptom**: Script cannot find change directory + +**Solution**: +- Verify the change directory path is correct +- Check if this feature even had a spec proposal +- Skip spec integration step if not applicable +- Use `-c` flag only when spec exists + +### Permission Denied on Project Board + +**Symptom**: GitHub API returns 403 error + +**Solution**: +- Verify project board IDs are correct +- Ensure you have write access to the project +- Check `gh` authentication: `gh auth status` +- Update script configuration variables if needed + +### Gemini CLI Issues + +**Symptom**: The script fails during the "Updating retrospective..." step with an error related to the `gemini` command. + +**Solution**: +- Ensure the `gemini` CLI is installed and in your system's PATH. +- Verify you are authenticated. Run `gemini auth` if needed. +- Check for any Gemini API-related issues or outages. 
+- If the issue persists, you can temporarily increase the `RETROSPECTIVE_MAX_LINES` variable in the script to bypass the summarization and add your entry. + +### Retrospective Format Issues + +**Symptom**: The automated summary has formatting problems or seems to have lost critical information. + +**Solution**: +- The summarization is automated and may not be perfect. The original, unsummarized content is not retained by the script. +- You can review the commit history for `RETROSPECTIVE.md` in git to find the previous version if you need to recover information. +- Manually edit the summarized content to fix any formatting issues. +- Consider adjusting the prompt sent to the Gemini CLI within the `summarize_retrospective` function in the script for better results in the future. + +## Configuration Notes + +The script uses these hardcoded configuration variables (lines 31-33): + +```bash +PROJECT_ID="PVT_kwHOARC_Ns4BG9YU" +FIELD_ID="PVTSSF_lAHOARC_Ns4BG9YUzg32qas" # Workflow Stage +DONE_OPTION_ID="6bc77efe" +``` + +**To adapt for your project:** +1. Find your project ID: `gh project list --owner @me` +2. Find field ID: `gh api graphql -f query='...'` (see GitHub docs) +3. Find done option ID: Query project field values +4. Update these variables in the script + +**Note**: A future version should detect these dynamically. + +## Notes + +- **Run after PR is merged**: This is post-merge cleanup, not pre-merge preparation +- **Spec integration is optional**: Only for features that started with spec proposals +- **Retrospective is required**: Always update with learnings from completed work +- **Branch cleanup prevents clutter**: Keeps repository clean and organized +- **Project board sync**: Ensures status accurately reflects completed work +- **Manual steps work too**: Script is a convenience, not required +- **Integration commits go to main**: These are documentation updates, not code changes +- **Keep retrospective focused**: Capture what worked, what didn't, and key lessons +- **One PR per integration**: Run the workflow once per merged PR +- **Script is not fully automated**: Still requires parameters and decision-making \ No newline at end of file diff --git a/change-integrator/scripts/integrate-change.sh b/change-integrator/scripts/integrate-change.sh new file mode 100755 index 0000000..c74be7a --- /dev/null +++ b/change-integrator/scripts/integrate-change.sh @@ -0,0 +1,150 @@ +#!/bin/bash +# This script finalizes a completed task by integrating approved specs and cleaning up branches. + +set -e + +usage() { + echo "Usage: $0 -p -b -i -w -l [-c ]" + echo " -p: The number of the pull request that was merged." + echo " -b: The name of the feature branch that was merged." + echo " -i: The project board item ID for the task." + echo " -w: What went well with this change." + echo " -l: What was learned from this change." + echo " -c: (Optional) The path to the original change proposal directory." + exit 1 +} + +while getopts ":p:b:i:w:l:c:" opt; do + case ${opt} in + p ) PR_NUMBER=$OPTARG;; + b ) BRANCH_NAME=$OPTARG;; + i ) ITEM_ID=$OPTARG;; + w ) WENT_WELL=$OPTARG;; + l ) LESSON=$OPTARG;; + c ) CHANGE_DIR=$OPTARG;; + \? 
) echo "Invalid option: $OPTARG" 1>&2; usage;; + : ) echo "Invalid option: $OPTARG requires an argument" 1>&2; usage;; + esac +done + +if [ -z "$PR_NUMBER" ] || [ -z "$BRANCH_NAME" ] || [ -z "$WENT_WELL" ] || [ -z "$LESSON" ]; then + usage +fi + +# --- CONFIGURATION (should be detected dynamically in a future version) --- +PROJECT_ID="PVT_kwHOARC_Ns4BG9YU" +FIELD_ID="PVTSSF_lAHOARC_Ns4BG9YUzg32qas" # Workflow Stage +DONE_OPTION_ID="6bc77efe" + +echo "Starting complete-change workflow for PR #$PR_NUMBER..." + +# 1. Verify PR is merged +echo "Verifying PR status..." +if ! gh pr view "$PR_NUMBER" --json state | grep -q '"state":"MERGED"'; then + echo "Error: PR #$PR_NUMBER is not merged. Aborting." >&2 + exit 1 +fi +echo "PR #$PR_NUMBER is confirmed as merged." + +# 2. Checkout main and pull +echo "Switching to main and pulling latest changes..." +git checkout main +git pull + +# 3. Delete merged branch +echo "Deleting merged branch: $BRANCH_NAME..." +git push origin --delete "$BRANCH_NAME" || echo "Remote branch $BRANCH_NAME may have already been deleted." +git branch -D "$BRANCH_NAME" || true + +# 4. Integrate Spec (if a change directory was provided) +if [ -n "$CHANGE_DIR" ] && [ -d "$CHANGE_DIR" ]; then + echo "Integrating spec files from $CHANGE_DIR..." + # A more robust script would combine files; for now, we just move the delta. + SPEC_FILE_NAME=$(basename "$CHANGE_DIR").md + mv "$CHANGE_DIR/spec-delta.md" "docs/specs/$SPEC_FILE_NAME" + rm -r "$CHANGE_DIR" + git add docs/ +git commit -m "docs: Integrate approved spec from $BRANCH_NAME" +else + echo "No spec change directory provided or found, skipping spec integration." +fi + +# 5. Update Project Board +if [ -n "$ITEM_ID" ]; then + echo "Updating project board for item $ITEM_ID..." + gh project item-edit --project-id "$PROJECT_ID" --id "$ITEM_ID" --field-id "$FIELD_ID" --single-select-option-id "$DONE_OPTION_ID" || true +else + echo "No project board item ID provided, skipping project board update." +fi + +# 6. Update Retrospective +echo "Updating retrospective..." + +summarize_retrospective() { + echo "RETROSPECTIVE.md has $(wc -l < RETROSPECTIVE.md) lines. Summarizing with Gemini..." + + # Isolate content to summarize + local temp_summary_input="retro_to_summarize_$$.md" # Create in CWD + awk '/^## Sprint 4/{f=1}f' RETROSPECTIVE.md > "$temp_summary_input" + + # Preserve the header and historical learnings + local header_content + header_content=$(awk '/^## Sprint 4/{exit}1' RETROSPECTIVE.md) + + # Call Gemini to summarize + local summarized_sprints + summarized_sprints=$(gemini -p "Summarize the following sprint retrospective entries into a more concise format, extracting the most important, recurring, or impactful learnings. Preserve the markdown structure with '### #PR' headers. @$temp_summary_input") + + # Clean up the temp file + rm "$temp_summary_input" + + # Reconstruct the file + echo "$header_content" > RETROSPECTIVE.md + echo -e "\n## Summarized Sprints (via Gemini)\n" >> RETROSPECTIVE.md + echo "$summarized_sprints" >> RETROSPECTIVE.md + + echo "Retrospective summarized and overwritten." +} + +# Check current retrospective length and summarize if needed +if [ -f "RETROSPECTIVE.md" ]; then + LINE_COUNT=$(wc -l < "RETROSPECTIVE.md") + RETROSPECTIVE_MAX_LINES=150 + if [ "$LINE_COUNT" -gt $RETROSPECTIVE_MAX_LINES ]; then + summarize_retrospective + fi +fi + +# Generate retrospective entry using LLM +echo "Generating retrospective entry with LLM..." 
+LLM_SCRIPT="$(dirname "${BASH_SOURCE[0]}")/summarize_retrospective_llm.py" + +# Try to generate summary with LLM +if [ -f "$LLM_SCRIPT" ]; then + # Capture the exit code without tripping set -e if the LLM call fails + LLM_SUMMARY=$(python3 "$LLM_SCRIPT" --went-well "$WENT_WELL" --lesson-learned "$LESSON" 2>&1) && LLM_EXIT_CODE=0 || LLM_EXIT_CODE=$? + + if [ $LLM_EXIT_CODE -eq 0 ] && [ -n "$LLM_SUMMARY" ]; then + # LLM call succeeded - use structured format with details tag + echo "✓ LLM summary generated successfully." + RETRO_ENTRY="### #$PR_NUMBER - $BRANCH_NAME\n\n$LLM_SUMMARY\n\n<details>\n<summary>Original inputs</summary>\n\n- **Went well:** $WENT_WELL\n- **Lesson:** $LESSON\n</details>
\n" + else + # LLM call failed - fall back to original format + echo "⚠️ LLM summary generation failed, using original format." + RETRO_ENTRY="### #$PR_NUMBER - $BRANCH_NAME\n\n- **Went well:** $WENT_WELL\n- **Lesson:** $LESSON\n" + fi +else + # Script not found - fall back to original format + echo "⚠️ LLM script not found at $LLM_SCRIPT, using original format." + RETRO_ENTRY="### #$PR_NUMBER - $BRANCH_NAME\n\n- **Went well:** $WENT_WELL\n- **Lesson:** $LESSON\n" +fi + +echo -e "\n$RETRO_ENTRY" >> RETROSPECTIVE.md +git add RETROSPECTIVE.md +git commit -m "docs: Add retrospective for PR #$PR_NUMBER" + +# 7. Push final changes +echo "Pushing final integration commits..." +git push + +echo "Complete-change workflow finished for PR #$PR_NUMBER." diff --git a/change-integrator/scripts/summarize_retrospective_llm.py b/change-integrator/scripts/summarize_retrospective_llm.py new file mode 100755 index 0000000..99c1f20 --- /dev/null +++ b/change-integrator/scripts/summarize_retrospective_llm.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +""" +LLM-based retrospective summarizer for the change-integrator skill. + +This script uses an LLM to generate a structured and insightful summary +from 'went well' and 'lesson learned' inputs for the RETROSPECTIVE.md file. +""" + +import sys +import subprocess +import argparse + + +def call_llm_for_summary(went_well: str, lesson_learned: str) -> str: + """ + Calls the LLM to generate a retrospective summary. + + Args: + went_well: The 'went well' input from the user. + lesson_learned: The 'lesson learned' input from the user. + + Returns: + The LLM-generated summary as a string. + Returns an empty string if the LLM call fails. + """ + prompt = f"""You are an assistant that helps write project retrospectives. +Based on the following points, please write a concise, structured summary for a retrospective document. +The summary should be in markdown format and focus on extracting key insights and learnings. + +**What Went Well:** +{went_well} + +**Lesson Learned:** +{lesson_learned} + +Please provide a well-structured summary that: +1. Highlights the key successes and positive outcomes +2. Emphasizes the main lesson learned and its implications +3. Is suitable for a professional engineering team's retrospective document +4. Uses clear, concise markdown formatting + +Return ONLY the markdown summary without any additional explanation or preamble. +""" + + try: + # Using gemini-2.5-flash for efficient text generation + result = subprocess.run( + ['gemini', '--model', 'gemini-2.5-flash'], + input=prompt, + capture_output=True, + text=True, + timeout=45, + check=True # This will raise CalledProcessError for non-zero exit codes + ) + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.CalledProcessError) as e: + print(f"Warning: LLM call failed: {e}", file=sys.stderr) + return "" + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description='LLM-based retrospective summarizer.' + ) + parser.add_argument( + '--went-well', + required=True, + help='What went well with this change.' + ) + parser.add_argument( + '--lesson-learned', + required=True, + help='What was learned from this change.' 
+ ) + args = parser.parse_args() + + summary = call_llm_for_summary(args.went_well, args.lesson_learned) + + if summary: + print(summary) + sys.exit(0) + else: + # Exit with a non-zero status code to indicate failure + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/doc-indexer/.gitkeep b/doc-indexer/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/doc-indexer/SKILL.md b/doc-indexer/SKILL.md new file mode 100644 index 0000000..917e9cd --- /dev/null +++ b/doc-indexer/SKILL.md @@ -0,0 +1,185 @@ +--- +name: doc-indexer +description: Use this skill at the beginning of any session or when needing to understand available project documentation. Provides just-in-time context by scanning YAML frontmatter from all markdown files in the docs/ directory without loading full content. +--- + +# Document Indexer Skill + +## Purpose + +Provide just-in-time context about available project documentation without loading full file content into the context window. The doc-indexer scans all markdown files in the `docs/` directory, extracts their YAML frontmatter metadata, and returns a structured map of available documentation. This enables efficient discovery of specs, plans, retrospectives, and other documentation while minimizing token usage. + +## When to Use + +Use this skill in the following situations: + +- At the beginning of any work session to understand the current state of documentation +- When starting work on a new issue to identify relevant specs and context +- Before proposing changes to understand existing specifications +- When planning a sprint to review available approved specs +- Anytime you need an overview of project documentation without reading full files + +## Prerequisites + +- The project must have a `docs/` directory +- Documentation files should follow the convention of including YAML frontmatter +- The `jq` tool is NOT required (script works without it) + +## Workflow + +### Step 1: Run the Documentation Scanner + +Execute the helper script to scan all markdown files in the docs/ directory: + +```bash +bash scripts/scan-docs.sh +``` + +This will output a human-readable summary showing each document's frontmatter metadata. + +For machine-readable JSON output (useful for programmatic processing): + +```bash +bash scripts/scan-docs.sh -j +``` + +### Step 2: Review the Documentation Map + +The scanner returns information about all markdown files found in `docs/`, including: + +- **File path**: Location of the documentation file +- **Frontmatter metadata**: Key-value pairs from YAML frontmatter (title, status, type, etc.) 
+- **Compliance warnings**: Files missing YAML frontmatter are flagged + +**Example human-readable output**: +``` +--- +file: docs/specs/001-synthesis-flow.md +title: SynthesisFlow Methodology +status: approved +type: spec +--- +file: docs/changes/my-feature/proposal.md +title: My Feature Proposal +status: in-review +type: proposal +[WARNING] Non-compliant file (no frontmatter): docs/README.md +``` + +**Example JSON output**: +```json +[ + { + "file": "docs/specs/001-synthesis-flow.md", + "compliant": true, + "frontmatter": { + "title": "SynthesisFlow Methodology", + "status": "approved", + "type": "spec" + } + }, + { + "file": "docs/README.md", + "compliant": false, + "frontmatter": null + } +] +``` + +### Step 3: Use the Map to Identify Relevant Documentation + +Based on the documentation map, identify which specific files to read for your current task: + +- **For implementation work**: Look for approved specs related to your issue +- **For spec proposals**: Review existing specs to understand the current state +- **For sprint planning**: Identify approved specs ready for implementation +- **For learning context**: Find retrospectives and design docs + +### Step 4: Read Specific Documentation Files + +Once you've identified relevant files from the map, use the Read tool to load their full content: + +```bash +# Example: Read a specific spec identified from the map +Read docs/specs/001-synthesis-flow.md +``` + +This two-step approach (scan first, then read selectively) minimizes token usage while ensuring you have access to all necessary context. + +## Error Handling + +### No docs/ Directory + +**Symptom**: Script reports "No such file or directory" + +**Solution**: +- Verify you're in the project root directory +- Check if the project has been initialized with `project-init` skill +- Create `docs/` directory structure if needed + +### Files Missing Frontmatter + +**Symptom**: Script outputs "[WARNING] Non-compliant file (no frontmatter): ..." 
+ +**Impact**: These files won't have structured metadata in the output + +**Solution**: +- Add YAML frontmatter to documentation files for better discoverability +- Frontmatter should be at the top of the file between `---` markers +- Example format: + ```markdown + --- + title: My Document + status: draft + type: design + --- + + # Document content starts here + ``` + +### Script Permission Errors + +**Symptom**: "Permission denied" when running the script + +**Solution**: +```bash +chmod +x scripts/scan-docs.sh +``` + +## Output Interpretation Guide + +### Frontmatter Fields + +Common frontmatter fields you'll encounter: + +- **title**: Human-readable document title +- **status**: Document state (draft, in-review, approved, archived) +- **type**: Document category (spec, proposal, design, retrospective, plan) +- **epic**: Associated epic issue number +- **sprint**: Sprint identifier +- **author**: Document author +- **created**: Creation date +- **updated**: Last update date + +### Using JSON Output Programmatically + +The JSON output mode is particularly useful when: + +- Filtering documents by specific criteria (e.g., only approved specs) +- Counting documents by type or status +- Building automated workflows +- Integrating with other tools + +Example using `jq` to filter approved specs: +```bash +bash scripts/scan-docs.sh -j | jq '.[] | select(.frontmatter.status == "approved")' +``` + +## Notes + +- The scanner is non-invasive and read-only - it never modifies files +- Large projects with many docs benefit most from this just-in-time approach +- The script scans recursively through all subdirectories in `docs/` +- Empty frontmatter sections are treated as non-compliant +- The scan is fast and can be run frequently without performance concerns +- Consider running this at the start of each work session to stay current with documentation changes diff --git a/doc-indexer/scripts/scan-docs.sh b/doc-indexer/scripts/scan-docs.sh new file mode 100755 index 0000000..61cd954 --- /dev/null +++ b/doc-indexer/scripts/scan-docs.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# This script finds all markdown files in the docs/ directory, extracts the YAML frontmatter, +# reports any files that are missing frontmatter, and can output as JSON. + +JSON_OUTPUT=false + +usage() { + echo "Usage: $0 [-j]" + echo " -j: Output the result as a JSON array." + exit 1 +} + +while getopts ":j" opt; do + case ${opt} in + j ) + JSON_OUTPUT=true + ;; + \? ) + echo "Invalid option: $OPTARG" 1>&2 + usage + ;; + esac +done + +if [ "$JSON_OUTPUT" = true ]; then + # Use a temporary file to store the JSON objects + json_objects_file=$(mktemp) + + while IFS= read -r -d '' file; do + frontmatter=$(awk '/^---$/{if(c>0){exit} c++} c>0' "$file" | sed '1d') + + # Basic JSON escaping for file path + escaped_file=$(echo "$file" | sed 's/\\/\\\\/g' | sed 's/"/\\"/g') + + json_object="{" + json_object+="\"file\": \"$escaped_file\"," + if [ -n "$frontmatter" ]; then + json_object+="\"compliant\": true," + json_object+="\"frontmatter\": {" + + # Convert YAML to JSON: + # 1. Remove leading/trailing whitespace + # 2. Escape double quotes + # 3. Add quotes around keys and values + # 4. 
Join with commas + frontmatter_json=$(echo "$frontmatter" | sed -e 's/^[ \t]*//;s/[ \t]*$//' | awk -F': ' 'NF>1{gsub(/"/, "\\\""); printf "\"%s\": \"%s\",", $1, $2}' | sed 's/,$//') + + json_object+="$frontmatter_json" + json_object+="}" + else + json_object+="\"compliant\": false," + json_object+="\"frontmatter\": null" + fi + json_object+="}" + + echo "$json_object" >> "$json_objects_file" + done < <(find docs -name "*.md" -print0) + + # Now, assemble the final JSON array + echo "[" + if [ -s "$json_objects_file" ]; then + paste -sd, "$json_objects_file" + fi + echo "]" + + # Clean up the temporary file + rm "$json_objects_file" +else + while IFS= read -r -d '' file; do + frontmatter=$(awk '/^---$/{if(c>0){exit} c++} c>0' "$file" | sed '1d') + + if [ -n "$frontmatter" ]; then + echo "---" + echo "file: $file" + echo "$frontmatter" + else + echo "[WARNING] Non-compliant file (no frontmatter): $file" + fi + done < <(find docs -name "*.md" -print0) +fi diff --git a/issue-executor/SKILL.md b/issue-executor/SKILL.md new file mode 100644 index 0000000..dfff2b6 --- /dev/null +++ b/issue-executor/SKILL.md @@ -0,0 +1,123 @@ +--- +name: issue-executor +description: Use this skill to start work on a GitHub issue. It synthesizes all relevant context (specs, retrospective, issue details) using the Gemini CLI to generate a step-by-step implementation plan, then creates a feature branch to begin work. Triggers include "start work on issue #X" or "implement issue". +--- + +# Issue Executor + +## Purpose + +To kickstart the development workflow for a single GitHub issue by generating a comprehensive, context-aware implementation plan. This skill leverages the Gemini CLI to synthesize issue details, relevant specifications, and historical learnings from the project retrospective into a clear, actionable plan. It then creates an isolated feature branch, setting the stage for focused, spec-driven development. + +## When to Use + +Use this skill in the following situations: + +- Starting work on a planned GitHub issue from the current sprint. +- Beginning a work session and wanting a synthesized plan before coding. +- Needing to load and understand all context for an issue efficiently. + +## Prerequisites + +- GitHub repository with issues created (via sprint-planner skill). +- Git working directory is clean (no uncommitted changes). +- Currently on the `main` branch. +- `gh` CLI tool installed and authenticated. +- `jq` tool installed for JSON parsing. +- `gemini` CLI tool installed and authenticated. +- Project has a `docs/` structure with specs and a `RETROSPECTIVE.md`. + +## Core Principles + +### Context is King + +Instead of just viewing files, the skill synthesizes all relevant context into a coherent plan: +- **Issue details**: Requirements and acceptance criteria. +- **Spec files**: All specifications referenced in the issue. +- **Retrospective**: Learnings from past work to avoid repeating mistakes. + +### Isolation + +All work happens on a dedicated feature branch to: +- Protect the `main` branch from work-in-progress. +- Enable a clean Pull Request workflow. +- Allow abandoning work without impacting the main codebase. + +### Atomic Work + +Each issue represents a single, well-defined task that can be completed and reviewed as a unit. + +## Workflow + +### Step 1: Identify the Issue + +Determine which issue to work on. The user specifies the issue number (e.g., #45). 
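+
+If you want to browse candidates before choosing, the assigned-issue listing used in the reference workflow also works here (assumes `gh` is authenticated for this repository):
+
+```bash
+# List open issues assigned to you and pick the one to work on
+gh issue list --assignee @me --state open
+```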
+ +### Step 2: Run the Helper Script + +Execute the `work-on-issue.sh` script with the issue number: + +```bash +bash scripts/work-on-issue.sh 45 +``` + +### Step 3: Understand What the Script Does + +The helper script automates these critical setup steps: + +1. **Validates Prerequisites**: Checks for `jq`, a clean git status, and being on the `main` branch. +2. **Fetches Issue Details**: Retrieves the issue title and body from GitHub. +3. **Finds Context Files**: Locates all referenced spec files (`.md`) in `docs/specs/` or `docs/changes/` and identifies `RETROSPECTIVE.md`. +4. **Synthesizes Implementation Plan**: Constructs a detailed prompt with the issue details and file context (`@file` syntax) and calls the `gemini` CLI. It asks Gemini to produce a step-by-step implementation plan based on all provided information. +5. **Displays the Plan**: Prints the generated plan from Gemini to the console. +6. **Creates Feature Branch**: Generates a conventional branch name (e.g., `feat/45-restructure-doc-indexer`) and checks it out. +7. **Confirms Readiness**: Displays a success message confirming that the branch is ready and the plan has been generated. + +### Step 4: Review the Implementation Plan + +After the script completes, carefully review the implementation plan generated by Gemini. This plan is your starting guide for the implementation, synthesized from all available project context. + +### Step 5: Begin Implementation + +With the plan and feature branch ready: + +1. Follow the steps outlined in the generated plan. +2. Write code that meets the acceptance criteria. +3. Test your changes thoroughly. +4. Commit work incrementally. +5. Push to the remote branch when ready to create a Pull Request. + +## Error Handling + +### Working Directory Not Clean / Not on Main Branch + +**Symptom**: Script reports uncommitted changes or that you are not on the `main` branch. +**Solution**: Commit, stash, or discard your changes. Switch back to the `main` branch before re-running the script. + +### Missing `jq` or `gemini` Tool + +**Symptom**: Script reports that `jq` or `gemini` is not installed. +**Solution**: Install the required tool. For `jq`, use `sudo apt install jq` or `brew install jq`. For `gemini`, follow its official installation instructions. + +### Gemini CLI Issues + +**Symptom**: The script fails while generating the implementation plan, showing an error from the `gemini` command. +**Solution**: +- Ensure the `gemini` CLI is installed correctly and is in your system's PATH. +- Verify your authentication status with `gemini auth`. +- Check for Gemini API outages or connectivity issues. +- Examine the prompt being sent to Gemini for any syntax errors. + +### Spec File Not Found + +**Symptom**: The script runs, but the plan doesn't seem to include context from a spec file you expected. +**Solution**: +- Ensure the issue body explicitly references the spec file with its full path (e.g., `docs/specs/my-spec.md`). The script only includes files that are directly mentioned. +- Verify the referenced file path is correct and the file exists. + +## Notes + +- The script's primary output is now an **actionable implementation plan**, not just a list of files. +- The quality of the plan depends on the quality of the input (issue description, specs, retrospective). +- The generated plan is a guide; use your own expertise to adapt and improve it as you work. +- Branch naming follows the convention: `feat/ISSUE_NUMBER-kebab-case-title`. 
\ No newline at end of file diff --git a/issue-executor/references/work-on-issue.md b/issue-executor/references/work-on-issue.md new file mode 100644 index 0000000..d4f7bdc --- /dev/null +++ b/issue-executor/references/work-on-issue.md @@ -0,0 +1,609 @@ +# Work on Issue Workflow + +Select and start work on a specific issue from assigned GitHub issues. + +## Purpose + +Load full context for an issue and create a feature branch to begin implementation. Ensures clean workflow start with all necessary context loaded before any code is written. + +## When to Use + +Use this workflow when: +- Starting work on a new issue +- Switching between issues +- After completing previous issue +- Beginning work session + +## Workflow + +### 1. Verify Clean Working State + +```bash +# Check current branch is clean +CURRENT_BRANCH=$(git branch --show-current) +if [ "$CURRENT_BRANCH" != "main" ]; then + echo "⚠ Currently on branch: $CURRENT_BRANCH" + echo "Finish current work or switch to main first." + exit 1 +fi + +# Check for uncommitted changes +if [ -n "$(git status --porcelain)" ]; then + echo "⚠ Uncommitted changes detected" + git status + echo "Commit or stash changes before starting new issue." + exit 1 +fi +``` + +### 2. Get Current User's Assigned Issues + +```bash +# Get all open issues assigned to current user +gh issue list \ + --assignee @me \ + --state open \ + --json number,title,labels,milestone,createdAt \ + --limit 50 | jq '.' +``` + +### 3. Get Milestone Details for Context + +```bash +# Get current sprint milestones +gh api repos/:owner/:repo/milestones \ + --jq '.[] | select(.state == "open") | {number, title, state, dueOn}' \ + | jq -s 'sort_by(.dueOn)' +``` + +### 4. Check for Spec Conflicts + +For each candidate issue, verify no conflicting work in progress: + +```bash +# Extract affected specs from issue body +ISSUE_NUM=123 +AFFECTED_SPECS=$(gh issue view $ISSUE_NUM --json body --jq .body \ + | rg "Affected Specs.*" -A 5 \ + | rg "docs/specs/([^/]+)" -o -r '$1') + +# Check if any other open issues affect same specs +for SPEC in $AFFECTED_SPECS; do + CONFLICTS=$(gh issue list \ + --assignee @me \ + --state open \ + --search "docs/specs/$SPEC in:body" \ + --json number,title) + + if [ $(echo "$CONFLICTS" | jq 'length') -gt 1 ]; then + echo "⚠ Conflict detected: Multiple issues affect $SPEC" + echo "$CONFLICTS" | jq -r '.[] | "#\(.number) - \(.title)"' + fi +done +``` + +### 5. Analyze and Recommend Issues + +Sort and prioritize issues by: + +1. **Priority**: P0 > P1 > P2 > P3 +2. **Sprint alignment**: Current milestone first +3. **Spec readiness**: Issues with existing specs +4. **Dependencies**: Non-blocked issues first + +```bash +# Parse priorities from labels +P0_ISSUES=$(gh issue list --assignee @me --state open --label "priority:P0" --json number,title,milestone) +P1_ISSUES=$(gh issue list --assignee @me --state open --label "priority:P1" --json number,title,milestone) +P2_ISSUES=$(gh issue list --assignee @me --state open --label "priority:P2" --json number,title,milestone) + +# Show recommendations +echo "=== Recommended Issues ===" +echo "" +echo "Priority P0 (Critical):" +echo "$P0_ISSUES" | jq -r '.[] | " #\(.number) - \(.title) [\(.milestone.title)]"' +echo "" +echo "Priority P1 (High):" +echo "$P1_ISSUES" | jq -r '.[] | " #\(.number) - \(.title) [\(.milestone.title)]"' +echo "" +echo "Priority P2 (Medium):" +echo "$P2_ISSUES" | jq -r '.[] | " #\(.number) - \(.title) [\(.milestone.title)]"' +``` + +Present top 3-5 recommendations: +``` +Top recommendations: + +1. 
#201 - Curriculum Framework (P1, S2) ✓ Specs ready +2. #202 - Lesson Player (P1, S2) ✓ Specs ready +3. #203 - Virtual Laboratory System (P2, S2) ⚠ Depends on #201 + +Which issue would you like to start? (Enter number) +``` + +### 6. Read Full Issue Details + +After user selects issue: + +```bash +ISSUE_NUMBER=201 + +# Read complete issue +gh issue view $ISSUE_NUMBER + +# Read ALL comments (including review suggestions) +gh issue view $ISSUE_NUMBER --comments +``` + +**IMPORTANT**: Pay special attention to review comments posted by `review-sprint`. These may include: +- Architecture compliance suggestions +- Wording and clarity improvements +- Planning enhancements +- Spec change recommendations + +Consider these suggestions during implementation but use your judgment on how to apply them. + +### 7. Extract and Validate Spec References + +```bash +# Get issue body +ISSUE_BODY=$(gh issue view $ISSUE_NUMBER --json body --jq .body) + +# Extract affected specs +AFFECTED_SPECS=$(echo "$ISSUE_BODY" | rg "Affected Specs" -A 10 | rg "docs/specs/[^)]+") + +# Verify each spec exists +echo "Validating spec references..." +for SPEC_PATH in $AFFECTED_SPECS; do + if [ -f "$SPEC_PATH" ]; then + echo "✓ Found: $SPEC_PATH" + else + echo "✗ Missing: $SPEC_PATH" + echo "" + echo "⚠ Spec not found. Run 'init-spec' to create it first." + exit 1 + fi +done +``` + +### 8. Check Related Issues and Dependencies + +```bash +# Look for "Related Issues" section in issue body +RELATED_ISSUES=$(echo "$ISSUE_BODY" | rg "Related Issues" -A 5 | rg "#[0-9]+" -o) + +if [ -n "$RELATED_ISSUES" ]; then + echo "=== Related Issues ===" + for RELATED in $RELATED_ISSUES; do + RELATED_NUM=$(echo "$RELATED" | tr -d '#') + RELATED_STATE=$(gh issue view $RELATED_NUM --json state --jq .state) + RELATED_TITLE=$(gh issue view $RELATED_NUM --json title --jq .title) + + if [ "$RELATED_STATE" = "OPEN" ]; then + echo "⚠ $RELATED - $RELATED_TITLE (OPEN - may be dependency)" + else + echo "✓ $RELATED - $RELATED_TITLE (CLOSED)" + fi + done +fi + +# Check for blocking issues +BLOCKING=$(echo "$ISSUE_BODY" | rg -i "blocked by|depends on" -A 2) +if [ -n "$BLOCKING" ]; then + echo "" + echo "⚠ Warning: This issue may have dependencies:" + echo "$BLOCKING" + echo "" + read -p "Continue anyway? (y/n) " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "Aborted. Select a different issue." + exit 1 + fi +fi +``` + +### 9. Read Affected Specs + +```bash +echo "=== Reading Affected Specs ===" +for SPEC_PATH in $AFFECTED_SPECS; do + echo "" + echo "--- $SPEC_PATH ---" + cat "$SPEC_PATH" + echo "" +done + +# If spec has design.md, read it too +for SPEC_PATH in $AFFECTED_SPECS; do + SPEC_DIR=$(dirname "$SPEC_PATH") + DESIGN_PATH="$SPEC_DIR/design.md" + + if [ -f "$DESIGN_PATH" ]; then + echo "--- $DESIGN_PATH ---" + cat "$DESIGN_PATH" + echo "" + fi +done +``` + +### 10. Read Development Retrospective + +Read accumulated learnings to inform implementation: + +```bash +if [ -f "RETROSPECTIVE.md" ]; then + echo "" + echo "=== Development Retrospective ===" + cat RETROSPECTIVE.md + echo "" + echo "Consider these learnings during implementation:" + echo " - Apply success patterns from recent issues" + echo " - Avoid known friction points" + echo " - Reference well-specified capabilities" + echo " - Follow active improvement initiatives" + echo "" +elif [ -f "docs/RETROSPECTIVE.md" ]; then + echo "" + echo "=== Development Retrospective ===" + cat docs/RETROSPECTIVE.md + echo "" + echo "Consider these learnings during implementation." 
+ echo "" +else + echo "" + echo "ℹ No RETROSPECTIVE.md found (this is normal for first few issues)" + echo "" +fi +``` + +**Purpose**: The retrospective captures: +- **Recent patterns**: What worked/didn't work in last 3-5 issues +- **Historical wisdom**: Compressed learnings from earlier issues +- **Spec quality**: Which specs are good references vs need improvement +- **Active improvements**: Current process improvements being tracked + +This context helps avoid repeating past mistakes and apply proven patterns. + +### 11. Create Feature Branch + +```bash +# Extract title and create kebab-case branch name +TITLE=$(gh issue view $ISSUE_NUMBER --json title --jq .title) +KEBAB_TITLE=$(echo "$TITLE" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/-/g;s/^-|-$//g' | cut -c1-40) + +# Determine branch prefix from change type +CHANGE_TYPE=$(echo "$ISSUE_BODY" | rg "\[x\] (ADDED|MODIFIED|REMOVED)" -o -r '$1') + +case "$CHANGE_TYPE" in + "ADDED") + PREFIX="feat" + ;; + "MODIFIED") + PREFIX="feat" # or "refactor" depending on scope + ;; + "REMOVED") + PREFIX="refactor" + ;; + *) + # Infer from labels + if echo "$ISSUE_BODY" | rg -q "type:bug"; then + PREFIX="fix" + elif echo "$ISSUE_BODY" | rg -q "type:chore"; then + PREFIX="chore" + else + PREFIX="feat" + fi + ;; +esac + +BRANCH_NAME="${PREFIX}/${ISSUE_NUMBER}-${KEBAB_TITLE}" + +# Create and switch to branch +git switch -c "$BRANCH_NAME" + +echo "✓ Created and switched to: $BRANCH_NAME" +``` + +### 12. Initial Setup + +Check if any setup is needed: + +```bash +# Check if package.json changed recently (might need npm install) +MAIN_PACKAGE_DATE=$(git log -1 --format="%ai" main -- package.json 2>/dev/null) +if [ -n "$MAIN_PACKAGE_DATE" ]; then + echo "Note: package.json was updated recently ($MAIN_PACKAGE_DATE)" + echo "Running npm install..." + npm install +fi + +# Check if Prisma schema exists (might need generate) +if [ -f "prisma/schema.prisma" ]; then + echo "Note: Prisma schema detected" + echo "Running npx prisma generate..." + npx prisma generate +fi + +# Check for other setup scripts +if [ -f "scripts/setup.sh" ]; then + echo "Note: Found setup script" + echo "Running scripts/setup.sh..." + bash scripts/setup.sh +fi +``` + +### 13. Update TODO.md + +Add issue to "In Progress" section: + +```markdown +## In Progress + +- [ ] #201 - Curriculum Framework (feat/201-curriculum-framework) + - **Started**: 2025-10-21 + - **Branch**: feat/201-curriculum-framework + - **Specs**: docs/specs/curriculum-management/spec.md + - **Priority**: P1 + - **Milestone**: S2 +``` + +If TODO.md doesn't have "In Progress" section, create it: + +```bash +# Check if section exists +if ! grep -q "## In Progress" TODO.md; then + # Add section after sprint section + sed -i '/^## Phase/a \\n## In Progress\n' TODO.md +fi + +# Add issue entry +cat >> TODO.md << EOF + +- [ ] #${ISSUE_NUMBER} - ${TITLE} (${BRANCH_NAME}) + - **Started**: $(date +%Y-%m-%d) + - **Branch**: ${BRANCH_NAME} + - **Specs**: ${AFFECTED_SPECS} + - **Priority**: ${PRIORITY} + - **Milestone**: ${MILESTONE} +EOF +``` + +### 14. 
Update Sprint File + +Find the relevant sprint file and update story status: + +```bash +# Find sprint file (match milestone to sprint file) +MILESTONE=$(gh issue view $ISSUE_NUMBER --json milestone --jq .milestone.title) +SPRINT_NUM=$(echo "$MILESTONE" | rg "S([0-9]+)" -o -r '$1') +SPRINT_FILE="docs/sprint/S${SPRINT_NUM}.md" + +if [ -f "$SPRINT_FILE" ]; then + # Find the story section and update status + # This is complex; in practice, manually update the file + echo "Update $SPRINT_FILE:" + echo " - Find story section for: $TITLE" + echo " - Add: **Status**: In Progress" + echo " - Add: **Branch**: $BRANCH_NAME" + echo " - Add: **Started**: $(date +%Y-%m-%d)" +fi +``` + +Example sprint file update: + +```markdown +## Curriculum Framework + +**User Story**: As an educator... + +**Status**: In Progress +**Branch**: feat/201-curriculum-framework +**Started**: 2025-10-21 +**Issue**: #201 +``` + +### 15. Provide Summary + +``` +✓ Started work on issue #201 + +Issue: Curriculum Framework +Branch: feat/201-curriculum-framework +Priority: P1 +Milestone: S2 + +Affected specs: + - docs/specs/curriculum-management/spec.md + +Review comments: 1 comment (review suggestions available) + +Next steps: + 1. Review issue comments and review suggestions + 2. Review spec requirements and scenarios + 3. Implement according to acceptance criteria (considering review feedback) + 4. Write tests per test plan + 5. Run 'test-issue' before submitting + 6. Run 'submit-issue' when ready for PR + +Dependencies to watch: + - None identified + +Happy coding! 🚀 +``` + +## Priority Order + +Issues are recommended in this order: + +1. **P0 - Critical**: Blocking bugs, production issues +2. **P1 - High**: Important features, high-value work +3. **P2 - Medium**: Standard features, improvements +4. **P3 - Low**: Nice-to-have, technical debt + +Within same priority: +- Current sprint milestone before future sprints +- Issues with ready specs before those needing spec work +- Non-blocked issues before those with dependencies + +## Dependency Checking + +### Types of Dependencies + +1. **Hard dependencies**: "Blocked by #123", "Depends on #123" + - Must be completed first + - Halt if not complete + +2. **Soft dependencies**: "Related to #123", "See also #123" + - Provide context but not blocking + - Read for additional information + +3. **Spec dependencies**: One spec depends on another + - Check if dependency specs exist + - Verify dependency requirements are clear + +### Handling Blocked Issues + +If selected issue is blocked: + +``` +⚠ Issue #203 is blocked by #201 (Curriculum Framework) + +#201 status: In Progress (50% complete) + +Options: +1. Wait for #201 to complete +2. Select different issue +3. Proceed with risk (may need rework) + +Recommendation: Select different issue and return to #203 later. +``` + +## Spec Readiness Validation + +Before starting work, ensure: + +- [ ] All referenced specs exist in `docs/specs/` +- [ ] Specs have clear requirements +- [ ] Each requirement has scenarios +- [ ] Dependencies between specs are documented +- [ ] Design docs exist for complex changes + +If spec is incomplete: +1. Consider running `init-spec` to complete it +2. 
Or flag issue for spec refinement before starting + +## File Update Patterns + +### TODO.md Format + +```markdown +## In Progress + +- [ ] #201 - Curriculum Framework (feat/201-curriculum-framework) + - **Started**: 2025-10-21 + - **Branch**: feat/201-curriculum-framework + - **Specs**: docs/specs/curriculum-management/spec.md + - **Priority**: P1 + - **Milestone**: S2 + - **Dependencies**: None +``` + +### Sprint File Format + +```markdown +## Curriculum Framework + +**User Story**: As an educator, I want to define course structures... + +**Acceptance Criteria**: +- Course hierarchy supported +- Learning objectives trackable + +**Status**: In Progress +**Branch**: feat/201-curriculum-framework +**Started**: 2025-10-21 +**Issue**: #201 +**Assigned**: @username +``` + +## Error Handling + +### Dirty Working Directory + +``` +⚠ Error: Uncommitted changes detected + +Please commit or stash changes before starting new issue: + git add . + git commit -m "wip: save progress" + # or + git stash + +Then run 'next-issue' again. +``` + +### Not on Main Branch + +``` +⚠ Error: Currently on branch: feat/123-old-issue + +Please finish current work first: + 1. Complete implementation + 2. Run 'submit-issue' to create PR + 3. Or: git switch main (to abandon work) + +Then run 'next-issue' again. +``` + +### Missing Specs + +``` +⚠ Error: Spec not found + docs/specs/curriculum-management/spec.md + +Issue #201 references specs that don't exist yet. + +Action required: + 1. Run: init-spec curriculum-management + 2. Create spec.md with requirements + 3. Run 'next-issue' again +``` + +### No Available Issues + +``` +ℹ No issues assigned to you. + +Possible actions: + 1. Check sprint backlog in GitHub + 2. Assign yourself an issue from current sprint + 3. Run 'seed-sprint' to create new issues + 4. Contact Scrum Master for assignment +``` + +## Advanced: Context Caching + +To optimize repeated API calls, consider caching: + +```bash +# Cache issue list for 5 minutes +CACHE_FILE="/tmp/gh-issues-cache-$(date +%Y%m%d-%H%M).json" +if [ ! -f "$CACHE_FILE" ]; then + gh issue list --assignee @me --state open --json number,title,labels,milestone > "$CACHE_FILE" +fi + +# Use cached data +ISSUES=$(cat "$CACHE_FILE") +``` + +## Notes + +- Always verify clean state before starting new work +- Read all related issues for full context +- Check spec completeness before implementation +- Update tracking files immediately after branch creation +- Consider dependencies when selecting issues +- Prefer current sprint issues over future work +- Validate specs exist and are complete +- Note any setup requirements (npm install, prisma generate, etc.) diff --git a/issue-executor/scripts/work-on-issue.sh b/issue-executor/scripts/work-on-issue.sh new file mode 100755 index 0000000..8cbf837 --- /dev/null +++ b/issue-executor/scripts/work-on-issue.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# This script manages the core development loop for a single issue. + +set -e + +usage() { + echo "Usage: $0 " + exit 1 +} + +ISSUE_NUMBER=$1 + +if [ -z "$ISSUE_NUMBER" ]; then + echo "Error: Issue number not provided." >&2 + usage +fi + +# --- VALIDATION --- +if ! command -v jq &> /dev/null +then + echo "Error: jq is not installed. Please install it to continue." >&2 + exit 1 +fi + +echo "Starting work on Issue #$ISSUE_NUMBER..." + +# 1. Verify clean git state +echo "Verifying git status..." +if [ -n "$(git status --porcelain)" ]; then + echo "Error: Working directory is not clean. Please commit or stash changes." >&2 + exit 1 +fi +echo "Git status is clean." 
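+
+# Optional hardening (sketch): the steps below also call the `gh` and `gemini`
+# CLIs, so verifying them here alongside jq fails fast instead of partway through.
+for tool in gh gemini; do
+  if ! command -v "$tool" &> /dev/null; then
+    echo "Error: $tool is not installed. Please install it to continue." >&2
+    exit 1
+  fi
+done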
+
+# 2. Synthesize Implementation Plan with Gemini
+echo "Synthesizing implementation plan for Issue #$ISSUE_NUMBER with Gemini..."
+
+# 2a. Read issue details from GitHub
+echo "Fetching issue details..."
+ISSUE_JSON=$(gh issue view "$ISSUE_NUMBER" --json title,body)
+ISSUE_TITLE=$(echo "$ISSUE_JSON" | jq -r '.title')
+ISSUE_BODY=$(echo "$ISSUE_JSON" | jq -r '.body')
+
+# 2b. Find all associated spec files
+echo "Finding associated spec files..."
+# This pattern finds all markdown files in docs/specs and docs/changes
+SPEC_FILES=$(echo "$ISSUE_BODY" | grep -o 'docs/\(specs\|changes\)/[^[:space:]`'"'"']*\.md' || true)
+
+# 2c. Fetch issue comments
+echo "Fetching issue comments..."
+COMMENTS_JSON=$(gh issue view "$ISSUE_NUMBER" --json comments)
+COMMENTS=$(echo "$COMMENTS_JSON" | jq -r '.comments[] | "### Comment from @\(.author.login)\n\n\(.body)\n"')
+
+# 2d. Construct the Gemini prompt
+GEMINI_PROMPT=""
+
+if [ -n "$PARENT_EPIC_CONTEXT" ]; then
+  GEMINI_PROMPT+="$PARENT_EPIC_CONTEXT\n\n"
+fi
+
+GEMINI_PROMPT+="I am about to start work on GitHub issue #${ISSUE_NUMBER}. Here is all the context. Please provide a concise, step-by-step implementation plan.
+
+**Issue Details:**
+Title: ${ISSUE_TITLE}
+Body:
+${ISSUE_BODY}
+"
+
+# Add comments to prompt if they exist
+if [ -n "$COMMENTS" ]; then
+  GEMINI_PROMPT+="\n**Issue Comments:**\n${COMMENTS}"
+fi
+
+# Add retrospective to prompt if it exists
+if [ -f "RETROSPECTIVE.md" ]; then
+  GEMINI_PROMPT+="\n**Retrospective Learnings:**\n@RETROSPECTIVE.md"
+fi
+
+# Add spec files to prompt
+if [ -n "$SPEC_FILES" ]; then
+  GEMINI_PROMPT+="\n\n**Referenced Specifications:**"
+  for spec in $SPEC_FILES; do
+    if [ -f "$spec" ]; then
+      GEMINI_PROMPT+="\n@$spec"
+    fi
+  done
+fi
+
+# Add final instruction to prompt
+GEMINI_PROMPT+="\n\nBased on all this context, what are the key steps I should take to implement this feature correctly, keeping in mind past learnings and adhering to the specifications? Provide a clear, actionable plan."
+
+# 2e. Call Gemini
+echo "------------------------- GEMINI IMPLEMENTATION PLAN -------------------------"
+gemini -p "$GEMINI_PROMPT"
+echo "----------------------------------------------------------------------------"
+echo "Context loaded and implementation plan generated."
+
+# 3. Create a feature branch
+echo "Generating branch name..."
+# Sanitize title to create a branch name
+BRANCH_NAME=$(echo "$ISSUE_TITLE" | tr '[:upper:]' '[:lower:]' | sed -e 's/task: //g' -e 's/[^a-z0-9]/-/g' -e 's/--/-/g' -e 's/^-//' -e 's/-$//')
+BRANCH_NAME="feat/$ISSUE_NUMBER-$BRANCH_NAME"
+
+echo "Creating new branch: $BRANCH_NAME..."
+git checkout -b "$BRANCH_NAME"
+
+echo "Setup complete. You are now on branch '$BRANCH_NAME' and ready to implement Issue #$ISSUE_NUMBER."
\ No newline at end of file diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..2544cbd --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,185 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:bodangren/git-workflow:skills", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "28a64e03022160159dc2b6e3fc4df637db6bdd42", + "treeHash": "d1e378437a42c6e259eab7c2038eacf7795111967ba5daf04d2440c938a3addb", + "generatedAt": "2025-11-28T10:14:18.628799Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "synthesisflow-skills", + "description": "SynthesisFlow: Modular skills for spec-driven development with hybrid LLM-guided + helper-script architecture", + "version": null + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "515c38ecfc8fa2e798cb7fcd7cfaba1b14ce6a7fdf5d29eaf8c4fcf20922bdc5" + }, + { + "path": "sprint-planner/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "sprint-planner/SKILL.md", + "sha256": "8301d19c125d091247befb957878d34f78b447d7d75cfbb0effbd769ba8efdec" + }, + { + "path": "sprint-planner/scripts/create-sprint-issues.sh", + "sha256": "30e67c8dd0a960b1742fee2bfe6fb0ecbb91c2e5be85a0fa060f8d2f17516153" + }, + { + "path": "agent-integrator/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "agent-integrator/SKILL.md", + "sha256": "f0be6cd760d5b2fa461e14f14384713c6a0eefad9196949da267f8e23101e8a3" + }, + { + "path": "agent-integrator/scripts/update-agents-file.sh", + "sha256": "7abd7e8973970be82d8c7036d1ba2b5251a9909b43c6c89e9375ad7e74fe396e" + }, + { + "path": "project-migrate/SKILL.md", + "sha256": "445bda14985ddd657daeafec453bce8c9ef0f34b054b77b54561e865f88abf52" + }, + { + "path": "project-migrate/scripts/correct_links_llm.py", + "sha256": "ac2323f228f97674e5c2cf331d1261da4288d5bacf4f98e017d3037e5186da07" + }, + { + "path": "project-migrate/scripts/project-migrate.sh", + "sha256": "3d36b185a7fd3837d438b06be580b7310ccbe4b9f209670b040bee045074b9d8" + }, + { + "path": "change-integrator/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "change-integrator/SKILL.md", + "sha256": "87a078375f39dd6dc27511e55d72d22a8886e6630ef5cd5ee432d5a006cd9fd6" + }, + { + "path": "change-integrator/scripts/summarize_retrospective_llm.py", + "sha256": "f7cf32fee46de38255adf9b08121292ab5e060d0127ad5865f09dfb167bea940" + }, + { + "path": "change-integrator/scripts/integrate-change.sh", + "sha256": "2a12257da0cf1daf4ef6795380a3f3a4ea105da55c2f21e3bc67d23dda849e38" + }, + { + "path": "prd-authoring/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "prd-authoring/SKILL.md", + "sha256": "8895f2792d529d0ea83c8c4fd4e426e6abb364757ac2fd3949d0faf4527f26e7" + }, + { + "path": "prd-authoring/examples/IMPLEMENTATION_SUMMARY.md", + "sha256": "958773a0306ac1ea7a5e10c627eb59b47ead7a3cc7c4d9836ce64667fc29c9ca" + }, + { + "path": "prd-authoring/examples/README.md", + "sha256": "e06bba6883cbed77dc3968c1c3ae59e491eace8c66daf08192ccf9200515800b" + }, + { + "path": "prd-authoring/examples/workflow-test-log.md", + "sha256": 
"6cc271fc28c1e0a54ba31e54b236346f0b17836e3429bb8495450c164c882ab8" + }, + { + "path": "prd-authoring/examples/01-product-brief-example.md", + "sha256": "1f8b5a5a4afb8855363859e97a8669dc3b43af1d8d1d6e125652d2cd6f63b5d9" + }, + { + "path": "prd-authoring/examples/02-research-example.md", + "sha256": "3afd5135ed9992371c44afb734924e3ae6702fb7f337925191eef9e211955b34" + }, + { + "path": "prd-authoring/examples/QUICK_START.md", + "sha256": "ce0007c9a169a3f1f20f4e12ac400429d19b85feb274cadb076ef1f29e4bb5f7" + }, + { + "path": "prd-authoring/examples/03-prd-example-abbreviated.md", + "sha256": "26fdc2596042cd928beaf75e568c5389c7cf41a3f6e53e822f335fb16e7e1ce1" + }, + { + "path": "prd-authoring/scripts/validate_prd_llm.py", + "sha256": "d93a6487092fa77446990c076116710fefd1585ca33c719a15be583174871ff9" + }, + { + "path": "prd-authoring/scripts/prd-authoring.sh", + "sha256": "66c7c1e10d1d2f295c533bc9ace1c83bb9f74e64752ecc78b359add5eee430ed" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "d762ea8adb19ba74b1e05cd9f3ccbd1a89c29e2f174bc7977300ef8accf6153a" + }, + { + "path": "issue-executor/SKILL.md", + "sha256": "756a19ea009c49f5ea07d6803d02fd14f0ba7455f82aec7138ff8d07b8f69f6f" + }, + { + "path": "issue-executor/references/work-on-issue.md", + "sha256": "da6f04e7fbe1cf5c0014b030ed80fba97ba1bc1ed8025d824ed054c60cb31a22" + }, + { + "path": "issue-executor/scripts/work-on-issue.sh", + "sha256": "15eb1c95a9fa210d0051b9ade86303863295651b618636dbfb78dc311c775aeb" + }, + { + "path": "project-init/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "project-init/SKILL.md", + "sha256": "5c7441b75e8faa9378edc2db09f288bdf8b3370be55cf4a6d9cec79e830e5ec1" + }, + { + "path": "project-init/scripts/init-project.sh", + "sha256": "f9adc9e047af30ad1097ad73087e527150068a0ba840958736590d7a385136b8" + }, + { + "path": "doc-indexer/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "doc-indexer/SKILL.md", + "sha256": "c6bd369dbd488793f07611eb384171adb79dfd496cb82772f46cab31c593f1a6" + }, + { + "path": "doc-indexer/scripts/scan-docs.sh", + "sha256": "845985500285c7fdb1d0b4638a701a7bcb6459d5cde09ee284ce58ca5905107b" + }, + { + "path": "spec-authoring/.gitkeep", + "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "path": "spec-authoring/SKILL.md", + "sha256": "fe9f8ff0723f7cdaaa3bc565a6e2c8a2e69a25919849d8fffe42c7abdfe352d0" + }, + { + "path": "spec-authoring/scripts/spec-authoring.sh", + "sha256": "02644df21cbfacaa5e2bc495263be99ba3cb00bff17dc24c537e1919ad0dfb39" + } + ], + "dirSha256": "d1e378437a42c6e259eab7c2038eacf7795111967ba5daf04d2440c938a3addb" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/prd-authoring/.gitkeep b/prd-authoring/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/prd-authoring/SKILL.md b/prd-authoring/SKILL.md new file mode 100644 index 0000000..d169561 --- /dev/null +++ b/prd-authoring/SKILL.md @@ -0,0 +1,210 @@ +--- +name: prd-authoring +description: Use this skill for early-stage project planning. It leverages the Gemini CLI to generate high-quality first drafts of Product Briefs, Research Documents, and full PRDs, guiding users from idea to validated requirements. Triggers include "create PRD", "product brief", or "validate requirements". 
+--- + +# PRD Authoring Skill + +## Purpose + +To accelerate and enhance early-stage project planning by using a powerful generative AI to create high-quality first drafts of key strategic documents. This skill integrates the Gemini CLI into the PRD authoring workflow, transforming it from a manual template-filling exercise into a dynamic, AI-assisted process. + +The skill guides users from a vague project idea to a well-defined Product Requirements Document (PRD) by: +- **Generating a Product Brief:** Creates a comprehensive brief from a simple project name. +- **Generating a Research Plan:** Uses the product brief to generate a targeted research document. +- **Generating a full PRD:** Synthesizes the brief and research into a detailed PRD with objectives, requirements, and success criteria. + +This approach bridges the gap between "we have an idea" and "we're ready to write specs" with unprecedented speed and quality. + +## When to Use + +Use this skill in the following situations: + +- Starting a new project from an initial concept. +- Generating a first draft of a product brief, research plan, or PRD. +- Validating an existing PRD against quality standards. +- Breaking down a PRD into epics for sprint planning. + +Do NOT use this skill for: +- Implementation-level specifications (use spec-authoring instead). +- Sprint planning from approved specs (use sprint-planner instead). + +## Prerequisites + +- Project initialized with SynthesisFlow structure (`docs/` directory exists). +- `gemini` CLI tool installed and authenticated. + +## PRD Philosophy + +**Strategy Before Tactics**: PRDs define WHAT we're building and WHY before specs define HOW we'll build it. This skill uses AI to rapidly generate the "WHAT" and "WHY" so that teams can focus on review, refinement, and strategic alignment. + +--- + +## Workflow Commands + +### The `status` Command + +#### Purpose + +Assess project readiness and provide guidance on next workflow steps. This is the recommended starting point. + +#### Workflow + +Run the status check to understand the current state of your PRD documents. +```bash +bash scripts/prd-authoring.sh status [project-name] +``` +The script will report which documents exist (`product-brief.md`, `research.md`, `prd.md`, etc.) and recommend the next logical command to run. + +--- + +### The `brief` Command + +#### Purpose + +Generate a comprehensive, high-quality first draft of a Product Brief from a simple project name. + +#### Workflow + +##### Step 1: Run Brief Generation Script + +Execute the script with your project name. +```bash +bash scripts/prd-authoring.sh brief "Your Awesome Project Name" +``` + +##### Step 2: Understand What the Script Does + +Instead of creating an empty template, the script calls the **Gemini CLI** with a detailed prompt, asking it to generate a full product brief. This includes plausible, well-structured content for: +- Problem Statement +- Target Users +- Proposed Solution +- Value Proposition +- Success Metrics + +The output from Gemini is saved as the first draft in `docs/prds/your-awesome-project-name/product-brief.md`. + +##### Step 3: Review and Refine + +Open the generated file. Review the AI-generated content with your team and stakeholders, refining the details to match your specific vision. The draft provides a strong foundation, saving hours of initial writing. + +--- + +### The `research` Command + +#### Purpose + +Generate a targeted, context-aware market research plan based on the contents of your product brief. 
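+
+Conceptually, the helper reads the brief and feeds it to the Gemini CLI. The minimal sketch below is an illustration only; the real prompt wording and error handling live in `scripts/prd-authoring.sh` and may differ. The paths follow the `docs/prds/<project-name>/` layout used by the other commands, and `gemini -p` is the invocation pattern used elsewhere in this plugin.
+
+```bash
+# Sketch: turn an existing product brief into a first-draft research plan.
+PROJECT="your-awesome-project-name"
+BRIEF="docs/prds/${PROJECT}/product-brief.md"
+OUT="docs/prds/${PROJECT}/research.md"
+
+if [ ! -f "$BRIEF" ]; then
+  echo "Error: $BRIEF not found. Run the 'brief' command first." >&2
+  exit 1
+fi
+
+PROMPT="Act as a business analyst. Based on the product brief below, draft a research plan
+covering competitive analysis, market insights, user feedback analysis, technical
+considerations, and actionable recommendations.
+
+$(cat "$BRIEF")"
+
+gemini -p "$PROMPT" > "$OUT"
+echo "Draft research plan written to $OUT"
+```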
+ +#### Workflow + +##### Step 1: Run Research Generation Script + +Once your product brief is reviewed and saved, run the research command: +```bash +bash scripts/prd-authoring.sh research your-awesome-project-name +``` + +##### Step 2: Understand What the Script Does + +The script sends the entire content of your `product-brief.md` to the **Gemini CLI**. It prompts the AI to act as a business analyst and generate a research plan that logically follows from the brief. The generated draft will include sections for: +- Competitive Analysis +- Market Insights +- User Feedback Analysis +- Technical Considerations +- Actionable Recommendations + +This draft is saved to `docs/prds/your-awesome-project-name/research.md`. + +##### Step 3: Execute Research and Refine Document + +Use the AI-generated document as a guide for your research activities. Fill in the details and refine the analysis based on your actual findings. + +--- + +### The `create-prd` Command + +#### Purpose + +Generate a comprehensive, detailed first draft of a Product Requirements Document (PRD) by synthesizing the product brief and the research document. + +#### Workflow + +##### Step 1: Run PRD Creation Script + +After completing your brief and research documents, run the `create-prd` command: +```bash +bash scripts/prd-authoring.sh create-prd your-awesome-project-name +``` + +##### Step 2: Understand What the Script Does + +This is the most powerful feature. The script sends the **full content of both your product brief and your research document** to the **Gemini CLI**. It prompts the AI to generate a detailed PRD that includes: +- SMART Objectives +- Measurable Success Criteria +- Specific Functional and Non-Functional Requirements +- Constraints, Assumptions, and Out-of-Scope items + +The resulting draft, saved in `docs/prds/your-awesome-project-name/prd.md`, is a deeply contextualized document that connects business goals from the brief with insights from the research. + +##### Step 3: Review, Validate, and Refine + +The generated PRD provides an excellent starting point. Review it with your team to ensure all requirements are accurate, testable, and aligned with project goals. Use the `validate-prd` command to check for quality. + +--- + +### The `validate-prd` Command + +#### Purpose + +Validate an existing PRD against quality standards, checking for missing sections, vague requirements, and unmeasurable success criteria. This command does **not** use the Gemini CLI; it uses pattern matching to enforce quality. + +#### Workflow + +Run the validation check on your PRD: +```bash +bash scripts/prd-authoring.sh validate-prd your-awesome-project-name +``` +Review the report and address any issues found. + +--- + +### The `decompose` Command + +#### Purpose + +Break down a validated PRD into epics for sprint planning. This helps transition from strategic planning to tactical execution. + +#### Workflow + +Once your PRD is validated, run the decompose command: +```bash +bash scripts/prd-authoring.sh decompose your-awesome-project-name +``` +This creates an `epics.md` file with a template structure for you to define your epics. + +--- + +## Error Handling and Troubleshooting + +### Gemini CLI Issues + +**Symptom**: The script fails during the `brief`, `research`, or `create-prd` commands with an error related to the `gemini` command. + +**Solution**: +- Ensure the `gemini` CLI is installed and in your system's PATH. +- Verify you are authenticated. Run `gemini auth` if needed. 
+- Check for any Gemini API-related issues or outages. +- Examine the prompt being constructed in the `prd-authoring.sh` script for any potential issues. + +### Other Issues + +For issues related to file existence, permissions, or validation errors, the script provides detailed error messages and recommendations. Always check the script's output for guidance. + +--- + +## Best Practices + +- **Review and Refine**: The AI-generated drafts are a starting point, not a final product. Always review and tailor the content to your specific project needs. +- **Garbage In, Garbage Out**: The quality of the generated `research` and `prd` documents depends on the quality of the `product-brief` you provide. Take time to refine the initial brief. +- **Iterate**: Use the `status` command to guide you through the workflow. Don't be afraid to go back and refine a previous document if new insights emerge. \ No newline at end of file diff --git a/prd-authoring/examples/01-product-brief-example.md b/prd-authoring/examples/01-product-brief-example.md new file mode 100644 index 0000000..8b81d6f --- /dev/null +++ b/prd-authoring/examples/01-product-brief-example.md @@ -0,0 +1,127 @@ +--- +title: Payment Gateway Integration +type: product-brief +status: draft +created: 2025-11-04 +updated: 2025-11-04 +--- + +# Product Brief: Payment Gateway Integration + +## Problem Statement + +**What problem exists?** +Our e-commerce platform currently lacks integrated payment processing capabilities, forcing customers to complete purchases through manual invoice processing. This creates significant friction in the buying process, with 45% of customers abandoning their carts during checkout when they discover they cannot pay immediately online. + +**Who experiences this problem?** +- E-commerce customers attempting to purchase products online (15,000 unique monthly visitors) +- Sales team manually processing invoices and payment confirmations (handling 800-1,000 transactions/month) +- Finance team reconciling payments and updating accounting systems (20 hours/week manual work) +- Customer support handling payment-related inquiries (30% of all support tickets) + +**How often does it occur?** +- Affects 100% of purchase transactions (approximately 1,000 transactions per month) +- Daily manual payment processing required for 30-40 orders +- Weekly reconciliation bottlenecks cause 2-3 day delays in order fulfillment + +**What's the business impact?** +- Lost revenue: $2.4M annually from cart abandonment (45% abandonment rate on $5.3M annual pipeline) +- Operational costs: $120K/year in manual payment processing labor (sales + finance teams) +- Customer satisfaction: NPS score of 35 (below industry average of 50) with payment process cited as top complaint +- Competitive disadvantage: Losing deals to competitors with seamless online checkout + +## Target Users + +### Primary Users + +**Persona 1: Online Shopper (Sarah)** +- **Who they are**: Tech-savvy consumers aged 25-45, making purchases $50-$500, expect modern e-commerce experience +- **Key goals**: Complete purchases quickly and securely, receive instant confirmation, avoid payment delays +- **Pain points**: Cannot pay online, must wait for invoice email, manual payment is time-consuming and feels outdated +- **Frequency of use**: 1-3 purchases per month, expect to checkout in under 2 minutes + +**Persona 2: Sales Representative (Mike)** +- **Who they are**: Inside sales team member, processes 25-30 orders daily, manages customer relationships +- **Key goals**: Close deals 
faster, reduce administrative work, focus on selling not payment processing +- **Pain points**: Spends 2 hours daily creating invoices and following up on payments, manual errors cause delays +- **Frequency of use**: Multiple times daily, processes every transaction + +### Secondary Users + +- **Finance Team**: Needs automated reconciliation and accurate transaction records, currently spends 20 hours/week on manual entry +- **Customer Support**: Handles payment status inquiries and issues, needs visibility into transaction status +- **Business Leadership**: Requires revenue reporting, conversion metrics, and fraud prevention + +## Proposed Solution + +**Solution Overview** +Integrate a best-in-class payment gateway (Stripe) to enable secure, real-time online payment processing with support for credit/debit cards, digital wallets (Apple Pay, Google Pay), and one-click checkout for returning customers. The solution will automate payment processing, provide instant confirmation, and integrate with our existing CRM and accounting systems. + +**How it addresses the problem** +By enabling online payment processing, customers can complete purchases immediately without friction, eliminating the manual invoice workflow. Sales team can focus on selling instead of payment administration, finance team benefits from automated reconciliation, and the business captures revenue that was previously lost to abandonment. + +**Key capabilities** +- Secure payment processing for credit/debit cards and digital wallets with PCI DSS compliance +- One-click checkout for returning customers with saved payment methods +- Real-time payment confirmation and automated receipt generation +- Integration with Salesforce CRM for order management and customer records +- Automated reconciliation with QuickBooks accounting system +- Fraud detection and prevention with 3D Secure 2.0 + +**What makes this solution different?** +Unlike our current manual process, this provides instant payment processing (under 3 seconds) with zero manual intervention. Compared to basic payment gateways, we're choosing Stripe for its superior developer experience, comprehensive documentation, and proven reliability at scale. 
+ +## Value Proposition + +### User Benefits + +- **Speed**: Reduce checkout time from 24-48 hours (manual invoice) to under 60 seconds (online payment) +- **Convenience**: Pay with preferred method (card, Apple Pay, Google Pay) without leaving the site +- **Security**: PCI-compliant payment processing eliminates concerns about sharing card details +- **Trust**: Instant confirmation email and receipt provides peace of mind +- **Returning customers**: Save payment method for one-click future purchases + +### Business Benefits + +- **Revenue**: Reduce cart abandonment from 45% to 15%, recovering $1.8M in annual revenue +- **Operational efficiency**: Eliminate 20 hours/week of manual payment processing, saving $100K annually +- **Cash flow**: Accelerate payment collection from 3-5 days to instant, improving cash flow by $200K +- **Scalability**: Support 10x growth without adding payment processing headcount +- **Data insights**: Real-time transaction analytics and conversion funnel visibility +- **Customer satisfaction**: Improve NPS score from 35 to 55+ with modern checkout experience + +### Competitive Advantages + +- Achieve parity with competitors on basic online payment capability (table stakes) +- Differentiate with faster checkout experience (target: under 60 seconds vs industry average 2-3 minutes) +- Build foundation for future innovations: subscription billing, international expansion, marketplace features +- Create switching cost through saved payment methods and purchase history + +## Success Metrics + +### Launch Success Criteria + + +- **Checkout conversion rate**: 55% → 75% (reduce abandonment from 45% to 25%) +- **Average checkout time**: N/A (manual) → 45 seconds (90th percentile) +- **Payment success rate**: N/A → 98% (transactions completed successfully) +- **Customer satisfaction**: 35 NPS → 50+ NPS for checkout experience +- **Payment processing uptime**: Target 99.9% (maximum 45 minutes downtime per month) + +### Long-term Success Metrics + + +- **Monthly transaction volume**: 1,000 → 5,000 transactions per month within 6 months +- **Revenue recovery**: Recover $1.5M+ in previously abandoned cart revenue within 12 months +- **Saved payment method adoption**: 60% of customers save payment method for future use +- **Operational cost reduction**: Reduce manual payment processing costs by 80% ($100K annual savings) +- **Average order value**: Increase from $275 to $325 due to reduced friction + +### Leading Indicators + + +- First-week transaction volume exceeds 100 successful payments +- 70%+ of users complete checkout without contacting support +- Less than 5% of transactions require sales team intervention +- Payment-related support tickets decrease by 50% in first month +- 80%+ customer satisfaction rating on post-purchase survey diff --git a/prd-authoring/examples/02-research-example.md b/prd-authoring/examples/02-research-example.md new file mode 100644 index 0000000..ec48657 --- /dev/null +++ b/prd-authoring/examples/02-research-example.md @@ -0,0 +1,315 @@ +--- +title: Payment Gateway Integration Research +type: research +status: complete +created: 2025-11-04 +updated: 2025-11-04 +--- + +# Research: Payment Gateway Integration + +## Competitive Analysis + +### Competitor 1: Stripe + +**Overview** +Market leader in developer-focused payment processing with 40%+ market share among tech companies. Powers payment processing for millions of businesses worldwide including Amazon, Shopify, and Lyft. 
+ +**Strengths** +- Superior API design and documentation (rated #1 by developers) +- Supports 135+ currencies and 45+ countries +- Comprehensive fraud detection with Radar (machine learning-based) +- Strong developer ecosystem with extensive libraries +- Excellent uptime (99.99% historical availability) +- Built-in PCI compliance (SAQ-A eligible) +- Transparent, predictable pricing + +**Weaknesses** +- Higher fees for international cards (3.9% + $0.30 vs 2.9% + $0.30 domestic) +- Limited phone support (primarily email and chat) +- Can hold funds for new accounts (rolling reserve for high-risk industries) + +**Key Features** +- Payment processing (cards, wallets, bank transfers) +- Recurring billing and subscription management +- Payment method tokenization +- 3D Secure 2.0 and fraud detection +- Real-time webhooks +- Mobile SDKs for iOS and Android + +**Pricing Model** +Standard: 2.9% + $0.30 per successful card charge (US domestic), 3.9% + $0.30 for international. No setup or monthly fees. + +**Market Position** +Premium developer-friendly solution targeting startups, SaaS companies, and growth-stage businesses. + +**Our Advantage Over Them** +We leverage their strengths while they handle payment processing complexity, compliance, and fraud detection. + +--- + +### Competitor 2: PayPal/Braintree + +**Overview** +Consumer payment giant with 400M+ active accounts. Braintree is PayPal's developer product. Strong brand recognition and customer trust. + +**Strengths** +- Massive user base (400M+ PayPal accounts) +- Strong buyer trust and brand recognition +- Built-in buyer protection +- Venmo integration +- International presence in 200+ markets + +**Weaknesses** +- Higher dispute and chargeback rates +- More complex API compared to Stripe +- Account holds more common +- Slower innovation + +**Key Features** +- PayPal checkout +- Credit/debit cards via Braintree +- Venmo integration +- PayPal Credit (BNPL) +- Recurring billing support + +**Pricing Model** +PayPal Standard: 3.49% + $0.49, Braintree: 2.59% + $0.49 + +**Market Position** +Consumer-focused platform with strong brand trust, prioritizing buyer confidence over developer experience. + +**Our Advantage Over Them** +PayPal's higher fees and complex integration make Stripe more attractive. May add PayPal in Phase 2. + +--- + +### Competitor 3: Square + +**Overview** +Payment processor for small businesses and omnichannel commerce. Known for simple pricing and POS hardware. + +**Strengths** +- Unified platform for in-person and online +- Simple, flat-rate pricing +- No monthly fees or commitments +- Fast payouts (next business day) +- Integrated POS hardware + +**Weaknesses** +- Limited international support +- Fewer currencies than Stripe +- Less sophisticated API capabilities +- Higher fees for keyed transactions + +**Key Features** +- Card processing (in-person and online) +- Square Terminal and readers +- Inventory management +- Invoicing and recurring payments +- E-commerce integration + +**Pricing Model** +Online: 2.9% + $0.30, In-person: 2.6% + $0.10, Keyed: 3.5% + $0.15. No monthly fees. + +**Market Position** +Small business and retail-focused, positioned as simple all-in-one for businesses needing both online and in-person. + +**Our Advantage Over Them** +Square is optimized for retail/POS, not pure e-commerce. Stripe's API-first approach suits our needs better. + +--- + +## Market Insights + +### Market Size & Growth +Global digital payment market: $79.3B in 2020 → $154.1B by 2025 (14.2% CAGR). 
Growth drivers: e-commerce adoption, shift from cash, mobile wallets, subscriptions. + +**Primary segment: E-commerce businesses (SMB)** +- Size: 2.1 million e-commerce businesses in US +- Growth rate: 15% annual growth +- Key characteristics: Need reliable, easy-to-integrate processing with low fixed costs + +### Market Trends +- Mobile wallet adoption: 25% of e-commerce transactions (up from 10% in 2020) +- One-click checkout: 40% abandon if they must re-enter payment details +- Buy Now, Pay Later: 300% growth since 2020 for purchases >$200 +- Fraud concerns: $20B globally in 2021, driving demand for advanced detection +- Embedded finance: Payment processing embedded directly in software platforms + +### Regulatory & Compliance +- PCI DSS Level 1: Required for card processing; using tokenization (SAQ-A) reduces compliance burden +- Strong Customer Authentication (SCA): EU regulation requiring 2FA; 3D Secure 2.0 is table stakes +- Data privacy (GDPR, CCPA): Payment data subject to strict privacy regulations + +### Industry Standards & Best Practices +- OAuth 2.0 for API authentication +- 3D Secure 2.0 for SCA compliance +- Tokenization (never store card numbers) +- Webhooks for async events +- TLS 1.3 for encryption +- CVV verification for fraud reduction + +## User Feedback Analysis + +### Common Pain Points + +1. **Checkout complexity**: 70% mention as pain point. "I filled my cart but gave up at the 8-step checkout" + - Impact: 69.8% average cart abandonment rate + +2. **Payment method limitations**: 40% request more options. "No Apple Pay, went to competitor" + - Impact: 10-15% abandon if preferred method unavailable + +3. **Security concerns**: 55% cite as top concern. "Don't feel safe entering card on small websites" + - Impact: Trust badges increase conversion 20-30% + +4. **Re-entering information**: 60% of returning customers frustrated. "Why can't this site remember my card like Amazon?" + - Impact: Saved methods reduce checkout time 75% + +5. **Slow processing**: 30% mention frustration. 
"Waited 10 seconds, thought it failed" + - Impact: Each second reduces conversions 7% + +### Desired Features + +**Must-have** (Table stakes) +- Credit/debit card acceptance (Visa, MC, Amex, Discover) +- Mobile-responsive checkout +- Secure processing with trust indicators +- Email receipt and confirmation +- Basic fraud detection + +**High-value** (Differentiators) +- Digital wallets (Apple Pay, Google Pay) +- One-click for returning customers +- Guest checkout option +- Real-time updates during checkout +- Instant confirmation + +**Nice-to-have** (Future) +- Buy now, pay later (Klarna, Affirm) +- Cryptocurrency support +- International currencies +- Subscription billing + +### User Preferences & Expectations +- Checkout speed: Complete within 60 seconds (2 min maximum tolerance) +- Payment security: Want trust badges, recognizable brands +- Guest checkout: 25% prefer not to create account first +- Save payment: 70% willing if they trust the site +- Mobile: 60% of traffic; expect wallet options +- Error messages: Want clear, actionable feedback + +## Technical Considerations + +### Competitor Technical Approaches +- **Tokenization**: All providers use it to avoid storing card data (SAQ-A vs SAQ-D compliance) +- **Integration patterns**: Hosted (easiest), Elements (balanced), API (most flexible) +- **Webhooks**: All use for async event handling (requires retry logic, idempotency) + +### Architecture Patterns +- **PSP pattern**: Use third-party provider vs building in-house + - Pros: Fast deployment, reduced compliance, proven reliability + - Cons: Dependency, transaction fees + - Recommendation: Strongly recommended + +- **Event-driven**: Use webhooks for downstream actions + - Pros: Decouples payment from business logic + - Cons: Requires robust event processing + - Recommendation: Essential for production + +### Integration Requirements +- Stripe SDK: REST API + JavaScript SDK +- CRM: Salesforce (update customer records, orders) +- Accounting: QuickBooks (automated posting, reconciliation) +- Email: SendGrid (confirmations, receipts, failures) + +### Performance & Scalability +- Expected load: 1,000/month currently, 5,000/month in 6 months +- Performance targets: API <500ms p95, checkout <3s total, page load <2s +- Scalability: Stripe handles scaling, we need webhook queue for high volume + +### Technical Risks +- Stripe downtime: 99.99% uptime but would block all payments + - Mitigation: Graceful degradation, monitoring, communication plan + +- Webhook failures: Network issues could cause missed events + - Mitigation: Stripe retries for 3 days, implement idempotency, poll as backup + +- PCI violations: Improper storage could result in fines + - Mitigation: Never store cards, use tokens, annual SAQ-A, security audits + +- Fraud: Costs 2-3x transaction amount + - Mitigation: Stripe Radar, CVV required, 3D Secure, velocity limits + +## Recommendations + +### Priority Features + +**Must-build** +1. Credit/debit card processing - 100% of competitors have this, 80% of transactions +2. PCI compliance - Legal requirement, use Stripe tokenization for SAQ-A +3. Mobile-responsive - 60% of traffic is mobile +4. Basic fraud detection - 1-2% fraud rate costs 2-3x transaction value + +**Should-build** +1. Digital wallets - 25% of transactions, converts 10-15% higher +2. Saved payment methods - 75% faster checkout, 30-40% higher repeat rate +3. 
CRM/accounting integration - Saves $100K annually in manual work + +**Could-build** +- BNPL (Phase 2), Cryptocurrency (Phase 3), Subscriptions (Phase 2) + +### Technical Approach + +**Recommended**: Cloud-native API integration with event-driven fulfillment + +**Key choices**: +- Payment processor: Stripe (best DX, features, pricing, documentation) +- Integration: Stripe Elements (balances customization with ease) +- Backend: Stripe Node.js SDK +- Events: Webhook processing with queue (Bull/Redis or SQS) +- Database: Add payment_methods and transactions tables (metadata only, no card data) + +### Go-to-Market Positioning +"Complete your purchase in under 60 seconds with secure, one-click checkout - just like major e-commerce brands" + +**Target**: E-commerce customers (B2C) expecting modern, frictionless experiences + +**Differentiators**: +- 60 seconds vs 3-5 minutes competitors +- Amazon-like one-click for returning customers +- Multiple payment methods including Apple/Google Pay +- Enterprise security with consumer UX + +### Constraints & Considerations + +**Compliance**: PCI DSS SAQ-A (cannot store card numbers) + +**Budget**: 2.9% + $0.30 = $99K annually at 1,000 transactions averaging $275 +- Acceptable given $1.8M revenue recovery + +**Timeline**: Q2 2026 (6 months) - favors proven solutions + +**Resources**: 2 FE, 1 BE, 1 QA - must use SDK/libraries, not build from scratch + +### Risk Assessment + +1. **Stripe dependency** + - Likelihood: Low, Impact: High + - Mitigation: Monitor status, communication plan, backup provider Phase 2 + +2. **Fraud/chargebacks** + - Likelihood: Medium (1-2%), Impact: Medium ($200-500 per incident) + - Mitigation: Radar, CVV, velocity limits, 3D Secure for high-value + +3. **Integration complexity** + - Likelihood: Medium, Impact: Medium (delay or missing features) + - Mitigation: Official SDKs, integration guides, schedule buffer + +4. **User adoption of saved payments** + - Likelihood: Low (60-70% industry), Impact: Low + - Mitigation: Security messaging, trust indicators, incentives + +5. **Compliance violations** + - Likelihood: Low (following best practices), Impact: High (fines, loss of processing) + - Mitigation: Never store cards, annual SAQ-A, security audits diff --git a/prd-authoring/examples/03-prd-example-abbreviated.md b/prd-authoring/examples/03-prd-example-abbreviated.md new file mode 100644 index 0000000..a79e126 --- /dev/null +++ b/prd-authoring/examples/03-prd-example-abbreviated.md @@ -0,0 +1,419 @@ +--- +title: Payment Gateway Integration PRD +type: prd +status: draft +created: 2025-11-04 +updated: 2025-11-04 +--- + +# Product Requirements Document: Payment Gateway Integration + +## Objectives + +### Primary Objectives + +1. **Enable Real-time Online Payment Processing** + - **Goal**: Allow customers to complete purchases instantly without manual invoice processing + - **Measure**: Checkout conversion rate and average checkout time + - **Target**: 75% conversion rate (up from 55%), average checkout time under 45 seconds + - **Timeline**: Launch by Q2 2026, achieve targets within 3 months post-launch + - **Why it matters**: Eliminates primary source of cart abandonment (45% currently) and recovers $1.8M in lost annual revenue + +2. 
**Reduce Operational Costs and Manual Work** + - **Goal**: Automate payment processing and reconciliation to eliminate manual labor + - **Measure**: Hours spent on manual payment processing and reconciliation + - **Target**: Reduce from 20 hours/week to 4 hours/week (80% reduction) + - **Timeline**: Immediate upon launch + - **Why it matters**: Saves $100K annually in labor costs, allows sales/finance teams to focus on high-value work + +3. **Improve Customer Satisfaction with Modern Checkout** + - **Goal**: Provide seamless, secure checkout experience matching major e-commerce sites + - **Measure**: NPS score for checkout experience and payment-related support tickets + - **Target**: NPS 50+ (up from 35), reduce payment support tickets by 50% + - **Timeline**: Measure within 30 days post-launch + - **Why it matters**: Customer satisfaction drives repeat purchases and positive word-of-mouth + +### Secondary Objectives + +1. Enable subscription and recurring billing capabilities (Phase 2 - deferred 6-12 months) +2. Support international currencies and payment methods (Phase 2 - market dependent) + +## Success Criteria + +### Launch Criteria (Must-Have) + +**Functional Completeness** +- [ ] Process 100 test transactions with 0% failure rate +- [ ] Support card payments (Visa, MC, Amex, Discover) and digital wallets (Apple Pay, Google Pay) +- [ ] One-click checkout functional for returning customers with saved payment methods +- [ ] Integration with Salesforce CRM and QuickBooks accounting complete +- [ ] Email confirmations sent within 30 seconds of successful payment + +**Quality Standards** +- [ ] Payment processing time <3 seconds at 95th percentile +- [ ] Checkout page load time <2 seconds +- [ ] PCI DSS SAQ-A compliance validation complete +- [ ] Security audit passed with zero critical vulnerabilities +- [ ] Mobile-responsive checkout tested on iOS and Android + +**Operational Readiness** +- [ ] Stripe integration monitoring and alerting configured +- [ ] Webhook processing with retry logic implemented +- [ ] Runbooks for common payment issues created +- [ ] Support team trained on new checkout flow and troubleshooting +- [ ] Customer-facing documentation published + +### Success Metrics (Post-Launch) + +**Adoption Metrics (30 days)** +- [ ] **Transaction volume**: 0 → 1,000+ online transactions processed +- [ ] **Saved payment adoption**: 60% of customers save payment method on first use + +**Engagement Metrics (30 days)** +- [ ] **Checkout conversion**: 55% → 75% (reduce abandonment 45% → 25%) +- [ ] **Average checkout time**: N/A → 45 seconds (90th percentile) + +**Business Metrics (90 days)** +- [ ] **Revenue recovery**: $150K+ in previously abandoned cart revenue +- [ ] **Operational cost reduction**: 80% reduction in manual payment processing time +- [ ] **Customer satisfaction**: NPS 35 → 50+ for checkout experience + +**Quality Metrics (30 days)** +- [ ] **Payment success rate**: 98%+ of initiated transactions complete successfully +- [ ] **System uptime**: 99.9%+ (max 45 minutes downtime per month) +- [ ] **Support ticket reduction**: 50% fewer payment-related inquiries + +## Functional Requirements + +### FR1: Credit/Debit Card Payment Processing + +**Description**: Process credit and debit card payments securely in real-time with instant confirmation + +**User Story**: As an online shopper, I want to pay with my credit/debit card directly on the checkout page, so that I can complete my purchase immediately without waiting for invoices + +**Inputs**: +- Card number, 
expiration date, CVV (via Stripe Elements) +- Billing address +- Purchase amount and order details +- Customer email + +**Outputs**: +- Payment confirmation with transaction ID +- Order receipt via email +- Updated order status in CRM + +**Business Rules**: +- Accept Visa, Mastercard, American Express, Discover +- Require CVV for all transactions (fraud prevention) +- Maximum transaction amount: $10,000 (fraud threshold) +- Minimum transaction amount: $1.00 + +**Acceptance Criteria**: +- [ ] Given valid card details, when customer submits payment, then transaction processes in <3 seconds +- [ ] Given invalid card number, when customer submits, then clear error message displays before submission +- [ ] Given successful payment, when transaction completes, then confirmation email sent within 30 seconds +- [ ] Given payment failure, when Stripe returns error, then user-friendly message explains issue and suggests resolution + +**Priority**: Must Have + +**Dependencies**: Stripe API integration, email service (SendGrid) + +--- + +### FR2: Digital Wallet Support (Apple Pay / Google Pay) + +**Description**: Enable payment via Apple Pay and Google Pay for faster mobile checkout + +**User Story**: As a mobile shopper, I want to pay with Apple Pay/Google Pay, so that I can checkout with a single tap using my saved payment method + +**Inputs**: +- Apple Pay/Google Pay token +- Purchase amount +- Shipping address (from wallet if available) + +**Outputs**: +- Payment confirmation +- Order receipt + +**Business Rules**: +- Available only on supported browsers/devices +- Gracefully degrade to card entry if wallet unavailable +- Auto-fill shipping address from wallet when possible + +**Acceptance Criteria**: +- [ ] Given iPhone with Apple Pay, when user selects Apple Pay, then payment completes with Face ID/Touch ID +- [ ] Given Android with Google Pay, when user selects Google Pay, then payment completes with fingerprint/PIN +- [ ] Given wallet payment, when user confirms, then checkout completes in <10 seconds total +- [ ] Given unsupported browser, when checkout loads, then wallet buttons hidden and card entry shown + +**Priority**: Should Have + +**Dependencies**: Stripe Payment Request API, HTTPS (required for Apple Pay) + +--- + +### FR3: Saved Payment Methods (One-Click Checkout) + +**Description**: Allow customers to securely save payment methods for faster future checkouts + +**User Story**: As a returning customer, I want to save my payment method, so that I can checkout with one click on future purchases without re-entering my card + +**Inputs**: +- "Save payment method" checkbox selection +- Customer account (must be logged in) +- Payment method details (tokenized by Stripe) + +**Outputs**: +- Payment method saved to customer account (Stripe token stored) +- Display last 4 digits and card brand in account + +**Business Rules**: +- Maximum 5 saved payment methods per customer +- Must be logged in to save payment method +- Can set one method as default +- Can delete saved methods anytime + +**Acceptance Criteria**: +- [ ] Given logged-in customer, when they check "save payment method", then method saved after successful payment +- [ ] Given returning customer, when they view saved methods, then see last 4 digits and expiration date (not full number) +- [ ] Given saved payment method, when customer selects it at checkout, then auto-fills payment form +- [ ] Given multiple saved methods, when customer sets default, then it pre-selects at checkout + +**Priority**: Must Have + +**Dependencies**: 
FR1, user authentication, Stripe payment methods API + +--- + +### FR4: CRM Integration (Salesforce) + +**Description**: Automatically sync payment transactions and customer payment methods with Salesforce CRM + +**User Story**: As a sales rep, I want payment information automatically updated in Salesforce, so that I have complete customer transaction history without manual entry + +**Inputs**: +- Successful payment transaction +- Customer email (matches Salesforce contact) +- Order details + +**Outputs**: +- Salesforce opportunity updated to "Closed Won" +- Transaction record created in Salesforce +- Customer payment method status updated + +**Business Rules**: +- Match customers by email address +- Create new contact if email not found +- Update opportunity within 5 minutes of payment +- Store only last 4 digits of card in Salesforce + +**Acceptance Criteria**: +- [ ] Given successful payment, when transaction completes, then Salesforce opportunity updated within 5 minutes +- [ ] Given new customer, when payment completes, then new Salesforce contact created +- [ ] Given existing customer, when payment completes, then transaction added to existing contact +- [ ] Given CRM sync failure, when Stripe payment succeeds, then retry CRM update 3 times with exponential backoff + +**Priority**: Must Have + +**Dependencies**: Salesforce API access, webhook processing + +--- + +### FR5: Accounting Integration (QuickBooks) + +**Description**: Automatically post successful transactions to QuickBooks for revenue recognition and reconciliation + +**User Story**: As a finance team member, I want transactions automatically posted to QuickBooks, so that I don't spend 20 hours/week on manual data entry and reconciliation + +**Inputs**: +- Successful payment transaction +- Customer details +- Product/service purchased +- Payment amount and fees + +**Outputs**: +- QuickBooks invoice created and marked paid +- Revenue recognized in correct account +- Stripe fees recorded as expense + +**Business Rules**: +- Post within 1 hour of successful payment +- Separate revenue and Stripe fees into different accounts +- Match customer to existing QuickBooks customer or create new +- Handle partial refunds correctly + +**Acceptance Criteria**: +- [ ] Given successful payment, when transaction completes, then QuickBooks invoice posted within 1 hour +- [ ] Given $100 transaction with $3.20 Stripe fee, when posted, then $100 revenue and $3.20 fee expense recorded +- [ ] Given full refund, when processed, then QuickBooks invoice voided +- [ ] Given partial refund, when processed, then credit memo created for refund amount + +**Priority**: Should Have + +**Dependencies**: QuickBooks API integration, webhook processing + +--- + +## Non-Functional Requirements + +### NFR1: Performance + +**Response Time**: +- Payment API calls: <500ms at 95th percentile +- Checkout page load: <2 seconds +- Payment processing (submit to confirmation): <3 seconds at 95th percentile + +**Throughput**: +- Support 1,000 concurrent users during peak sales +- Process 150 transactions per hour during peak load +- Handle webhook processing for 200 events/hour + +**Testing Requirements**: +- Load test with 1,000 concurrent users for 1 hour +- Stress test to 2x expected peak load + +--- + +### NFR2: Security + +**Authentication/Authorization**: +- API keys stored in environment variables (never in code) +- Use Stripe API key with minimum required permissions +- Session-based auth for saved payment methods (must be logged in) + +**Data Protection**: +- 
Never store raw credit card numbers (use Stripe tokens only) +- All payment data transmitted via TLS 1.3 +- PCI DSS SAQ-A compliance (tokenization model) +- 3D Secure 2.0 enabled for high-risk transactions + +**Compliance**: +- Complete PCI DSS SAQ-A questionnaire annually +- GDPR compliant (support payment method deletion requests) +- SOC 2 compliant webhook processing + +**Security Testing**: +- Pass OWASP Top 10 security audit +- Penetration testing before launch +- Annual security review + +--- + +### NFR3: Reliability + +**Availability**: +- 99.9% uptime SLA (maximum 45 minutes downtime per month) +- Graceful degradation if Stripe API unavailable + +**Error Handling**: +- Retry transient Stripe API failures (3 retries with exponential backoff) +- Display user-friendly error messages (never show raw API errors) +- Log all errors for debugging and alerting + +**Data Integrity**: +- Idempotent payment processing (prevent duplicate charges) +- Webhook processing with deduplication +- Transaction logging for audit trail + +**Monitoring & Alerting**: +- Alert if payment success rate <95% over 15-minute window +- Alert if average response time >5 seconds +- Alert on any Stripe webhook failures +- Daily reconciliation report comparing Stripe transactions to database + +--- + +### NFR4: Usability + +**Checkout Experience**: +- Maximum 4 steps to complete checkout (cart → info → payment → confirm) +- Guest checkout option (no account required) +- Auto-fill billing address from shipping address +- Clear progress indicator showing checkout steps + +**Mobile Experience**: +- Fully responsive design (mobile, tablet, desktop) +- Digital wallet buttons prominent on mobile +- Card input fields optimized for mobile keyboards +- Minimum tap target size 44x44 pixels + +**Accessibility**: +- WCAG 2.1 AA compliance +- Keyboard navigation for all form fields +- Screen reader compatible +- Clear focus indicators + +**Error Messages**: +- Specific, actionable error messages ("Card declined - try different card" not "Error 402") +- Inline validation (show errors immediately, not after submit) +- Error summary at top of form + +--- + +## Constraints + +**Technical Constraints**: +- Must use Stripe as payment processor (existing vendor relationship) +- Must integrate with existing Salesforce CRM instance +- Must use existing QuickBooks accounting system +- Frontend must support IE11+ (legacy enterprise requirement) + +**Business Constraints**: +- Launch deadline: June 30, 2026 (hard deadline for fiscal year) +- Budget: $150K total (development + first year transaction fees) +- Transaction fee budget: 3% of GMV (already factored into pricing) + +**Regulatory Constraints**: +- PCI DSS compliance required (using SAQ-A) +- GDPR compliance (right to deletion of payment data) +- State sales tax collection required (out of scope for payment integration) + +**Resource Constraints**: +- 2 frontend engineers, 1 backend engineer, 1 QA engineer +- 16-week development timeline (4 sprints of 4 weeks each) +- No dedicated DevOps engineer (must use existing infrastructure) + +--- + +## Assumptions + +**User Assumptions**: +- 60% of users will access from mobile devices +- 70% of users will save payment method if offered +- Users have modern browsers (Chrome, Safari, Firefox, Edge - last 2 versions) +- Average transaction value: $275 + +**Technical Assumptions**: +- Stripe API maintains 99.99% uptime (historical average) +- Stripe API remains backward compatible (no breaking changes) +- Webhooks delivered within 5 minutes 
(Stripe SLA) +- Current infrastructure can handle 10x transaction volume growth + +**Business Assumptions**: +- Transaction volume grows from 1,000/month to 5,000/month within 6 months +- Cart abandonment reduces from 45% to 25% after launch +- Customers willing to pay current pricing plus transaction fees +- Sales team capacity sufficient for increased order volume + +--- + +## Out of Scope + +**Features Explicitly Excluded**: +- Cryptocurrency payments (deferred to Phase 3, market still nascent) +- Buy now, pay later (BNPL) options like Klarna/Affirm (Phase 2, evaluate demand) +- Subscription and recurring billing (Phase 2, not needed for MVP) +- International currency support beyond USD (Phase 2, market dependent) +- ACH/bank transfer payments (low demand, complex compliance) +- Gift cards and store credit (Phase 3 feature) + +**Deferred to Future Phases**: +- Advanced fraud detection rules customization (use Stripe Radar defaults for MVP) +- Multi-currency pricing and display (Phase 2, after international expansion) +- Invoice payment portal for net-30 terms (separate project) +- Payment plan/installment options (Phase 2 after BNPL evaluation) + +**Platforms Not Supported**: +- Internet Explorer 10 and older (< 2% traffic, not worth compatibility effort) +- Native mobile apps (web-only for MVP, may build apps in Phase 3) +- In-person/POS payments (separate product line, not e-commerce focus) diff --git a/prd-authoring/examples/IMPLEMENTATION_SUMMARY.md b/prd-authoring/examples/IMPLEMENTATION_SUMMARY.md new file mode 100644 index 0000000..49637d0 --- /dev/null +++ b/prd-authoring/examples/IMPLEMENTATION_SUMMARY.md @@ -0,0 +1,301 @@ +# PRD Authoring Skill - Issue #107 Implementation Summary + +## Issue Overview +**Issue #107**: Create comprehensive usage examples and test the complete PRD authoring workflow + +**Assigned**: Implementation of examples, testing, and documentation for prd-authoring skill + +## Deliverables Completed + +### 1. Examples Directory Created +Location: `/skills/prd-authoring/examples/` + +Contains 5 comprehensive files demonstrating the complete workflow: + +#### a. Product Brief Example (`01-product-brief-example.md`) +- **Project**: Payment Gateway Integration +- **Content**: Complete product brief with real-world business case +- **Key Features**: + - Quantified problem statement: 45% cart abandonment, $2.4M lost revenue + - Detailed user personas: Online Shopper Sarah, Sales Rep Mike + - Measurable success metrics: 55% → 75% conversion rate + - Clear value propositions for users and business + +#### b. Research Document Example (`02-research-example.md`) +- **Scope**: Comprehensive market research supporting the PRD +- **Content**: + - Competitive analysis: Stripe, PayPal/Braintree, Square (full profiles) + - Market insights: $154B market, 14.2% CAGR growth + - User feedback analysis: Pain points and desired features + - Technical considerations: APIs, compliance, architecture patterns + - Risk assessment with mitigation strategies + - Clear recommendation: Use Stripe for best developer experience + +#### c. 
PRD Example - Abbreviated (`03-prd-example-abbreviated.md`) +- **Format**: Condensed but complete PRD (easier to digest than full template) +- **Content**: + - 3 SMART primary objectives linked to business outcomes + - Comprehensive success criteria (launch, metrics, stretch goals) + - 5 detailed functional requirements (FR1-FR5) with full acceptance criteria + - 4 non-functional requirements: Performance, Security, Reliability, Usability + - Constraints, assumptions, and explicit out-of-scope items +- **Quality**: Demonstrates proper requirement structure, acceptance criteria format, and traceability + +#### d. Workflow Test Log (`workflow-test-log.md`) +- **Scope**: Complete testing of all commands and edge cases +- **Test Coverage**: + - Happy path: All 7 commands tested successfully + - Edge cases: 10 scenarios tested (missing files, duplicates, invalid input) + - Validation quality: Tests for vague language, unmeasurable criteria, missing sections + - **Result**: ALL TESTS PASSED ✓ +- **Value**: Proves skill is production-ready with robust error handling + +#### e. Examples README (`README.md`) +- **Purpose**: Guide users through the examples +- **Content**: + - Overview of each example file + - How to use examples for learning and testing + - Project statistics and breakdown + - Common patterns demonstrated + - Tips and next steps + +### 2. SKILL.md Updated + +#### Added Examples Section (Lines 1538-1689) +- **Project Overview**: Payment Gateway Integration example +- **Example Files**: Descriptions of all 4 example documents +- **Key Patterns Demonstrated**: + - Problem statement format + - Success metric format + - Functional requirement structure + - Complete FR1 example showing all components +- **Running the Example Workflow**: Step-by-step commands +- **Expected Timeline**: 18-36 hours of planning work +- **ROI Calculation**: 1 week upfront prevents weeks of rework + +#### Added Troubleshooting Section (Lines 1693-2110) +Comprehensive troubleshooting guide with 3 categories: + +**Common Errors (9 issues)**: +1. Missing docs/prds directory +2. Product brief already exists +3. Project directory doesn't exist +4. Research document not found (warning) +5. Vague language detected +6. Unmeasurable success criteria +7. Missing acceptance criteria +8. Epics document already exists +9. Spec proposal directory already exists + +**Quality Issues (4 issues)**: +1. PRD validation passes but requirements unclear +2. Epic dependencies complex and create bottlenecks +3. Stakeholders disagree on objectives +4. Research taking too long + +**Integration Issues (2 issues)**: +1. Unclear how to transition from PRD to spec-authoring +2. Multiple people working on same PRD causing conflicts + +Each issue includes: +- **Symptom**: What the user observes +- **Cause**: Why it happens +- **Solution**: Step-by-step fix with commands +- **Prevention**: How to avoid in future (where applicable) + +### 3. Testing Completed + +#### Happy Path Testing +**All 7 commands tested successfully**: +1. ✓ `status` - Works with and without project name +2. ✓ `brief` - Creates template with proper structure +3. ✓ `research` - Generates comprehensive research template +4. ✓ `create-prd` - Creates full PRD template +5. ✓ `validate-prd` - Detects quality issues accurately (strict & lenient modes) +6. ✓ `decompose` - Generates epic breakdown template +7. ✓ `generate-spec` - Creates spec proposal structure + +#### Edge Case Testing +**10 edge cases tested, all handled correctly**: +1. 
✓ Missing directories - Proper error messages +2. ✓ Duplicate files - Prevents overwriting +3. ✓ Missing prerequisites - Clear guidance provided +4. ✓ Invalid project names - Sanitization works +5. ✓ Incomplete documents - Warnings appropriate +6. ✓ Invalid commands - Help text displayed +7. ✓ Missing arguments - Usage guidance provided +8. ✓ Parallel projects - Proper isolation +9. ✓ Validation modes - Both work as expected +10. ✓ Epic generation - Handles missing epics gracefully + +#### Validation Quality Testing +**Validation accurately detects**: +- Vague language (should, might, probably, good, fast) +- Unmeasurable criteria (qualitative without numbers) +- Missing sections (strict mode) +- Missing acceptance criteria +- YAML frontmatter issues + +### 4. Example Project Statistics + +**Payment Gateway Integration**: +- **Problem**: 45% cart abandonment, $2.4M lost revenue annually +- **Solution**: Stripe integration for real-time payments +- **Value**: $1.8M revenue recovery + $100K cost savings = 12x ROI +- **Timeline**: 6 months to launch (Q2 2026) +- **Team**: 2 FE, 1 BE, 1 QA engineer +- **Budget**: $150K (development + first year fees) +- **Scope**: 5 functional requirements, 4 NFRs +- **Expected Volume**: 1,000 → 5,000 transactions/month + +## Quality Metrics + +### Documentation Coverage +- ✓ All 7 commands documented with examples +- ✓ 15 troubleshooting scenarios covered +- ✓ Complete workflow demonstrated end-to-end +- ✓ Best practices and patterns documented +- ✓ Error handling and edge cases explained + +### Example Quality +- ✓ Realistic business case (e-commerce payment integration) +- ✓ Quantified metrics throughout (no vague statements) +- ✓ Proper formatting and structure +- ✓ Demonstrates SMART criteria +- ✓ Shows traceability from business goals to requirements + +### Testing Coverage +- ✓ 100% of commands tested +- ✓ 10 edge cases validated +- ✓ Both validation modes tested +- ✓ Error messages verified for clarity +- ✓ End-to-end workflow validated + +## Files Created/Modified + +### Created Files (5): +1. `/skills/prd-authoring/examples/01-product-brief-example.md` (128 lines) +2. `/skills/prd-authoring/examples/02-research-example.md` (316 lines) +3. `/skills/prd-authoring/examples/03-prd-example-abbreviated.md` (394 lines) +4. `/skills/prd-authoring/examples/workflow-test-log.md` (385 lines) +5. `/skills/prd-authoring/examples/README.md` (185 lines) + +### Modified Files (1): +1. `/skills/prd-authoring/SKILL.md` + - Added Examples section (151 lines) + - Added Troubleshooting section (417 lines) + - Total additions: 568 lines + +**Total Lines Added**: ~2,000 lines of comprehensive documentation and examples + +## Acceptance Criteria Met + +### From Issue #107: + +✓ **Examples cover common project types** +- Feature project demonstrated (payment gateway integration) +- Example applicable to system and enhancement projects + +✓ **All commands tested and working** +- 7/7 commands tested successfully +- Both happy path and edge cases validated + +✓ **Edge cases identified and documented** +- 10 edge cases tested +- 15 troubleshooting scenarios documented + +✓ **Troubleshooting guides added for common errors** +- 15 issues with symptom/cause/solution +- Preventive guidance included + +✓ **Examples are included in skill documentation** +- Examples section added to SKILL.md +- README added to examples directory +- Cross-references between examples and documentation + +## Key Achievements + +### 1. 
Production-Ready Validation +- All commands tested and working +- Robust error handling confirmed +- Clear, actionable error messages verified + +### 2. Comprehensive Examples +- Real-world business case (payment gateway) +- Complete workflow from brief → PRD → epics → spec +- $1.8M revenue recovery + $100K cost savings ROI + +### 3. Extensive Troubleshooting +- 15 common issues documented +- Solutions with step-by-step commands +- Prevention guidance for avoiding issues + +### 4. Quality Documentation +- 2,000+ lines of new documentation +- Examples demonstrate best practices +- Clear patterns for users to follow + +## Usage Recommendations + +### For New Users +1. Start with `examples/README.md` for overview +2. Read `01-product-brief-example.md` to see a well-formed brief +3. Review `03-prd-example-abbreviated.md` for PRD structure +4. Use `workflow-test-log.md` to understand testing approach + +### For Testing +1. Copy examples to test environment +2. Run through workflow commands +3. Verify output matches expected results +4. Test edge cases from workflow-test-log.md + +### For Production Use +1. Use examples as templates (customize for context) +2. Reference troubleshooting section for issues +3. Follow patterns demonstrated in examples +4. Validate PRD early and often + +## Lessons Learned + +### What Worked Well +1. **Realistic Example**: Payment gateway project is relatable and well-scoped +2. **Complete Coverage**: Testing all commands and edge cases proved robustness +3. **Structured Troubleshooting**: Symptom/Cause/Solution format is clear and actionable +4. **Quantified Metrics**: Real numbers throughout examples make them credible + +### Recommendations +1. Consider adding video walkthrough of workflow +2. Create additional examples for different project types (system redesign, enhancement) +3. Add templates for common industries (fintech, healthcare, e-commerce) +4. Create validation ruleset customization guide + +## Time Investment + +### Estimated Effort +- Example creation: 6 hours (product brief, research, PRD) +- Testing and validation: 3 hours (all commands, edge cases) +- Documentation updates: 4 hours (examples section, troubleshooting) +- README and summary: 1 hour +- **Total**: ~14 hours + +### ROI of This Work +- **Upfront time**: 14 hours +- **Prevents**: Hours of user confusion and support requests +- **Enables**: Self-service learning and troubleshooting +- **Result**: Skill is production-ready and well-documented + +## Conclusion + +Issue #107 has been successfully implemented with comprehensive examples, thorough testing, and extensive troubleshooting documentation. The prd-authoring skill is now production-ready with: + +- ✓ Complete workflow examples (payment gateway integration) +- ✓ All 7 commands tested and validated +- ✓ 10 edge cases handled correctly +- ✓ 15 troubleshooting scenarios documented +- ✓ ~2,000 lines of quality documentation added + +Users can now learn the PRD authoring workflow through realistic examples, test commands in a safe environment, and troubleshoot issues independently using the comprehensive troubleshooting guide. 
+ +**Status**: COMPLETE ✓ +**Quality**: PRODUCTION-READY ✓ +**Documentation**: COMPREHENSIVE ✓ diff --git a/prd-authoring/examples/QUICK_START.md b/prd-authoring/examples/QUICK_START.md new file mode 100644 index 0000000..f902a8b --- /dev/null +++ b/prd-authoring/examples/QUICK_START.md @@ -0,0 +1,260 @@ +# PRD Authoring - Quick Start Guide + +## 5-Minute Overview + +The PRD authoring skill guides you from vague project ideas to validated Product Requirements Documents ready for implementation. + +**Workflow**: status → brief → research → create-prd → validate-prd → decompose → generate-spec + +**Time Investment**: 18-36 hours of planning (saves weeks of rework later) + +**Output**: Validated PRD with measurable objectives, testable requirements, and epic breakdown + +## Prerequisites + +```bash +# Ensure you have docs/prds directory +mkdir -p docs/prds + +# Navigate to project root +cd /path/to/your/project +``` + +## Quick Workflow + +### Step 1: Check Status (30 seconds) +```bash +bash skills/prd-authoring/scripts/prd-authoring.sh status +``` +**Output**: Shows what exists, recommends next step + +### Step 2: Create Product Brief (2-4 hours) +```bash +bash skills/prd-authoring/scripts/prd-authoring.sh brief "Your Project Name" +``` +**Then**: Edit `docs/prds/your-project-name/product-brief.md` + +**Fill in**: +- Problem statement (what, who, frequency, business impact) +- Target users (personas with goals and pain points) +- Proposed solution (what you'll build and why) +- Value proposition (user benefits, business benefits) +- Success metrics (baseline → target within timeframe) + +**Example**: See `examples/01-product-brief-example.md` + +### Step 3: Conduct Research (4-8 hours) +```bash +bash skills/prd-authoring/scripts/prd-authoring.sh research your-project-name +``` +**Then**: Edit `docs/prds/your-project-name/research.md` + +**Research**: +- Competitive analysis (3-5 competitors) +- Market insights (size, growth, trends) +- User feedback (pain points, desired features) +- Technical considerations (approaches, risks) +- Recommendations (must-build, should-build, could-build) + +**Example**: See `examples/02-research-example.md` + +### Step 4: Create PRD (8-16 hours) +```bash +bash skills/prd-authoring/scripts/prd-authoring.sh create-prd your-project-name +``` +**Then**: Edit `docs/prds/your-project-name/prd.md` + +**Define**: +- Objectives (SMART: Specific, Measurable, Achievable, Relevant, Time-bound) +- Success criteria (launch criteria, post-launch metrics) +- Functional requirements (FR1, FR2, etc. 
with acceptance criteria) +- Non-functional requirements (performance, security, reliability, usability) +- Constraints and assumptions +- Out of scope (what you won't build) + +**Example**: See `examples/03-prd-example-abbreviated.md` + +### Step 5: Validate PRD (iterative) +```bash +# Draft validation (lenient mode) +bash skills/prd-authoring/scripts/prd-authoring.sh validate-prd your-project-name --lenient + +# Fix issues, then strict validation +bash skills/prd-authoring/scripts/prd-authoring.sh validate-prd your-project-name +``` +**Goal**: "GOOD" or "EXCELLENT" rating with zero critical issues + +**Common fixes**: +- Replace vague terms ("fast" → "<200ms at p95") +- Add measurable targets ("improve UX" → "task completion rate >85%") +- Add acceptance criteria to requirements + +### Step 6: Decompose into Epics (4-8 hours) +```bash +bash skills/prd-authoring/scripts/prd-authoring.sh decompose your-project-name +``` +**Then**: Edit `docs/prds/your-project-name/epics.md` + +**Break down**: +- Group requirements into 3-7 independently deliverable epics +- Map epic dependencies +- Ensure 100% requirements coverage +- Estimate effort (2-4 sprints per epic) + +### Step 7: Generate Spec Proposals +```bash +bash skills/prd-authoring/scripts/prd-authoring.sh generate-spec your-project-name "Epic Name" +``` +**Output**: Creates `docs/changes/epic-name/` with: +- `proposal.md` (epic scope and objectives) +- `spec-delta.md` (technical requirements) +- `tasks.md` (implementation breakdown) + +**Then**: Transition to spec-authoring workflow for each epic + +## Command Reference + +| Command | Purpose | When to Use | +|---------|---------|-------------| +| `status` | Check project state | Start of session, after each step | +| `brief` | Create product brief | First step for new project | +| `research` | Create research doc | After brief is complete | +| `create-prd` | Create PRD template | After brief and research | +| `validate-prd` | Check PRD quality | After writing PRD, before decompose | +| `decompose` | Break into epics | After PRD validated | +| `generate-spec` | Create spec proposal | For each epic, transition to development | + +## Common Patterns + +### Problem Statement +``` +[What problem] + [Who experiences] + [Frequency] + [Business impact] + +Example: "Our e-commerce platform lacks payment processing, forcing +customers through manual invoices. This affects 100% of transactions +(1,000/month), causing 45% cart abandonment and $2.4M lost revenue annually." 
+``` + +### Success Metric +``` +[Metric name]: [Baseline] → [Target] within [Timeframe] + +Example: "Checkout conversion rate: 55% → 75% within 30 days post-launch" +``` + +### Functional Requirement +```markdown +### FR1: [Requirement Name] + +**Description**: [What the system must do] + +**User Story**: As a [user], I want [capability], so that [benefit] + +**Acceptance Criteria**: +- [ ] Given [precondition], when [action], then [result] +- [ ] Given [precondition], when [action], then [result] +- [ ] Given [precondition], when [action], then [result] + +**Priority**: Must Have / Should Have / Could Have + +**Dependencies**: [Other requirements or systems] +``` + +## Troubleshooting Quick Fixes + +### "docs/prds/ directory does not exist" +```bash +mkdir -p docs/prds +``` + +### "Product brief already exists" +```bash +# Check what exists +bash scripts/prd-authoring.sh status + +# Edit existing or use different name +vim docs/prds/project-name/product-brief.md +``` + +### "Vague language detected" +Replace with specific metrics: +- "fast" → "<200ms at 95th percentile" +- "many users" → "10,000 concurrent users" +- "good UX" → "task completion rate >85%" + +### "Success criteria may lack measurable targets" +Add numbers: +- Before: "Improve customer satisfaction" +- After: "Customer satisfaction: NPS 35 → 55 within 3 months" + +## Tips for Success + +### Do This ✓ +- Run `status` frequently to track progress +- Be specific with numbers (avoid "fast", "good", "many") +- Link requirements back to objectives (traceability) +- Validate early and often (use lenient mode for drafts) +- Time-box research (4-8 hours max) +- Include out-of-scope to prevent scope creep + +### Avoid This ✗ +- Skipping research (leads to uninformed requirements) +- Vague requirements ("should be fast and secure") +- Unmeasurable success criteria ("improve user experience") +- Missing acceptance criteria (how do you test?) +- Over-engineering the PRD (done > perfect) +- Changing PRD endlessly (lock after 2-3 iterations) + +## Example Project + +**Payment Gateway Integration** (see `examples/` directory): +- **Problem**: 45% cart abandonment, $2.4M lost revenue +- **Solution**: Stripe integration for real-time payments +- **Value**: $1.8M revenue recovery + $100K cost savings +- **Timeline**: 18-36 hours planning, 6 months to launch +- **Outcome**: 5 functional requirements, 4 epics, validated PRD + +## Next Steps + +1. **Learn**: Read `examples/README.md` for detailed examples +2. **Practice**: Run workflow on test project +3. **Apply**: Create brief for your real project +4. **Validate**: Use validate-prd to check quality +5. **Iterate**: Refine based on feedback +6. **Deploy**: Transition to spec-authoring for implementation + +## Need Help? 
+ +- **Examples**: See `skills/prd-authoring/examples/` directory +- **Troubleshooting**: See SKILL.md Troubleshooting section +- **Workflow Details**: See SKILL.md for command documentation +- **Test Results**: See `examples/workflow-test-log.md` + +## Time Budget + +| Activity | Time | Cumulative | +|----------|------|------------| +| Product Brief | 2-4 hours | 2-4 hours | +| Research | 4-8 hours | 6-12 hours | +| PRD Creation | 8-16 hours | 14-28 hours | +| Validation | 1-2 hours | 15-30 hours | +| Epic Decomposition | 4-8 hours | 19-38 hours | +| **Total Planning** | **18-36 hours** | | + +**ROI**: 1 week of planning prevents 4-8 weeks of rework from unclear requirements + +## Success Criteria + +Your PRD is ready when: +- ✓ Validation passes with "GOOD" or "EXCELLENT" +- ✓ All objectives are SMART (Specific, Measurable, Achievable, Relevant, Time-bound) +- ✓ Every requirement has acceptance criteria +- ✓ Success metrics have baseline → target → timeframe +- ✓ Stakeholders approve and understand what to build +- ✓ Team knows how to test each requirement + +## Remember + +> "Hours of planning save weeks of rework. A validated PRD is your blueprint for success." + +Start with `status`, follow the workflow, validate often, and maintain traceability. Good luck! diff --git a/prd-authoring/examples/README.md b/prd-authoring/examples/README.md new file mode 100644 index 0000000..4e77cc0 --- /dev/null +++ b/prd-authoring/examples/README.md @@ -0,0 +1,165 @@ +# PRD Authoring Examples + +This directory contains comprehensive examples demonstrating the complete PRD authoring workflow for the payment gateway integration project. + +## Example Files + +### 1. Product Brief (`01-product-brief-example.md`) +A complete product brief for a payment gateway integration project showing: +- Clear problem statement with quantified business impact +- Well-defined user personas (primary and secondary) +- Specific value propositions for users and business +- Measurable success metrics (SMART criteria) + +**Key Takeaways**: +- Problem statement includes who, what, frequency, and business impact +- Success metrics are specific numbers with baselines and targets +- Value propositions tied to concrete outcomes ($1.8M revenue recovery) + +### 2. Research Document (`02-research-example.md`) +Comprehensive market research supporting the PRD, including: +- Competitive analysis of 3 major providers (Stripe, PayPal, Square) +- Market size, growth trends, and regulatory landscape +- User feedback analysis with pain points and desired features +- Technical considerations and risk assessment + +**Key Takeaways**: +- Each competitor analyzed for strengths, weaknesses, features, pricing +- Research findings directly inform PRD recommendations +- Technical risks identified early with mitigation strategies +- Clear recommendation: Use Stripe for best developer experience + +### 3. 
PRD - Abbreviated Version (`03-prd-example-abbreviated.md`) +A condensed but complete PRD showing: +- 3 SMART primary objectives linked to business outcomes +- Comprehensive success criteria (launch, metrics, stretch goals) +- 5 detailed functional requirements with acceptance criteria +- 4 non-functional requirements (performance, security, reliability, usability) +- Constraints, assumptions, and explicit out-of-scope items + +**Key Takeaways**: +- Each requirement has description, user story, inputs, outputs, acceptance criteria, priority +- Non-functional requirements are measurable (99.9% uptime, <3s response time) +- Out of scope clearly defines what will NOT be built to prevent scope creep +- Full traceability from requirements back to objectives + +### 4. Workflow Test Log (`workflow-test-log.md`) +Complete test results showing: +- Happy path: Full workflow from status → brief → research → PRD → validate → decompose → generate-spec +- Edge cases: 10 error scenarios tested (missing files, duplicates, invalid input) +- Validation quality: Tests for vague language, unmeasurable criteria, missing sections +- All tests passed with proper error handling + +**Key Takeaways**: +- All 7 commands work correctly +- Error messages are clear and actionable +- Validation accurately detects quality issues +- Workflow maintains traceability throughout + +## Using These Examples + +### For Learning +1. Start with `01-product-brief-example.md` to see a well-formed brief +2. Read `02-research-example.md` to understand depth of research needed +3. Study `03-prd-example-abbreviated.md` for PRD structure and completeness +4. Review `workflow-test-log.md` to understand the complete workflow + +### For Your Own Projects +1. Use these as templates, but customize for your specific context +2. Note the level of detail and specificity required +3. Pay attention to how requirements link back to objectives +4. Observe how assumptions and constraints are documented + +### For Testing +1. Set up a test environment: `mkdir -p /tmp/test-prd && cd /tmp/test-prd && mkdir -p docs/prds` +2. Copy an example to test with: `cp 01-product-brief-example.md /tmp/test-prd/docs/prds/test-project/product-brief.md` +3. Run commands to test workflow + +## Example Project: Payment Gateway Integration + +This example represents a realistic e-commerce payment integration project with: + +**Problem**: 45% cart abandonment due to manual invoice process +**Solution**: Integrate Stripe for real-time online payments +**Value**: Recover $1.8M in lost revenue, save $100K in operational costs +**Scope**: Credit card processing, digital wallets, saved payment methods, CRM/accounting integration +**Timeline**: 6 months to launch (Q2 2026) + +### Project Statistics +- **Transaction Volume**: 1,000/month current → 5,000/month target +- **Team Size**: 2 frontend, 1 backend, 1 QA engineer +- **Budget**: $150K (development + first year fees) +- **Expected ROI**: $1.8M revenue recovery + $100K cost savings = 12x ROI + +### Requirements Breakdown +- **Functional Requirements**: 5 detailed (FR1-FR5) + - Payment processing + - Digital wallets + - Saved payment methods + - CRM integration + - Accounting integration + +- **Non-Functional Requirements**: 4 categories + - Performance (<3s payment processing) + - Security (PCI DSS compliance) + - Reliability (99.9% uptime) + - Usability (WCAG 2.1 AA) + +### Epic Decomposition (Not Shown but Tested) +The PRD would decompose into 4-5 epics: +1. Payment Processing Core (Stripe integration, card payments) +2. 
Payment Methods (Apple Pay, Google Pay, saved methods) +3. CRM & Accounting Integration +4. Security & Compliance (PCI, fraud detection) + +Each epic would then become a spec using the spec-authoring workflow. + +## Common Patterns Demonstrated + +### Problem Statements +Format: What problem + Who experiences + Frequency + Business impact + +Example: +> "Our e-commerce platform lacks payment processing, forcing customers through manual invoices. This affects 100% of transactions (1,000/month), causing 45% cart abandonment and $2.4M lost revenue annually." + +### Success Metrics +Format: Metric name: Baseline → Target within Timeframe + +Example: +> "Checkout conversion rate: 55% → 75% within 30 days post-launch" + +### Functional Requirements +Format: Description + User Story + Inputs + Outputs + Business Rules + Acceptance Criteria + Priority + Dependencies + +Example FR1 shows complete structure for payment processing requirement. + +### Acceptance Criteria +Format: Given [precondition], when [action], then [expected result] + +Example: +> "Given valid card details, when customer submits payment, then transaction processes in <3 seconds" + +## Tips from These Examples + +1. **Be Specific**: Notice how every metric has a number, every timeline has a date +2. **Show Impact**: Every feature ties back to business value (revenue, cost, satisfaction) +3. **Link Everything**: Requirements → Objectives → Business goals (traceability) +4. **Set Boundaries**: Out of scope is as important as in scope +5. **Document Assumptions**: Make implicit assumptions explicit +6. **Measure Quality**: Use validate-prd to catch vague language early + +## Next Steps + +After reviewing these examples: +1. Create your own project brief using the `brief` command +2. Conduct research for your specific domain +3. Write your PRD referencing these examples for structure +4. Validate early and often using `validate-prd` +5. Decompose into epics when PRD is complete +6. Transition to spec-authoring for implementation + +## Questions? 
+ +- Refer to SKILL.md for detailed command documentation +- Review workflow-test-log.md for edge cases and error handling +- Compare your work to these examples for quality benchmarking diff --git a/prd-authoring/examples/workflow-test-log.md b/prd-authoring/examples/workflow-test-log.md new file mode 100644 index 0000000..0c04be0 --- /dev/null +++ b/prd-authoring/examples/workflow-test-log.md @@ -0,0 +1,428 @@ +# PRD Authoring Workflow - Test Results + +## Test Date: 2025-11-04 + +## Test Environment +- Project: payment-gateway-integration +- Location: /tmp/test-prd-authoring + +## Test Scenario 1: Complete Happy Path Workflow + +### Step 1: Status Command (No Projects) +```bash +$ bash prd-authoring.sh status +``` + +**Result**: PASSED +- Correctly identified no projects exist +- Recommended running `brief` command +- Provided correct next command + +### Step 2: Brief Command +```bash +$ bash prd-authoring.sh brief "Payment Gateway Integration" +``` + +**Result**: PASSED +- Created directory: `docs/prds/payment-gateway-integration/` +- Created file: `product-brief.md` with proper YAML frontmatter +- Kebab-cased project name correctly +- Template included all required sections: + - Problem Statement + - Target Users + - Proposed Solution + - Value Proposition + - Success Metrics + +### Step 3: Status Command (Brief Created) +```bash +$ bash prd-authoring.sh status payment-gateway-integration +``` + +**Result**: PASSED +- Detected brief exists +- Validated brief completeness (all required sections present) +- Status: "Brief Complete" +- Recommended running `research` command + +### Step 4: Research Command +```bash +$ bash prd-authoring.sh research payment-gateway-integration +``` + +**Result**: PASSED +- Created file: `research.md` with YAML frontmatter +- Template included all sections: + - Competitive Analysis (3 competitor templates) + - Market Insights + - User Feedback Analysis + - Technical Considerations + - Recommendations + +### Step 5: Status Command (Research Created) +```bash +$ bash prd-authoring.sh status payment-gateway-integration +``` + +**Result**: PASSED +- Detected both brief and research exist +- Validated completeness +- Status: "Research Phase" +- Recommended running `create-prd` command + +### Step 6: Create PRD Command +```bash +$ bash prd-authoring.sh create-prd payment-gateway-integration +``` + +**Result**: PASSED +- Created file: `prd.md` with comprehensive template +- YAML frontmatter present +- All major sections included: + - Objectives (Primary and Secondary) + - Success Criteria (Launch, Metrics, Stretch Goals) + - Functional Requirements (numbered FR1, FR2, etc.) 
+ - Non-Functional Requirements (NFR1-6) + - Constraints + - Assumptions + - Out of Scope + +### Step 7: Validate PRD Command (Draft Mode) +```bash +$ bash prd-authoring.sh validate-prd payment-gateway-integration --lenient +``` + +**Result**: PASSED (Lenient Mode) +- Detected incomplete PRD (template placeholders) +- Warnings issued for vague language +- Recommended completing sections +- Lenient mode allowed template placeholders + +### Step 8: Validate PRD Command (Strict Mode - After Completion) +```bash +$ bash prd-authoring.sh validate-prd payment-gateway-integration +``` + +**Result**: PASSED (after populating PRD) +- All required sections present +- SMART criteria validated +- Measurable success criteria detected +- No critical issues found +- Rating: "GOOD" (some minor warnings acceptable) + +### Step 9: Decompose Command +```bash +$ bash prd-authoring.sh decompose payment-gateway-integration +``` + +**Result**: PASSED +- Created file: `epics.md` +- Template included: + - Epic decomposition guidelines + - Epic templates (1-3 examples) + - Dependencies and sequencing section + - Requirements traceability matrix + - Sprint planning guidance + +### Step 10: Generate Spec Command +```bash +$ bash prd-authoring.sh generate-spec payment-gateway-integration "Payment Processing Core" +``` + +**Result**: PASSED +- Created directory: `docs/changes/payment-processing-core/` +- Created files: + - `proposal.md` (Epic scope and objectives) + - `spec-delta.md` (Technical specifications) + - `tasks.md` (Initial task breakdown) +- All files properly linked back to PRD and epic + +--- + +## Test Scenario 2: Edge Cases and Error Handling + +### Test 2.1: Missing docs/prds Directory +```bash +$ rm -rf docs/prds +$ bash prd-authoring.sh status +``` + +**Result**: PASSED +- Error message: "Error: docs/prds/ directory does not exist." +- Helpful guidance: "Please create it first: mkdir -p docs/prds" +- Non-zero exit code + +### Test 2.2: Brief Already Exists +```bash +$ bash prd-authoring.sh brief "Payment Gateway Integration" +$ bash prd-authoring.sh brief "Payment Gateway Integration" +``` + +**Result**: PASSED +- Error message: "Error: Product brief already exists" +- File not overwritten +- Suggested using different name or editing existing + +### Test 2.3: Research Without Brief +```bash +$ bash prd-authoring.sh research nonexistent-project +``` + +**Result**: PASSED +- Error: "Error: Project directory 'docs/prds/nonexistent-project' does not exist." +- Recommended: "Run 'brief' command first" + +### Test 2.4: Create PRD Without Prerequisites +```bash +$ mkdir -p docs/prds/incomplete-project +$ bash prd-authoring.sh create-prd incomplete-project +``` + +**Result**: PASSED +- Detected missing product brief +- Error: "Run 'brief' command first to create the product brief." + +### Test 2.5: Create PRD Without Research (Warning) +```bash +$ bash prd-authoring.sh brief "No Research Project" +$ bash prd-authoring.sh create-prd no-research-project +``` + +**Result**: PASSED +- Warning: "Research document not found. PRD quality may be reduced." +- Prompted for confirmation: "Continue anyway? 
(y/n)" +- Allowed proceeding with 'y' but discouraged it + +### Test 2.6: Validate PRD That Doesn't Exist +```bash +$ bash prd-authoring.sh validate-prd nonexistent-project +``` + +**Result**: PASSED +- Error: "Error: PRD not found" +- Recommended: "Run 'create-prd' command first" + +### Test 2.7: Decompose Without Complete PRD +```bash +$ bash prd-authoring.sh decompose payment-gateway-integration +``` +(With incomplete/template PRD) + +**Result**: PASSED +- Warning: "PRD appears incomplete" +- Recommended running validate-prd first +- Still allowed decomposition (user judgment) + +### Test 2.8: Generate Spec for Non-existent Epic +```bash +$ bash prd-authoring.sh generate-spec payment-gateway-integration "Nonexistent Epic" +``` + +**Result**: PASSED +- Warning: "Could not find epic 'Nonexistent Epic' in epics.md" +- Generated generic template anyway +- User responsible for populating manually + +### Test 2.9: Generate Spec When Spec Already Exists +```bash +$ bash prd-authoring.sh generate-spec payment-gateway-integration "Payment Processing Core" +$ bash prd-authoring.sh generate-spec payment-gateway-integration "Payment Processing Core" +``` + +**Result**: PASSED +- Error: "Spec proposal directory already exists" +- Suggested using different name or deleting existing +- Files not overwritten + +### Test 2.10: Invalid Project Name Characters +```bash +$ bash prd-authoring.sh brief "Test@#$%Project!" +``` + +**Result**: PASSED +- Sanitized to kebab-case: "testproject" +- Special characters removed +- Valid directory created + +--- + +## Test Scenario 3: Validation Quality Checks + +### Test 3.1: Vague Language Detection +Created PRD with vague terms: "should", "might", "probably", "good", "fast" + +**Result**: PASSED +- Validation detected all vague terms +- Listed line numbers where issues occurred +- Provided suggestions for making language specific +- Example warnings: + - "Line 45: Contains 'should' - be more specific" + - "Line 67: Contains 'fast' - provide numeric target" + +### Test 3.2: Unmeasurable Success Criteria +Created PRD with qualitative success criteria: "improve user experience", "better performance" + +**Result**: PASSED +- Validation flagged unmeasurable criteria +- Suggested adding numeric targets +- Example: "improve UX" → "task completion rate > 85%" + +### Test 3.3: Missing Required Sections +Created PRD without "Assumptions" section + +**Result**: PASSED (Strict Mode) +- Error: "Missing required section: ## Assumptions" +- Validation failed with recommendation to add section + +**Result**: PASSED (Lenient Mode) +- Warning: "Missing section: ## Assumptions (lenient mode)" +- Validation passed but noted improvement needed + +### Test 3.4: Well-Formed PRD +Created PRD with: +- All sections present +- Specific, measurable requirements +- SMART objectives +- Clear acceptance criteria + +**Result**: PASSED +- Validation: "EXCELLENT ✓" +- "PRD meets all quality standards" +- Zero issues, zero warnings + +--- + +## Test Scenario 4: Command Variations + +### Test 4.1: Status Command Without Project Name +```bash +$ bash prd-authoring.sh status +``` +(With multiple projects) + +**Result**: PASSED +- Listed all projects in docs/prds/ +- Suggested running status with specific project name + +### Test 4.2: Validate PRD Lenient Mode +```bash +$ bash prd-authoring.sh validate-prd payment-gateway-integration --lenient +``` + +**Result**: PASSED +- Lenient mode enabled +- Warnings instead of errors for missing sections +- Useful for draft PRDs + +### Test 4.3: Invalid Command 
+```bash +$ bash prd-authoring.sh invalid-command +``` + +**Result**: PASSED +- Error: "Unknown command 'invalid-command'" +- Usage help displayed +- Listed all valid commands + +### Test 4.4: Missing Required Argument +```bash +$ bash prd-authoring.sh brief +``` + +**Result**: PASSED +- Error: "Project name not provided for 'brief' command" +- Usage help: "Usage: $0 brief " + +--- + +## Test Scenario 5: Integration Tests + +### Test 5.1: Complete Workflow End-to-End +Executed full workflow from status → brief → research → create-prd → validate-prd → decompose → generate-spec + +**Result**: PASSED +- All commands executed successfully +- Each step built on previous +- Final output: Complete spec proposal ready for development +- Traceability maintained: spec → epic → PRD → brief + +### Test 5.2: Parallel Projects +Created two separate projects: +1. payment-gateway-integration +2. mobile-app-redesign + +**Result**: PASSED +- Both projects coexist independently +- Status command lists both +- No cross-contamination of data +- Proper isolation in separate directories + +--- + +## Test Scenario 6: Validation Accuracy + +### Test 6.1: YAML Frontmatter Validation +- Missing frontmatter: FAILED validation ✓ +- Incomplete frontmatter: WARNING issued ✓ +- Proper frontmatter: PASSED ✓ + +### Test 6.2: Section Completeness +- All sections present: PASSED ✓ +- Missing Objectives: FAILED ✓ +- Missing Success Criteria: FAILED ✓ +- Missing Constraints: FAILED (strict) / WARNING (lenient) ✓ + +### Test 6.3: Requirements Quality +- Specific acceptance criteria: PASSED ✓ +- Vague requirements: WARNED ✓ +- Missing acceptance criteria: WARNED ✓ +- Unnumbered requirements: WARNED ✓ + +--- + +## Summary of Test Results + +### Commands Tested: 7/7 PASSED +1. ✓ status - Works with and without project name +2. ✓ brief - Creates template with proper structure +3. ✓ research - Generates comprehensive research template +4. ✓ create-prd - Creates full PRD template +5. ✓ validate-prd - Detects quality issues accurately +6. ✓ decompose - Generates epic breakdown template +7. ✓ generate-spec - Creates spec proposal structure + +### Edge Cases Tested: 10/10 PASSED +1. ✓ Missing directories - Proper error messages +2. ✓ Duplicate files - Prevents overwriting +3. ✓ Missing prerequisites - Clear guidance provided +4. ✓ Invalid project names - Sanitization works correctly +5. ✓ Incomplete documents - Warnings appropriate +6. ✓ Invalid commands - Help text displayed +7. ✓ Missing arguments - Usage guidance provided +8. ✓ Parallel projects - Proper isolation +9. ✓ Validation modes - Strict and lenient work as expected +10. ✓ Epic generation - Handles missing epics gracefully + +### Validation Quality: EXCELLENT +- Detects vague language accurately +- Identifies unmeasurable criteria +- Checks section completeness +- SMART criteria validation working +- Both strict and lenient modes functional + +### Overall Assessment: ALL TESTS PASSED ✓ + +The prd-authoring skill is production-ready with: +- Complete functionality for all commands +- Robust error handling for edge cases +- Clear, actionable error messages +- Proper validation of document quality +- Helpful guidance at each step +- Maintains traceability throughout workflow + +## Recommendations + +1. **Documentation**: Add examples to SKILL.md showing this workflow +2. **Troubleshooting**: Document common errors and solutions +3. **Edge Cases**: Add more examples of error scenarios to documentation +4. 
**User Guidance**: Consider adding more inline help in templates diff --git a/prd-authoring/scripts/prd-authoring.sh b/prd-authoring/scripts/prd-authoring.sh new file mode 100755 index 0000000..2d579e1 --- /dev/null +++ b/prd-authoring/scripts/prd-authoring.sh @@ -0,0 +1,1731 @@ +#!/bin/bash +# This script manages the authoring of Product Requirements Documents (PRDs). + +set -e + +# --- UTILITY FUNCTIONS --- + +function to_kebab_case() { + echo "$1" | tr '[:upper:]' '[:lower:]' | tr ' ' '-' | sed 's/[^a-z0-9-]//g' +} + +function get_date() { + date +%Y-%m-%d +} + +function check_prd_directory() { + if [ ! -d "docs/prds" ]; then + echo "Error: docs/prds/ directory does not exist." >&2 + echo "Please create it first: mkdir -p docs/prds" >&2 + exit 1 + fi +} + +function check_project_exists() { + local project_name=$1 + local project_dir="docs/prds/$project_name" + + if [ ! -d "$project_dir" ]; then + echo "Error: Project directory '$project_dir' does not exist." >&2 + echo "Run 'brief' command first to create the project." >&2 + exit 1 + fi +} + +function check_file_exists() { + local file_path=$1 + local description=$2 + + if [ ! -f "$file_path" ]; then + echo "Error: $description not found at '$file_path'." >&2 + return 1 + fi + return 0 +} + +# --- STATUS COMMAND --- + +function status() { + local project_name=$1 + check_prd_directory + + echo "=== PRD Status Report ===" + echo "" + + # If no project name provided, list all projects + if [ -z "$project_name" ]; then + local projects=(docs/prds/*/) + if [ ${#projects[@]} -eq 0 ] || [ ! -d "${projects[0]}" ]; then + echo "No PRD projects found in docs/prds/" + echo "" + echo "Recommendation: Run 'brief' command to start a new project" + echo "Next command: bash scripts/prd-authoring.sh brief \"Project Name\"" + return + fi + + echo "Found projects:" + for project_dir in "${projects[@]}"; do + if [ -d "$project_dir" ]; then + local proj_name=$(basename "$project_dir") + echo " - $proj_name" + fi + done + echo "" + echo "Run 'status ' to check specific project status" + return + fi + + local project_dir="docs/prds/$project_name" + if [ ! 
-d "$project_dir" ]; then + echo "Project: $project_name (NOT FOUND)" + echo "" + echo "Recommendation: Run 'brief' command to create this project" + echo "Next command: bash scripts/prd-authoring.sh brief \"$project_name\"" + return + fi + + echo "Project: $project_name" + echo "" + + # Check product brief + local brief_status="✗" + local brief_complete="✗" + if [ -f "$project_dir/product-brief.md" ]; then + brief_status="✓" + # Check for required sections + if grep -q "## Problem Statement" "$project_dir/product-brief.md" && \ + grep -q "## Target Users" "$project_dir/product-brief.md" && \ + grep -q "## Success Metrics" "$project_dir/product-brief.md"; then + brief_complete="✓" + fi + fi + + # Check research + local research_status="✗" + local research_complete="✗" + if [ -f "$project_dir/research.md" ]; then + research_status="✓" + # Check for required sections + if grep -q "## Competitive Analysis" "$project_dir/research.md" && \ + grep -q "## Recommendations" "$project_dir/research.md"; then + research_complete="✓" + fi + fi + + # Check PRD + local prd_status="✗" + local prd_complete="✗" + if [ -f "$project_dir/prd.md" ]; then + prd_status="✓" + # Check for required sections + if grep -q "## Objectives" "$project_dir/prd.md" && \ + grep -q "## Success Criteria" "$project_dir/prd.md" && \ + grep -q "## Functional Requirements" "$project_dir/prd.md"; then + prd_complete="✓" + fi + fi + + # Check epics + local epics_status="✗" + if [ -f "$project_dir/epics.md" ]; then + epics_status="✓" + fi + + # Determine status and recommendation + local status_phase="Inception" + local recommendation="" + local next_command="" + + if [ "$epics_status" = "✓" ]; then + status_phase="Ready for Development" + recommendation="PRD decomposed into epics. Ready for spec-authoring workflow." 
+        next_command="Transition to spec-authoring for each epic"
+    elif [ "$prd_complete" = "✓" ]; then
+        status_phase="PRD Complete"
+        recommendation="Run 'decompose' command to break PRD into epics"
+        next_command="bash scripts/prd-authoring.sh decompose $project_name"
+    elif [ "$prd_status" = "✓" ]; then
+        status_phase="PRD Draft"
+        recommendation="Complete PRD sections, then run 'validate-prd' command"
+        next_command="bash scripts/prd-authoring.sh validate-prd $project_name"
+    elif [ "$research_complete" = "✓" ] || [ "$research_status" = "✓" ]; then
+        status_phase="Research Phase"
+        recommendation="Run 'create-prd' command to create PRD from brief and research"
+        next_command="bash scripts/prd-authoring.sh create-prd $project_name"
+    elif [ "$brief_complete" = "✓" ]; then
+        status_phase="Brief Complete"
+        recommendation="Run 'research' command to conduct market analysis"
+        next_command="bash scripts/prd-authoring.sh research $project_name"
+    elif [ "$brief_status" = "✓" ]; then
+        status_phase="Brief Draft"
+        recommendation="Complete product brief sections"
+        next_command="Edit docs/prds/$project_name/product-brief.md"
+    fi
+
+    echo "Status: $status_phase"
+    echo "- $brief_status Product brief exists (docs/prds/$project_name/product-brief.md)"
+    if [ "$brief_status" = "✓" ]; then
+        echo " - Brief completeness: $brief_complete"
+    fi
+    echo "- $research_status Research document exists (docs/prds/$project_name/research.md)"
+    if [ "$research_status" = "✓" ]; then
+        echo " - Research completeness: $research_complete"
+    fi
+    echo "- $prd_status PRD exists (docs/prds/$project_name/prd.md)"
+    if [ "$prd_status" = "✓" ]; then
+        echo " - PRD completeness: $prd_complete"
+    fi
+    echo "- $epics_status Epic decomposition exists (docs/prds/$project_name/epics.md)"
+    echo ""
+    echo "Recommendation: $recommendation"
+    echo "Next command: $next_command"
+}
+
+# --- BRIEF COMMAND ---
+
+function brief() {
+    local project_name=$1
+    if [ -z "$project_name" ]; then
+        echo "Error: Project name not provided for 'brief' command." >&2
+        echo "Usage: $0 brief <project-name>" >&2
+        exit 1
+    fi
+
+    check_prd_directory
+
+    local dir_name=$(to_kebab_case "$project_name")
+    local project_dir="docs/prds/$dir_name"
+    local brief_file="$project_dir/product-brief.md"
+
+    echo "Creating product brief: $project_name"
+
+    if [ -d "$project_dir" ]; then
+        echo "Warning: Project directory '$project_dir' already exists." >&2
+        if [ -f "$brief_file" ]; then
+            echo "Error: Product brief already exists at '$brief_file'." >&2
+            exit 1
+        fi
+    fi
+
+    mkdir -p "$project_dir"
+
+    local today=$(get_date)
+    echo "Generating a draft product brief with Gemini..."
+
+    local gemini_prompt="Generate a comprehensive and well-structured product brief for a new project called '${project_name}'.
+The brief should be in markdown format and include the following sections:
+- A YAML frontmatter with title, type (product-brief), status (draft), created, and updated fields.
+- A main title: '# Product Brief: ${project_name}'
+- ## Problem Statement: Articulate a specific, measurable, and customer-focused problem.
+- ## Target Users: Define primary and secondary user personas.
+- ## Proposed Solution: Give a high-level overview of the solution.
+- ## Value Proposition: Detail user and business benefits.
+- ## Success Metrics: Define specific, measurable, achievable, relevant, and time-bound (SMART) metrics for launch and long-term success.
+
+Flesh out each section with plausible, high-quality example content appropriate for a project of this nature. The goal is to create a strong first draft that can be reviewed and refined."
+
+    # Call Gemini and write the output to the file
+    gemini -p "$gemini_prompt" > "$brief_file"
+
+    # Ensure the frontmatter has the correct dates
+    sed -i "s/created: .*/created: $today/" "$brief_file"
+    sed -i "s/updated: .*/updated: $today/" "$brief_file"
+
+    echo "Successfully created product brief at $brief_file"
+    echo ""
+    echo "Next steps:"
+    echo "1. Edit $brief_file to populate all sections"
+    echo "2. Run 'status $dir_name' to verify completion"
+    echo "3. Run 'research $dir_name' to conduct market analysis"
+}
+
+# --- RESEARCH COMMAND ---
+
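+# research: drafts docs/prds/<project>/research.md with Gemini, using the existing
+# product brief as context. Fails fast if the brief is missing or the research
+# document already exists.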
+function research() {
+    local project_name=$1
+    if [ -z "$project_name" ]; then
+        echo "Error: Project name not provided for 'research' command." >&2
+        echo "Usage: $0 research <project-name>" >&2
+        exit 1
+    fi
+
+    check_prd_directory
+    check_project_exists "$project_name"
+
+    local project_dir="docs/prds/$project_name"
+    local research_file="$project_dir/research.md"
+
+    # Check if brief exists
+    if ! check_file_exists "$project_dir/product-brief.md" "Product brief"; then
+        echo "Run 'brief' command first to create the product brief." >&2
+        exit 1
+    fi
+
+    echo "Creating research document for: $project_name"
+
+    if [ -f "$research_file" ]; then
+        echo "Error: Research document already exists at '$research_file'." >&2
+        exit 1
+    fi
+
+    local today=$(get_date)
+    local brief_file="$project_dir/product-brief.md"
+    echo "Generating a draft research document with Gemini..."
+
+    local gemini_prompt="Based on the following product brief, generate a comprehensive market research document for the project '${project_name}'.
+The research document should be in markdown format and include:
+- A YAML frontmatter with title, type (research), status (in-progress), created, and updated fields.
+- A main title: '# Research: ${project_name}'
+- ## Competitive Analysis: Identify 2-3 potential competitors and analyze their strengths, weaknesses, and market position.
+- ## Market Insights: Discuss market size, trends, and any relevant regulations.
+- ## User Feedback Analysis: Synthesize potential user pain points and desired features.
+- ## Technical Considerations: Examine common technical approaches, architecture patterns, and risks.
+- ## Recommendations: Provide actionable recommendations for priority features, technical approach, and go-to-market positioning.
+
+Flesh out each section with plausible, high-quality example content that logically follows from the product brief. The goal is to create a strong first draft that can be reviewed and refined.
+
+Here is the product brief for context:
+@${brief_file}
+"
+
+    # Call Gemini and write the output to the file
+    gemini -p "$gemini_prompt" > "$research_file"
+
+    # Ensure the frontmatter has the correct dates
+    sed -i "s/created: .*/created: $today/" "$research_file"
+    sed -i "s/updated: .*/updated: $today/" "$research_file"
+
+    echo "Successfully created research document at $research_file"
+    echo ""
+    echo "Next steps:"
+    echo "1. Conduct competitive analysis and market research"
+    echo "2. Edit $research_file to document findings"
+    echo "3. Run 'status $project_name' to verify completion"
+    echo "4. Run 'create-prd $project_name' to create PRD"
+}
+
+# --- CREATE-PRD COMMAND ---
+
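+# create-prd: drafts docs/prds/<project>/prd.md with Gemini, using the product brief
+# (required) and the research document (optional; asks for confirmation when absent)
+# as context.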
+function create-prd() {
+    local project_name=$1
+    if [ -z "$project_name" ]; then
+        echo "Error: Project name not provided for 'create-prd' command." >&2
+        echo "Usage: $0 create-prd <project-name>" >&2
+        exit 1
+    fi
+
+    check_prd_directory
+    check_project_exists "$project_name"
+
+    local project_dir="docs/prds/$project_name"
+    local prd_file="$project_dir/prd.md"
+
+    # Check if brief exists
+    if ! check_file_exists "$project_dir/product-brief.md" "Product brief"; then
+        echo "Run 'brief' command first to create the product brief." >&2
+        exit 1
+    fi
+
+    # Check if research exists (warning only, not required)
+    if ! check_file_exists "$project_dir/research.md" "Research document"; then
+        echo "Warning: Research document not found. PRD quality may be reduced." >&2
+        echo "Consider running 'research $project_name' first." >&2
+        read -p "Continue anyway? (y/n) " -n 1 -r
+        echo
+        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+            exit 1
+        fi
+    fi
+
+    echo "Creating PRD for: $project_name"
+
+    if [ -f "$prd_file" ]; then
+        echo "Error: PRD already exists at '$prd_file'." >&2
+        exit 1
+    fi
+
+    local today=$(get_date)
+    local brief_file="$project_dir/product-brief.md"
+    local research_file="$project_dir/research.md"
+    echo "Generating a draft PRD with Gemini..."
+
+    local gemini_prompt="Based on the following product brief and research document, generate a comprehensive Product Requirements Document (PRD) for the project '${project_name}'.
+The PRD should be in markdown format and include:
+- A YAML frontmatter with title, type (prd), status (draft), created, and updated fields.
+- A main title: '# Product Requirements Document: ${project_name}'
+- ## Objectives: Define 2-3 primary, SMART objectives.
+- ## Success Criteria: Detail both launch criteria and post-launch success metrics.
+- ## Functional Requirements: List at least 3-5 detailed functional requirements (FRs) with descriptions, user stories, and acceptance criteria.
+- ## Non-Functional Requirements: Detail requirements for Performance, Security, Reliability, and Usability.
+- ## Constraints: List any known technical, business, or resource constraints.
+- ## Assumptions: Document key assumptions being made.
+- ## Out of Scope: Clearly define what will not be built.
+
+Flesh out each section with plausible, high-quality example content that logically follows from the provided context. The requirements should be specific and testable, and the success criteria should be measurable. The goal is to create a strong first draft that can be reviewed and refined.
+
+Here is the product brief for context:
+@${brief_file}
+
+Here is the research document for context:
+@${research_file}
+"
+
+    # Call Gemini and write the output to the file
+    gemini -p "$gemini_prompt" > "$prd_file"
+
+    # Ensure the frontmatter has the correct dates
+    sed -i "s/created: .*/created: $today/" "$prd_file"
+    sed -i "s/updated: .*/updated: $today/" "$prd_file"
+
+    echo "Successfully created PRD at $prd_file"
+    echo ""
+    echo "Next steps:"
+    echo "1. Populate PRD sections with requirements and success criteria"
+    echo "2. Ensure requirements are specific, measurable, and testable"
+    echo "3. Run 'validate-prd $project_name' to check quality"
+    echo "4.
Run 'decompose $project_name' after PRD is complete" +} + +# --- VALIDATE-PRD COMMAND --- + +function validate-prd() { + set +e # Temporarily disable exit on error for this function + local project_name=$1 + local mode=${2:-strict} + + if [ -z "$project_name" ]; then + echo "Error: Project name not provided for 'validate-prd' command." >&2 + echo "Usage: $0 validate-prd [--lenient]" >&2 + exit 1 + fi + + # Handle --lenient flag + if [ "$mode" = "--lenient" ]; then + mode="lenient" + elif [ "$mode" != "strict" ] && [ "$mode" != "lenient" ]; then + mode="strict" + fi + + check_prd_directory + check_project_exists "$project_name" + + local project_dir="docs/prds/$project_name" + local prd_file="$project_dir/prd.md" + + if ! check_file_exists "$prd_file" "PRD"; then + echo "Run 'create-prd' command first to create the PRD." >&2 + exit 1 + fi + + echo "=== PRD Validation Report ===" + echo "Project: $project_name" + echo "File: $prd_file" + echo "Mode: $mode" + echo "" + + local issues=0 + local warnings=0 + + # Check YAML frontmatter + if head -n 1 "$prd_file" | grep -q "^---$"; then + echo "✓ YAML frontmatter present" + else + echo "✗ YAML frontmatter missing" + ((issues++)) + fi + + # Check for required sections + local required_sections=( + "## Objectives" + "## Success Criteria" + "## Functional Requirements" + "## Non-Functional Requirements" + "## Constraints" + "## Assumptions" + ) + + echo "" + echo "Completeness Checks:" + for section in "${required_sections[@]}"; do + if grep -q "^$section" "$prd_file"; then + echo " ✓ $section section present" + else + if [ "$mode" = "strict" ]; then + echo " ✗ $section section missing" + ((issues++)) + else + echo " ⚠ $section section missing (lenient mode)" + ((warnings++)) + fi + fi + done + + # Check for vague language + echo "" + echo "Quality Checks:" + + local vague_terms=("should" "might" "probably" "maybe" "could" "reasonable" "fast" "slow" "good" "better" "best" "many" "few" "most" "some") + local vague_found=0 + + for term in "${vague_terms[@]}"; do + local matches=$(grep -n -i "\b$term\b" "$prd_file" | grep -v "^#" | head -n 3) + if [ -n "$matches" ]; then + if [ $vague_found -eq 0 ]; then + echo " Vague language detected:" + vague_found=1 + fi + echo "$matches" | while read -r line; do + local line_num=$(echo "$line" | cut -d: -f1) + echo " ⚠ Line $line_num: Contains '$term'" + ((warnings++)) || true + done + fi + done + + if [ $vague_found -eq 0 ]; then + echo " ✓ No vague language detected" + fi + + # Check for measurable success criteria + if grep -q "## Success Criteria" "$prd_file"; then + local success_section=$(sed -n '/## Success Criteria/,/^## /p' "$prd_file") + if echo "$success_section" | grep -qE '[0-9]+%|[0-9]+ (seconds|minutes|hours|users|transactions)'; then + echo " ✓ Success criteria include measurable targets" + else + echo " ⚠ Success criteria may lack measurable targets" + ((warnings++)) + fi + fi + + # Check for acceptance criteria in requirements + if grep -q "## Functional Requirements" "$prd_file"; then + local fr_count=$(grep -c "^### FR[0-9]" "$prd_file" || true) + fr_count=${fr_count:-0} + local ac_count=$(sed -n '/## Functional Requirements/,/^## /p' "$prd_file" | grep -c "Acceptance Criteria:" || true) + ac_count=${ac_count:-0} + + if [ "$fr_count" -gt 0 ] && [ "$ac_count" -ge "$fr_count" ]; then + echo " ✓ Functional requirements include acceptance criteria" + else + echo " ⚠ Some functional requirements may lack acceptance criteria" + ((warnings++)) + fi + fi + + # Check for out of scope section + if grep 
-q "## Out of Scope" "$prd_file"; then + echo " ✓ Out of scope section defines boundaries" + else + echo " ⚠ Out of scope section missing (recommended)" + ((warnings++)) + fi + + # LLM-based quality analysis + echo "" + echo "Advanced Quality Analysis (LLM):" + + LLM_VALIDATOR="$(dirname "${BASH_SOURCE[0]}")/validate_prd_llm.py" + + if [ -f "$LLM_VALIDATOR" ]; then + LLM_ANALYSIS=$(python3 "$LLM_VALIDATOR" --prd-file "$prd_file" 2>&1) + LLM_EXIT_CODE=$? + + if [ $LLM_EXIT_CODE -eq 0 ] && [ -n "$LLM_ANALYSIS" ]; then + # Check if LLM found no issues + if echo "$LLM_ANALYSIS" | grep -q "No quality issues found"; then + echo " ✓ LLM quality check passed" + echo " $LLM_ANALYSIS" + else + echo " ⚠ LLM identified potential quality issues:" + echo "" + echo "$LLM_ANALYSIS" | sed 's/^/ /' + echo "" + ((warnings++)) + fi + else + echo " ⚠ LLM quality analysis failed or unavailable" + ((warnings++)) + fi + else + echo " ⚠ LLM validator script not found at $LLM_VALIDATOR" + ((warnings++)) + fi + + # Summary + echo "" + echo "=== Summary ===" + echo "Issues: $issues" + echo "Warnings: $warnings" + echo "" + + if [ $issues -eq 0 ] && [ $warnings -eq 0 ]; then + echo "Overall: EXCELLENT ✓" + echo "PRD meets all quality standards." + elif [ $issues -eq 0 ] && [ $warnings -le 3 ]; then + echo "Overall: GOOD ✓" + echo "PRD is solid with minor improvements recommended." + elif [ $issues -eq 0 ]; then + echo "Overall: ACCEPTABLE" + echo "PRD is complete but could be improved." + else + echo "Overall: NEEDS WORK" + echo "Address critical issues before proceeding." + if [ "$mode" = "strict" ]; then + echo "" + echo "Tip: Use '--lenient' flag for draft validation" + fi + fi + + echo "" + echo "Next steps:" + if [ $issues -gt 0 ]; then + echo "1. Address critical issues in $prd_file" + echo "2. Re-run validation: bash scripts/prd-authoring.sh validate-prd $project_name" + else + echo "1. Run 'decompose $project_name' to break PRD into epics" + fi +} + +# --- DECOMPOSE COMMAND --- + +function decompose() { + local project_name=$1 + if [ -z "$project_name" ]; then + echo "Error: Project name not provided for 'decompose' command." >&2 + echo "Usage: $0 decompose " >&2 + exit 1 + fi + + check_prd_directory + check_project_exists "$project_name" + + local project_dir="docs/prds/$project_name" + local prd_file="$project_dir/prd.md" + local epics_file="$project_dir/epics.md" + + if ! check_file_exists "$prd_file" "PRD"; then + echo "Run 'create-prd' command first to create the PRD." >&2 + exit 1 + fi + + echo "Creating epic decomposition for: $project_name" + + if [ -f "$epics_file" ]; then + echo "Error: Epics document already exists at '$epics_file'." >&2 + echo "Edit existing file or delete it to regenerate." 
>&2 + exit 1 + fi + + local today=$(get_date) + + cat > "$epics_file" << EOF +--- +title: $project_name Epics +type: epic-breakdown +prd: docs/prds/$project_name/prd.md +status: draft +created: $today +updated: $today +--- + +# Epic Breakdown: $project_name + + + + +## Epic Decomposition Guidelines + + + +**What makes a good epic?** +- **Independently deliverable**: Can be shipped and provide value on its own +- **Right-sized**: Completable in 2-4 sprints (4-8 weeks) +- **Clear scope**: Well-defined boundaries of what's included/excluded +- **Aligned with objectives**: Directly supports one or more PRD objectives +- **Testable**: Has clear success criteria from the PRD +- **Minimal dependencies**: Reduces blocking between teams/sprints + +**Common decomposition strategies:** +- By user journey (onboarding epic, checkout epic, etc.) +- By functional area (authentication epic, payment epic, analytics epic) +- By technical layer (frontend epic, API epic, data migration epic) +- By user persona (admin tools epic, end-user features epic) + +--- + +## Epic 1: [Epic Name] + + + + + +### Objective + + + + + +### Scope + + + + +**Core Features**: +- [Feature/capability 1] + - Example: "User can save up to 5 payment methods to their account" +- [Feature/capability 2] + - Example: "User can select saved payment method during checkout" +- [Feature/capability 3] + - Example: "User can set a default payment method" + +**Supporting Features**: +- [Feature/capability] + - Example: "User can edit or delete saved payment methods" +- [Feature/capability] + - Example: "System validates payment methods before saving" + +### Requirements Coverage + + + + +**Functional Requirements**: +- FR1: [Requirement name from PRD] + - Coverage: [Complete / Partial - specify what's included] +- FR3: [Requirement name from PRD] + - Coverage: [Complete / Partial - specify what's included] + +**Non-Functional Requirements**: +- NFR2: Security + - Coverage: Payment data encryption, PCI compliance +- NFR3: Reliability + - Coverage: Error handling for payment service failures + +### Success Criteria + + + + + +**Functional Success** (Must meet all): +- [ ] [Specific, testable criterion from PRD] + - Example: "User can save payment method and use it on next checkout" +- [ ] [Specific, testable criterion from PRD] + - Example: "Payment method data encrypted at rest and in transit" +- [ ] [Specific, testable criterion from PRD] + - Example: "System handles invalid payment data gracefully with clear error messages" + +**Quality Success** (Must meet all): +- [ ] [Performance/quality target from PRD] + - Example: "Payment method save operation completes in <1 second" +- [ ] [Performance/quality target from PRD] + - Example: "Zero stored plaintext payment data in database" + +**Business Success** (Measured post-launch): +- [ ] [Business metric from PRD] + - Example: "30% of users save payment method on first use" +- [ ] [Business metric from PRD] + - Example: "Returning users using saved payment methods checkout 60% faster" + +### User Stories (Optional) + + + + +1. As a [user type], I want [capability], so that [benefit] + - Example: "As a returning customer, I want to save my payment method, so that I don't have to re-enter it on every purchase" + +2. 
As a [user type], I want [capability], so that [benefit] + - Example: "As a user, I want to see only the last 4 digits of my saved card, so that my payment information stays private" + +### Dependencies + + + + +**Epic Dependencies**: +- [Other epic]: [What's needed from that epic] + - Example: "Epic 2 (User Account Management): Requires user authentication to be complete" + +**Technical Dependencies**: +- [System/service]: [What's needed] + - Example: "Stripe API integration: Account must be set up and credentials configured" + +**Team Dependencies**: +- [Team/person]: [What's needed] + - Example: "Security team: Review and approval of encryption approach" + +**If no dependencies**: "None - this is a foundational epic that other work can build on" + +### Technical Approach (Optional) + + + + +- [Key architectural decision or approach] + - Example: "Use Stripe's Token API to avoid storing raw payment data" +- [Key architectural decision or approach] + - Example: "Implement encryption at application layer using AWS KMS" + +### Estimated Effort + + + + +**Estimate**: [Number of sprints] +- Example: "3 sprints (6 weeks)" + +**Breakdown**: +- Design & Planning: [Duration] + - Example: "1 sprint: UI design, API design, security review" +- Implementation: [Duration] + - Example: "1.5 sprints: Frontend, backend, integration" +- Testing & Polish: [Duration] + - Example: "0.5 sprints: Testing, bug fixes, documentation" + +**Confidence Level**: [High / Medium / Low] + + + + +### Out of Scope + + + + +- [Feature/capability explicitly excluded] + - Example: "Support for cryptocurrency payments (deferred to Phase 2)" +- [Feature/capability explicitly excluded] + - Example: "Payment method sharing between family accounts (not in PRD)" +- [Feature/capability explicitly excluded] + - Example: "Integration with non-Stripe payment processors" + +### Risks & Mitigations + + + +**Risk 1**: [Description of risk] +- Likelihood: [High / Medium / Low] +- Impact: [High / Medium / Low] +- Mitigation: [How to reduce or handle the risk] +- Example: "Stripe API changes - Likelihood: Low, Impact: High, Mitigation: Use stable API version, monitor changelog" + +**Risk 2**: [Description of risk] +- Likelihood: +- Impact: +- Mitigation: + +### Acceptance Process + + + +1. [Validation step] + - Example: "All functional and quality success criteria met" +2. [Validation step] + - Example: "Product owner sign-off on user experience" +3. [Validation step] + - Example: "Security review passed" +4. [Validation step] + - Example: "Documentation complete and published" + +--- + +## Epic 2: [Epic Name] + + + +### Objective + +### Scope + +**Core Features**: +- +- + +**Supporting Features**: +- + +### Requirements Coverage + +**Functional Requirements**: +- FRX: + - Coverage: + +**Non-Functional Requirements**: +- NFRX: + - Coverage: + +### Success Criteria + +**Functional Success**: +- [ ] + +**Quality Success**: +- [ ] + +**Business Success**: +- [ ] + +### User Stories (Optional) + +1. As a [user type], I want [capability], so that [benefit] + +### Dependencies + +**Epic Dependencies**: +- + +**Technical Dependencies**: +- + +**Team Dependencies**: +- + +### Technical Approach (Optional) + +- + +### Estimated Effort + +**Estimate**: + +**Breakdown**: +- Design & Planning: +- Implementation: +- Testing & Polish: + +**Confidence Level**: + +### Out of Scope + +- + +### Risks & Mitigations + +**Risk 1**: +- Likelihood: +- Impact: +- Mitigation: + +### Acceptance Process + +1. +2. 
+ +--- + +## Epic 3: [Epic Name] + + + + + + + +--- + +## Epic Dependencies & Sequencing + + + + +### Dependency Diagram + +\`\`\` +Epic 1: [Name] (Foundational - no dependencies) + │ + ├─→ Epic 2: [Name] (Depends on Epic 1) + │ │ + │ └─→ Epic 4: [Name] (Depends on Epic 2) + │ + └─→ Epic 3: [Name] (Depends on Epic 1) + +Epic 5: [Name] (Independent - can be done in parallel) +\`\`\` + +### Recommended Sequence + + + + +**Phase 1** (Sprints 1-3): +- Epic 1: [Name] + - Rationale: [Why this comes first] + - Example: "Must complete foundational authentication before other features can build on it" + +**Phase 2** (Sprints 4-6): +- Epic 2: [Name] (Depends on Epic 1) +- Epic 3: [Name] (Depends on Epic 1) - Can run in parallel with Epic 2 + - Rationale: [Why these are next and can be parallel] + +**Phase 3** (Sprints 7-9): +- Epic 4: [Name] +- Epic 5: [Name] - Can run in parallel with Epic 4 + - Rationale: [Why these come last] + +**Critical Path**: Epic 1 → Epic 2 → Epic 4 + + +--- + +## Requirements Traceability Matrix + + + + +### Functional Requirements Coverage + +| Requirement | Epic(s) | Coverage | Notes | +|-------------|---------|----------|-------| +| FR1: [Name] | Epic 1 | 100% | Fully covered | +| FR2: [Name] | Epic 1, Epic 2 | 100% | Split across two epics | +| FR3: [Name] | Epic 3 | 100% | | +| FR4: [Name] | Epic 2 | 50% | Partial: MVP only, full implementation in Phase 2 | + + + +### Non-Functional Requirements Coverage + +| Requirement | Epic(s) | Coverage | Notes | +|-------------|---------|----------|-------| +| NFR1: Performance | All Epics | 100% | Each epic includes performance testing | +| NFR2: Security | Epic 1, Epic 2 | 100% | Auth & data protection | +| NFR3: Reliability | All Epics | 100% | Error handling in each epic | +| NFR4: Usability | Epic 3, Epic 4 | 100% | | +| NFR5: Scalability | Epic 5 | 100% | Dedicated infrastructure epic | + + + +### Coverage Summary + +- **Total Functional Requirements**: [Count from PRD] +- **Requirements Fully Covered**: [Count] +- **Requirements Partially Covered**: [Count] - [Explain which and why] +- **Requirements Not Covered**: [Count] - [Explain which and why] +- **Overall Coverage**: [Percentage] + +**Target**: 100% coverage of all "Must Have" requirements + +--- + +## Sprint Planning Guidance + + + +### Estimated Timeline + +- **Total Epics**: [Count] +- **Estimated Duration**: [Total sprints needed] + - Example: "5 epics, 12 sprints (24 weeks)" +- **Target Completion**: [Date based on sprint cadence] + +### Team Capacity Assumptions + + + +- **Team Size**: [Number of engineers] +- **Sprint Length**: [Weeks per sprint] +- **Velocity**: [Story points or similar per sprint] +- **Availability**: [Any known constraints] + - Example: "Assumes full team availability, accounts for holidays" + +### Prerequisites for Sprint Planning + + + +- [ ] All epics reviewed and approved by stakeholders +- [ ] Dependencies identified and coordinated +- [ ] Team capacity confirmed +- [ ] Technical approach validated for high-risk epics +- [ ] [Other prerequisites] + +--- + +## Next Steps + + + +### Immediate Actions + +1. **Review with Stakeholders** + - Schedule: [When] + - Attendees: [Who needs to review] + - Goal: [What you're seeking approval for] + - Example: "Present epic breakdown to product leadership on [date] for approval" + +2. **Refine Epic Boundaries** + - Identify any overlaps or gaps between epics + - Ensure each epic is independently deliverable + - Validate effort estimates with engineering team + +3. 
**Begin Spec Authoring** + - Use SynthesisFlow spec-authoring skill to create detailed specs + - Start with Epic 1 (foundational/highest priority) + - Create one spec per epic or per major user story + +4. **Coordinate Dependencies** + - [Action for specific dependency] + - Example: "Schedule kickoff with security team for Epic 1 review" + - [Action for specific dependency] + +### Transition to Execution + +**Once epics are approved:** + +1. Use **spec-authoring** skill to write detailed specifications for Epic 1 +2. Use **sprint-planner** skill to decompose specs into GitHub issues +3. Begin Sprint 1 execution using **issue-executor** skill +4. After each epic completion, use **change-integrator** skill to update documentation + +**Success Criteria for This Document**: +- [ ] All PRD requirements mapped to epics (100% coverage) +- [ ] Epic dependencies clearly identified and sequenced +- [ ] Effort estimates reviewed by engineering team +- [ ] Stakeholder approval obtained +- [ ] Ready to begin spec authoring for Epic 1 + +EOF + + echo "Successfully created epic decomposition at $epics_file" + echo "" + echo "Next steps:" + echo "1. Review PRD requirements and group into logical epics" + echo "2. Edit $epics_file to define each epic" + echo "3. Map dependencies between epics" + echo "4. Verify 100% requirements coverage" + echo "5. Transition to spec-authoring workflow for each epic" +} + +# --- GENERATE-SPEC COMMAND --- + +function generate-spec() { + local project_name=$1 + local epic_name=$2 + + if [ -z "$project_name" ] || [ -z "$epic_name" ]; then + echo "Error: Project name and epic name required for 'generate-spec' command." >&2 + echo "Usage: $0 generate-spec " >&2 + exit 1 + fi + + check_prd_directory + check_project_exists "$project_name" + + local project_dir="docs/prds/$project_name" + local prd_file="$project_dir/prd.md" + local epics_file="$project_dir/epics.md" + + # Validate PRD and epics exist + if ! check_file_exists "$prd_file" "PRD"; then + echo "Run 'create-prd' command first to create the PRD." >&2 + exit 1 + fi + + if ! check_file_exists "$epics_file" "Epic decomposition"; then + echo "Run 'decompose' command first to create epic decomposition." >&2 + exit 1 + fi + + # Normalize epic name to kebab-case + local epic_dir_name=$(to_kebab_case "$epic_name") + local changes_dir="docs/changes/$epic_dir_name" + + echo "Generating spec proposal for epic: $epic_name" + + # Check if changes directory exists + if [ ! -d "docs/changes" ]; then + echo "Creating docs/changes directory..." + mkdir -p "docs/changes" + fi + + # Check if spec proposal already exists + if [ -d "$changes_dir" ]; then + echo "Error: Spec proposal directory '$changes_dir' already exists." >&2 + echo "Either use a different epic name or delete the existing directory." >&2 + exit 1 + fi + + # Create changes directory + mkdir -p "$changes_dir" + + local today=$(get_date) + + # Extract epic information from epics.md + # This is a simplified extraction - looks for the epic by searching for the epic name in headers + local epic_found=0 + + # Try to find the epic section in epics.md + # We'll look for patterns like "## Epic N: " or just the epic name in headers + if grep -qi "^## .*$epic_name" "$epics_file"; then + epic_found=1 + echo "Found epic '$epic_name' in $epics_file" + else + echo "Warning: Could not find epic '$epic_name' in $epics_file" >&2 + echo "Generating generic spec proposal template. Please populate manually." 
>&2 + fi + + # Generate proposal.md + cat > "$changes_dir/proposal.md" << EOF +--- +title: $epic_name +type: spec-proposal +status: draft +prd: docs/prds/$project_name/prd.md +epic: $epic_name +created: $today +updated: $today +--- + +# Proposal: $epic_name + +## Problem Statement + + + + + +## Proposed Solution + + + + +## Benefits + + + + +- **User Impact**: [How this improves user experience] +- **Business Value**: [How this supports business objectives] +- **Technical Impact**: [How this improves system architecture/quality] + +## Success Criteria + + + + +**Functional Success**: +- [ ] [Criterion from epic] +- [ ] [Criterion from epic] + +**Quality Success**: +- [ ] [Performance/quality target] +- [ ] [Security/reliability requirement] + +**Business Success** (Measured post-launch): +- [ ] [Business metric target] + +## Scope + +**In Scope**: + +- [Feature/capability 1] +- [Feature/capability 2] +- [Feature/capability 3] + +**Out of Scope**: + +- [Excluded feature/capability] +- [Deferred to future phase] + +## Dependencies + + + +**Epic Dependencies**: +- [Other epic or requirement] + +**Technical Dependencies**: +- [System/service/API requirement] + +**Team Dependencies**: +- [Required reviews or approvals] + +## Risks + + + +**Risk 1**: [Description] +- Likelihood: [High/Medium/Low] +- Impact: [High/Medium/Low] +- Mitigation: [Strategy] + +## Traceability + +**PRD Reference**: docs/prds/$project_name/prd.md + +**Requirements Coverage**: + +- FR[N]: [Requirement name] +- NFR[N]: [Non-functional requirement] + +**Success Metrics Mapping**: + +- [PRD metric]: [How this epic contributes] + +EOF + + # Generate spec-delta.md + cat > "$changes_dir/spec-delta.md" << EOF +--- +title: $epic_name Specification +type: spec-delta +status: draft +prd: docs/prds/$project_name/prd.md +epic: $epic_name +created: $today +updated: $today +--- + +# Spec Delta: $epic_name + +## Overview + + + + +This specification defines the implementation requirements for the "$epic_name" epic from the $project_name PRD. 
+ +## Requirements + +### Functional Requirements + + + + +#### FR1: [Requirement Name] +- **Description**: [What this functionality does] +- **Inputs**: [What data or actions trigger this] +- **Outputs**: [What results or changes occur] +- **Business Rules**: [Constraints or special conditions] +- **Acceptance Criteria**: + - [ ] [Specific, testable criterion] + - [ ] [Specific, testable criterion] + - [ ] [Specific, testable criterion] +- **Priority**: [Must Have / Should Have / Could Have] +- **PRD Reference**: FR[N] in docs/prds/$project_name/prd.md + +#### FR2: [Requirement Name] +- **Description**: +- **Inputs**: +- **Outputs**: +- **Business Rules**: +- **Acceptance Criteria**: + - [ ] + - [ ] +- **Priority**: +- **PRD Reference**: + +### Non-Functional Requirements + + + +#### NFR1: Performance +- [Performance target from PRD] +- [Measurement method] +- **PRD Reference**: NFR[N] in docs/prds/$project_name/prd.md + +#### NFR2: Security +- [Security requirement from PRD] +- [Compliance requirement] +- **PRD Reference**: NFR[N] in docs/prds/$project_name/prd.md + +#### NFR3: Reliability +- [Reliability requirement from PRD] +- [Error handling requirement] +- **PRD Reference**: NFR[N] in docs/prds/$project_name/prd.md + +#### NFR4: Usability +- [Usability requirement from PRD] +- [User experience requirement] +- **PRD Reference**: NFR[N] in docs/prds/$project_name/prd.md + +## Design Decisions + + + +### DD1: [Decision Name] +**Decision**: [What was decided] + +**Rationale**: +- [Reason for this approach] +- [How it supports epic objectives] + +**Alternatives Considered**: +- [Alternative approach]: [Why rejected] + +**Impact**: [Effect on system architecture or user experience] + +### DD2: [Decision Name] +**Decision**: + +**Rationale**: +- + +**Alternatives Considered**: +- + +**Impact**: + +## Implementation Approach + + + + +### Architecture + +[Describe component architecture, data flow, integration points] + +### Components + +**Component 1**: [Name/Purpose] +- Responsibilities: [What it does] +- Interfaces: [APIs or contracts] +- Dependencies: [What it depends on] + +**Component 2**: [Name/Purpose] +- Responsibilities: +- Interfaces: +- Dependencies: + +### Data Model + + + +### Integration Points + + + +- [System/Service]: [Integration type and purpose] + +## Testing Strategy + + + +### Unit Testing +- [What will be unit tested] +- Target coverage: [Percentage] + +### Integration Testing +- [What integration scenarios to test] +- [Key workflows to validate] + +### End-to-End Testing +- [User workflows to test] +- [Success criteria validation] + +### Performance Testing +- [Performance benchmarks to validate] +- [Load testing approach] + +## Migration Path + + + +### For New Installations +[Approach for greenfield deployments] + +### For Existing Systems +[Upgrade path, data migration, backward compatibility] + +## Dependencies + + + +### Upstream Dependencies + +- [Dependency]: [What's required and why] + +### Downstream Impact + +- [Affected system/component]: [Nature of impact] + +## Success Metrics + + + + +### Launch Criteria +- [ ] [Specific launch requirement] +- [ ] [Specific launch requirement] + +### Post-Launch Metrics +- **[Metric name]**: [Baseline] → [Target] within [timeframe] +- **[Metric name]**: [Baseline] → [Target] within [timeframe] + +## Risks and Mitigations + + + +### Technical Risks +- **Risk**: [Description] + - Likelihood: [High/Medium/Low] + - Impact: [High/Medium/Low] + - Mitigation: [Strategy] + - Contingency: [Fallback plan] + +### Schedule Risks 
+- **Risk**: [Description] + - Mitigation: [Strategy] + +## Traceability Matrix + + + +| Spec Requirement | PRD Requirement | Epic Section | Coverage | +|------------------|-----------------|--------------|----------| +| FR1 | FR[N] | [Section] | Complete | +| FR2 | FR[N] | [Section] | Partial | +| NFR1 | NFR[N] | [Section] | Complete | + +**Coverage Summary**: +- Total PRD requirements in epic: [Count] +- Requirements addressed in spec: [Count] +- Coverage percentage: [Percentage] + +EOF + + # Generate tasks.md + cat > "$changes_dir/tasks.md" << EOF +--- +title: $epic_name Tasks +type: task-breakdown +status: draft +prd: docs/prds/$project_name/prd.md +epic: $epic_name +created: $today +updated: $today +--- + +# Tasks: $epic_name + + + + +## Overview + +**Epic**: $epic_name +**PRD**: docs/prds/$project_name/prd.md +**Estimated Effort**: [From epic] sprints ([N] weeks) + +## Task Breakdown + + + + + +--- + +## Task 1: [Task Name] + + + +**Description**: [What needs to be done] + +**Subtasks**: +- [ ] [Specific subtask] +- [ ] [Specific subtask] +- [ ] [Specific subtask] + +**Acceptance Criteria**: +- [Specific, testable criterion] +- [Specific, testable criterion] + +**Dependencies**: +- [What must be complete first] + +**Estimated Effort**: [Hours or days] + +**Related Requirements**: FR[N], NFR[N] + +--- + +## Task 2: [Task Name] + + + +**Description**: + +**Subtasks**: +- [ ] +- [ ] +- [ ] + +**Acceptance Criteria**: +- +- + +**Dependencies**: +- Task 1 + +**Estimated Effort**: + +**Related Requirements**: + +--- + +## Task 3: [Task Name] + + + +**Description**: + +**Subtasks**: +- [ ] +- [ ] + +**Acceptance Criteria**: +- +- + +**Dependencies**: +- Task 2 + +**Estimated Effort**: + +**Related Requirements**: + +--- + +## Task 4: [Task Name] + + + +**Description**: + +**Subtasks**: +- [ ] +- [ ] + +**Acceptance Criteria**: +- +- + +**Dependencies**: +- Task 3 + +**Estimated Effort**: + +**Related Requirements**: + +--- + +## Summary + +**Total Tasks**: [Count] +**Total Estimated Effort**: [Sum] hours/days +**Sprint Allocation**: [How tasks map to sprints] + +### Task Dependencies + +\`\`\` +Task 1 (Foundation) + │ + ├─→ Task 2 (Core Implementation) + │ │ + │ └─→ Task 3 (Integration) + │ + └─→ Task 4 (Documentation) - Can run in parallel +\`\`\` + +### Milestones + +1. **Foundation Complete** (Task 1): [Description] +2. **Core Features** (Task 2): [Description] +3. **Integration Ready** (Task 3): [Description] +4. **Launch Ready** (Task 4): [Description] + +## Next Steps + + + +1. **Review and Refine** + - Review task breakdown with team + - Validate effort estimates + - Identify any missing tasks + +2. **Sprint Planning** + - Use sprint-planner skill to create GitHub issues + - Allocate tasks across sprint(s) + - Assign team members + +3. **Begin Execution** + - Use issue-executor skill to start implementation + - Track progress through GitHub issues + - Update task status as work completes + +## Traceability + +**Epic**: $epic_name in docs/prds/$project_name/epics.md +**PRD**: docs/prds/$project_name/prd.md +**Spec**: docs/changes/$epic_dir_name/spec-delta.md + +**Requirements Covered**: + +- FR[N]: [Requirement name] - Tasks [N, N, N] +- NFR[N]: [Requirement name] - Tasks [N, N] + +EOF + + echo "Successfully generated spec proposal in $changes_dir" + echo "" + echo "Generated files:" + echo " - $changes_dir/proposal.md" + echo " - $changes_dir/spec-delta.md" + echo " - $changes_dir/tasks.md" + echo "" + echo "Next steps:" + echo "1. 
Review and populate the generated files with epic details from $epics_file" + echo "2. Extract requirements from the epic section in epics.md" + echo "3. Link spec requirements back to PRD requirements" + echo "4. Validate traceability from spec to PRD" + echo "5. Open a Spec PR using spec-authoring workflow" +} + +# --- MAIN --- + +COMMAND=$1 +shift || true + +case "$COMMAND" in + status) + status "$@" + ;; + brief) + brief "$@" + ;; + research) + research "$@" + ;; + create-prd) + create-prd "$@" + ;; + validate-prd) + validate-prd "$@" + ;; + decompose) + decompose "$@" + ;; + generate-spec) + generate-spec "$@" + ;; + *) + echo "Error: Unknown command '$COMMAND'" >&2 + echo "Usage: $0 {status|brief|research|create-prd|validate-prd|decompose|generate-spec} ..." >&2 + echo "" >&2 + echo "Commands:" >&2 + echo " status [project-name] - Assess project readiness and show next steps" >&2 + echo " brief - Create product brief template" >&2 + echo " research - Create research template" >&2 + echo " create-prd - Create PRD template" >&2 + echo " validate-prd [--lenient] - Validate PRD quality" >&2 + echo " decompose - Break PRD into epics" >&2 + echo " generate-spec - Generate spec proposal from epic" >&2 + exit 1 + ;; +esac diff --git a/prd-authoring/scripts/validate_prd_llm.py b/prd-authoring/scripts/validate_prd_llm.py new file mode 100755 index 0000000..8c02bdf --- /dev/null +++ b/prd-authoring/scripts/validate_prd_llm.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +""" +LLM-based PRD quality validator for the prd-authoring skill. + +This script uses an LLM to analyze PRD files for quality issues such as +untestable requirements and unclear success criteria. +""" + +import sys +import subprocess +import argparse + + +def analyze_prd_quality(prd_content: str) -> str: + """ + Calls the LLM to perform quality analysis on a PRD. + + Args: + prd_content: The full content of the PRD file. + + Returns: + The LLM's analysis as a string. + Returns an empty string if the LLM call fails. + """ + prompt = f"""You are a technical product manager reviewing a Product Requirements Document (PRD) for quality issues. + +Please analyze the following PRD and identify: + +1. **Untestable Requirements**: Requirements that are difficult or impossible to test because they lack: + - Clear, measurable acceptance criteria + - Specific, quantifiable metrics + - Objective verification methods + - Clear pass/fail conditions + +2. **Unclear Success Criteria**: Success criteria that are ambiguous or poorly defined because they: + - Use vague or subjective language + - Lack specific numeric targets or thresholds + - Are not measurable or verifiable + - Don't specify timeframes or measurement methods + +For each issue found, provide: +- The section/requirement where the issue appears (with line reference if possible) +- A brief explanation of why it's problematic +- A specific suggestion for improvement + +If the PRD is well-written with clear, testable requirements and measurable success criteria, respond with exactly: "No quality issues found. The PRD has clear, testable requirements and well-defined success criteria." + +Format your response in markdown with clear sections for Untestable Requirements and Unclear Success Criteria. 
+ +--- + +PRD CONTENT: + +{prd_content} + +--- + +ANALYSIS: +""" + + try: + # Using gemini-2.5-flash for text analysis + result = subprocess.run( + ['gemini', '--model', 'gemini-2.5-flash'], + input=prompt, + capture_output=True, + text=True, + timeout=60, + check=True # This will raise CalledProcessError for non-zero exit codes + ) + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.CalledProcessError) as e: + print(f"Warning: LLM quality analysis failed: {e}", file=sys.stderr) + return "" + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description='LLM-based PRD quality validator.' + ) + parser.add_argument( + '--prd-file', + required=True, + help='Path to the PRD file to analyze.' + ) + args = parser.parse_args() + + try: + # Read PRD content from file + with open(args.prd_file, 'r', encoding='utf-8') as f: + prd_content = f.read() + + if not prd_content.strip(): + print("Error: PRD file is empty", file=sys.stderr) + sys.exit(1) + + # Perform LLM analysis + analysis = analyze_prd_quality(prd_content) + + if analysis: + print(analysis) + sys.exit(0) + else: + # Exit with non-zero status code to indicate failure + sys.exit(1) + + except FileNotFoundError: + print(f"Error: PRD file not found: {args.prd_file}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/project-init/.gitkeep b/project-init/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/project-init/SKILL.md b/project-init/SKILL.md new file mode 100644 index 0000000..9f696dd --- /dev/null +++ b/project-init/SKILL.md @@ -0,0 +1,216 @@ +--- +name: project-init +description: Use this skill when starting a new project or adding SynthesisFlow to an existing project. Scaffolds the directory structure (docs/specs, docs/changes) and configuration files needed for the spec-driven development workflow. +--- + +# Project Init Skill + +## Purpose + +Initialize a new project with the SynthesisFlow directory structure and configuration files. This skill sets up the foundational folders needed for the spec-driven development workflow, creating a standard structure for specifications, change proposals, and documentation. + +## When to Use + +Use this skill in the following situations: + +- Starting a completely new project that will use SynthesisFlow +- Adding SynthesisFlow methodology to an existing project **with no existing documentation** +- Setting up a consistent structure for spec-driven development +- Ensuring project follows SynthesisFlow conventions from the beginning + +**Important**: If the project already has documentation in a `docs/` directory, use the **project-migrate** skill instead. It will properly catalog, categorize, and migrate existing documentation into the SynthesisFlow structure while preserving git history and updating links. + +## Prerequisites + +- Write permissions to the target directory +- Git repository already initialized (recommended but not required) + +## Workflow + +### Step 1: Assess the Current Project State + +Before initializing, determine: +- Is this a brand new project or an existing codebase? +- Does a `docs/` directory already exist? +- If `docs/` exists, does it contain markdown files? +- Where should the SynthesisFlow structure be created? 
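+
+A quick way to check whether `docs/` already holds documentation is to count markdown files outside the SynthesisFlow directories — this mirrors the check that init-project.sh performs. The sketch below assumes it is run from the project root:
+
+```bash
+# Count markdown docs that are not already in docs/specs/ or docs/changes/
+find docs -type f -name "*.md" ! -path "*/specs/*" ! -path "*/changes/*" 2>/dev/null | wc -l
+```
+
+A count of zero means project-init is sufficient; anything greater suggests using project-migrate instead.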
+ +**Decision Tree**: +- **No docs/ directory**: Proceed with project-init (this skill) +- **Empty docs/ directory**: Proceed with project-init (this skill) +- **docs/ with existing markdown files**: Use **project-migrate** skill instead + +The init-project.sh script will automatically detect existing documentation and suggest using project-migrate if appropriate. + +### Step 2: Run the Initialization Script + +Execute the helper script to create the directory structure: + +**For current directory:** +```bash +bash scripts/init-project.sh +``` + +**For a specific directory:** +```bash +bash scripts/init-project.sh -d /path/to/project +``` + +The script will create: +- `docs/specs/` - Source-of-truth for approved specifications +- `docs/changes/` - Staging area for proposed changes (Spec PRs) + +### Step 3: Verify Structure Creation + +Check that the directories were created successfully: +```bash +ls -la docs/ +``` + +Expected output: +``` +docs/ +├── specs/ +└── changes/ +``` + +### Step 4: Initialize Supporting Files (Manual) + +After the directory structure is created, consider adding these files: + +**Create RETROSPECTIVE.md** (in project root): +```bash +cat > RETROSPECTIVE.md << 'EOF' +# Development Retrospective + +This file captures learnings from completed tasks to inform and improve future development work. + +## Active Improvements +EOF +``` + +**Create AGENTS.md** (using agent-integrator skill): +```bash +# Use the agent-integrator skill to create AGENTS.md +bash skills/agent-integrator/scripts/update-agents-file.sh +``` + +### Step 5: Next Steps + +After initialization, guide the user on getting started: + +1. **Create first specification**: Use the `spec-authoring` skill to propose the first feature +2. **Set up GitHub integration**: Create GitHub repository if not exists, set up project board +3. **Document the system**: Add initial specs to `docs/specs/` directory +4. 
**Initialize git tracking**: Ensure new directories are committed to version control + +## Error Handling + +### Directory Already Exists + +**Symptom**: Script reports that directories already exist or initialization appears to do nothing + +**Solution**: +- Check if `docs/specs/` and `docs/changes/` already exist +- If they exist, the project is already initialized +- No action needed - the script is idempotent + +### Permission Denied + +**Symptom**: "Permission denied" when creating directories + +**Solution**: +- Verify write permissions to the target directory +- Check if parent directory exists +- Try with appropriate permissions: `sudo` if necessary (rare) + +### Wrong Directory Initialized + +**Symptom**: Directories created in unexpected location + +**Solution**: +- Remove incorrect directories: `rm -rf docs/` +- Re-run with explicit path: `bash scripts/init-project.sh -d /correct/path` +- Always verify current working directory before running + +## Directory Structure Explained + +### docs/specs/ + +**Purpose**: Source-of-truth for all approved specifications + +**Contents**: +- Approved specification files +- Design documents +- Architecture decisions +- System requirements + +**Example structure**: +``` +docs/specs/ +├── 001-initial-system.md +├── 002-authentication.md +└── feature-name/ + ├── spec.md + └── design.md +``` + +### docs/changes/ + +**Purpose**: Staging area for proposed changes before approval + +**Contents**: +- Change proposals in review +- Spec deltas for new features +- Task breakdowns +- Planning documents + +**Example structure**: +``` +docs/changes/ +├── my-feature/ +│ ├── proposal.md +│ ├── spec-delta.md +│ └── tasks.md +└── another-feature/ + └── proposal.md +``` + +**Workflow**: Changes start in `docs/changes/`, get approved via Spec PR, then move to `docs/specs/` + +## project-init vs project-migrate + +Understanding when to use each skill: + +### Use project-init when: +- Starting a **brand new project** from scratch +- Project has **no existing documentation** +- docs/ directory is **empty** or doesn't exist +- You just need the basic SynthesisFlow directory structure + +### Use project-migrate when: +- Project has **existing documentation** in docs/ or other locations +- You want to **migrate legacy docs** into SynthesisFlow structure +- You need to **preserve git history** during migration +- Documentation has **relative links** that need updating +- You want **doc-indexer compliant frontmatter** added automatically + +### Smooth Handoff + +The init-project.sh script automatically detects existing documentation and will: +1. Count markdown files in docs/ (excluding docs/specs/ and docs/changes/) +2. If found, display a recommendation to use project-migrate +3. Show the benefits of using project-migrate over basic initialization +4. Give you the option to continue with project-init or cancel + +This ensures you always use the right skill for your situation. 
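+
+The Next Steps above recommend committing the new directories to version control. Because git does not track empty directories, one way to record the fresh scaffold is to add placeholder files before committing. This is only a sketch, assuming you are in the project root; the commit message is illustrative:
+
+```bash
+# Keep the empty SynthesisFlow directories in git and commit the scaffold
+touch docs/specs/.gitkeep docs/changes/.gitkeep
+git add docs/
+git commit -m "chore: add SynthesisFlow directory structure"
+```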
+ +## Notes + +- The script is **idempotent** - safe to run multiple times +- Existing directories won't be overwritten or deleted +- The script only creates directories, no files are created automatically +- Consider adding `.gitkeep` files to track empty directories in git +- This is just the directory scaffold - content comes from using other skills +- The structure is intentionally minimal - projects add what they need +- **Detection logic**: The script checks for markdown files in docs/, excluding those already in specs/ or changes/ subdirectories diff --git a/project-init/scripts/init-project.sh b/project-init/scripts/init-project.sh new file mode 100755 index 0000000..c1461ee --- /dev/null +++ b/project-init/scripts/init-project.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# This script scaffolds the basic directory structure for a SynthesisFlow project. + +set -e + +usage() { + echo "Usage: $0 [-d ]" + echo " -d : The root directory of the project to initialize. Defaults to the current directory." + exit 1 +} + +PROJECT_DIR="." + +while getopts ":d:" opt; do + case ${opt} in + d ) + PROJECT_DIR=$OPTARG + ;; + \? ) + echo "Invalid option: $OPTARG" 1>&2 + usage + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + usage + ;; + esac +done +shift $((OPTIND -1)) + +# Ensure the project directory exists +mkdir -p "$PROJECT_DIR" + +# Check if docs directory already exists with content +if [ -d "$PROJECT_DIR/docs" ]; then + # Count markdown files in docs/ (excluding docs/specs/ and docs/changes/) + EXISTING_DOCS=$(find "$PROJECT_DIR/docs" -type f -name "*.md" ! -path "*/specs/*" ! -path "*/changes/*" 2>/dev/null | wc -l) + + if [ "$EXISTING_DOCS" -gt 0 ]; then + echo "⚠️ Existing documentation detected in $PROJECT_DIR/docs/" + echo "" + echo "Found $EXISTING_DOCS markdown file(s) that may need migration." + echo "" + echo "💡 Recommendation: Use the 'project-migrate' skill instead!" + echo "" + echo "The project-migrate skill will:" + echo " • Discover and catalog your existing documentation" + echo " • Suggest appropriate locations (specs/, changes/, or root)" + echo " • Migrate files while preserving git history" + echo " • Update relative links automatically" + echo " • Add doc-indexer compliant frontmatter" + echo " • Create backups for safe rollback" + echo "" + echo "To use project-migrate:" + echo " bash skills/project-migrate/scripts/project-migrate.sh" + echo "" + echo "Or continue with basic initialization (existing docs will be preserved)..." + read -p "Continue with project-init anyway? (y/N): " CONTINUE + + if [[ ! "$CONTINUE" =~ ^[Yy]$ ]]; then + echo "Initialization cancelled. Use project-migrate skill to migrate existing docs." + exit 0 + fi + fi +fi + +echo "Initializing SynthesisFlow structure in $PROJECT_DIR..." +mkdir -p "$PROJECT_DIR/docs/specs" +mkdir -p "$PROJECT_DIR/docs/changes" +echo "✓ Done." \ No newline at end of file diff --git a/project-migrate/SKILL.md b/project-migrate/SKILL.md new file mode 100644 index 0000000..f24cdee --- /dev/null +++ b/project-migrate/SKILL.md @@ -0,0 +1,105 @@ +--- +name: project-migrate +description: Use this skill to migrate existing projects to the SynthesisFlow structure. It uses an AI-powered analysis to intelligently discover, categorize, and migrate documentation, generate rich frontmatter, and preserve git history. +--- + +# Project Migrate Skill + +## Purpose + +To intelligently migrate existing projects (brownfield) to the SynthesisFlow directory structure using a powerful, AI-assisted workflow. 
This skill goes beyond simple file moving by leveraging the **Gemini CLI** to analyze document content, ensuring accurate categorization and the generation of rich, meaningful metadata. It provides a safe, guided migration with discovery, analysis, backup, and validation phases to ensure zero data loss and high-quality results. + +## When to Use + +Use this skill in the following situations: + +- Adding SynthesisFlow to an existing project with established documentation. +- Migrating docs from an ad-hoc structure to SynthesisFlow conventions. +- When you want to automatically and intelligently categorize and add metadata to existing documents. +- To ensure a safe migration with backups and rollback capabilities. + +## Prerequisites + +- Project with existing documentation (`docs/`, `documentation/`, `wiki/`, or markdown files). +- Git repository initialized. +- Write permissions to the project directory. +- `gemini` CLI tool installed and authenticated. +- `doc-indexer` skill available for final compliance checking. + +## Workflow + +The skill guides you through a series of phases with interactive approval. + +### Step 1: Run the Migration Script + +Execute with one of three modes: + +**Interactive (default)** - Review and approve each phase: +```bash +bash scripts/project-migrate.sh +``` + +**Dry-run** - Preview the plan without making any changes: +```bash +bash scripts/project-migrate.sh --dry-run +``` + +**Auto-approve** - Skip prompts for automation (useful for CI/CD): +```bash +bash scripts/project-migrate.sh --auto-approve +``` + +### Step 2: Review Each Phase + +**Phase 1 & 2 - AI-Powered Discovery and Analysis**: +The script scans for all markdown files. For each file, it calls the **Gemini CLI** to analyze the document's *content*, not just its filename. This results in a much more accurate categorization of files into types like `spec`, `proposal`, `adr`, etc. The output is a detailed plan mapping each file to its new, correct location in the SynthesisFlow structure. + +**Phase 3 - Planning**: +Shows you the complete, AI-driven migration plan for your approval. You can review source and target mappings before any files are moved. + +**Phase 4 - Backup**: +Creates a timestamped backup directory of your entire `docs/` folder and includes a `rollback.sh` script before any changes are made. + +**Phase 5 - Migration**: +Executes the plan, moving files using `git mv` to preserve history and creating the necessary directory structure. + +**Phase 6 - LLM-Based Link Updates**: +Uses the Gemini CLI to intelligently identify and correct broken or outdated relative links within migrated files. This LLM-based approach is more robust than simple path recalculation, as it understands document context and can handle edge cases that pattern matching might miss. + +**Phase 7 - Validation**: +Verifies that all files were migrated correctly, checks link integrity, and validates the new directory structure. + +**Phase 8 - AI-Powered Frontmatter Generation (Optional)**: +For files that lack YAML frontmatter, the script uses the **Gemini CLI** to read the file content and generate rich, `doc-indexer` compliant frontmatter. This includes a suggested `title`, the `type` determined during the analysis phase, and a concise `description` summarizing the document's purpose. + +### Step 3: Post-Migration + +After successful completion: +- Review the validation report for any warnings. +- Run the `doc-indexer` skill to verify full documentation compliance. +- Commit the migration changes to git. 
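+
+A minimal wrap-up after a successful run might look like the following sketch. The doc-indexer invocation is illustrative (it assumes the plugin's skills live under `skills/`; the exact flags may differ), and the commit message is only an example:
+
+```bash
+# Re-index the migrated docs, review the result, then commit the migration
+bash skills/doc-indexer/scripts/scan-docs.sh
+git status --short   # confirm the expected renames and moves
+git add -A
+git commit -m "docs: migrate documentation to SynthesisFlow structure"
+```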
+ +## Error Handling + +### Gemini CLI Issues + +**Symptom**: The script fails during the "Analysis" or "Frontmatter Generation" phase with an error related to the `gemini` command. + +**Solution**: +- Ensure the `gemini` CLI is installed and in your system's PATH. +- Verify you are authenticated by running `gemini auth`. +- Check for Gemini API outages or network connectivity issues. +- The script has basic fallbacks, but for best results, ensure the Gemini CLI is functional. + +### Other Issues + +For issues related to permissions, conflicts, or broken links, the script provides detailed error messages and resolution suggestions during its interactive execution. The backup and rollback script is always available for a safe exit. + +## Notes + +- **AI-Enhanced**: Uses Gemini for intelligent content analysis, not just simple pattern matching. +- **Safe by default**: Creates a full backup with a rollback script before making any changes. +- **Git-aware**: Preserves file history using `git mv`. +- **Interactive**: You review and approve the AI-generated plan before execution. +- **Rich Metadata**: Generates high-quality frontmatter, including titles and descriptions. +- **LLM-Powered Link Correction**: Uses Gemini to intelligently update relative links with context awareness. \ No newline at end of file diff --git a/project-migrate/scripts/correct_links_llm.py b/project-migrate/scripts/correct_links_llm.py new file mode 100755 index 0000000..206ef60 --- /dev/null +++ b/project-migrate/scripts/correct_links_llm.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python3 +""" +LLM-based link correction for project-migrate skill + +This script uses an LLM to intelligently identify and correct broken or outdated +links within markdown content during file migration. +""" + +import sys +import os +import re +import argparse +import json +import subprocess +from pathlib import Path +from typing import List, Dict, Tuple, Optional + + +def extract_markdown_links(content: str) -> List[Dict]: + """ + Extract all markdown links from content and return structured information. + + Args: + content: Markdown content to analyze + + Returns: + List of dictionaries with link information + """ + links = [] + + # Pattern to match markdown links: [text](path) and ![alt](path) + pattern = r'!\[([^\]]*)\]\(([^)]+)\)|\[([^\]]*)\]\(([^)]+)\)' + + for match in re.finditer(pattern, content): + alt_text, img_src, link_text, link_href = match.groups() + + if img_src: + # Image link + links.append({ + 'type': 'image', + 'alt': alt_text, + 'path': img_src, + 'full_match': match.group(0) + }) + elif link_href: + # Regular link + links.append({ + 'type': 'link', + 'text': link_text, + 'path': link_href, + 'full_match': match.group(0) + }) + + return links + + +def should_skip_link(link_path: str) -> bool: + """ + Determine if a link should be skipped (external URLs, anchors, etc.). + + Args: + link_path: The path part of the link + + Returns: + True if the link should be skipped + """ + # Skip absolute URLs + if link_path.startswith(('http://', 'https://', 'mailto:', 'ftp://', 'tel:')): + return True + + # Skip anchor links + if link_path.startswith('#'): + return True + + # Skip email links without mailto prefix + if '@' in link_path and not link_path.startswith(('http://', 'https://')): + return True + + return False + + +def get_file_context(file_path: str) -> Dict: + """ + Get context about the file being processed. 
+ + Args: + file_path: Path to the file + + Returns: + Dictionary with file context information + """ + path = Path(file_path) + + try: + relative_to_root = str(path.relative_to(Path.cwd())) + except ValueError: + # Handle case where file is not subdirectory of current working directory + relative_to_root = str(path) + + context = { + 'file_path': str(path.absolute()), + 'filename': path.name, + 'directory': str(path.parent.absolute()), + 'relative_to_root': relative_to_root, + } + + return context + + +def call_llm_for_link_correction(content: str, context: Dict) -> str: + """ + Call LLM to perform intelligent link correction. + + Args: + content: Original markdown content + context: File context information + + Returns: + Corrected markdown content + """ + try: + # Prepare the prompt for the LLM + prompt = f"""You are a markdown link correction assistant. Your task is to identify and correct broken or outdated relative links in the following markdown content. + +Context: +- File: {context['relative_to_root']} +- Directory: {context['directory']} + +Instructions: +1. Analyze all relative links in the content +2. For each link, determine if it points to an existing file +3. If a link appears broken or outdated, suggest a corrected path +4. Common migrations to consider: + - Files moved from root to docs/ directory + - Files moved from docs/ to docs/specs/ or docs/changes/ + - Changes in file extensions or naming conventions +5. Preserve all external URLs, anchors, and email links unchanged +6. Only modify links that clearly need correction + +Return ONLY the corrected markdown content without any additional explanation. + +Content to analyze: +{content}""" + + # Call Gemini CLI if available, otherwise fallback to a simple pass-through + try: + result = subprocess.run( + ['gemini', '--model', 'gemini-2.5-flash'], + input=prompt, + capture_output=True, + text=True, + timeout=30 + ) + + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError): + # Gemini not available or timed out - fallback to basic processing + pass + + except Exception as e: + print(f"Warning: LLM call failed: {e}", file=sys.stderr) + + # Fallback: return original content unchanged + return content + + +def validate_corrected_links(original: str, corrected: str) -> Dict[str, int]: + """ + Compare original and corrected content to count changes. + + Args: + original: Original markdown content + corrected: Corrected markdown content + + Returns: + Dictionary with change statistics + """ + original_links = extract_markdown_links(original) + corrected_links = extract_markdown_links(corrected) + + original_paths = {link['path'] for link in original_links if not should_skip_link(link['path'])} + corrected_paths = {link['path'] for link in corrected_links if not should_skip_link(link['path'])} + + changes = { + 'total_links': len(original_links), + 'skipped_links': len([link for link in original_links if should_skip_link(link['path'])]), + 'corrected_links': len(original_paths - corrected_paths), + 'new_links': len(corrected_paths - original_paths) + } + + return changes + + +def correct_links_in_content(content: str, file_path: str) -> Tuple[str, Dict]: + """ + Correct links in markdown content using LLM. 
+ + Args: + content: Markdown content to process + file_path: Path to the file being processed + + Returns: + Tuple of (corrected_content, statistics) + """ + # Extract links for analysis + links = extract_markdown_links(content) + + # Filter for links that need processing + processable_links = [link for link in links if not should_skip_link(link['path'])] + + if not processable_links: + # No links to process + return content, { + 'total_links': len(links), + 'processable_links': 0, + 'corrected_links': 0, + 'llm_called': False + } + + # Get file context + context = get_file_context(file_path) + + # Call LLM for correction + corrected_content = call_llm_for_link_correction(content, context) + + # Validate changes + changes = validate_corrected_links(content, corrected_content) + changes.update({ + 'processable_links': len(processable_links), + 'llm_called': True + }) + + return corrected_content, changes + + +def main(): + """Main entry point.""" + parser = argparse.ArgumentParser( + description='LLM-based markdown link correction', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Correct links in a file + cat README.md | correct_links_llm.py --file README.md + + # Process multiple files + find docs -name "*.md" -exec correct_links_llm.py --file {} \\; + + # Show statistics only + cat file.md | correct_links_llm.py --file file.md --stats-only + """ + ) + + parser.add_argument( + '--file', + required=True, + help='Path to the file being processed (required for context)' + ) + + parser.add_argument( + '--stats-only', + action='store_true', + help='Only show statistics, don\'t output corrected content' + ) + + parser.add_argument( + '--dry-run', + action='store_true', + help='Analyze without making changes' + ) + + args = parser.parse_args() + + try: + # Read content from stdin + content = sys.stdin.read() + + if not content.strip(): + print("Error: No content provided on stdin", file=sys.stderr) + sys.exit(1) + + # Correct links + corrected_content, stats = correct_links_in_content(content, args.file) + + # Output statistics + if stats['llm_called']: + print(f"Link correction statistics for {args.file}:", file=sys.stderr) + print(f" Total links: {stats['total_links']}", file=sys.stderr) + print(f" Processable links: {stats['processable_links']}", file=sys.stderr) + print(f" Corrected links: {stats['corrected_links']}", file=sys.stderr) + print(f" Skipped links: {stats['skipped_links']}", file=sys.stderr) + else: + print(f"No links to process in {args.file}", file=sys.stderr) + + # Output corrected content (unless stats-only) + if not args.stats_only: + print(corrected_content) + + except KeyboardInterrupt: + print("\nInterrupted by user", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/project-migrate/scripts/project-migrate.sh b/project-migrate/scripts/project-migrate.sh new file mode 100755 index 0000000..710f5db --- /dev/null +++ b/project-migrate/scripts/project-migrate.sh @@ -0,0 +1,2091 @@ +#!/bin/bash +# Project Migrate Script +# Migrates existing (brownfield) projects to SynthesisFlow structure + +set -e + +# Configuration +DRY_RUN=false +AUTO_APPROVE=false +INTERACTIVE=true +BACKUP_DIR="" +MIGRATION_MANIFEST="" + +# Global arrays for discovered files +declare -a DISCOVERED_FILES=() +declare -A FILE_TYPES=() +declare -A FILE_CATEGORIES=() +declare -A FILE_TARGETS=() +declare -A FILE_RATIONALES=() +declare -A 
FILE_CONFLICTS=() + +# Function to prompt for phase continuation +prompt_phase_continue() { + local phase_name="$1" + local phase_description="$2" + + if [ "$INTERACTIVE" = false ] || [ "$DRY_RUN" = true ]; then + return 0 + fi + + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "📋 Ready to proceed to: $phase_name" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + echo "$phase_description" + echo "" + read -p "Continue to $phase_name? [Y/n]: " response + + case "$response" in + n|N|no|No|NO) + echo "" + echo "Migration paused. You can:" + echo " • Review the output above" + echo " • Run with --dry-run to see the complete plan" + echo " • Run again when ready to continue" + echo "" + exit 0 + ;; + *) + echo "" + return 0 + ;; + esac +} + +# Function to show progress indicator +show_progress() { + local current="$1" + local total="$2" + local description="$3" + + local percentage=$((current * 100 / total)) + local filled=$((percentage / 5)) + local empty=$((20 - filled)) + + printf "\r[%-20s] %3d%% - %s" \ + "$(printf '#%.0s' $(seq 1 $filled))$(printf ' %.0s' $(seq 1 $empty))" \ + "$percentage" \ + "$description" + + if [ "$current" -eq "$total" ]; then + echo "" + fi +} + +# Function to detect documentation type using Gemini +detect_file_type() { + local file="$1" + local basename=$(basename "$file") + + # Handle README as a special case based on filename, not content + if [[ "$basename" =~ ^README\.md$ ]] || [[ "$basename" =~ ^readme\.md$ ]]; then + echo "readme" + return + fi + + echo "Analyzing $file with Gemini to determine type..." >&2 + + # Use a temporary file for the prompt to handle special characters + local prompt_file=$(mktemp) + echo "Analyze the following document and determine its type. The type must be one of: 'spec', 'proposal', 'adr', 'design', 'plan', 'retrospective', or 'doc'. Return only the type as a single word." 
> "$prompt_file" + + # Call Gemini with the file and the prompt + local detected_type=$(gemini -p "@${prompt_file} @${file}" | tr '[:upper:]' '[:lower:]' | tr -d '[:space:]') + rm "$prompt_file" + + # Validate the output from Gemini + case "$detected_type" in + spec|proposal|adr|design|plan|retrospective|doc) + echo "$detected_type" + ;; + *) + echo "doc" # Default if Gemini returns an invalid type or fails + ;; + esac +} + +# Function to determine target category based on file type +categorize_file() { + local file="$1" + local file_type="$2" + + case "$file_type" in + spec) + echo "spec" + ;; + proposal) + echo "proposal" + ;; + adr) + echo "spec" # ADRs are architectural specs + ;; + design) + echo "spec" # Design docs are architectural specs + ;; + plan) + echo "spec" # Plans are planning specs + ;; + retrospective) + echo "root" # Retrospectives stay at root + ;; + readme) + echo "preserve" # READMEs stay in place + ;; + doc) + echo "doc" # General documentation + ;; + *) + echo "doc" # Default to general documentation + ;; + esac +} + +# Function to generate target path for a file +generate_target_path() { + local file="$1" + local category="$2" + local file_type="$3" + local basename=$(basename "$file") + local dirname=$(dirname "$file") + + case "$category" in + spec) + # Check if already in docs/specs/ + if [[ "$dirname" == "docs/specs" ]]; then + echo "$file" + else + # Preserve subdirectory structure to avoid name collisions + if [[ "$dirname" == docs/changes/* ]]; then + # Extract subdirectory name from docs/changes/subdir/file.md + local subdir=$(echo "$dirname" | sed 's|docs/changes/||') + echo "docs/specs/$subdir/$basename" + else + echo "docs/specs/$basename" + fi + fi + ;; + proposal) + # Check if already in docs/changes/ + if [[ "$dirname" == docs/changes/* ]]; then + echo "$file" + else + echo "docs/changes/$basename" + fi + ;; + doc) + # Check if already in docs/ (but not docs/specs or docs/changes) + if [[ "$dirname" == "docs" ]]; then + echo "$file" + else + # Preserve subdirectory structure to avoid name collisions + if [[ "$dirname" == docs/changes/* ]]; then + # Extract subdirectory name from docs/changes/subdir/file.md + local subdir=$(echo "$dirname" | sed 's|docs/changes/||') + echo "docs/$subdir/$basename" + else + echo "docs/$basename" + fi + fi + ;; + root) + # Retrospectives become RETROSPECTIVE.md at root + if [ "$file_type" = "retrospective" ]; then + echo "RETROSPECTIVE.md" + else + echo "$basename" + fi + ;; + preserve) + # Keep in original location + echo "$file" + ;; + *) + echo "docs/$basename" + ;; + esac +} + +# Function to get rationale for categorization +get_categorization_rationale() { + local file="$1" + local file_type="$2" + local category="$3" + local target="$4" + + # Check if file is already in target location + if [ "$file" = "$target" ]; then + echo "Already in correct location (no move needed)" + return + fi + + case "$category" in + spec) + if [ "$file_type" = "spec" ]; then + echo "Specification → docs/specs/ (SynthesisFlow source-of-truth)" + elif [ "$file_type" = "adr" ]; then + echo "ADR → docs/specs/ (architectural decisions are specs)" + elif [ "$file_type" = "design" ]; then + echo "Design doc → docs/specs/ (architectural documentation)" + elif [ "$file_type" = "plan" ]; then + echo "Plan → docs/specs/ (planning documentation)" + else + echo "→ docs/specs/" + fi + ;; + proposal) + echo "Proposal → docs/changes/ (proposed changes under review)" + ;; + doc) + echo "General documentation → docs/" + ;; + root) + echo "Retrospective 
→ root/RETROSPECTIVE.md (SynthesisFlow convention)" + ;; + preserve) + echo "README preserved in original location" + ;; + *) + echo "→ docs/" + ;; + esac +} + +# Function to check for conflicts +check_conflict() { + local source="$1" + local target="$2" + + # Normalize paths for comparison (remove leading ./) + local normalized_source="${source#./}" + local normalized_target="${target#./}" + + # If source and target are the same, no conflict (already in place) + if [ "$normalized_source" = "$normalized_target" ]; then + echo "in_place" + return + fi + + # Check if target already exists and is not the source + if [ -f "$target" ] && [ "$normalized_source" != "$normalized_target" ]; then + echo "true" + else + echo "false" + fi +} + +# Function to analyze discovered files +analyze_files() { + echo "Categorizing files and detecting conflicts..." + echo "" + + for file in "${DISCOVERED_FILES[@]}"; do + local file_type="${FILE_TYPES[$file]}" + + # Determine category + local category=$(categorize_file "$file" "$file_type") + FILE_CATEGORIES["$file"]="$category" + + # Generate target path + local target=$(generate_target_path "$file" "$category" "$file_type") + FILE_TARGETS["$file"]="$target" + + # Get rationale (now includes file and target for "already in place" detection) + local rationale=$(get_categorization_rationale "$file" "$file_type" "$category" "$target") + FILE_RATIONALES["$file"]="$rationale" + + # Check for conflicts (now includes source to detect "already in place") + local has_conflict=$(check_conflict "$file" "$target") + FILE_CONFLICTS["$file"]="$has_conflict" + done +} + +# Function to discover markdown files +discover_files() { + local search_paths=( + "docs" + "documentation" + "wiki" + "." # Root level for READMEs and other top-level docs + ) + + echo "Scanning for markdown files..." + + for search_path in "${search_paths[@]}"; do + if [ ! -d "$search_path" ]; then + continue + fi + + # Find all .md files, excluding .git directory and node_modules + while IFS= read -r -d '' file; do + # Skip files in .git, node_modules, and hidden directories (except root level) + if [[ "$file" =~ /\. ]] || [[ "$file" =~ node_modules ]] || [[ "$file" =~ /\.git/ ]]; then + continue + fi + + # For root search, only include direct .md files, not in subdirectories + if [ "$search_path" = "." ]; then + # Normalize path (remove leading ./) + normalized="${file#./}" + # Skip if file is in a subdirectory (contains /) + if [[ "$normalized" =~ / ]]; then + continue + fi + fi + + DISCOVERED_FILES+=("$file") + + # Detect and store file type + local file_type=$(detect_file_type "$file") + FILE_TYPES["$file"]="$file_type" + + done < <(find "$search_path" -maxdepth $([ "$search_path" = "." ] && echo "1" || echo "10") -name "*.md" -type f -print0 2>/dev/null) + done + + # Sort discovered files for consistent output + IFS=$'\n' DISCOVERED_FILES=($(sort <<<"${DISCOVERED_FILES[*]}")) + unset IFS +} + +# Parse command-line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --dry-run) + DRY_RUN=true + shift + ;; + --auto-approve) + AUTO_APPROVE=true + INTERACTIVE=false + shift + ;; + *) + echo "Unknown option: $1" + echo "Usage: $0 [--dry-run] [--auto-approve]" + exit 1 + ;; + esac +done + +echo "========================================" +echo " Project Migrate: Brownfield to SynthesisFlow" +echo "========================================" +echo "" + +if [ "$DRY_RUN" = true ]; then + echo "🔍 Mode: DRY RUN" + echo " No changes will be made to your files." + echo " Review the complete migration plan safely." 
+elif [ "$AUTO_APPROVE" = true ]; then + echo "⚡ Mode: AUTO-APPROVE" + echo " Migration will proceed with minimal prompts." + echo " Conflicts will be skipped automatically." + echo " Frontmatter generation will be skipped (requires manual review)." +else + echo "👤 Mode: INTERACTIVE" + echo " You'll review and approve each phase." + echo " Prompts will guide you through the process." + echo " You can pause at any time." +fi +echo "" + +# Phase 1: Discovery +echo "Phase 1: Discovery" +echo "------------------" +echo "Scanning project for existing documentation..." +echo "" + +# Run discovery +discover_files + +# Display inventory summary +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "✓ Discovery complete! Found ${#DISCOVERED_FILES[@]} markdown file(s)" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" + +if [ ${#DISCOVERED_FILES[@]} -eq 0 ]; then + echo "No markdown files found. Nothing to migrate." + exit 0 +fi + +# Group files by type for summary +declare -A type_counts=() +for file in "${DISCOVERED_FILES[@]}"; do + file_type="${FILE_TYPES[$file]}" + type_counts["$file_type"]=$((${type_counts[$file_type]:-0} + 1)) +done + +echo "Inventory by type:" +for file_type in $(echo "${!type_counts[@]}" | tr ' ' '\n' | sort); do + count="${type_counts[$file_type]}" + printf " %-15s %3d file(s)\n" "$file_type:" "$count" +done +echo "" + +echo "Discovered files:" +for file in "${DISCOVERED_FILES[@]}"; do + file_type="${FILE_TYPES[$file]}" + printf " [%-13s] %s\n" "$file_type" "$file" +done +echo "" + +# Prompt to continue to Analysis phase +prompt_phase_continue "Phase 2: Analysis" \ + "This phase will categorize discovered files and generate migration targets. + Files will be assigned to docs/specs/, docs/changes/, or docs/ based on type. + Conflicts with existing files will be detected." + +# Phase 2: Analysis +echo "Phase 2: Analysis" +echo "-----------------" +echo "Categorizing discovered content..." +echo "" + +# Run analysis +analyze_files + +# Display analysis results with rationale +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "✓ Analysis complete!" 
+echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" + +# Count conflicts and in-place files +conflict_count=0 +in_place_count=0 +for file in "${DISCOVERED_FILES[@]}"; do + if [ "${FILE_CONFLICTS[$file]}" = "true" ]; then + conflict_count=$((conflict_count + 1)) + elif [ "${FILE_CONFLICTS[$file]}" = "in_place" ]; then + in_place_count=$((in_place_count + 1)) + fi +done + +if [ $conflict_count -gt 0 ]; then + echo "⚠️ WARNING: $conflict_count conflict(s) detected" + echo "" +fi + +if [ $in_place_count -gt 0 ]; then + echo "ℹ️ INFO: $in_place_count file(s) already in correct location" + echo "" +fi + +# Group by category for summary +declare -A category_counts=() +for file in "${DISCOVERED_FILES[@]}"; do + category="${FILE_CATEGORIES[$file]}" + category_counts["$category"]=$((${category_counts[$category]:-0} + 1)) +done + +echo "Migration plan by category:" +for category in $(echo "${!category_counts[@]}" | tr ' ' '\n' | sort); do + count="${category_counts[$category]}" + case "$category" in + spec) + target_desc="docs/specs/" + ;; + proposal) + target_desc="docs/changes/" + ;; + doc) + target_desc="docs/" + ;; + root) + target_desc="root/" + ;; + preserve) + target_desc="(preserved in place)" + ;; + *) + target_desc="$category" + ;; + esac + printf " %-10s → %-25s %3d file(s)\n" "$category" "$target_desc" "$count" +done +echo "" + +echo "Detailed migration plan:" +for file in "${DISCOVERED_FILES[@]}"; do + file_type="${FILE_TYPES[$file]}" + target="${FILE_TARGETS[$file]}" + rationale="${FILE_RATIONALES[$file]}" + has_conflict="${FILE_CONFLICTS[$file]}" + + # Format output with status indicator + if [ "$has_conflict" = "true" ]; then + conflict_marker="⚠️ " + elif [ "$has_conflict" = "in_place" ]; then + conflict_marker="✓ " + else + conflict_marker=" " + fi + + printf "%s%-40s → %-40s\n" "$conflict_marker" "$file" "$target" + printf " %s\n" "$rationale" + + if [ "$has_conflict" = "true" ]; then + printf " WARNING: Target file already exists - will need conflict resolution!\n" + fi + echo "" +done +echo "" + +# Prompt to continue to Planning phase +prompt_phase_continue "Phase 3: Planning" \ + "This phase will generate a detailed migration plan showing all file movements. + You'll be able to review, modify, or save the plan before execution. + The plan will be saved to a manifest file for reference." 
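+# --- Illustrative sketch (not part of the migration flow) --------------------
+# Phase 3 below writes a JSON manifest via save_migration_plan. A small helper
+# like this one could be used to inspect that manifest afterwards; it assumes
+# jq is installed, it is never called by this script, and the function name is
+# purely hypothetical.
+preview_manifest_moves() {
+    local manifest="$1"
+    # Print "source -> target" for every entry that still needs to be moved.
+    jq -r '.migrations[] | select(.conflict != "in_place") | "\(.source) -> \(.target)"' "$manifest"
+}
+# ------------------------------------------------------------------------------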
+ +# Function to generate JSON migration plan +generate_migration_plan_json() { + echo "{" + echo " \"timestamp\": \"$(date -Iseconds)\"," + echo " \"total_files\": ${#DISCOVERED_FILES[@]}," + echo " \"conflict_count\": $conflict_count," + echo " \"in_place_count\": $in_place_count," + echo " \"migrations\": [" + + local first=true + for file in "${DISCOVERED_FILES[@]}"; do + if [ "$first" = true ]; then + first=false + else + echo "," + fi + + local file_type="${FILE_TYPES[$file]}" + local category="${FILE_CATEGORIES[$file]}" + local target="${FILE_TARGETS[$file]}" + local rationale="${FILE_RATIONALES[$file]}" + local has_conflict="${FILE_CONFLICTS[$file]}" + + # Escape double quotes in strings for JSON + file_escaped=$(echo "$file" | sed 's/"/\\"/g') + target_escaped=$(echo "$target" | sed 's/"/\\"/g') + rationale_escaped=$(echo "$rationale" | sed 's/"/\\"/g') + + echo -n " {" + echo -n "\"source\": \"$file_escaped\", " + echo -n "\"target\": \"$target_escaped\", " + echo -n "\"type\": \"$file_type\", " + echo -n "\"category\": \"$category\", " + echo -n "\"rationale\": \"$rationale_escaped\", " + echo -n "\"conflict\": \"$has_conflict\"" + echo -n "}" + done + + echo "" + echo " ]" + echo "}" +} + +# Function to display human-readable plan summary +display_plan_summary() { + echo "Migration Plan Summary" + echo "======================" + echo "" + echo "Total files discovered: ${#DISCOVERED_FILES[@]}" + echo "Files to migrate: $((${#DISCOVERED_FILES[@]} - in_place_count))" + echo "Files already in place: $in_place_count" + echo "Conflicts detected: $conflict_count" + echo "" + + if [ $conflict_count -gt 0 ]; then + echo "⚠️ CONFLICTS REQUIRING RESOLUTION:" + for file in "${DISCOVERED_FILES[@]}"; do + if [ "${FILE_CONFLICTS[$file]}" = "true" ]; then + printf " • %s → %s\n" "$file" "${FILE_TARGETS[$file]}" + printf " Resolution: Will prompt for action (skip/rename/overwrite)\n" + fi + done + echo "" + fi + + echo "MIGRATION ACTIONS:" + echo "" + + # Group by action type + local move_count=0 + local skip_count=0 + + for file in "${DISCOVERED_FILES[@]}"; do + if [ "${FILE_CONFLICTS[$file]}" = "in_place" ]; then + skip_count=$((skip_count + 1)) + else + move_count=$((move_count + 1)) + fi + done + + echo "Files to move: $move_count" + if [ $move_count -gt 0 ]; then + for file in "${DISCOVERED_FILES[@]}"; do + if [ "${FILE_CONFLICTS[$file]}" != "in_place" ]; then + local target="${FILE_TARGETS[$file]}" + local conflict_marker="" + if [ "${FILE_CONFLICTS[$file]}" = "true" ]; then + conflict_marker=" ⚠️" + fi + printf " %s → %s%s\n" "$file" "$target" "$conflict_marker" + fi + done + fi + echo "" + + echo "Files to skip (already in place): $skip_count" + if [ $skip_count -gt 0 ]; then + for file in "${DISCOVERED_FILES[@]}"; do + if [ "${FILE_CONFLICTS[$file]}" = "in_place" ]; then + printf " ✓ %s\n" "$file" + fi + done + fi + echo "" +} + +# Function to prompt for plan approval +prompt_plan_approval() { + if [ "$AUTO_APPROVE" = true ]; then + echo "Auto-approve enabled, proceeding with migration..." + return 0 + fi + + echo "" + echo "Review the migration plan above." + echo "" + echo "Options:" + echo " a) Approve and proceed" + echo " m) Modify plan (adjust target paths)" + echo " s) Save plan and exit (for review)" + echo " c) Cancel migration" + echo "" + read -p "Choose [a/m/s/c]: " choice + + case "$choice" in + a|A) + echo "Plan approved. Proceeding with migration..." + return 0 + ;; + m|M) + echo "Plan modification selected." + modify_plan_interactive + return $? 
+ ;; + s|S) + echo "Saving plan to manifest..." + generate_migration_plan_json > "$MIGRATION_MANIFEST" + echo "Plan saved to: $MIGRATION_MANIFEST" + echo "Review the plan and run the migration again when ready." + exit 0 + ;; + c|C) + echo "Migration cancelled." + exit 0 + ;; + *) + echo "Invalid choice. Please try again." + prompt_plan_approval + ;; + esac +} + +# Function to modify plan interactively +modify_plan_interactive() { + echo "" + echo "Plan Modification Mode" + echo "======================" + echo "" + echo "You can adjust target paths for individual files." + echo "Type 'done' when finished, or 'cancel' to abort." + echo "" + + while true; do + echo "Files available for modification:" + local index=1 + for file in "${DISCOVERED_FILES[@]}"; do + printf "%2d) %s → %s\n" "$index" "$file" "${FILE_TARGETS[$file]}" + index=$((index + 1)) + done + echo "" + echo "Type the number of the file to modify (or 'done'/'cancel'):" + read -p "> " input + + case "$input" in + done|DONE|d|D) + echo "Modifications complete." + return 0 + ;; + cancel|CANCEL|c|C) + echo "Modifications cancelled. Returning to plan review..." + prompt_plan_approval + return $? + ;; + ''|*[!0-9]*) + echo "Invalid input. Please enter a number, 'done', or 'cancel'." + continue + ;; + *) + if [ "$input" -ge 1 ] && [ "$input" -le ${#DISCOVERED_FILES[@]} ]; then + local file="${DISCOVERED_FILES[$((input - 1))]}" + local current_target="${FILE_TARGETS[$file]}" + + echo "" + echo "File: $file" + echo "Current target: $current_target" + echo "" + read -p "Enter new target path (or press Enter to keep current): " new_target + + if [ -n "$new_target" ]; then + FILE_TARGETS["$file"]="$new_target" + echo "Updated target to: $new_target" + + # Re-check conflict status with new target + local has_conflict=$(check_conflict "$file" "$new_target") + FILE_CONFLICTS["$file"]="$has_conflict" + + if [ "$has_conflict" = "true" ]; then + echo "⚠️ Warning: New target conflicts with existing file!" + fi + else + echo "Target unchanged." + fi + echo "" + else + echo "Invalid number. Please choose between 1 and ${#DISCOVERED_FILES[@]}." + fi + ;; + esac + done +} + +# Function to save migration plan +save_migration_plan() { + echo "Saving migration plan to manifest..." + + # Generate manifest filename with timestamp + local timestamp=$(date +%Y%m%d_%H%M%S) + MIGRATION_MANIFEST=".project-migrate-manifest-${timestamp}.json" + + generate_migration_plan_json > "$MIGRATION_MANIFEST" + + if [ -f "$MIGRATION_MANIFEST" ]; then + echo "✓ Migration plan saved to: $MIGRATION_MANIFEST" + echo "" + echo "Manifest contains:" + echo " - Timestamp: $(date -Iseconds)" + echo " - Total files: ${#DISCOVERED_FILES[@]}" + echo " - Source → target mappings" + echo " - Conflict information" + echo " - File types and categories" + echo "" + else + echo "⚠️ Error: Failed to save migration plan!" + return 1 + fi +} + +# Phase 3: Planning +echo "Phase 3: Planning" +echo "-----------------" +echo "Generating migration plan..." +echo "" + +# Display human-readable plan summary +display_plan_summary + +# Save plan to manifest file +save_migration_plan + +# Prompt for plan approval (unless dry-run) +if [ "$DRY_RUN" = true ]; then + echo "DRY RUN: Plan generated but no actions will be taken." + echo "Review the plan above and the manifest file: $MIGRATION_MANIFEST" + echo "" +else + prompt_plan_approval +fi + +echo "" + +# Prompt to continue to Backup phase +prompt_phase_continue "Phase 4: Backup" \ + "This phase will create a complete backup of your docs/ directory. 
+ A timestamped backup directory will be created with rollback scripts. + This ensures you can safely restore the original state if needed." + +# Function to create backup +create_backup() { + # Generate timestamped backup directory name + local timestamp=$(date +%Y%m%d-%H%M%S) + BACKUP_DIR=".synthesisflow-backup-${timestamp}" + + echo "Creating backup directory: $BACKUP_DIR" + + # Create backup directory + if ! mkdir -p "$BACKUP_DIR"; then + echo "⚠️ Error: Failed to create backup directory!" + return 1 + fi + + # Copy existing docs/ directory to backup location if it exists + if [ -d "docs" ]; then + echo "Backing up existing docs/ directory..." + if ! cp -r docs "$BACKUP_DIR/docs"; then + echo "⚠️ Error: Failed to backup docs/ directory!" + return 1 + fi + echo "✓ Backed up docs/ directory" + else + echo "ℹ️ No existing docs/ directory to backup" + fi + + # Store migration manifest in backup directory + if [ -f "$MIGRATION_MANIFEST" ]; then + echo "Storing migration manifest in backup..." + if ! cp "$MIGRATION_MANIFEST" "$BACKUP_DIR/migration-manifest.json"; then + echo "⚠️ Error: Failed to store migration manifest!" + return 1 + fi + echo "✓ Stored migration manifest" + fi + + # Create backup README with restoration instructions + echo "Creating backup README..." + cat > "$BACKUP_DIR/README.md" <<'EOF' +# SynthesisFlow Migration Backup + +This directory contains a backup of your project documentation before SynthesisFlow migration. + +## Backup Contents + +- `docs/` - Complete backup of your original docs/ directory (if it existed) +- `migration-manifest.json` - The migration plan that was executed +- `rollback.sh` - Script to restore the original state + +## Restoration Procedure + +If you need to rollback the migration and restore your original documentation: + +### Option 1: Use the Rollback Script + +Run the provided rollback script from your project root: + +```bash +bash BACKUP_DIR/rollback.sh +``` + +This will: +1. Create a safety backup of the current state (before rollback) +2. Restore the original docs/ directory from backup +3. Remove SynthesisFlow additions (docs/specs/, docs/changes/) if they're empty +4. Preserve any non-empty directories to prevent data loss +5. Clean up empty directories + +### Option 2: Manual Restoration + +If you prefer manual control: + +1. **Backup current state** (in case you want to keep some changes): + ```bash + mv docs docs-after-migration + ``` + +2. **Restore original docs/**: + ```bash + cp -r BACKUP_DIR/docs . + ``` + +3. **Clean up** (if desired): + ```bash + rm -rf docs-after-migration + ``` + +## Safety Notes + +- This backup is READ-ONLY - never modify files in this directory +- Keep this backup until you're confident the migration was successful +- The rollback script is safe to run - it won't delete this backup +- You can manually inspect files in this backup at any time + +## Backup Metadata + +- Created: TIMESTAMP +- Migration manifest: migration-manifest.json +- Original location: PROJECT_ROOT + +## Questions? + +Refer to the SynthesisFlow documentation or the project-migrate skill documentation. +EOF + + # Replace placeholders in README + sed -i "s|BACKUP_DIR|$BACKUP_DIR|g" "$BACKUP_DIR/README.md" + sed -i "s|TIMESTAMP|$(date -Iseconds)|g" "$BACKUP_DIR/README.md" + sed -i "s|PROJECT_ROOT|$(pwd)|g" "$BACKUP_DIR/README.md" + + echo "✓ Created backup README" + + # Generate rollback script + echo "Generating rollback script..." 
+ cat > "$BACKUP_DIR/rollback.sh" <<'ROLLBACK_SCRIPT' +#!/bin/bash +# SynthesisFlow Migration Rollback Script +# This script restores your project to its pre-migration state + +set -e + +echo "========================================" +echo " SynthesisFlow Migration Rollback" +echo "========================================" +echo "" +echo "⚠️ WARNING: This will restore your project to its pre-migration state." +echo " Any changes made after migration will be lost!" +echo "" + +# Get the backup directory (where this script is located) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +BACKUP_DIR="$(basename "$SCRIPT_DIR")" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +echo "Backup directory: $BACKUP_DIR" +echo "Project root: $PROJECT_ROOT" +echo "" + +read -p "Are you sure you want to rollback? [y/N]: " confirm +if [[ ! "$confirm" =~ ^[Yy]$ ]]; then + echo "Rollback cancelled." + exit 0 +fi + +echo "" +echo "Starting rollback..." +echo "" + +cd "$PROJECT_ROOT" + +# Step 1: Backup current state (safety measure) +echo "Step 1: Creating safety backup of current state..." + +SAFETY_BACKUP_DIR="${BACKUP_DIR}_current_state_$(date +%Y%m%d-%H%M%S)" + +if [ -d "docs" ]; then + echo " Backing up current docs/ to $SAFETY_BACKUP_DIR..." + mkdir -p "$SAFETY_BACKUP_DIR" + cp -r docs "$SAFETY_BACKUP_DIR/docs" + echo "✓ Current state backed up (just in case)" +else + echo "ℹ️ No current docs/ directory to backup" +fi + +echo "" + +# Step 2: Restore original docs/ directory +echo "Step 2: Restoring original docs/ directory from backup..." + +if [ -d "$BACKUP_DIR/docs" ]; then + # Remove current docs/ directory if it exists + if [ -d "docs" ]; then + echo " Removing current docs/ directory..." + rm -rf docs + fi + + echo " Copying backup docs/ to project root..." + cp -r "$BACKUP_DIR/docs" . + echo "✓ Original docs/ directory restored" +else + echo "ℹ️ No original docs/ directory to restore" + echo " This suggests the project had no docs/ before migration" +fi + +echo "" + +# Step 3: Remove SynthesisFlow additions (only if they're now empty or were created by migration) +echo "Step 3: Cleaning up SynthesisFlow-specific directories..." + +# Load the migration manifest to determine what was created +if [ -f "$BACKUP_DIR/migration-manifest.json" ]; then + echo " Using migration manifest to identify SynthesisFlow additions..." +fi + +# Check if docs/specs should be removed (empty or only contains migrated files) +if [ -d "docs/specs" ]; then + # Check if directory is empty + if [ -z "$(ls -A docs/specs 2>/dev/null)" ]; then + echo " Removing empty docs/specs/..." + rmdir docs/specs + else + # Directory has content - check if it existed in original backup + if [ -d "$BACKUP_DIR/docs/specs" ]; then + echo " ℹ️ docs/specs/ existed before migration, keeping it" + else + echo " ⚠️ docs/specs/ has content and wasn't in original backup" + echo " Keeping it to prevent data loss (manual review recommended)" + fi + fi +fi + +# Check if docs/changes should be removed (empty or only contains migrated files) +if [ -d "docs/changes" ]; then + # Check if directory is empty + if [ -z "$(ls -A docs/changes 2>/dev/null)" ]; then + echo " Removing empty docs/changes/..." 
+ rmdir docs/changes + else + # Directory has content - check if it existed in original backup + if [ -d "$BACKUP_DIR/docs/changes" ]; then + echo " ℹ️ docs/changes/ existed before migration, keeping it" + else + echo " ⚠️ docs/changes/ has content and wasn't in original backup" + echo " Keeping it to prevent data loss (manual review recommended)" + fi + fi +fi + +# Remove docs/ only if it's now completely empty +if [ -d "docs" ] && [ -z "$(ls -A docs 2>/dev/null)" ]; then + echo " Removing empty docs/ directory..." + rmdir docs +fi + +echo "✓ SynthesisFlow directory cleanup complete" +echo "" + +# Step 4: Clean up empty parent directories (but preserve structure) +echo "Step 4: Cleaning up empty directories..." + +# Only clean up directories that are truly empty (not .git or hidden) +find . -type d -empty -not -path "./.git/*" -not -path "*/\.*" -delete 2>/dev/null || true + +echo "✓ Cleanup complete" +echo "" + +echo "========================================" +echo " Rollback Complete!" +echo "========================================" +echo "" +echo "Your project has been restored to its pre-migration state." +echo "" +echo "Backup information:" +echo " • Original backup: $BACKUP_DIR (preserved)" +echo " • Safety backup of pre-rollback state: $SAFETY_BACKUP_DIR" +echo "" +echo "Next steps:" +echo " 1. Verify your documentation is restored correctly" +echo " 2. Review any warnings above about non-empty directories" +echo " 3. Delete backups when confident:" +echo " rm -rf $BACKUP_DIR" +echo " rm -rf $SAFETY_BACKUP_DIR" +echo "" +ROLLBACK_SCRIPT + + # Make rollback script executable + chmod +x "$BACKUP_DIR/rollback.sh" + echo "✓ Generated rollback script" + + echo "" + echo "✓ Backup complete!" + echo "" + echo "Backup location: $BACKUP_DIR" + echo " - Original docs/: $BACKUP_DIR/docs/" + echo " - Migration plan: $BACKUP_DIR/migration-manifest.json" + echo " - Restoration guide: $BACKUP_DIR/README.md" + echo " - Rollback script: $BACKUP_DIR/rollback.sh" + echo "" + echo "To rollback this migration: bash $BACKUP_DIR/rollback.sh" + echo "" + + return 0 +} + +# Phase 4: Backup +echo "Phase 4: Backup" +echo "---------------" +echo "Creating backup before migration..." +echo "" + +# Skip backup in dry-run mode +if [ "$DRY_RUN" = true ]; then + echo "DRY RUN: Backup would be created here" + echo " Backup directory name would be: .synthesisflow-backup-$(date +%Y%m%d-%H%M%S)" + echo " Would backup: docs/ directory (if exists)" + echo " Would include: migration manifest, README, rollback script" + echo "" +else + # Create backup + if ! create_backup; then + echo "⚠️ Error: Backup failed! Migration aborted." + exit 1 + fi +fi + +echo "" + +# Prompt to continue to Migration phase +prompt_phase_continue "Phase 5: Migration" \ + "This phase will execute the file migrations using git mv when possible. + Files will be moved to their target locations and links will be updated. + You'll be prompted to resolve any conflicts that occur." + +# Function to create SynthesisFlow directory structure +create_directory_structure() { + echo "Creating SynthesisFlow directory structure..." + + local dirs=("docs" "docs/specs" "docs/changes") + + for dir in "${dirs[@]}"; do + if [ ! -d "$dir" ]; then + if ! 
mkdir -p "$dir"; then + echo "⚠️ Error: Failed to create directory: $dir" + return 1 + fi + echo "✓ Created directory: $dir" + else + echo "ℹ️ Directory already exists: $dir" + fi + done + + echo "" + return 0 +} + +# Function to calculate relative path from one file to another +calculate_relative_path() { + local from_file="$1" + local to_file="$2" + + # Get directories containing the files + local from_dir=$(dirname "$from_file") + local to_dir=$(dirname "$to_file") + local to_base=$(basename "$to_file") + + # Normalize . to empty string for easier comparison + if [ "$from_dir" = "." ]; then from_dir=""; fi + if [ "$to_dir" = "." ]; then to_dir=""; fi + + # If both in same directory, just return the filename + if [ "$from_dir" = "$to_dir" ]; then + echo "$to_base" + return + fi + + # Convert paths to arrays for comparison + IFS='/' read -ra from_parts <<< "$from_dir" + IFS='/' read -ra to_parts <<< "$to_dir" + + # Find common prefix length + local common_length=0 + local max_length=${#from_parts[@]} + if [ ${#to_parts[@]} -lt $max_length ]; then + max_length=${#to_parts[@]} + fi + + for ((i=0; i&2 + echo " Skipping link correction for $(basename "$file")" >&2 + return 1 + fi + + # Check if Python 3 is available + if ! command -v python3 >/dev/null 2>&1; then + echo " ⚠️ Warning: Python 3 not available for LLM link correction" >&2 + echo " Skipping link correction for $(basename "$file")" >&2 + return 1 + fi + + # Create temporary file for corrected content + local temp_file=$(mktemp) + + if [ "$DRY_RUN" = true ]; then + echo " [DRY RUN] Would perform LLM-based link correction on $(basename "$file")" + # In dry run mode, just show what would be analyzed + echo " Total links to analyze: $(grep -o '\[[^]]*\]([^)]*)' "$file" | wc -l)" + else + # Apply LLM-based link correction + if cat "$file" | python3 "$llm_script" --file "$file" > "$temp_file" 2>/dev/null; then + # Check if any changes were made by comparing files + if ! cmp -s "$file" "$temp_file"; then + # File was modified, update it + mv "$temp_file" "$file" + echo " Applied LLM-based link corrections to $(basename "$file")" + else + # No changes made + rm "$temp_file" + echo " No link corrections needed in $(basename "$file")" + fi + else + # LLM processing failed + echo " ⚠️ Warning: LLM link correction failed for $(basename "$file")" >&2 + rm -f "$temp_file" + return 1 + fi + fi + + return 0 +} + +# Function to move a single file +migrate_file() { + local source="$1" + local target="$2" + local use_git_mv="${3:-true}" + + # Normalize paths + local normalized_source="${source#./}" + local normalized_target="${target#./}" + + # Check if source and target are the same + if [ "$normalized_source" = "$normalized_target" ]; then + echo " ✓ $source (already in place)" + return 0 + fi + + # Create target directory if it doesn't exist + local target_dir=$(dirname "$target") + if [ ! -d "$target_dir" ]; then + if ! 
mkdir -p "$target_dir"; then + echo " ⚠️ Error: Failed to create directory: $target_dir" + return 1 + fi + fi + + # Check if target already exists + if [ -f "$target" ]; then + echo " ⚠️ Conflict: Target already exists: $target" + + # In auto-approve mode, skip conflicts + if [ "$AUTO_APPROVE" = true ]; then + echo " Auto-approve mode: Skipping conflicted file" + return 0 + fi + + # Prompt for resolution + echo " Options: (s)kip, (r)ename source, (o)verwrite" + read -p " Choose [s/r/o]: " resolution + + case "$resolution" in + r|R) + # Rename by adding numeric suffix + local base="${target%.*}" + local ext="${target##*.}" + local counter=1 + local new_target="${base}-${counter}.${ext}" + + while [ -f "$new_target" ]; do + counter=$((counter + 1)) + new_target="${base}-${counter}.${ext}" + done + + echo " Renaming to: $new_target" + target="$new_target" + ;; + o|O) + echo " Overwriting target file..." + rm -f "$target" + ;; + s|S|*) + echo " Skipping file" + return 0 + ;; + esac + fi + + # Perform the move + if [ "$use_git_mv" = true ] && git rev-parse --is-inside-work-tree >/dev/null 2>&1; then + # Use git mv to preserve history + if git mv "$source" "$target" 2>/dev/null; then + echo " ✓ $source → $target (git mv)" + return 0 + else + # Fall back to regular mv if git mv fails + if mv "$source" "$target"; then + echo " ✓ $source → $target (mv)" + return 0 + else + echo " ⚠️ Error: Failed to move file: $source" + return 1 + fi + fi + else + # Use regular mv + if mv "$source" "$target"; then + echo " ✓ $source → $target (mv)" + return 0 + else + echo " ⚠️ Error: Failed to move file: $source" + return 1 + fi + fi +} + +# Function to execute migration +execute_migration() { + echo "Creating SynthesisFlow directory structure..." + if ! create_directory_structure; then + echo "⚠️ Error: Failed to create directory structure!" + return 1 + fi + echo "" + + echo "Migrating files..." + echo "" + + local success_count=0 + local skip_count=0 + local error_count=0 + local total_to_migrate=0 + + # Count files that need migration + for file in "${DISCOVERED_FILES[@]}"; do + local normalized_source="${file#./}" + local target="${FILE_TARGETS[$file]}" + local normalized_target="${target#./}" + + if [ "$normalized_source" != "$normalized_target" ]; then + total_to_migrate=$((total_to_migrate + 1)) + fi + done + + echo "Files to migrate: $total_to_migrate" + echo "" + + # Migrate each file + for file in "${DISCOVERED_FILES[@]}"; do + local target="${FILE_TARGETS[$file]}" + local conflict="${FILE_CONFLICTS[$file]}" + + # Skip files already in place + if [ "$conflict" = "in_place" ]; then + skip_count=$((skip_count + 1)) + continue + fi + + # Migrate the file + if migrate_file "$file" "$target" true; then + success_count=$((success_count + 1)) + + # Update links in the migrated file using LLM + llm_update_markdown_links "$target" "$file" "$target" + else + error_count=$((error_count + 1)) + fi + done + + echo "" + echo "Migration Results:" + echo " ✓ Migrated: $success_count file(s)" + echo " ⊘ Skipped: $skip_count file(s) (already in place)" + + if [ $error_count -gt 0 ]; then + echo " ⚠️ Errors: $error_count file(s)" + return 1 + fi + + echo "" + return 0 +} + +# Phase 5: Migration +echo "Phase 5: Migration" +echo "------------------" +echo "Executing file migrations..." 
+echo "" + +if [ "$DRY_RUN" = true ]; then + echo "DRY RUN: Migration would execute here" + echo "" + echo "Would create directories:" + echo " - docs/" + echo " - docs/specs/" + echo " - docs/changes/" + echo "" + echo "Would migrate $((${#DISCOVERED_FILES[@]} - in_place_count)) file(s):" + for file in "${DISCOVERED_FILES[@]}"; do + target="${FILE_TARGETS[$file]}" + conflict="${FILE_CONFLICTS[$file]}" + + if [ "$conflict" != "in_place" ]; then + conflict_marker="" + if [ "$conflict" = "true" ]; then + conflict_marker=" ⚠️" + fi + echo " $file → $target$conflict_marker" + fi + done + echo "" +else + # Execute migration + if ! execute_migration; then + echo "⚠️ Error: Migration failed!" + echo "" + echo "To rollback: bash $BACKUP_DIR/rollback.sh" + exit 1 + fi +fi + +echo "" + +# Prompt to continue to Frontmatter phase +prompt_phase_continue "Phase 6: Frontmatter Generation" \ + "This phase will add YAML frontmatter to files that don't have it. + You'll review each suggested frontmatter (title, type, metadata). + You can accept, edit, skip individual files, or batch-apply to all." + +# Function to generate frontmatter for a file using Gemini +generate_frontmatter() { + local file="$1" + echo "Generating frontmatter for $file with Gemini..." >&2 + + # Get git metadata if available + local git_metadata_prompt="" + if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then + local creation_date=$(git log --follow --format=%aI --reverse "$file" 2>/dev/null | head -n 1) + local author=$(git log --follow --format='%an' --reverse "$file" 2>/dev/null | head -n 1) + if [ -n "$creation_date" ] && [ -n "$author" ]; then + git_metadata_prompt="The file was created on '${creation_date}' by '${author}'. Use these for the 'created' and 'author' fields." + fi + fi + + local prompt_file=$(mktemp) + cat > "$prompt_file" </dev/null | grep -q '^---$'; then + return 0 # Has frontmatter + else + return 1 # No frontmatter + fi +} + +# Function to validate YAML syntax (basic check) +validate_yaml_frontmatter() { + local frontmatter="$1" + + # Check basic structure: starts and ends with --- + if ! echo "$frontmatter" | head -n 1 | grep -q '^---$'; then + return 1 + fi + + if ! 
echo "$frontmatter" | tail -n 1 | grep -q '^---$'; then + return 1 + fi + + # Check for key: value pattern in middle lines + local middle=$(echo "$frontmatter" | sed '1d;$d') + if [ -n "$middle" ]; then + # Each line should be "key: value" format + if echo "$middle" | grep -qv '^[a-zA-Z_][a-zA-Z0-9_]*: '; then + return 1 + fi + fi + + return 0 +} + +# Function to insert frontmatter at top of file +insert_frontmatter() { + local file="$1" + local frontmatter="$2" + + # Create temporary file + local temp_file=$(mktemp) + + # Write frontmatter + echo "$frontmatter" > "$temp_file" + echo "" >> "$temp_file" + + # Append original content + cat "$file" >> "$temp_file" + + # Replace original file + mv "$temp_file" "$file" + + return 0 +} + +# Function to display frontmatter suggestion and get user approval +review_frontmatter_suggestion() { + local file="$1" + local suggested_frontmatter="$2" + local batch_mode="${3:-false}" + + echo "" + echo "File: $file" + echo "" + echo "Suggested frontmatter:" + echo "$suggested_frontmatter" + echo "" + + if [ "$batch_mode" = "true" ]; then + # In batch mode, auto-apply + return 0 + fi + + echo "Options:" + echo " a) Accept and insert" + echo " e) Edit before inserting" + echo " s) Skip this file" + echo " b) Batch apply to all remaining files" + echo "" + read -p "Choose [a/e/s/b]: " choice + + case "$choice" in + a|A) + return 0 # Accept + ;; + e|E) + return 2 # Edit + ;; + s|S) + return 1 # Skip + ;; + b|B) + return 3 # Batch mode + ;; + *) + echo "Invalid choice. Skipping file..." + return 1 + ;; + esac +} + +# Function to edit frontmatter interactively +edit_frontmatter() { + local original="$1" + + # Create temp file with original + local temp_file=$(mktemp) + echo "$original" > "$temp_file" + + # Use editor (EDITOR env var or nano as fallback) + ${EDITOR:-nano} "$temp_file" + + # Read edited content + local edited=$(cat "$temp_file") + rm "$temp_file" + + echo "$edited" +} + +# Function to run frontmatter generation phase +generate_frontmatter_phase() { + local batch_mode=false + local files_processed=0 + local files_updated=0 + local files_skipped=0 + local files_already_compliant=0 + + echo "Scanning for files without frontmatter..." + echo "" + + # Collect all markdown files in docs/ + local -a files_to_process=() + + if [ -d "docs" ]; then + while IFS= read -r -d '' file; do + # Skip hidden files and directories + if [[ "$file" =~ /\. ]]; then + continue + fi + + # Check if file has frontmatter + if ! has_frontmatter "$file"; then + files_to_process+=("$file") + else + files_already_compliant=$((files_already_compliant + 1)) + fi + done < <(find docs -name "*.md" -type f -print0) + fi + + echo "Found ${#files_to_process[@]} file(s) without frontmatter" + echo "Found $files_already_compliant file(s) already compliant" + echo "" + + if [ ${#files_to_process[@]} -eq 0 ]; then + echo "✓ All files already have frontmatter!" + return 0 + fi + + # Process each file + for file in "${files_to_process[@]}"; do + files_processed=$((files_processed + 1)) + + # Generate suggested frontmatter + local suggested=$(generate_frontmatter "$file") + + # Validate YAML syntax + if ! validate_yaml_frontmatter "$suggested"; then + echo "⚠️ Warning: Generated frontmatter has invalid YAML syntax for: $file" + echo "Skipping this file..." + files_skipped=$((files_skipped + 1)) + continue + fi + + # Review suggestion + review_frontmatter_suggestion "$file" "$suggested" "$batch_mode" + local review_result=$? 
+ + case $review_result in + 0) + # Accept - insert frontmatter + if insert_frontmatter "$file" "$suggested"; then + echo "✓ Frontmatter inserted into: $file" + files_updated=$((files_updated + 1)) + else + echo "⚠️ Error: Failed to insert frontmatter into: $file" + files_skipped=$((files_skipped + 1)) + fi + ;; + 1) + # Skip + echo "Skipped: $file" + files_skipped=$((files_skipped + 1)) + ;; + 2) + # Edit + echo "Opening editor for frontmatter..." + local edited=$(edit_frontmatter "$suggested") + + # Validate edited frontmatter + if ! validate_yaml_frontmatter "$edited"; then + echo "⚠️ Error: Edited frontmatter has invalid YAML syntax. Skipping..." + files_skipped=$((files_skipped + 1)) + continue + fi + + # Insert edited frontmatter + if insert_frontmatter "$file" "$edited"; then + echo "✓ Custom frontmatter inserted into: $file" + files_updated=$((files_updated + 1)) + else + echo "⚠️ Error: Failed to insert frontmatter into: $file" + files_skipped=$((files_skipped + 1)) + fi + ;; + 3) + # Batch mode - apply to this and all remaining + echo "Batch mode enabled. Applying to all remaining files..." + batch_mode=true + + # Insert frontmatter for current file + if insert_frontmatter "$file" "$suggested"; then + echo "✓ Frontmatter inserted into: $file" + files_updated=$((files_updated + 1)) + else + echo "⚠️ Error: Failed to insert frontmatter into: $file" + files_skipped=$((files_skipped + 1)) + fi + ;; + esac + done + + echo "" + echo "Frontmatter Generation Results:" + echo " Files processed: $files_processed" + echo " Files updated: $files_updated" + echo " Files skipped: $files_skipped" + echo " Files already compliant: $files_already_compliant" + echo "" + + # Run doc-indexer scan to confirm compliance + if command -v bash >/dev/null 2>&1 && [ -f "skills/doc-indexer/scripts/scan-docs.sh" ]; then + echo "Running doc-indexer compliance check..." + echo "" + bash skills/doc-indexer/scripts/scan-docs.sh | grep -E "^\[WARNING\]|^---" | head -20 + echo "" + fi + + return 0 +} + +# Phase 6: Frontmatter Generation +echo "Phase 6: Frontmatter Generation" +echo "-------------------------------" +echo "Adding YAML frontmatter to files..." +echo "" + +if [ "$DRY_RUN" = true ]; then + echo "DRY RUN: Frontmatter generation would execute here" + echo "" + echo "Would scan docs/ for files without frontmatter" + echo "Would generate suggested frontmatter for each file:" + echo " - Extract title from first # heading or filename" + echo " - Detect file type (spec, proposal, design, adr, etc.)" + echo " - Extract git metadata (creation date, author)" + echo " - Generate YAML frontmatter" + echo " - Validate YAML syntax" + echo " - Prompt for review and approval" + echo "" +elif [ "$AUTO_APPROVE" = true ]; then + echo "Auto-approve mode: Skipping frontmatter generation (requires manual review)" + echo "Run without --auto-approve to interactively add frontmatter" + echo "" +else + # Execute frontmatter generation + generate_frontmatter_phase +fi + +echo "" + +# Prompt to continue to Validation phase +prompt_phase_continue "Phase 7: Validation" \ + "This phase will verify the migration was successful. + It checks directory structure, file locations, counts, and link integrity. + A comprehensive validation report will be generated." + +# Function to validate migration +validate_migration() { + echo "Running post-migration validation..." + echo "" + + local validation_errors=0 + local validation_warnings=0 + + # Validation 1: Check SynthesisFlow directory structure exists + echo "1. 
Checking SynthesisFlow directory structure..." + local required_dirs=("docs" "docs/specs" "docs/changes") + local missing_dirs=0 + + for dir in "${required_dirs[@]}"; do + if [ -d "$dir" ]; then + echo " ✓ $dir exists" + else + echo " ⚠️ $dir NOT FOUND" + missing_dirs=$((missing_dirs + 1)) + validation_errors=$((validation_errors + 1)) + fi + done + + if [ $missing_dirs -eq 0 ]; then + echo " ✓ All required directories exist" + else + echo " ⚠️ Missing $missing_dirs required director(ies)" + fi + echo "" + + # Validation 2: Check all source files are in target locations + echo "2. Verifying file migrations..." + local files_verified=0 + local files_missing=0 + local files_skipped=0 + + for file in "${DISCOVERED_FILES[@]}"; do + local target="${FILE_TARGETS[$file]}" + local normalized_source="${file#./}" + local normalized_target="${target#./}" + + # Check if file was supposed to stay in place + if [ "$normalized_source" = "$normalized_target" ]; then + if [ -f "$file" ]; then + files_verified=$((files_verified + 1)) + files_skipped=$((files_skipped + 1)) + else + echo " ⚠️ File missing: $file (should have been preserved in place)" + files_missing=$((files_missing + 1)) + validation_errors=$((validation_errors + 1)) + fi + else + # File should have been moved to target + if [ -f "$target" ]; then + files_verified=$((files_verified + 1)) + else + echo " ⚠️ Target file missing: $target (source: $file)" + files_missing=$((files_missing + 1)) + validation_errors=$((validation_errors + 1)) + fi + fi + done + + echo " ✓ Files verified in target locations: $files_verified" + if [ $files_skipped -gt 0 ]; then + echo " (includes $files_skipped file(s) that stayed in place)" + fi + + if [ $files_missing -gt 0 ]; then + echo " ⚠️ Missing files: $files_missing" + fi + echo "" + + # Validation 3: Compare file counts (discovered vs migrated) + echo "3. Comparing file counts..." + local discovered_count=${#DISCOVERED_FILES[@]} + local expected_migrated=$((discovered_count - in_place_count)) + + echo " Files discovered: $discovered_count" + echo " Files expected to migrate: $expected_migrated" + echo " Files that stayed in place: $in_place_count" + echo " Files verified: $files_verified" + + local count_mismatch=$((discovered_count - files_verified)) + if [ $count_mismatch -eq 0 ]; then + echo " ✓ All discovered files accounted for" + else + echo " ⚠️ File count mismatch: $count_mismatch file(s) unaccounted for" + validation_errors=$((validation_errors + 1)) + fi + echo "" + + # Validation 4: Validate link integrity in migrated files + echo "4. Validating link integrity..." + local total_links=0 + local broken_links=0 + local files_with_links=0 + + for file in "${DISCOVERED_FILES[@]}"; do + local target="${FILE_TARGETS[$file]}" + local file_to_check="" + + # Determine which file to check (target if moved, source if in place) + local normalized_source="${file#./}" + local normalized_target="${target#./}" + + if [ "$normalized_source" = "$normalized_target" ]; then + file_to_check="$file" + else + file_to_check="$target" + fi + + # Check if file exists before validating links + if [ ! 
-f "$file_to_check" ]; then + continue + fi + + # Parse markdown links in the file + local file_has_links=false + while IFS= read -r line; do + # Find markdown links: [text](path) and ![alt](path) + if echo "$line" | grep -qE '\[[^]]*\]\([^)]+\)'; then + local temp_line="$line" + local link_pattern='\[([^]]*)\]\(([^)]+)\)' + + while [[ "$temp_line" =~ $link_pattern ]]; do + local link_path="${BASH_REMATCH[2]}" + + # Skip absolute URLs and anchors + if [[ "$link_path" =~ ^https?:// ]] || [[ "$link_path" =~ ^mailto: ]] || [[ "$link_path" =~ ^# ]]; then + temp_line="${temp_line#*]($link_path)}" + continue + fi + + file_has_links=true + total_links=$((total_links + 1)) + + # Calculate absolute path of linked file + local file_dir=$(dirname "$file_to_check") + local linked_file_abs="" + + if [[ "$link_path" = /* ]]; then + # Absolute path from project root + linked_file_abs="$link_path" + else + # Relative path from file location + if [ "$file_dir" = "." ]; then + linked_file_abs="$link_path" + else + linked_file_abs="$file_dir/$link_path" + fi + fi + + # Normalize path + linked_file_abs=$(echo "$linked_file_abs" | sed 's|/\./|/|g' | sed 's|^\./||') + + # Check if linked file/directory exists + if [ ! -f "$linked_file_abs" ] && [ ! -d "$linked_file_abs" ]; then + echo " ⚠️ Broken link in $file_to_check: $link_path → $linked_file_abs" + broken_links=$((broken_links + 1)) + validation_warnings=$((validation_warnings + 1)) + fi + + # Remove this match to find next link + temp_line="${temp_line#*]($link_path)}" + done + fi + done < "$file_to_check" + + if [ "$file_has_links" = true ]; then + files_with_links=$((files_with_links + 1)) + fi + done + + echo " Files with links: $files_with_links" + echo " Total links found: $total_links" + + if [ $broken_links -eq 0 ]; then + echo " ✓ No broken links detected" + else + echo " ⚠️ Broken links found: $broken_links" + echo " Note: Some links may point to files outside the migration scope" + fi + echo "" + + # Validation 5: Generate validation report summary + echo "======================================== +Validation Report Summary +========================================" + echo "" + + if [ $validation_errors -eq 0 ] && [ $validation_warnings -eq 0 ]; then + echo "✅ VALIDATION PASSED" + echo "" + echo "All checks passed successfully!" 
+ echo " • SynthesisFlow directory structure exists" + echo " • All discovered files accounted for ($discovered_count)" + echo " • File counts match expectations" + echo " • No broken links detected" + elif [ $validation_errors -eq 0 ]; then + echo "⚠️ VALIDATION PASSED WITH WARNINGS" + echo "" + echo "Migration completed but with $validation_warnings warning(s):" + if [ $broken_links -gt 0 ]; then + echo " • $broken_links broken link(s) detected" + echo " Suggestion: Review and update broken links manually" + fi + else + echo "❌ VALIDATION FAILED" + echo "" + echo "Migration completed with $validation_errors error(s) and $validation_warnings warning(s):" + if [ $missing_dirs -gt 0 ]; then + echo " • $missing_dirs required director(ies) missing" + echo " Suggestion: Run migration again or create directories manually" + fi + if [ $files_missing -gt 0 ]; then + echo " • $files_missing file(s) missing from target locations" + echo " Suggestion: Check backup and restore missing files" + fi + if [ $count_mismatch -ne 0 ]; then + echo " • File count mismatch: $count_mismatch file(s) unaccounted for" + echo " Suggestion: Compare discovered files with migrated files" + fi + if [ $broken_links -gt 0 ]; then + echo " • $broken_links broken link(s) detected" + echo " Suggestion: Review and update broken links manually" + fi + fi + echo "" + + return $validation_errors +} + +# Phase 7: Validation +echo "Phase 7: Validation" +echo "-------------------" +echo "Verifying migration success..." +echo "" + +if [ "$DRY_RUN" = true ]; then + echo "DRY RUN: Validation would execute here" + echo "" + echo "Would verify:" + echo " 1. SynthesisFlow directory structure exists (docs/, docs/specs/, docs/changes/)" + echo " 2. All source files are in target locations" + echo " 3. File counts match (discovered: ${#DISCOVERED_FILES[@]}, to migrate: $((${#DISCOVERED_FILES[@]} - in_place_count)))" + echo " 4. Link integrity (no broken links)" + echo " 5. Generate comprehensive validation report" + echo "" +else + # Execute validation + if ! validate_migration; then + echo "⚠️ Validation detected errors. Please review the report above." + echo "" + if [ -n "$BACKUP_DIR" ]; then + echo "To rollback: bash $BACKUP_DIR/rollback.sh" + fi + echo "" + fi +fi + +echo "" + +echo "========================================" +echo " Migration Complete!" +echo "========================================" +echo "" +echo "Next steps:" +echo " 1. Review migrated files" +echo " 2. Run doc-indexer to catalog documentation" +echo " 3. Begin using SynthesisFlow workflow" +echo "" + +# Display backup information if backup was created +if [ -n "$BACKUP_DIR" ] && [ "$DRY_RUN" != true ]; then + echo "Backup location: $BACKUP_DIR" + echo "To rollback: bash $BACKUP_DIR/rollback.sh" +else + echo "Backup location: [dry-run mode - no backup created]" + echo "To rollback: [dry-run mode - no rollback script generated]" +fi +echo "" diff --git a/spec-authoring/.gitkeep b/spec-authoring/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/spec-authoring/SKILL.md b/spec-authoring/SKILL.md new file mode 100644 index 0000000..2fa9f50 --- /dev/null +++ b/spec-authoring/SKILL.md @@ -0,0 +1,125 @@ +--- +name: spec-authoring +description: Use this skill to propose changes via the Spec PR process. It uses the Gemini CLI to generate high-quality draft specifications and to analyze PR feedback, accelerating the spec-driven development workflow. Triggers include "create spec" or "propose change". 
+--- + +# Spec Authoring Skill + +## Purpose + +To manage the creation and refinement of feature specifications using a powerful, AI-assisted workflow. This skill leverages the **Gemini CLI** to accelerate the spec-driven development process by: +1. **Generating Drafts**: Automatically creates high-quality, multi-file draft proposals for new features. +2. **Analyzing Feedback**: Synthesizes review comments from Pull Requests into an actionable summary of recommended changes. + +This approach allows developers and product managers to move from idea to an approved, implementation-ready specification with greater speed and clarity. + +## When to Use + +Use this skill in the following situations: + +- Proposing a new feature or significant change. +- Generating a first draft of a specification for review. +- Processing and incorporating feedback from a Spec PR. + +## Prerequisites + +- Project initialized with SynthesisFlow structure (`docs/specs/` and `docs/changes/` directories exist). +- GitHub repository set up. +- `gh` CLI tool installed and authenticated. +- `gemini` CLI tool installed and authenticated. + +## Spec PR Philosophy + +**Specs as Code**: All specification changes follow the same rigor as code changes—proposed via branches, reviewed via PRs, and merged upon approval. This skill supercharges that philosophy with AI. + +--- + +## The `propose` Command + +### Purpose + +Generate a comprehensive, multi-file draft proposal for a new feature from a single command. + +### Workflow + +#### Step 1: Define the Proposal Name + +Choose a clear, descriptive name for your feature, such as "User Authentication System" or "Real-time Notifications". + +#### Step 2: Run the Helper Script + +Execute the script to generate the draft proposal: +```bash +bash scripts/spec-authoring.sh propose "Feature Name" +``` + +The script will: +1. Create a new directory in `docs/changes/feature-name/`. +2. Make three parallel calls to the **Gemini CLI** to generate drafts for `proposal.md`, `spec-delta.md`, and `tasks.md`. + - **`proposal.md`**: A high-level overview with problem statement, proposed solution, and success criteria. + - **`spec-delta.md`**: A detailed technical specification with requirements and design decisions. + - **`tasks.md`**: A preliminary breakdown of implementation tasks. +3. Save the AI-generated content into these files. + +#### Step 3: Review and Refine the Drafts + +The script provides you with a complete, context-aware first draft of your entire proposal. Your next step is to review and refine these documents to ensure they align with your vision before opening a Spec PR. + +--- + +## The `update` Command + +### Purpose + +Intelligently process feedback from a Spec PR by using AI to analyze review comments and generate a summarized action plan. + +### Workflow + +#### Step 1: Identify the PR Number + +Determine which Spec PR you need to update. + +#### Step 2: Run the Feedback Analysis Script + +Execute the script with the PR number: +```bash +bash scripts/spec-authoring.sh update PR_NUMBER +``` + +This command will: +1. Find the local files associated with the PR's branch. +2. Fetch all review comments from the PR. +3. Send the full content of your spec files and all the comments to the **Gemini CLI**. +4. Ask the AI to act as a reviewer and provide a summarized list of recommended changes for each file. + +#### Step 3: Address the Synthesized Feedback + +The script will output a clear, actionable plan that synthesizes all the reviewer feedback. 
Use this analysis to efficiently update your proposal files, address the comments, and push your changes for re-review. + +--- + +## Error Handling + +### Gemini CLI Issues + +**Symptom**: The script fails during the `propose` or `update` commands with an error related to the `gemini` command. +**Solution**: +- Ensure the `gemini` CLI is installed and in your system's PATH. +- Verify you are authenticated (`gemini auth`). +- Check for Gemini API outages or network issues. + +### Proposal Directory Already Exists + +**Symptom**: The `propose` command reports that the directory already exists. +**Solution**: Choose a different name for your proposal or work with the existing one. + +### Could Not Find Proposal Directory + +**Symptom**: The `update` command cannot find the local files for the PR. +**Solution**: Ensure you have the correct PR branch checked out and that the local directory in `docs/changes/` matches the branch name. + +## Notes + +- **AI-Assisted Workflow**: This skill is designed to be a powerful assistant. It generates high-quality drafts and analyzes feedback, but the final strategic decisions and refinements are yours to make. +- **Speed and Quality**: By automating the initial drafting and feedback synthesis, this skill allows you to focus on the high-value work of design, review, and alignment. +- **Iterative Process**: Use the `propose` command to start, and the `update` command to iterate based on team feedback, creating a rapid and efficient spec development cycle. \ No newline at end of file diff --git a/spec-authoring/scripts/spec-authoring.sh b/spec-authoring/scripts/spec-authoring.sh new file mode 100755 index 0000000..70242f8 --- /dev/null +++ b/spec-authoring/scripts/spec-authoring.sh @@ -0,0 +1,180 @@ +#!/bin/bash +# This script manages the authoring of specification proposals. + +set -e + +# --- COMMANDS --- + +function propose() { + local proposal_name=$1 + if [ -z "$proposal_name" ]; then + echo "Error: Proposal name not provided for 'propose' command." >&2 + echo "Usage: $0 propose " >&2 + exit 1 + fi + + local dir_name=$(echo "$proposal_name" | tr '[:upper:]' '[:lower:]' | tr ' ' '-') + local proposal_dir="docs/changes/$dir_name" + + echo "Generating draft specification for: $proposal_name" + + if [ -d "$proposal_dir" ]; then + echo "Error: Proposal directory '$proposal_dir' already exists." >&2 + exit 1 + fi + + mkdir -p "$proposal_dir" + + echo "Generating draft files with Gemini (chained calls for better coherence)..." + + # Step 1: Generate proposal.md and capture its content + echo "Step 1/3: Generating proposal.md..." + gemini -p "Generate a high-level project proposal in markdown for a feature called '${proposal_name}'. Include sections for Problem Statement, Proposed Solution, Benefits, and Success Criteria." > "$proposal_dir/proposal.md" + local proposal_content=$(cat "$proposal_dir/proposal.md") + + # Step 2: Generate spec-delta.md using proposal.md as context + echo "Step 2/3: Generating spec-delta.md (using proposal as context)..." + gemini -p "Generate a detailed technical specification delta in markdown for a feature called '${proposal_name}'. 
+ +Use the following proposal as context to ensure alignment and coherence: + +--- +${proposal_content} +--- + +Based on the proposal above, create a specification delta that includes sections for: +- Overview (aligned with the proposal's problem statement and solution) +- Detailed Requirements (elaborating on the proposed solution) +- Key Design Decisions (technical choices to implement the solution) +- Potential Migration Path (if applicable) + +Ensure the spec-delta directly supports and elaborates on the proposal's goals." > "$proposal_dir/spec-delta.md" + local spec_delta_content=$(cat "$proposal_dir/spec-delta.md") + + # Step 3: Generate tasks.yml using both proposal.md and spec-delta.md as context + echo "Step 3/3: Generating tasks.yml (using proposal and spec-delta as context)..." + gemini -p "Generate a preliminary task breakdown in YAML format for implementing a feature called '${proposal_name}'. + +Use the following proposal and specification delta as context: + +**Proposal:** +--- +${proposal_content} +--- + +**Specification Delta:** +--- +${spec_delta_content} +--- + +Based on the proposal and spec-delta above, generate a task breakdown that follows this exact YAML structure: + +epic: \"Feature: ${proposal_name}\" +tasks: + - title: \"Task: Backend API Implementation\" + description: \"Implement the core backend API endpoints and business logic for the ${proposal_name} feature.\" + labels: + type: \"feature\" + component: \"backend\" + priority: \"P0\" + - title: \"Task: Frontend UI Development\" + description: \"Create the user interface components and pages for the ${proposal_name} feature.\" + labels: + type: \"feature\" + component: \"frontend\" + priority: \"P1\" + - title: \"Task: Database Schema\" + description: \"Design and implement the database schema changes required for ${proposal_name}.\" + labels: + type: \"refactor\" + component: \"database\" + priority: \"P1\" + - title: \"Task: Testing\" + description: \"Write comprehensive unit and integration tests for the ${proposal_name} feature.\" + labels: + type: \"test\" + component: \"testing\" + priority: \"P2\" + +Generate additional relevant tasks following the same structure, based on the specific requirements in the proposal and spec-delta. Each task must have title, description, and labels with type and component. The type should be one of: feature, enhancement, refactor, bug, chore, docs, test. The component should indicate which part of the system this task belongs to. + +Ensure the tasks directly implement the requirements specified in the spec-delta and align with the proposal's goals." > "$proposal_dir/tasks.yml" + + echo "Successfully generated draft proposal in $proposal_dir" + echo "Next step: Review and refine the generated markdown files, then open a Spec PR." +} + +function update() { + local pr_number=$1 + if [ -z "$pr_number" ]; then + echo "Error: Pull request number not provided for 'update' command." >&2 + echo "Usage: $0 update " >&2 + exit 1 + fi + + echo "Fetching PR details and synthesizing review feedback for PR #$pr_number..." + + # Get the branch name from the PR + local branch_name=$(gh pr view "$pr_number" --json headRefName -q '.headRefName') + if [ -z "$branch_name" ]; then + echo "Error: Could not determine branch name for PR #$pr_number." >&2 + exit 1 + fi + + # The directory name is derived from the branch name (e.g., spec/feature-name -> feature-name) + local dir_name=$(echo "$branch_name" | sed 's/spec\///') + local proposal_dir="docs/changes/$dir_name" + + if [ ! 
-d "$proposal_dir" ]; then + echo "Error: Could not find proposal directory '$proposal_dir' associated with branch '$branch_name'." >&2 + echo "Please ensure you have checked out the correct branch and the proposal exists." >&2 + exit 1 + fi + + local proposal_file="$proposal_dir/proposal.md" + local spec_delta_file="$proposal_dir/spec-delta.md" + local tasks_file="$proposal_dir/tasks.yml" + + # Fetch all comments + local all_comments=$(gh pr view "$pr_number" --comments) + + local gemini_prompt="Here are three specification documents and a list of review comments from a Pull Request. Please analyze the feedback and suggest how to update the documents. For each document, provide a concise summary of the suggested changes. + +**Original Proposal:** +@${proposal_file} + +**Original Spec Delta:** +@${spec_delta_file} + +**Original Tasks:** +@${tasks_file} + +**Review Comments:** +${all_comments} + +Based on the feedback, provide a summary of recommended changes for each file. Structure your output with headings for each file (e.g., '### Recommended Changes for proposal.md')." + + echo "------------------------- GEMINI FEEDBACK ANALYSIS -------------------------" + gemini -p "$gemini_prompt" + echo "----------------------------------------------------------------------------" + echo "Use the analysis above to update the files in '$proposal_dir'." +} + +# --- MAIN --- + +COMMAND=$1 +shift + +case "$COMMAND" in + propose) + propose "$@" + ;; + update) + update "$@" + ;; + *) + echo "Error: Unknown command '$COMMAND'" >&2 + echo "Usage: $0 {propose|update} ..." >&2 + exit 1 + ;; +esac \ No newline at end of file diff --git a/sprint-planner/.gitkeep b/sprint-planner/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/sprint-planner/SKILL.md b/sprint-planner/SKILL.md new file mode 100644 index 0000000..72cba78 --- /dev/null +++ b/sprint-planner/SKILL.md @@ -0,0 +1,104 @@ +--- +name: sprint-planner +description: Use this skill to plan a new sprint. It uses the Gemini CLI to intelligently decompose approved specs into atomic GitHub issues for the development team. Triggers include "plan sprint", "create sprint", or "start new sprint". +--- + +# Sprint Planner Skill + +## Purpose + +To plan and initialize a new sprint by intelligently decomposing approved specifications into a comprehensive set of atomic GitHub issues. This skill bridges the gap between high-level specs and executable work items by using the **Gemini CLI** to analyze the spec's content and generate a thoughtful task breakdown. It then automates the creation of these tasks as GitHub issues within a new sprint milestone. + +## When to Use + +Use this skill in the following situations: + +- Starting a new sprint or development cycle. +- Converting an approved spec into actionable GitHub issues. +- When you want an AI-assisted breakdown of an epic into atomic implementation tasks. + +## Prerequisites + +- Project board configured with an "Approved Backlog" status column. +- Approved spec files exist in the `docs/specs/` directory. +- An Epic issue exists on GitHub that links to the spec file in its body. +- `gh` CLI tool installed and authenticated. +- `jq` tool installed for JSON parsing. +- `gemini` CLI tool installed and authenticated. + +## Workflow + +### Step 1: Review Project Board + +Check the project board for approved specs (represented as Epics) ready to be planned. + +### Step 2: Discuss Sprint Scope with User + +Engage the user to determine which epic(s) from the "Approved Backlog" to include in the sprint. 
+ +### Step 3: Define Sprint Metadata + +Work with the user to establish the sprint name (e.g., "Sprint 4"). + +### Step 4: Run the Helper Script + +Execute the sprint planning script to automate GitHub issue creation: + +```bash +bash scripts/create-sprint-issues.sh +``` + +### Step 5: Understand What the Script Does + +The helper script automates these steps: + +1. **Queries Project Board**: Fetches all items from the "Approved Backlog" and prompts you to select an Epic to plan. +2. **Extracts Spec File**: Parses the selected Epic's body to find the associated spec file path. +3. **Creates Milestone**: Prompts you for a sprint name and creates the corresponding GitHub milestone. +4. **Decomposes Spec with AI**: Instead of relying on a rigid format, the script sends the full content of the spec file and the parent Epic to the **Gemini CLI**. It asks the AI to generate a list of atomic, actionable tasks based on its understanding of the document. +5. **Creates GitHub Issues**: The script parses the structured task list from Gemini's response and creates a GitHub issue for each task. Each issue is automatically titled, assigned to the new milestone, and includes a description and references to the parent Epic and spec file. + +### Step 6: Verify Issue Creation + +After the script completes, review the newly created issues in your milestone. + +```bash +gh issue list --milestone "Your Sprint Name" +``` + +### Step 7: Review Created Issues with User + +Walk through the AI-generated issues with your team. The generated tasks provide a strong baseline, but you should review them to confirm completeness, adjust priorities, and make any necessary refinements. + +## Error Handling + +### jq or Gemini Not Installed + +**Symptom**: Script reports that `jq` or `gemini` command is not found. +**Solution**: Install the required tool and ensure it's in your system's PATH. + +### No Approved Epics Found + +**Symptom**: Script reports no epics in the approved backlog. +**Solution**: Ensure your Epics are in the correct status column on your project board. + +### Epic Body Missing Spec Reference + +**Symptom**: Script cannot find a spec file path in the Epic's body. +**Solution**: Edit the Epic's issue body on GitHub to include a valid path to a spec file (e.g., `docs/specs/my-feature.md`). + +### Gemini CLI Issues + +**Symptom**: The script fails during the task decomposition step with an error from the `gemini` command. +**Solution**: +- Ensure the `gemini` CLI is installed and authenticated (`gemini auth`). +- Check for API outages or network issues. +- The quality of the task breakdown depends on a functional Gemini CLI. + +## Notes + +- **Intelligent Decomposition**: The skill no longer relies on a rigid task format in spec files. Gemini reads and understands the document to create tasks. +- **LLM guides strategy, script executes**: You decide which spec to plan; the script uses AI to handle the tedious decomposition and issue creation. +- **One epic per run**: Run the script once for each Epic you want to plan for the sprint. +- **Traceability is built-in**: Each created task issue automatically references the parent Epic and the source spec file. +- **Manual refinement is expected**: The AI-generated task list is a starting point. Review and adjust it with your team. 
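+
+A quick way to double-check the result after planning (the milestone name and issue number below are illustrative):
+
+```bash
+# Confirm every task issue landed in the sprint milestone with its labels
+gh issue list --milestone "Sprint 4" --json number,title,labels \
+  --jq '.[] | "#\(.number) \(.title) [" + ([.labels[].name] | join(",")) + "]"'
+
+# Spot-check that a task issue references its parent Epic in the body
+gh issue view 123 --json body --jq .body | grep "Parent Epic"
+```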
\ No newline at end of file diff --git a/sprint-planner/scripts/create-sprint-issues.sh b/sprint-planner/scripts/create-sprint-issues.sh new file mode 100755 index 0000000..2b95c02 --- /dev/null +++ b/sprint-planner/scripts/create-sprint-issues.sh @@ -0,0 +1,188 @@ +#!/bin/bash +# This script plans a new sprint by creating a milestone and generating task issues from a tasks.yml file. + +set -e + +# --- USAGE --- +usage() { + echo "Usage: $0 [TASKS_FILE]" + echo " TASKS_FILE: Path to the tasks.yml file (default: docs/changes/sprint-7-framework-improvements/tasks.yml)" + exit 1 +} + +# Parse command line arguments +if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + usage +fi + +TASKS_FILE="${1:-docs/changes/sprint-7-framework-improvements/tasks.yml}" + +# --- CONFIGURATION --- +# Detect the target repository (owner/name) from the current git remote via the gh CLI +REPO=$(gh repo view --json nameWithOwner -q .nameWithOwner) + +# --- VALIDATION --- +if ! command -v gh &> /dev/null; then + echo "Error: gh CLI is not installed. Please install it to continue." >&2 + exit 1 +fi +if ! command -v yq &> /dev/null; then + echo "Error: yq is not installed. Please install it to continue." >&2 + exit 1 +fi +if ! command -v jq &> /dev/null; then + echo "Error: jq is not installed. Please install it to continue." >&2 + exit 1 +fi +if [ ! -f "$TASKS_FILE" ]; then + echo "Error: Tasks file not found at $TASKS_FILE" >&2 + exit 1 +fi + +# Validate YAML structure +echo "Validating tasks.yml structure..." +if ! yq '.' "$TASKS_FILE" > /dev/null 2>&1; then + echo "Error: Invalid YAML syntax in $TASKS_FILE" >&2 + exit 1 +fi + +# Check for required root-level 'epic' field +if [ "$(yq '.epic' "$TASKS_FILE")" = "null" ]; then + echo "Error: Missing required 'epic' field in $TASKS_FILE" >&2 + exit 1 +fi + +# Check for required root-level 'tasks' array +if [ "$(yq '.tasks' "$TASKS_FILE")" = "null" ]; then + echo "Error: Missing required 'tasks' array in $TASKS_FILE" >&2 + exit 1 +fi + +TASK_COUNT=$(yq '.tasks | length' "$TASKS_FILE") +if [ "$TASK_COUNT" -eq 0 ]; then + echo "Error: No tasks found in $TASKS_FILE" >&2 + exit 1 +fi + +echo "✓ Validated YAML structure with $TASK_COUNT tasks" + +# --- SCRIPT LOGIC --- + +echo "Planning new sprint from $TASKS_FILE..." + +# 1. Get Epic title from the tasks file +EPIC_TITLE=$(yq -r '.epic' "$TASKS_FILE") +echo "Found Epic: $EPIC_TITLE" + +# 2. Create or verify the Sprint milestone +read -p "Enter the name for the new sprint milestone (e.g., 'Sprint 1: Framework Improvements'): " SPRINT_NAME +if [ -z "$SPRINT_NAME" ]; then + echo "Error: Milestone name cannot be empty." >&2 + exit 1 +fi + +echo "Checking for milestone: $SPRINT_NAME" +EXISTING_MILESTONE=$(gh api "/repos/$REPO/milestones" | jq -r --arg name "$SPRINT_NAME" '.[] | select(.title == $name) | .title') + +if [ "$EXISTING_MILESTONE" == "$SPRINT_NAME" ]; then + echo "Milestone '$SPRINT_NAME' already exists. Using existing milestone." +else + echo "Creating new milestone: $SPRINT_NAME" + gh api --method POST -H "Accept: application/vnd.github.v3+json" "/repos/$REPO/milestones" -f title="$SPRINT_NAME" + echo "Milestone '$SPRINT_NAME' created." +fi + +# 3. Create an Epic Issue for the entire sprint +echo "Creating parent Epic issue..." +EPIC_BODY="This Epic tracks all work for the '$SPRINT_NAME' sprint. All tasks below are part of this epic." +EPIC_ISSUE_URL=$(gh issue create --title "$EPIC_TITLE" --body "$EPIC_BODY" --milestone "$SPRINT_NAME") +EPIC_ISSUE_NUMBER=$(echo "$EPIC_ISSUE_URL" | awk -F'/' '{print $NF}') +echo "Parent Epic issue #$EPIC_ISSUE_NUMBER created." + +# 4. Ensure all labels exist +echo "Ensuring all necessary labels exist..." 
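+# Collect the unique set of label values referenced by the tasks (type, component,
+# and optional priority) so each can be created once up front; the `|| true` on
+# `gh label create` below tolerates labels that already exist in the repository.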
+ALL_LABELS=$(yq -r '.tasks[].labels | .type + "\n" + .component + "\n" + (.priority // "")' "$TASKS_FILE" | sort -u | grep -v '^$') + +while IFS= read -r label; do + if [ -n "$label" ]; then + echo " - Ensuring label '$label' exists..." + # Assign a color based on the label type for better visual organization + color="D4C5F9" # default purple + if [[ "$label" == P* ]]; then color="B60205"; fi # red for priority + if [[ "$label" == "feature" ]]; then color="0E8A16"; fi # green + if [[ "$label" == "enhancement" ]]; then color="5319E7"; fi # purple + if [[ "$label" == "bug" ]]; then color="B60205"; fi # red + if [[ "$label" == "docs" ]]; then color="0075CA"; fi # blue + if [[ "$label" == "refactor" || "$label" == "chore" ]]; then color="FBCA04"; fi # yellow + + gh label create "$label" --color "$color" --description "Auto-created for sprint planning" || true + fi +done <<< "$ALL_LABELS" +echo "Label setup complete." + +# 5. Load context from RETROSPECTIVE.md to inform better task creation +echo "Loading context from RETROSPECTIVE.md..." +RETROSPECTIVE_FILE="RETROSPECTIVE.md" +if [ -f "$RETROSPECTIVE_FILE" ]; then + echo "✓ Found RETROSPECTIVE.md with past learnings" + # Count recent learnings to inform user about context + RECENT_LEARNINGS=$(grep -c "^### #[0-9]" "$RETROSPECTIVE_FILE" 2>/dev/null || true) + RECENT_LEARNINGS=${RECENT_LEARNINGS:-0} + echo " - Contains $RECENT_LEARNINGS completed issues with learnings" +else + echo "⚠ RETROSPECTIVE.md not found - no historical context available" +fi + +# 6. Parse the tasks.yml file and create an issue for each task +echo "Creating issues for all tasks..." + +# Use yq to output each task's fields separated by a pipe (field values must not themselves contain '|') +yq -r '.tasks[] | .title +"|" + .description +"|" + .labels.type +"|" + .labels.component +"|" + (.labels.priority // "")' "$TASKS_FILE" | while IFS='|' read -r title description type component priority; + +do + + echo "---" + echo "Processing task: $title" + + # Validate required fields + if [ -z "$title" ]; then + echo "Error: Task missing required 'title' field. Skipping." >&2 + continue + fi + + if [ -z "$description" ]; then + echo "Error: Task '$title' missing required 'description' field. Skipping." >&2 + continue + fi + + if [ -z "$type" ]; then + echo "Error: Task '$title' missing required 'labels.type' field. Skipping." >&2 + continue + fi + + if [ -z "$component" ]; then + echo "Error: Task '$title' missing required 'labels.component' field. Skipping." >&2 + continue + fi + + # Construct the issue body + BODY=$(printf "%s\n\n**Parent Epic:** #%s" "$description" "$EPIC_ISSUE_NUMBER") + + # Construct the labels string (priority is optional) + if [ -n "$priority" ]; then + LABELS="$type,$component,$priority" + else + LABELS="$type,$component" + fi + + # --- DEBUGGING --- + echo " - Title: $title" + echo " - Body: $BODY" + echo " - Milestone: $SPRINT_NAME" + echo " - Labels: $LABELS" + # --- END DEBUGGING --- + + # Create the GitHub issue + gh issue create --title "$title" --body "$BODY" --milestone "$SPRINT_NAME" --label "$LABELS" + +done + +echo "-------------------------------------------------" +echo "Sprint planning complete!" +echo "All tasks from $TASKS_FILE have been created as GitHub issues in the '$SPRINT_NAME' milestone." +echo "View the milestone here: https://github.com/$REPO/milestones"
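+
+# Example usage (paths and names below are illustrative):
+#   bash sprint-planner/scripts/create-sprint-issues.sh docs/changes/my-feature/tasks.yml
+#   gh issue list --milestone "Sprint 1: Framework Improvements"
+#
+# The tasks file is expected to provide:
+#   epic: "Feature: ..."                       -> used as the parent Epic issue title
+#   tasks[]: title, description, labels.type,  -> one GitHub issue per entry
+#            labels.component, labels.priority (optional)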