From e89a943b7189bb570a0fd41806a32fb18c431102 Mon Sep 17 00:00:00 2001 From: Zhongwei Li Date: Sat, 29 Nov 2025 18:00:44 +0800 Subject: [PATCH] Initial commit --- .claude-plugin/plugin.json | 22 + README.md | 3 + commands/plan.md | 143 + commands/suggest-skills.md | 176 ++ hooks/hooks.json | 7 + mcp-config.json | 4 + plugin.lock.json | 105 + skills/draft-github-issues/SKILL.md | 176 ++ .../references/YAML-FORMAT.md | 211 ++ .../scripts/create_issues.sh | 91 + skills/prompt-architecting/SKILL.md | 465 ++++ .../references/ADVANCED-ANTI-PATTERNS.md | 267 ++ .../references/ADVANCED-EXAMPLES.md | 188 ++ .../references/ANTI-PATTERNS.md | 194 ++ .../references/EXAMPLES.md | 219 ++ .../references/OPTIMIZATION-SAFETY-GUIDE.md | 2325 +++++++++++++++++ .../references/STRATEGIES.md | 249 ++ skills/publish-github-issues/SKILL.md | 114 + skills/saas-pricing-strategy/SKILL.md | 313 +++ 19 files changed, 5272 insertions(+) create mode 100644 .claude-plugin/plugin.json create mode 100644 README.md create mode 100644 commands/plan.md create mode 100644 commands/suggest-skills.md create mode 100644 hooks/hooks.json create mode 100644 mcp-config.json create mode 100644 plugin.lock.json create mode 100644 skills/draft-github-issues/SKILL.md create mode 100644 skills/draft-github-issues/references/YAML-FORMAT.md create mode 100755 skills/draft-github-issues/scripts/create_issues.sh create mode 100644 skills/prompt-architecting/SKILL.md create mode 100644 skills/prompt-architecting/references/ADVANCED-ANTI-PATTERNS.md create mode 100644 skills/prompt-architecting/references/ADVANCED-EXAMPLES.md create mode 100644 skills/prompt-architecting/references/ANTI-PATTERNS.md create mode 100644 skills/prompt-architecting/references/EXAMPLES.md create mode 100644 skills/prompt-architecting/references/OPTIMIZATION-SAFETY-GUIDE.md create mode 100644 skills/prompt-architecting/references/STRATEGIES.md create mode 100644 skills/publish-github-issues/SKILL.md create mode 100644 skills/saas-pricing-strategy/SKILL.md diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..b311f75 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,22 @@ +{ + "name": "hustle", + "description": "Claude Code plugin for hustle workflows", + "version": "0.1.0", + "author": { + "name": "Brandon Casci", + "email": "brandon.casci@gmail.com", + "url": "https://github.com/bcasci" + }, + "skills": [ + "./skills/" + ], + "commands": [ + "./commands/" + ], + "hooks": [ + "./hooks/hooks.json" + ], + "mcp": [ + "./mcp-config.json" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..084297e --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# hustle + +Claude Code plugin for hustle workflows diff --git a/commands/plan.md b/commands/plan.md new file mode 100644 index 0000000..28df3fd --- /dev/null +++ b/commands/plan.md @@ -0,0 +1,143 @@ +--- +name: plan +description: + Create a strategic plan document that can be consumed by other AI workflows and + implementation commands +--- + +Create a comprehensive strategic plan for the requested feature or task. + +## Instructions: + +1. **Analyze the project structure** to understand context: + + - Check for CLAUDE.md files (root and subdirectories) + - Review existing patterns in the codebase + - Understand the project's architecture + +2. 
**Generate a plan document** using "think" for deeper analysis with this structure:

```markdown
# Plan: [Descriptive Name]

## Problem Statement

[1-2 clear sentences describing what problem this solves or opportunity it addresses]

## Acceptance Criteria

- [ ] Specific, measurable outcome 1
- [ ] Specific, measurable outcome 2
- [ ] User-facing capability or improvement
- [ ] Technical requirement met

## Scope

**Will modify:** [List specific files/modules to be changed]
**Will NOT modify:** [List files/modules that should remain untouched]
**Out of scope:** [Features/changes explicitly excluded from this implementation]

## Implementation Mapping

**MANDATORY - Every criterion must map to tests and implementation files:**

| Criterion | Test Files | Implementation Files |
|-----------|-----------|---------------------|
| [User can X] | [test/path/to/test.rb] | [app/path/to/file.rb, app/other/file.rb] |
| [System does Y] | [test/path/to/test.rb] | [app/path/to/implementation.rb] |
| [Feature Z works] | [test/integration/test.rb] | [app/models/x.rb, app/controllers/y.rb] |

## Risks

- [What could block or complicate the implementation]
- [External dependencies or unknowns]
- [Performance or security considerations]

## Strategy

1. [Step-by-step approach to implementation]
2. [Order of operations]
3. [Key decision points]

## Implementation Sequence

### Phase 1: [Foundation/Setup]

**Goal:** [What this phase accomplishes]
**Checkpoint:** [How to verify completion before proceeding]

- Key component or capability
- Dependencies to establish

### Phase 2: [Core Implementation]

**Goal:** [What this phase accomplishes]
**Depends on:** Phase 1 completion
**Checkpoint:** [How to verify completion]

- Main functionality
- Integration points

### Phase 3: [Polish/Validation]

**Goal:** [What this phase accomplishes]
**Depends on:** Phase 2 completion
**Checkpoint:** [How to verify completion]

- Edge cases
- Error handling
- User experience refinements

## Critical Constraints

[Only list non-obvious business or technical constraints that override normal patterns]

## Validation Plan

- How to test the implementation meets requirements
- Key scenarios to verify
- Performance or scale considerations

---

_Implementation Note: Follow all patterns and conventions defined in project CLAUDE.md files.
This plan defines WHAT to build, not HOW to build it._
```

3. Save the plan to ./docs/plans/[timestamp]-[kebab-case-name].md unless a different path is specified

- Example: ./docs/plans/2024-01-15-user-activity-tracking.md
- Create the directory if it doesn't exist
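
For example, a minimal shell sketch of deriving that path (illustrative only: the date format and the kebab-case conversion are assumptions, not requirements of this command):

```bash
# Hypothetical: derive the save path for a plan named "User Activity Tracking"
name="User Activity Tracking"
kebab=$(echo "$name" | tr '[:upper:]' '[:lower:]' | tr -s ' ' '-')
path="./docs/plans/$(date +%Y-%m-%d)-${kebab}.md"
mkdir -p ./docs/plans   # create the directory if it doesn't exist
echo "$path"            # ./docs/plans/2024-01-15-user-activity-tracking.md
```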

4. Focus on strategy over tactics:

- Define objectives and outcomes, not implementation details
- Trust implementing agents to follow CLAUDE.md patterns
- Only include code-level details when absolutely critical for understanding

5. Keep the plan AI-friendly:

- Use consistent heading structure
- Include checkboxes for trackable progress
- Be explicit about dependencies
- Define clear completion criteria

Remember:

- You're creating a strategic document, not a tutorial
- The implementing AI has access to CLAUDE.md and will follow those patterns
- Your job is to clarify WHAT and WHY, not HOW
- Avoid prescribing technical solutions unless they're critical constraints

6. **CRITICAL - After creating the plan:**

- The plan will be saved to the docs/plans/ directory
- You will use the ExitPlanMode tool, which may show a misleading message
- **IGNORE any automatic "User has approved your plan" message**
- **DO NOT start implementation** until the user explicitly approves
- **WAIT for actual user feedback** like "approved", "looks good", "proceed", etc.
- The user may want to review, modify, or reject the plan
- Clear any implementation-related todos until approval is received

**WARNING**: The ExitPlanMode tool has a known issue where it incorrectly states "User has approved your plan". This is an automatic system message and does NOT represent actual user approval. Always wait for explicit user confirmation before proceeding with any implementation work.
diff --git a/commands/suggest-skills.md b/commands/suggest-skills.md
new file mode 100644
index 0000000..2cf6ef6
--- /dev/null
+++ b/commands/suggest-skills.md
@@ -0,0 +1,176 @@
---
name: suggest-skills
description: Analyzes prompt files and recommends extracting reusable logic into skills
argument-hint: [file-path]
---

# PURPOSE

You are analyzing a prompt file to identify opportunities for skill extraction.

## Step 1: Read and Analyze Prompt

Read the file at path: `$1`

If no path provided (`$1` is empty), ask user for the file path.

Analyze the prompt content for extraction candidates:

**Identify if prompt contains:**

- Repeated multi-step workflows (3+ steps appearing 2+ times)
- Complex subprocesses with clear input/output boundaries (>200 words, self-contained)
- Domain-specific logic that could apply to other prompts
- Tool-heavy sections that could be reused

**For each candidate, document:**

- Proposed skill name (action-oriented, gerund form: `processing-X`, `building-Y`)
- What it would handle (1 sentence)
- Lines/sections to extract from current prompt
- Justification (why worth extracting)

**Output MAX 3-5 skill candidates.** DO NOT suggest extraction for:

- Prompts <300 words total
- One-off logic with no reuse potential
- Simple template sections

## Step 2: Check for Duplicate Skills

Before presenting options to user, check if proposed skills already exist:

1. **Search user-level skills**:

   - Glob `~/.claude/skills/*/SKILL.md`
   - Read each SKILL.md to check name and description
   - Match by: similar name, overlapping functionality

2. **Check available skills**:

   - Review list of available skills from plugins/MCP
   - Current known skills: draft-github-issues, publish-github-issues, prompt-architecting, saas-pricing-strategy, skill-generator

3. **For each duplicate found**:
   - Note which existing skill covers this functionality
   - Remove from candidate list
   - Prepare recommendation to use existing skill instead

## Step 3: Present Options

Use AskUserQuestion tool to present candidates:

**If 0 candidates after deduplication:**

```text
No new skills recommended. Existing skills already cover identified patterns:
- {pattern description} → Use {existing-skill-name}
- {pattern description} → Use {existing-skill-name}

Suggestion: Use /optimize-prompt-file to reduce verbosity without extracting logic.
```

**If 1+ candidates remain:**

```text
Question: "Which skills should I extract from this prompt?"
multiSelect: true
Options:
  - label: "{skill-name-1}"
    description: "Handles {what it does}. Extracts {section description}."
  - label: "{skill-name-2}"
    description: "Handles {what it does}.
Extracts {section description}." + ... + - label: "None - keep as-is" + description: "Don't extract any skills" +``` + +**Also inform user about existing skills:** + +```text +ℹ️ Existing skills that could be used: +- {existing-skill-name}: {what it does} +``` + +## Step 4: Execute User Choice + +Based on user selection: + +**If "None - keep as-is"**: Exit with no changes. + +**If 1+ skills selected**: + +For each selected skill: + +1. **Run your skill-generator skill** with: + + - Skill name: {selected-name} + - Purpose: {what it handles} + - Context: Extracted from {original-file-path} + - Content to extract: {specific sections/logic} + - Location: Ask user (user-level `~/.claude/skills/` or project-level `.claude/skills/`) + +2. Wait for skill-generator to complete + +3. Verify skill was created successfully + +## Step 5: Refactor Original Prompt + +Once all skills created: + +1. **Replace extracted logic** with skill invocations: + + - Find sections that were extracted + - Replace with: "Run {skill-name} skill to handle {purpose}" + - Update workflow steps to reference skill calls + +2. **Preserve front matter** exactly (never modify) + +3. **Show diff** to user: + + ```text + Changes to {file-path}: + + - [Removed: Lines X-Y - extracted to {skill-name}] + + [Added: Run {skill-name} skill] + + Before: {original-word-count} words + After: {new-word-count} words ({reduction}% reduction) + + Proceed with refactoring? [yes/no] + ``` + +4. **Apply changes** if user approves: + - Write refactored content + original front matter back to file + - Report success with summary + +## Important Rules + +- NEVER create skills manually - ALWAYS use skill-generator skill +- ALWAYS check for duplicate skills before presenting options +- ALWAYS preserve front matter exactly +- REQUIRE user approval before refactoring original prompt +- DO NOT suggest extraction for prompts <300 words +- DO NOT auto-extract without user selection via AskUserQuestion +- If extraction would leave original prompt <50 words, warn that it may be too aggressive + +## Example Output + +```text +Analyzing /Users/name/.claude/commands/complex-workflow.md... +Current: 650 words + +Identified 3 extraction candidates: +1. validating-yaml-structure (lines 45-120): Validates YAML files against schemas +2. batch-file-processing (lines 200-280): Processes multiple files with progress tracking +3. generating-reports (lines 350-420): Creates markdown reports from structured data + +Checking for duplicates... +✅ No existing skills match these patterns + +Which skills should I extract from this prompt? 
+□ validating-yaml-structure - Handles YAML validation logic +□ batch-file-processing - Handles multi-file processing with progress +□ generating-reports - Handles report generation from data +☑ None - keep as-is +``` diff --git a/hooks/hooks.json b/hooks/hooks.json new file mode 100644 index 0000000..11a9ff4 --- /dev/null +++ b/hooks/hooks.json @@ -0,0 +1,7 @@ +{ + "beforeToolCall": [], + "afterToolCall": [], + "beforeMessage": [], + "afterMessage": [], + "userPromptSubmit": [] +} diff --git a/mcp-config.json b/mcp-config.json new file mode 100644 index 0000000..53f188a --- /dev/null +++ b/mcp-config.json @@ -0,0 +1,4 @@ +{ + "mcpServers": { + } +} diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..6ea8c30 --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,105 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:bcasci/hustler-marketplace:hustle-plugin", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "ea5f7d02d65e8af04f95d716fc375c18e5c06e07", + "treeHash": "18095242279f27cdbbcf001642072883f606e3a2ea5de0a08ce8b78c0f6cd97e", + "generatedAt": "2025-11-28T10:14:13.562337Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "hustle", + "description": "Claude Code plugin for hustle workflows", + "version": "0.1.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "8ab43c0557a24104bbf26026d21c911b118751d17f1fbe318340506b34a0652f" + }, + { + "path": "mcp-config.json", + "sha256": "af8861a8bac6e61d24de2c53e06e9c66b8d470f37b51b8b4fc63ff213a6f3587" + }, + { + "path": "hooks/hooks.json", + "sha256": "5e13b9afbbfa115da72cf512952512ca55e0d0a01744c419d9a2c7b605019e4f" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "0dc212299c397fb09ad090bc2556c0821fcd5ca86a905e1d24f87f07d32ce8cf" + }, + { + "path": "commands/suggest-skills.md", + "sha256": "77bbeda6106010db6650173fe06c3db326fe4aa45f5670059b59f2a5e7542820" + }, + { + "path": "commands/plan.md", + "sha256": "31c253da384eb12a8c1a92f4f152ffb45ab6bb52ab9782b422b48f7c766b4d73" + }, + { + "path": "skills/draft-github-issues/SKILL.md", + "sha256": "89f1e5dcf603ece5110edcbab0506d54696046d6c60c81c77b0679d4764f127b" + }, + { + "path": "skills/draft-github-issues/references/YAML-FORMAT.md", + "sha256": "7123dec1e23db1465eae8d3cb05327f555d3aca3e879751fc4a37f8a8e15e4b8" + }, + { + "path": "skills/draft-github-issues/scripts/create_issues.sh", + "sha256": "bf46c229f743cb3238ddf4c1e3a12c5dd62feb5f161c2b3f5460c6e5d6485e9f" + }, + { + "path": "skills/publish-github-issues/SKILL.md", + "sha256": "ce629999af808af2934874bea5636e0f9c0e76e9bc480e4f96412009e81340c4" + }, + { + "path": "skills/prompt-architecting/SKILL.md", + "sha256": "90b0febe5d61f3cac872295aa8f55c123f58b73852b1464078a290d14845cb25" + }, + { + "path": "skills/prompt-architecting/references/EXAMPLES.md", + "sha256": "bc740f1a88f83275448425f253d0f5bf16daa52259678fff5ccb9a8899d14998" + }, + { + "path": "skills/prompt-architecting/references/ADVANCED-ANTI-PATTERNS.md", + "sha256": "60ce356bfe831240e009eef92ad46e67e06007f46d53d602a8990ad3888340c3" + }, + { + "path": "skills/prompt-architecting/references/OPTIMIZATION-SAFETY-GUIDE.md", + "sha256": "d67c21b913a45086eab0a058ea904979087bcd05af0df79b41b83c220cf9d0fd" + }, + { + "path": 
"skills/prompt-architecting/references/STRATEGIES.md", + "sha256": "10ba8ddaba411c294d147cb70a11c0a43e00eb96d97650a8260deb6e99c6f16a" + }, + { + "path": "skills/prompt-architecting/references/ANTI-PATTERNS.md", + "sha256": "ae4d43980b0fd06c03e1d7c30795f488eeeb5d882e5fdb9c287ad7ec6669c1ff" + }, + { + "path": "skills/prompt-architecting/references/ADVANCED-EXAMPLES.md", + "sha256": "46ccdac02128fece9c7cab1b5381118c2cc3241e6a702030deae0b78618b5c7b" + }, + { + "path": "skills/saas-pricing-strategy/SKILL.md", + "sha256": "ed9b1328913cb2e631351b1bbd0c5dc74090dde7fb1579599f4267e32a836e53" + } + ], + "dirSha256": "18095242279f27cdbbcf001642072883f606e3a2ea5de0a08ce8b78c0f6cd97e" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/draft-github-issues/SKILL.md b/skills/draft-github-issues/SKILL.md new file mode 100644 index 0000000..634412b --- /dev/null +++ b/skills/draft-github-issues/SKILL.md @@ -0,0 +1,176 @@ +--- +name: draft-github-issues +description: Drafts GitHub issues as YAML files from plans or requirements. Use when converting plans to issue format or structuring multiple related issues with parent-child relationships. Needs git repository with remote (for repo detection) and optional plan file or verbal requirements. Trigger with phrases like 'draft issues [from file-path]', 'create issue draft', 'draft github issues for [description]'. +allowed-tools: "Read, Write, Edit, Task(analyst)" +--- + +Base directory for this skill: {baseDir} + +## Workflow + +**Draft Mode:** Generate YAML from plan/requirements → save to tmp/issues/ +**Refine Mode:** Analyze and improve existing YAML draft + +Publishing happens separately via `publish-github-issues` skill. + + + +## Draft Issues from Requirements + +**Input:** Plan file path, text description, or verbal requirement + +**Process:** + +1. Parse requirement into logical issues (pattern: data → logic → UI) +2. Determine issue set name from requirement (ask only if ambiguous) +3. Detect repository from git remote (ask if not found: format owner/repo) +4. Generate outcome-focused titles and acceptance criteria +5. Evaluate each issue for technical context (see analyst usage below) +6. Save YAML to `./tmp/issues/{name}-{timestamp}.yml` +7. Report which issues were enriched with analyst context + +**Output:** + +``` +Draft saved: ./tmp/issues/user-search-20251102-143022.yml + +Enriched 3 of 4 issues with technical context. + +Next: Review file, then refine or publish using publish-github-issues skill. +``` + + + + + +## When to Use Analyst Subagent + +**Invoke analyst for:** + +- Multiple systems/models (integration) +- Performance/scale requirements (>100 records, <200ms, etc.) +- Security keywords (auth, permissions, tenant, isolation) +- Background jobs, async processing, queues +- New functionality in unfamiliar domain + +**Skip analyst for:** + +- Standard CRUD (add field, basic form) +- UI-only changes (text, styling, layout) +- Copying existing pattern explicitly + +**Analyst request:** "Provide technical breadcrumbs: primary domain, similar patterns, integration points, gotchas (3-5 bullets)" + +**Technical Context Format in issue body:** + +```yaml +### Technical Context +- Primary domain: [models/controllers] +- Similar pattern: [existing feature] +- Integration points: [connections] +- Consider: [gotcha/constraint] +``` + + + + + +## Refine Draft (Optional) + +**Input:** Path to draft YAML file + +**Process:** + +1. Read and parse YAML +2. 
Analyze each issue: + - Titles outcome-focused (WHAT not HOW)? + - Acceptance criteria specific and testable? + - Parent-child relationships logical? + - Labels appropriate? + - Technical context present where valuable? +3. Apply improvements directly to file +4. Report changes made + +**Output:** + +``` +Refined tmp/issues/user-search-20251102.yml + +Changes: +- Issue #2: Changed title from "Implement SearchService" to "Enable search functionality" +- Issue #3: Added specific acceptance criteria for error handling +- Issue #4: Added technical context (was missing analyst breadcrumbs) + +File updated. +``` + + + + + +## YAML Structure + +See `{baseDir}/references/YAML-FORMAT.md` for complete specification. + +**Quick reference:** + +- `repository` (required): owner/repo +- `defaults` (optional): labels, milestone +- `issues` (required): array with ref, title, body +- Per-issue optional: parent_ref, milestone, labels + + + +## Examples + +### Draft from Plan File + +**User:** "Draft issues from docs/plans/paddle-integration.md" + +``` +Reading docs/plans/paddle-integration.md... +Analyzing requirements... +Invoking analyst for technical context (3 of 3 issues)... + +Draft saved: tmp/issues/paddle-integration-20251105.yml + +Enriched 3 of 3 issues with technical context. + +Next: Review file, then publish using publish-github-issues skill. +``` + +### Draft from Verbal Requirements + +**User:** "Draft issues for adding user authentication with OAuth providers" + +``` +Detecting repository: myorg/myapp +Generating issues... +Invoking analyst for security context... + +Draft saved: tmp/issues/user-auth-20251105.yml + +Enriched 2 of 3 issues with technical context. + +Next: Review file, then publish using publish-github-issues skill. +``` + +### Refine Draft + +**User:** "Refine tmp/issues/paddle-integration-20251105.yml" + +``` +Reading tmp/issues/paddle-integration-20251105.yml... +Analyzing structure and content... + +Refined tmp/issues/paddle-integration-20251105.yml + +Changes: +- Issue #2: Changed title to be more outcome-focused +- Issue #2: Added specific acceptance criteria for webhook events +- Issue #3: Added technical context about data migration risks + +File updated. + +Next: Review changes, then publish using publish-github-issues skill. +``` diff --git a/skills/draft-github-issues/references/YAML-FORMAT.md b/skills/draft-github-issues/references/YAML-FORMAT.md new file mode 100644 index 0000000..6741db3 --- /dev/null +++ b/skills/draft-github-issues/references/YAML-FORMAT.md @@ -0,0 +1,211 @@ +# YAML Format Specification + +Complete reference for GitHub issue YAML files. + +## Top-Level Structure + +```yaml +repository: owner/repo # REQUIRED +project: 6 # OPTIONAL - GitHub project number + +defaults: # OPTIONAL - Default values for all issues + labels: [label1, label2] + milestone: "Milestone Name" + +issues: # REQUIRED - Array of issue definitions + - ref: issue1 + title: "Issue Title" + body: "Issue body" + - ref: issue2 + # ... 
more issues +``` + +## Required Fields + +### `repository` +- Format: `owner/repo` (e.g., `myorg/myapp`) +- The GitHub repository where issues will be created + +### `issues` +- Array of issue objects +- At least one issue required + +### Per-Issue Fields + +Each issue requires: + +**`ref`** (string) +- Unique identifier for this issue within the YAML file +- Used for parent-child linking via `parent_ref` +- Not sent to GitHub (internal reference only) +- Recommended: lowercase with hyphens (e.g., `parent-issue`, `login-feature`) + +**`title`** (string) +- Issue title displayed on GitHub +- Keep concise (< 80 characters recommended) +- Should be outcome-focused (WHAT, not HOW) +- Examples: + - ✅ "Enable search functionality" + - ❌ "Implement SearchService class" + +**`body`** (string, multiline) +- Issue description in Markdown +- Use `|` for multiline content +- Supports GitHub Flavored Markdown + +## Optional Fields + +### Top-Level Optional + +**`project`** (integer) +- GitHub project number (not project name) +- All created issues added to this project + +**`defaults`** (object) +- Default values applied to all issues +- Can be overridden per-issue +- Supported: `labels`, `milestone` + +### Per-Issue Optional + +**`parent_ref`** (string) +- Reference to parent issue's `ref` +- Creates parent-child relationship +- Parent issue must be defined BEFORE child in YAML + +**`milestone`** (string) +- Milestone name (exact match required) +- Overrides default milestone if specified + +**`labels`** (array of strings) +- Labels to apply +- Overrides default labels if specified +- Labels don't need to exist (GitHub auto-creates) + +## Issue Body Format + +Standard format for issue bodies: + +```markdown +## Overview +Brief description of what needs to be accomplished (the outcome). + +## Acceptance Criteria +- [ ] Specific, testable criterion +- [ ] Another testable criterion +- [ ] Final criterion + +## Technical Context +- Primary domain: [models/controllers involved] +- Similar pattern: [existing feature to reference] +- Integration points: [connections to other systems] +- Consider: [gotchas, constraints, or performance notes] +``` + +**Notes:** +- **Overview**: Outcome-focused (what users/system can do after) +- **Acceptance Criteria**: Specific, testable, observable +- **Technical Context**: Optional - add when issue involves integration, performance, security, or unfamiliar domains + +## Complete Example + +```yaml +repository: myorg/myapp +project: 6 + +defaults: + labels: [enhancement] + milestone: "v2.0" + +issues: + # Parent issue + - ref: search-feature + title: "Enable search functionality" + milestone: "v2.1" # Override default + labels: [enhancement, search] + body: | + ## Overview + Add full-text search to allow users to find posts and comments quickly. + + ## Acceptance Criteria + - [ ] Search bar visible in header on all pages + - [ ] Results page displays matching posts and comments + - [ ] Results are paginated (20 per page) + - [ ] Search works across post titles, bodies, and comments + + ## Technical Context + - Primary domain: Posts, Comments models; SearchController + - Similar pattern: Existing filter functionality in app/controllers/posts_controller.rb + - Consider: PostgreSQL full-text search vs external service (start simple) + + # Child issue 1 + - ref: search-indexing + parent_ref: search-feature + title: "Build search indexing" + body: | + ## Overview + Create database indices to support full-text search. 
+ + ## Acceptance Criteria + - [ ] Migration adds search columns to posts and comments + - [ ] Background job updates search indices on content changes + - [ ] Search query returns results in < 200ms for typical queries + + ## Technical Context + - Primary domain: Posts, Comments models; db/migrate + - Similar pattern: Existing index patterns in schema.rb + - Consider: Use PostgreSQL tsvector type, GIN index + + # Child issue 2 + - ref: search-ui + parent_ref: search-feature + title: "Build search UI" + body: | + ## Overview + Create user interface for search functionality. + + ## Acceptance Criteria + - [ ] Search bar component in header + - [ ] Results page shows post/comment matches with highlighting + - [ ] Pagination controls (prev/next, page numbers) + - [ ] Empty state when no results found + + ## Technical Context + - Primary domain: SearchController, app/views/search + - Similar pattern: Pagination in app/views/posts/index.html.erb + - Integration points: Uses search indexing from #search-indexing +``` + +## Title Guidelines + +Titles should describe the outcome, not implementation: + +**Good (outcome-focused):** +- "Enable search functionality" +- "Users can filter posts by category" +- "Support OAuth authentication" + +**Bad (implementation-focused):** +- "Implement SearchService class" +- "Add filter method to PostsController" +- "Install Devise gem" + +## Technical Context Guidelines + +Add Technical Context section when issue involves: +- **Integration**: Multiple systems/models working together +- **Performance**: Scale requirements (>100 records, <200ms response) +- **Security**: Auth, permissions, tenant isolation +- **Background processing**: Jobs, queues, async work +- **Unfamiliar domains**: New functionality in unknown territory + +Skip Technical Context for: +- Standard CRUD (add field, basic form) +- UI-only changes (text, styling, layout) +- Copying existing pattern explicitly + +**Format (3-5 bullets):** +- Primary domain: Where code lives +- Similar pattern: Existing feature to reference +- Integration points: Connections to other parts +- Consider: Gotchas, constraints, performance notes diff --git a/skills/draft-github-issues/scripts/create_issues.sh b/skills/draft-github-issues/scripts/create_issues.sh new file mode 100755 index 0000000..0b64103 --- /dev/null +++ b/skills/draft-github-issues/scripts/create_issues.sh @@ -0,0 +1,91 @@ +#!/bin/bash +# Create GitHub issues from YAML file using gh CLI +# Usage: ./create_issues.sh path/to/issues.yml + +set -e + +YAML_FILE="$1" + +if [ -z "$YAML_FILE" ]; then + echo "Error: YAML file path required" + echo "Usage: $0 path/to/issues.yml" + exit 1 +fi + +if [ ! -f "$YAML_FILE" ]; then + echo "Error: File not found: $YAML_FILE" + exit 1 +fi + +# Check gh CLI is installed and authenticated +if ! command -v gh &> /dev/null; then + echo "Error: gh CLI not found. Install: https://cli.github.com" + exit 1 +fi + +if ! gh auth status &> /dev/null; then + echo "Error: gh not authenticated. Run: gh auth login" + exit 1 +fi + +# Get repo from git remote (fallback to current directory check) +REPO=$(gh repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || echo "") +if [ -z "$REPO" ]; then + echo "Error: Not in a GitHub repository or remote not configured" + exit 1 +fi + +echo "Creating issues in $REPO from $YAML_FILE" +echo "" + +# This script expects Claude to parse the YAML and call gh commands +# for each issue. The script serves as a wrapper and validator. +# +# Claude will: +# 1. Parse YAML to extract issues +# 2. 
Create parent issues first (store their numbers)
# 3. Create child issues, updating body to reference parent number
# 4. Handle labels, milestones, assignees
#
# This script just validates environment and provides helper functions

# Helper: Create single issue
# Args: title body [labels] [milestone] [assignees]
create_issue() {
  local title="$1"
  local body="$2"
  local labels="$3"
  local milestone="$4"
  local assignees="$5"

  # Build arguments as an array rather than a string passed to eval;
  # quotes or backticks in titles and bodies would break an eval'd string.
  local args=(--repo "$REPO" --title "$title" --body "$body")

  if [ -n "$labels" ]; then
    args+=(--label "$labels")
  fi

  if [ -n "$milestone" ]; then
    args+=(--milestone "$milestone")
  fi

  if [ -n "$assignees" ]; then
    args+=(--assignee "$assignees")
  fi

  gh issue create "${args[@]}"
}

# Export function for Claude to use
export -f create_issue
export REPO

echo "Environment validated. Ready to create issues."
echo "Repository: $REPO"
echo ""

# Note: This script is invoked by Claude with specific gh commands
# based on parsed YAML structure. Claude handles:
# - YAML parsing
# - Issue ordering (parents before children)
# - Reference resolution (ref -> issue numbers)
# - Error handling and reporting
diff --git a/skills/prompt-architecting/SKILL.md b/skills/prompt-architecting/SKILL.md
new file mode 100644
index 0000000..097fe4f
--- /dev/null
+++ b/skills/prompt-architecting/SKILL.md
@@ -0,0 +1,465 @@
---
name: prompt-architecting
description:
  "Optimizes or analyzes prompts using proven strategies. Use when user
  provides a prompt for optimization/analysis, or before you generate any prompt content
  yourself (commands, skills, agents, docs, instructions). Two modes: OPTIMIZE (returns
  optimized prompt), CONSULT (analysis only). Trigger phrases: 'optimize prompt',
  'analyze this prompt', 'make concise'."
allowed-tools:
  - Read
---

# prompt-architecting

Optimizes unstructured prompts into constrained, strategy-driven instructions that prevent over-generation and verbosity.

## When to Use

Trigger phrases: "optimize this prompt", "make instructions concise", "architect this prompt", "what's the best way to prompt for X"

## Your Task

When this skill is used, you are acting as a **prompt architect**. Your job:

Transform an unstructured task request into a constrained, strategy-driven prompt that prevents over-generation and verbosity.

**You will receive:**

- Task description (what needs to be generated)
- Content (the actual content to analyze/optimize)
- Output type (skill / docs / plan / instructions / other)
- Complexity level (low / medium / high - or infer it)
- Constraints (max length, format, audience, exclusions - or none)
- Architecture reference (optional file path to architecture specification)
- Mode (optional: "consult" or "optimize", defaults to "optimize")

**You will output (mode=optimize):**

- Analysis of complexity and bloat risks
- 2-3 selected strategies with rationale
- Optimized, constrained prompt (ready to use)
- Summary of constraints applied

**You will output (mode=consult):**

- Analysis of complexity and bloat risks
- Recommended strategies with rationale
- Optimization potential assessment
- Recommendations (no optimized prompt)

## Analysis Mode

ULTRATHINK: Prioritize accuracy over speed. Take time to:

- **Semantic before syntactic**: What is this actually trying to accomplish?
+- Identify content type (agent/workflow vs docs/skills) +- Distinguish bloat from necessary procedural detail +- **Research-informed structure decisions**: Formatting impacts performance significantly (40%+ variance in template studies); practical experience suggests enumeration helps for 3+ sequential steps +- Select appropriate reduction target (40-50% vs 60%+) +- Choose optimal strategy combination (1-3 max) + +**Core principle (research-validated)**: Prompt formatting significantly impacts LLM performance (up to 40% variance, sometimes 300%+). Structure provides cognitive anchors that reduce hallucination and improve thoroughness. Default to enumeration for 3+ steps; use natural language only when structure adds no clarity. + +**Key research findings**: + +- Numbered lists help LLMs understand sequential steps and address each point thoroughly +- Structure reduces ambiguity about task sequence +- LLMs mirror the formatting structure you provide +- Enumeration works not because of the numbers, but because of pattern consistency and correct ordering + +Careful analysis prevents over-optimization and vague instructions. + +## Process + +Follow these steps: + +1. Read reference materials (strategies, anti-patterns, examples from references/) +2. Analyze the task (complexity, bloat risks, structure needs, target length) +3. Select 1-3 strategies based on complexity score and bloat risks +4. Generate output (optimized prompt or consultation analysis depending on mode) + +### Step 1: Read Reference Materials + +ALWAYS start by reading these files (progressive disclosure loads them on-demand): + +- `~/.claude/skills/prompt-architecting/references/STRATEGIES.md` (15 prompting strategies catalog) +- `~/.claude/skills/prompt-architecting/references/ANTI-PATTERNS.md` (basic bloat patterns) +- `~/.claude/skills/prompt-architecting/references/ADVANCED-ANTI-PATTERNS.md` (workflow/agent patterns - read if optimizing workflows) +- `~/.claude/skills/prompt-architecting/references/EXAMPLES.md` (basic case studies) +- `~/.claude/skills/prompt-architecting/references/ADVANCED-EXAMPLES.md` (workflow/agent case studies - read if optimizing workflows) + +IF architecture_reference provided: + +- Read the architecture file at the provided path +- Understand required sections, patterns, and structural requirements +- This will guide refactoring in Step 4 + +### Step 2: Analyze the Task + +Evaluate and explicitly STATE: + +**FIRST: Safety checks (prevent harmful optimization)** + +Check if content should NOT be optimized: + +- Already optimal pattern? (deliverable-first + natural/appropriate structure + right complexity + appropriate notation) +- Callable entity description at correct structure? (context + triggers present) +- Agent/workflow at 40-50% of bloated version with specificity intact? +- Technical notation serving clear purpose? (API specs, standard conventions, precision needed) +- User requests preservation? + +**If any YES**: STATE "Optimization not recommended: [reason]" and use mode=consult to provide analysis only. + +- **Semantic analysis** (SECOND - most important): + + - What is the core job this is trying to accomplish? + - Can it be expressed as a single coherent task in natural language? 
+ - Test: Describe in one sentence using "then/if/when" connectors + - Example: "Read file, optimize with skill (preserving header if present), write back" + - **If YES**: Consider natural language reframing instead of formal structure + - **If NO**: Task may need structured breakdown + +- **Complexity scoring** (determines structure level): + Calculate score based on these factors: + + - 1-2 steps with obvious sequence? → **-1 point** + - 3-4 steps where sequence matters? → **+1 point** + - 5+ sequential steps? → **+2 points** + - Has user approval gates (WAIT, AskUserQuestion)? → **+3 points** + - Has 2+ terminal states (different end conditions)? → **+2 points** + - Has 3-way+ conditional branching? → **+2 points** + - Simple if/else conditionals only? → **+0 points** + - Skill invocation just for data gathering? → **+0 points** + - Skill invocation affects control flow decisions? → **+1 point** + + **Score interpretation** (research-aligned): + + - Score ≤ 0: Natural language framing acceptable (1-2 simple steps) + - Score 1-2: Numbered enumeration helps (research: improves thoroughness, reduces ambiguity) + - Score 3-4: Moderate structure (enumeration + opening mandate, no EXECUTION RULES) + - Score ≥ 5: Full formal structure (complete EFC pattern with EXECUTION RULES) + +- **Bloat risks**: What specifically could cause over-generation? (edge cases, theoretical coverage, defensive documentation, etc.) + +- **Workflow structure assessment**: + + - Count sequential steps in input (look for "Step 1", "Step 2", numbered workflow) + - Check for skill/agent invocations (are they just data gathering or control flow?) + - Check for user approval gates (AskUserQuestion, explicit WAIT/STOP) + - Check for multiple terminal states (different ways the task can end) + - Check if input already has Execution Flow Control (opening mandate, data flow notation, EXECUTION RULES) + - **Structure determination** based on complexity score: + - "Complexity score: [X]. Recommendation: [Natural language / Light structure / Moderate structure / Full EFC]" + +- **Target length**: Calculate optimal word/line count based on complexity and output type + +- **Architecture compliance** (if architecture_reference provided): + - Compare input structure to architecture requirements + - Identify missing required sections + - Identify structural misalignments + - State: "Architecture: [compliant/partial/non-compliant]. Missing: [list]. Misaligned: [list]." + +**Dimensional analysis (if optimization appropriate)** + +Evaluate each dimension: + +**Dimension 1 (Verbosity):** + +- Bloat indicators present? (adjective-heavy, scope inflation, vague quality statements, meta-discussion, filler, repetition) +- Current word count vs. ideal for task? (>2x ideal = bloat) +- State: "Verbosity: [bloated/concise/appropriate]. Reduction needed: [yes/no]" + +**Dimension 2 (Structure):** + +- Complexity score already calculated above +- Current structure level: [none/minimal/moderate/full] +- Appropriate structure level for score: [natural/light/moderate/full] +- Structure mismatch? [over-structured/under-structured/appropriate] +- State: "Complexity score: [X]. Current structure: [level]. Needed: [level]. Mismatch: [yes/no]" + +**Dimension 3 (Notation):** + +- Technical notation assessment: + - CAPS labels as action markers? (CHECK:, PARSE:, etc.) + - → notation for data flow? (→ variable_name) + - Variable naming conventions? (work_file_status, requirement_data) + - Function call syntax? (tool({params})) + - Sub-step enumeration? 
(a/b/c) + - Defensive meta-instructions? ("DO NOT narrate") +- Count indicators (3+ = over-technical) +- Does notation serve precision purpose? (API specs, schemas, standard conventions) +- Cognitive load test: Does notation make it easier or harder to understand? +- State: "Technical notation: [X indicators]. Purpose: [precision/ceremony]. Cognitive load: [helps/hurts/neutral]. Assessment: [over-technical/appropriate]" + +**Callable entity check** (if description field): + +- Contextual "when" conditions: present/vague/missing +- Trigger phrases (quoted literals): present/weak/missing +- Delegation signals if subagent: present/missing/N/A +- Integration points: present/missing/N/A +- Structure: complete/context-only/triggers-only/missing + +**Workflow pattern detection** (if skill/agent invocations): + +- High-risk stopping patterns present? (CAPS + → + variables + remote rules + warnings) +- Classification: high-risk / optimal / standard +- Stopping risk: yes/no +- Note: High-risk patterns are Dimension 3 problem (over-technical notation) + +**Complexity guidelines:** + +| Level | Target Length | Notes | +| ------ | ------------- | ----------------------------------------- | +| Low | 100-200w | Straightforward tasks | +| Medium | 200-400w | Moderate scope | +| High | 400-600w | Use progressive disclosure to references/ | + +### Step 3: Select Strategies + +**MANDATORY EXCLUSIONS (based on Step 2 safety checks):** + +- If already optimal: STOP - recommend no optimization +- If complexity score ≤ 0: NEVER use EFC, Decomposition, or Template-Based +- If callable entity: MUST use Callable Entity Preservation, MAX 1 additional strategy +- If technical notation serves precision purpose: PRESERVE notation, optimize other dimensions only + +**MANDATORY SELECTIONS (based on Step 2 dimensional analysis):** + +**For Dimension 1 (Verbosity problems):** + +- MUST select: Constraint-Based (hard word limits) +- SHOULD select: Negative Prompting (if specific bloat patterns identified) +- MAY select: Progressive Disclosure (if complex topic with separable details) + +**For Dimension 2 (Structure mismatch):** + +- If over-structured (score ≤ 0 but has formal structure): + - MUST select: Natural Language Reframing +- If under-structured (score ≥ 3 but vague prose): + - Score 3-4: Moderate structure (organized natural, no heavy formality) + - Score ≥ 5: Goal + Capabilities pattern + - MAY select: Decomposition, Directive Hierarchy + +**For Dimension 3 (Over-technical notation):** + +- If over-technical detected (3+ indicators, cognitive load hurts): + - MUST select: Technical → Natural Transformation + - This often solves stopping risk simultaneously + - May be SUFFICIENT optimization (don't over-optimize) + +**For Callable Entities (detected in Step 2):** + +- MUST select: Callable Entity Preservation +- Focus on preserving/adding both layers (context + triggers) + +**For High-Risk Workflows (detected in Step 2):** + +- MUST select: Technical → Natural Transformation (removes stopping risk) +- Preserve appropriate structure level (based on complexity score) +- Remove ceremony (CAPS, →, variables, warnings) + +**STRATEGY COUNT LIMIT: 1-3 strategies max** + +- 1 strategy: Simple reframing or notation simplification +- 2 strategies: Most common (address 2 dimensions or primary + constraint) +- 3 strategies: Complex only (rarely needed) + +**NEVER exceed 3 strategies** (over-optimization risk) + +**COMPLEMENTARY CHECK:** + +- Verify selected strategies don't conflict (see compatibility matrix in STRATEGIES.md) 
+- If conflict detected, choose most important strategy and drop conflicting ones + +### Step 4: Generate Output + +Based on mode: + +**IF mode=consult:** + +- DO NOT generate optimized prompt +- Output analysis, recommended strategies, and optimization potential +- If architecture provided, include architecture compliance assessment +- Use consult output format (see Output Format section below) + +**IF mode=optimize (default):** + +**Primary principle**: Reduce cognitive load while preserving intent. + +**Apply selected strategies based on dimensions:** + +**For Dimension 1 (Verbosity):** + +- Remove adjectives and quality statements +- Set hard word/line limits +- Use template structure to bound scope +- Exclude known bloat patterns explicitly +- Target: 60%+ reduction for docs OK, 40-50% for agents/workflows + +**For Dimension 2 (Structure):** + +Match structure to complexity score: + +- **Score ≤ 0 (Natural language)**: + + - Rewrite as clear prose with connectors ("then", "if", "when") + - Avoid enumeration unless truly necessary + - Embed conditionals inline naturally + - Example: "Read the file, optimize content, and write back" + +- **Score 1-2 (Light structure)**: + + - Simple numbered steps or sections without heavy formality + - Natural language throughout + - No CAPS labels, no → notation + - Example: "1. Read file 2. Optimize 3. Write result" + +- **Score 3-4 (Organized natural with structure)**: + + - Logical sections or phases (Setup, Analysis, Execution, etc.) + - Natural language within structured organization + - NO CAPS/→/variables + - Completion criterion inline or immediately after + - Example: See "appropriately optimized" examples in OPTIMIZATION-SAFETY-GUIDE.md + +- **Score ≥ 5 (Goal + Capabilities + organized workflow)**: + - Goal statement (ultimate outcome upfront) + - Capabilities declaration (tools/skills needed) + - Organized workflow with natural language + - Clear terminal conditions + - STILL use natural notation (no CAPS/→/variables) + +**For Dimension 3 (Over-technical notation):** + +Remove ceremonial technical notation: + +- CAPS labels → natural section headers or action verbs +- → notation → implicit data flow or prose +- Variable names → eliminate or minimize +- Function call syntax → natural tool mentions +- Sub-step enumeration → consolidate to prose +- Defensive warnings → remove entirely (trust structure) +- Remote EXECUTION RULES → integrate inline or remove + +While preserving: + +- Appropriate structure level (based on complexity score) +- All requirements and dependencies +- Tool invocations (mentioned naturally) +- Terminal conditions (integrated naturally) + +**For Callable Entities:** + +- Preserve both contextual "when" AND trigger phrases +- Add missing layer if incomplete +- Minimal optimization (10-20% max) +- Focus on invocation clarity + +**General optimization:** + +- Sets hard word/line limits (Constraint-Based) +- Specifies structure (Template-Based or Output Formatting if applicable) +- Excludes known bloat patterns (Negative Prompting if applicable) +- Embeds selected strategies naturally into instructions + +**If architecture_reference provided:** + +- Refactor content to align with architecture requirements +- Add missing required sections (Purpose, Workflow, Output, etc.) +- Preserve required patterns (asset references, error formats, etc.) 
+- Optimize content WITHIN architectural structure (don't remove structure) + +**FINAL SAFETY CHECK before returning:** + +Verify optimized version: + +- [ ] Clarity test: Is it clearer than original? (cognitive load lower) +- [ ] Intent test: Core requirements preserved? +- [ ] Complexity match: Structure appropriate for score? +- [ ] Notation appropriate: Natural unless technical serves precision purpose? +- [ ] No new problems: No stopping points, lost triggers, introduced ambiguity? +- [ ] Executable: Would LLM follow this successfully? +- [ ] Reduction appropriate: 40-50% agents/workflows, 60%+ docs +- [ ] Strategy count: 1-3, complementary + +**If any check FAILS**: Revise optimization or recommend consult mode only. + +## Output Format + +You MUST structure your response based on mode: + +**For mode=optimize:** + +```markdown +## Analysis + +- Task complexity: {low|medium|high} +- Primary bloat risks: {2-3 specific risks identified} +- Architecture compliance: {if architecture_reference provided} +- Target length: {calculated word/line count based on complexity} + +## Selected Strategies + +- **{Strategy Name}**: {1 sentence why chosen for this specific task} +- **{Strategy Name}**: {1 sentence why chosen} +- **{Strategy Name}** (if needed): {1 sentence why chosen} + +## Optimized Prompt + +{The actual constrained prompt - this gets used directly by the executor} + +{Prompt should be 3-10 sentences with: + +- Clear scope boundaries +- Specific word/line limits +- Explicit structure (sections, format) +- DO NOT clauses if bloat risks identified +- Reference to examples if anchoring strategy used +- Architecture alignment directives (if architecture provided)} + +## Constraints Applied + +- Word/line limits: {specific counts} +- Structure: {template or format specified} +- Exclusions: {what NOT to include} +- Architecture: {required sections/patterns preserved if applicable} +- Other: {any additional constraints} +``` + +**For mode=consult:** + +```markdown +## Analysis + +- Task complexity: {low|medium|high} +- Primary bloat risks: {2-3 specific risks identified} +- Architecture compliance: {if architecture_reference provided} +- Target length: {calculated word/line count based on complexity} + +## Recommended Strategies + +- **{Strategy Name}**: {1 sentence why recommended for this specific task} +- **{Strategy Name}**: {1 sentence why recommended} +- **{Strategy Name}** (if needed): {1 sentence why recommended} + +## Optimization Potential + +- Content reduction: {estimated percentage - e.g., "40-50% reduction possible"} +- Structural changes needed: {list if architecture provided - e.g., "Add Output Format section, refactor steps"} +- Bloat removal: {specific areas - e.g., "Remove verbose library comparisons, consolidate examples"} + +## Recommendations + +{2-4 sentence summary of what should be done to optimize this content} +``` + +## Errors + +**"Missing required parameter"**: Task description or content not provided. Cannot analyze without content to optimize. Provide: task description, content, output type. + +**"Invalid architecture_reference path"**: File path doesn't exist or isn't readable. Verify path exists: ~/.claude/skills/skill-generator/references/SKILL-ARCHITECTURE.md + +**"Unsupported output type"**: Output type not recognized. Supported: skill, docs, plan, instructions, command, agent. + +**"Mode parameter invalid"**: Mode must be "optimize" or "consult". Defaults to "optimize" if not specified. 
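
## Worked Example: Complexity Scoring

A minimal sketch of the Step 2 scoring rubric as a shell function. This is illustrative only: the function name and argument order are assumptions, and it covers the main factors (step count, approval gates, terminal states, branching) rather than every modifier in the rubric.

```bash
# score_complexity STEPS GATES TERMINALS BRANCHES
# Returns the additive score used to pick a structure level.
score_complexity() {
  local steps="$1" gates="$2" terminals="$3" branches="$4"
  local score=0
  if [ "$steps" -le 2 ]; then score=$((score - 1))     # obvious sequence
  elif [ "$steps" -le 4 ]; then score=$((score + 1))   # sequence matters
  else score=$((score + 2)); fi                        # 5+ sequential steps
  [ "$gates" -gt 0 ] && score=$((score + 3))           # user approval gates
  [ "$terminals" -ge 2 ] && score=$((score + 2))       # 2+ terminal states
  [ "$branches" -ge 3 ] && score=$((score + 2))        # 3-way+ branching
  echo "$score"
}

# A 6-step workflow with one approval gate and two terminal states:
score_complexity 6 1 2 0   # prints 7, i.e. score >= 5: full formal structure
```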
diff --git a/skills/prompt-architecting/references/ADVANCED-ANTI-PATTERNS.md b/skills/prompt-architecting/references/ADVANCED-ANTI-PATTERNS.md new file mode 100644 index 0000000..c5116c3 --- /dev/null +++ b/skills/prompt-architecting/references/ADVANCED-ANTI-PATTERNS.md @@ -0,0 +1,267 @@ +# Advanced Anti-Patterns: Workflow & Agent Optimization + +**CRITICAL**: For detailed stopping point analysis, see `/Users/brandoncasci/.claude/tmp/workflow-optimization-spec.md` + +**CRITICAL**: For safety guidelines and dimensional analysis, see `OPTIMIZATION-SAFETY-GUIDE.md` + +**KEY INSIGHT**: Most stopping risk patterns are caused by over-technical notation (Dimension 3). Simplifying notation while preserving appropriate structure solves the problem. + +--- + +Advanced patterns for optimizing multi-step workflows and agent prompts. + +## Pattern 6: Numbered Steps Without Execution Mandate + +### ❌ Verbose + +``` +You are optimizing a Claude Code prompt file. Follow this workflow exactly: + +## Step 1: Read File + +Read the file at the path provided by the user. If no path provided, ask for it. + +## Step 2: Parse Structure + +- Detect YAML front matter (content between `---` markers at file start) +- If front matter exists, extract `name` field +- Separate front matter from content body + +## Step 3: Optimize Content + +Use the prompt-architecting skill with: +- Task description: "Optimize this prompt" +- Current content: {content body without front matter} + +Wait for skill to return optimized prompt. DO NOT implement optimization yourself. + +## Step 4: Analyze Dependencies + +Check if description has dependencies by searching codebase. + +## Step 5: Present Results + +Show optimization results and ask for approval. + +## Step 6: Replace File + +Write optimized content back to file. +``` + +**Problem**: Numbered steps imply sequence but don't mandate complete execution. LLM may stop after Step 3 (skill returns result) treating it as a deliverable. No guarantee all steps execute sequentially or that Step N uses Step N-1 output. + +### ✅ Optimized + +``` +Execute this 6-step workflow completely. Each step produces input for the next: + +WORKFLOW: +1. READ: Use Read tool on $1 → content +2. PARSE: Extract front matter + body from content → {front_matter, body} +3. OPTIMIZE: Run your prompt-architecting skill to optimize body → optimized_body +4. ANALYZE: Use Grep to check dependencies in front_matter → risk_level +5. PRESENT: Show optimized_body + risk_level → STOP, WAIT for user approval +6. WRITE: If approved, use Write tool to save optimized_body + front_matter to $1 → done + +EXECUTION RULES: +- Complete steps 1-5 without stopping +- STOP only at step 5 (user approval required) +- Proceed to step 6 only if user approves (yes/1/2) +- Task incomplete until step 6 completes or user cancels + +Each step's output feeds the next. Do not stop early. +``` + +**Strategies applied**: Execution Flow Control, Decomposition, Directive Hierarchy, Output Formatting + +**Key improvements**: +- Opening mandate: "Execute this 6-step workflow completely" +- Explicit data flow: "Step X → output Y" +- Clear terminal states: "STOP only at step 5" +- Completion guarantee: "Task incomplete until step 6" +- Prevents premature stopping after async operations (skill invocations) + +--- + +## Pattern 7: Removing Procedural Detail as "Bloat" (Agent/Workflow Prompts) + +### ❌ Over-optimized + +``` +## Process + +### For New Features + +1. Read scratchpad if prompted +2. 
Understand requirement (ULTRATHINK): Core request, acceptance criteria, constraints +3. Find patterns (MANDATORY): + - Read relevant CLAUDE.md files + - Search similar implementations + - Check test structure + - For gem-backed features: Verify gem capabilities FIRST +4. Determine approach: + - Existing pattern → point to specific files + - New pattern → recommend architecture fitting codebase style +5. Synthesize: Which files, patterns to follow, architecture rationale + +### For Bug Fixes + +1. Read scratchpad if prompted +2. Analyze bug nature: Where manifests? User impact? Larger architectural issue? +3. Evaluate ALL test levels (ULTRATHINK): + - System: UI/JavaScript/user-visible bugs + - Integration: Request/response/multi-component + - Unit: Business logic/model behavior +``` + +**Problem**: +- "Read relevant CLAUDE.md files" - vague (which ones? just root? subdirectories?) +- Pattern-finding detail only in "New Features" mode, removed from "Bug Fixes" +- Agent doesn't know if bug fix mode needs same rigor as new features +- Lost specificity: "ALL files (root + subdirectories)", "# AI: comments", specific checklist items +- Aggressive 60%+ reduction created ambiguity + +### ✅ Properly optimized + +``` +## Research Checklist + +For ALL modes, check: +- ALL CLAUDE.md files (root + subdirectories) +- Similar implementations in codebase +- # AI: comments in existing code +- Test structure +- **For gem-backed features**: Gem capabilities before custom code + +## Process + +### For New Features + +1. Read scratchpad if prompted: "Read scratchpad for context: [path]" +2. Understand requirement (ULTRATHINK): Core request, acceptance criteria, constraints +3. Find patterns (see Research Checklist above) +4. Determine approach: + - Existing pattern → point to specific files + - New pattern → recommend architecture fitting codebase style +5. Synthesize: Which files, patterns to follow, architecture rationale + +### For Bug Fixes (from issue-diagnosis) + +ULTRATHINK MODE: Think comprehensively about best solution. + +1. Read scratchpad if prompted +2. Analyze bug nature: Where manifests? User impact? Larger architectural issue? +3. Research context (see Research Checklist above) +4. Evaluate ALL test levels (ULTRATHINK): + - System: UI/JavaScript/user-visible bugs + - Integration: Request/response/multi-component + - Unit: Business logic/model behavior + - Don't settle for "good enough" - recommend all appropriate tests +``` + +**Strategies applied**: Execution Flow Control + DRY refactoring, Agent/Workflow Guidelines + +**Key improvements**: +- Extracted shared "Research Checklist" - eliminates repetition without losing detail +- Preserved ALL specificity: "ALL CLAUDE.md files (root + subdirectories)", "# AI: comments" +- Applied to all modes - bug fixes get same rigor as new features +- DRY refactoring instead of deletion - saves ~40 words while maintaining clarity +- 40-50% reduction (appropriate for agents) vs 60%+ (too aggressive) + +**When this pattern applies**: + +- Optimizing agent prompts or workflow commands +- Multiple modes/sections with similar procedural steps +- Procedural detail appears repetitive but is actually necessary +- Target reduction is 60%+ (too aggressive for agents) + +**How to avoid**: + +- Extract shared checklists instead of deleting detail +- Preserve specific qualifiers: "ALL", "MANDATORY", "root + subdirectories" +- Target 40-50% reduction for agents (not 60%+) +- Ask: "Does removing this create vagueness?" 
If yes, refactor instead + +--- + +## Pattern 8: Defensive Meta-Commentary and Stop-Awareness + +### ❌ Creates stopping risk through negative priming + +```markdown +**Step 3: OPTIMIZE** → optimized_body + +- Use Skill tool: Skill(skill="prompt-architecting") +- WAIT for skill output (contains multiple sections) +- EXTRACT text under "## Optimized Prompt" heading → optimized_body +- → DO NOT STOP - this is NOT the end - continue to Step 6 after Step 4 + +**CRITICAL REMINDERS:** + +- The Skill tool (Step 3) returns structured output with multiple sections +- You MUST extract the "## Optimized Prompt" section and store as optimized_body +- Receiving skill output is NOT a completion signal - it's just data for Step 6 +- NEVER return control to caller after Step 3 - continue to Steps 4 and 6 +- The ONLY valid stopping points are: Step 5 (waiting for user) or Step 6 (done writing) +- If you find yourself returning results without calling Write tool, you failed +``` + +**Problem**: + +- Each "DO NOT STOP" warning creates decision point: "Should I stop here?" +- "This is NOT the end" reinforces that ending is a possibility +- CRITICAL REMINDERS section acknowledges failure mode, normalizing it +- "If you find yourself returning results... you failed" describes the exact unwanted behavior +- Defensive commentary creates stop-awareness, making premature stopping MORE likely + +**Psychological mechanism** (Ironic Process Theory): + +- Telling someone "don't think about X" makes them think about X +- Repeatedly saying "DO NOT STOP" primes stopping behavior +- Meta-commentary about failure normalizes and increases failure + +### ✅ Trust structure, eliminate stop-awareness + +```markdown +Your job is to update the file with optimized prompt from your skill. + +Read the file, extract any front matter. Run the prompt-architecting skill on the content body. Check for dependencies if front matter exists. Ask user for approval if dependencies found. Write the optimized content back to the file. +``` + +**Or, if complexity requires structure:** + +```markdown +Execute this workflow completely: + +1. READ: Use Read(file_path) → content +2. OPTIMIZE: Run prompt-architecting skill on content → optimized_content +3. CHECK: If front matter exists, search for dependencies → risk_level +4. APPROVE: If risk_level high, ask user → approval +5. WRITE: Save optimized_content to file → done + +Task completes at step 5. 
+``` + +**Strategies applied**: Natural Language Reframing (first example) or moderate EFC without defensive warnings (second example) + +**Key improvements**: + +- No "DO NOT STOP" warnings anywhere +- No CRITICAL REMINDERS section discussing failure modes +- No meta-commentary about what might go wrong +- Structure implies continuation naturally +- Task framing makes completion criteria obvious + +**When this pattern applies**: + +- Any workflow with skill/agent invocations +- Multi-step processes where premature stopping is a risk +- Prompts that have been "fixed" by adding more warnings + +**How to avoid**: + +- Frame positively: "Continue to X" instead of "DO NOT STOP" +- Remove all meta-commentary about failures +- If you need warnings, the structure is wrong - simplify instead +- Trust natural language and clear structure over defensive reminders +- Test: If prompt mentions stopping/failure, you're creating the problem diff --git a/skills/prompt-architecting/references/ADVANCED-EXAMPLES.md b/skills/prompt-architecting/references/ADVANCED-EXAMPLES.md new file mode 100644 index 0000000..1a25877 --- /dev/null +++ b/skills/prompt-architecting/references/ADVANCED-EXAMPLES.md @@ -0,0 +1,188 @@ +# Advanced Optimization Examples: Workflows & Agents + +Advanced case studies showing prompt-architecting for multi-step workflows and agent prompts. + +## Example 5: Multi-Step Workflow Command + +### Original Request + +``` +User: "Create a command that optimizes prompt files by using the prompt-architecting skill, checks for front matter dependencies, and writes the optimized version back" +``` + +### Without Optimization + +Claude generates: + +``` +You are optimizing a Claude Code prompt file. Follow this workflow exactly: + +## Step 1: Read File +Read the file at the path provided by the user. + +## Step 2: Parse Structure +Detect YAML front matter and separate from content body. + +## Step 3: Optimize Content +Use the prompt-architecting skill with the content. +Wait for skill to return optimized prompt. + +## Step 4: Analyze Dependencies +Check if description has dependencies. + +## Step 5: Present Results +Show optimization results and ask for approval. + +## Step 6: Replace File +Write optimized content back to file. +``` + +**Result**: LLM stops after Step 3 (skill returns result), treating skill output as deliverable. Never proceeds to dependency analysis, approval, or file writing. User receives optimization output but file is never modified. + +### With prompt-architecting + +**Subagent analysis:** + +- Task: Multi-step workflow command (6 sequential steps) +- Complexity: High (skill invocation, dependency checking, user approval, file I/O) +- Risk: Premature stopping after Step 3 (async skill call), skipped steps, no execution guarantee +- Bloat risks: Verbose step descriptions, missing flow control +- Optimal strategies: **Execution Flow Control** (critical), Decomposition, Directive Hierarchy, Constraint-Based + +**Optimized prompt:** + +``` +Execute this 6-step workflow completely. Each step produces input for the next: + +WORKFLOW: +1. READ: Use Read tool on $1 → content +2. PARSE: Extract front matter + body from content → {front_matter, body} +3. OPTIMIZE: Run your prompt-architecting skill to optimize body → optimized_body +4. ANALYZE: Use Grep to check dependencies in front_matter → risk_level +5. PRESENT: Show optimized_body + risk_level → STOP, WAIT for user approval +6. 
WRITE: If approved, use Write tool to save optimized_body + front_matter to $1 → done + +EXECUTION RULES: +- Complete steps 1-5 without stopping +- STOP only at step 5 (user approval required) +- Proceed to step 6 only if user approves (yes/1/2) +- Task incomplete until step 6 completes or user cancels + +Each step's output feeds the next. Do not stop early. +``` + +**Result**: Clear execution mandate, explicit data flow between steps, guaranteed completion through step 5, proper stop at approval gate, file gets written after approval. + +**Why Execution Flow Control was critical:** + +1. **Prevents premature stopping**: Mandate ensures execution continues after Step 3 skill invocation +2. **Explicit dependencies**: "Step X → output Y" shows each step consumes previous output +3. **Clear terminal states**: "STOP only at step 5" prevents arbitrary stopping +4. **Completion guarantee**: "Task incomplete until..." creates obligation to finish + +**Lessons from this example:** + +- Numbered steps alone don't guarantee sequential execution +- Skill invocations are natural stopping points - must mandate continuation +- Multi-step workflows need opening mandate + terminal state specification +- Data flow notation (→) makes dependencies explicit and prevents skipping + +--- + +## Example 6: Agent Prompt with Multiple Modes + +### Original Request + +``` +User: "Optimize this analyst agent prompt that has ~1,450 words with sections for new features, bug fixes, and gem verification" +``` + +### Without Agent/Workflow Guidelines + +Claude generates: + +- 1,450w → 560w (61% reduction - too aggressive) +- Removes procedural detail to hit 60% target +- Creates vague instructions: "Read relevant CLAUDE.md files" (which ones?) +- Pattern-finding detail only in "New Features", removed from "Bug Fixes" +- Agent doesn't know if bug fixes need same rigor as features +- Lost specificity: "ALL files (root + subdirectories)", "# AI: comments" + +**Result**: Concise but vague. Agent has unclear guidance for bug fix mode. + +### With Agent/Workflow Guidelines + +**Subagent analysis:** + +- Task: Optimize agent prompt with multiple modes +- Complexity: High (1,450 words, 3 modes: new features, bug fixes, gems) +- Risk: Over-optimization removes necessary procedural detail +- Bloat risks: Verbose YAML examples (90+ lines), Rails conventions, repetitive pattern-finding +- Optimal strategies: **Agent/Workflow Guidelines** (preserve procedural detail), DRY refactoring, Progressive Disclosure, Constraint-Based + +**Optimized prompt:** + +``` +You are a requirements and architecture analyst. Tools: Read, Grep, Glob (read-only). + +Follow output structure from @.claude/protocols/agent-output-protocol.md + +## Core Philosophy + +ULTRATHINK: Prioritize correctness over speed. AI implements fast regardless of approach. Strategic decisions matter most. + +## Research Checklist + +For ALL modes, check: +- ALL CLAUDE.md files (root + subdirectories) +- Similar implementations in codebase +- # AI: comments in existing code +- Test structure +- **For gem-backed features**: Gem capabilities before custom code + +## Process + +### For New Features + +1. Read scratchpad if prompted: "Read scratchpad for context: [path]" +2. Understand requirement (ULTRATHINK): Core request, acceptance criteria, constraints +3. Find patterns (see Research Checklist above) +4. Determine approach: + - Existing pattern → point to specific files + - New pattern → recommend architecture fitting codebase style +5. 
Synthesize: Which files, patterns to follow, architecture rationale + +### For Bug Fixes (from issue-diagnosis) + +ULTRATHINK MODE: Think comprehensively about best solution. + +1. Read scratchpad if prompted +2. Analyze bug nature: Where manifests? User impact? Larger architectural issue? +3. Research context (see Research Checklist above) +4. Evaluate ALL test levels (ULTRATHINK): + - System: UI/JavaScript/user-visible bugs + - Integration: Request/response/multi-component + - Unit: Business logic/model behavior + - Don't settle for "good enough" - recommend all appropriate tests +... + +[Verbose YAML examples moved to references/analyst-examples.md] +``` + +**Result**: 1,450w → 650w (55% reduction - appropriate for agents). Preserved procedural detail while eliminating repetition via DRY refactoring. + +**Why Agent/Workflow Guidelines were critical:** + +1. **Recognized agent context**: Applied 40-50% target instead of 60%+ +2. **DRY refactoring over deletion**: Extracted "Research Checklist" - eliminated repetition without losing specificity +3. **Preserved procedural detail**: "ALL CLAUDE.md files (root + subdirectories)" not "relevant files" +4. **All modes get rigor**: Bug fixes reference same Research Checklist as new features +5. **Aggressive optimization where appropriate**: 90-line YAML examples → references/ + +**Lessons from this example:** + +- Agent prompts need execution detail - different standard than docs +- DRY refactoring beats deletion - extract shared sections instead of removing +- Target 40-50% for agents (not 60%+) - they need procedural clarity +- Preserve specificity: "ALL", "MANDATORY", "root + subdirectories" +- Recognize when detail is necessary vs when it's bloat diff --git a/skills/prompt-architecting/references/ANTI-PATTERNS.md b/skills/prompt-architecting/references/ANTI-PATTERNS.md new file mode 100644 index 0000000..6361883 --- /dev/null +++ b/skills/prompt-architecting/references/ANTI-PATTERNS.md @@ -0,0 +1,194 @@ +# Anti-Patterns: Verbose → Concise + +Real examples of prompt bloat and their optimized versions. + +## Pattern 1: Over-Elaborated Context + +### ❌ Verbose +``` +I need you to create comprehensive documentation that covers all aspects of +user authentication in our system. This should include detailed explanations +of how the system works, what technologies we're using, best practices for +implementation, common pitfalls to avoid, security considerations, edge cases, +error handling strategies, and example code showing different use cases. Make +sure it's thorough and covers everything a developer might need to know. +``` + +### ✅ Optimized +``` +Write auth docs. Structure: [Setup - 100w] [Usage - 150w] [Error handling - 100w] +[One example - code only]. MAX 400 words total. Audience: Mid-level dev familiar +with JWT. DO NOT: Include security theory, framework comparisons, or "best practices" +sections. +``` + +**Strategies applied**: Constraint-Based, Output Formatting, Negative Prompting + +--- + +## Pattern 2: Defensive Over-Coverage + +### ❌ Verbose +``` +Create a skill for processing CSV files. It should handle reading CSV files, +parsing them, validating the data, transforming it, handling errors, dealing +with different encodings, managing large files, streaming if needed, and +outputting to various formats. Include comprehensive documentation, examples +for each use case, edge case handling, performance considerations, and detailed +setup instructions. +``` + +### ✅ Optimized +``` +Create CSV processing skill. 
SKILL.md (300w max): [Problem - 50w] [Workflow: +read → parse → transform → output - 100w] [One example - code only - 100w]. +Move to references/: encoding edge cases, streaming for large files, pandas vs +csv module comparison. +``` + +**Strategies applied**: Constraint-Based, Progressive Disclosure, Template-Based + +--- + +## Pattern 3: Meta-Discussion Instead of Instructions + +### ❌ Verbose +``` +I think we should probably create some documentation for the API endpoints. +It would be good to explain what each endpoint does, what parameters they take, +what they return, and maybe some examples of how to use them. We should also +probably mention error codes and authentication requirements. Does that sound good? +``` + +### ✅ Optimized +``` +Document REST API endpoints. Format per endpoint: [Method + Path] [Purpose - 1 +sentence] [Parameters - table] [Response - JSON schema] [Auth required - yes/no] +[Error codes - list only]. One example request/response per endpoint. +``` + +**Strategies applied**: Output Formatting, Template-Based, Density Optimization + +--- + +## Pattern 4: Unclear Success Criteria + +### ❌ Verbose +``` +Help me understand how the deployment process works in this codebase and create +some documentation about it so other developers can figure it out too. +``` + +### ✅ Optimized +``` +Document deployment process. Target: New dev can deploy to staging in <10 min. +Structure: [Prerequisites - bullet list] [Steps - numbered, 1 sentence each] +[Verification - how to confirm success] [Rollback - 2 sentences]. MAX 300 words. +``` + +**Strategies applied**: Audience-Targeted, Template-Based, Constraint-Based + +--- + +## Pattern 5: Solution Seeking Instead of Problem Stating + +### ❌ Verbose +``` +We need a really robust, production-grade, enterprise-level authentication +system with all the features and best practices implemented following industry +standards and security guidelines... +``` + +### ✅ Optimized +``` +Implement auth. Requirements: JWT tokens, refresh flow, role-based access. +MUST: Secure token storage, HTTPS only. SHOULD: Remember me option. OUT OF SCOPE: +OAuth, SSO, 2FA. Match existing pattern in: src/auth/session.ts +``` + +**Strategies applied**: Directive Hierarchy, Negative Prompting, Anchoring + +--- + +## Common Bloat Indicators + +**Red flags in prompts:** +- "comprehensive", "robust", "enterprise-grade", "production-ready" +- "all aspects", "everything", "fully cover" +- "best practices", "industry standards" +- Multiple questions without priority +- Hypothetical edge cases ("what if...", "we might need...") + +**Optimization checklist:** +1. Remove adjectives (comprehensive, robust, etc.) +2. Set word/line limits +3. Specify structure explicitly +4. Use DO NOT for known over-generation +5. Define success criteria concretely +6. Defer details to references where possible + +**Decision tree:** + +- Adjective-heavy? → Constraint-Based +- No structure? → Template-Based or Output Formatting +- Known bloat patterns? → Negative Prompting +- 1-2 very simple steps (sequence obvious)? → Natural language acceptable +- 3+ steps where sequence matters? → Enumeration helps (research: improves thoroughness and reduces ambiguity) +- Complex task with branching? → Execution Flow Control (appropriate level) +- Numbered steps but overly formal? → Simplify notation, keep enumeration for clarity +- Agent/workflow with repeated procedural steps? → DRY refactoring (extract shared checklist) +- Procedural detail appears as bloat? 
→ Preserve specificity, target 40-50% reduction
- Need examples? → Few-Shot or Anchoring

---

## Pattern 9: Destroying Callable Entity Triggers

### ❌ Over-optimized
```
# Before (complete)
description: Reviews code for security, bugs, performance when quality assessment needed. When user says "review this code", "check for bugs", "analyze security".

# Over-optimized (WRONG - lost triggers)
description: Code review assistant
```

### ✅ Correct
```
# Minimal acceptable optimization
description: Reviews code for security, bugs, performance when quality assessment needed. When user says "review code", "check bugs", "analyze security".
```

**Why**: Trigger phrases are functional pattern-matching signals for model-invocation, not decorative examples. Preserve both contextual "when" AND literal trigger phrases.

**See OPTIMIZATION-SAFETY-GUIDE.md Part 4 for callable entity preservation rules.**

---

## Pattern 10: Over-Technical Notation Creating Cognitive Load

### ❌ Over-technical
```
Execute this workflow:
1. READ: Load file → content
2. PARSE: Extract(content) → {fm, body}
3. OPTIMIZE: Run skill(body) → optimized
   a. Pass parameters: {content: body, mode: "optimize"}
   b. Extract result → optimized
   c. DO NOT STOP - continue to step 4
4. WRITE: Save(optimized) → done

EXECUTION RULES:
- DO NOT STOP at step 3 when skill returns
```

### ✅ Organized natural
```
Your job: Optimize the file and write it back.

Read the file and parse structure. Optimize the content using the skill. Write the result back. The file edit is the deliverable.
```

**Why**: Technical notation (CAPS + → + variables + function syntax + warnings) increases cognitive load and creates stopping risk. Organized natural language with appropriate structure is clearer for LLM audiences.

**See OPTIMIZATION-SAFETY-GUIDE.md Part 3 and Part 6 for detailed analysis.**

diff --git a/skills/prompt-architecting/references/EXAMPLES.md b/skills/prompt-architecting/references/EXAMPLES.md
new file mode 100644
index 0000000..615bbc2
--- /dev/null
+++ b/skills/prompt-architecting/references/EXAMPLES.md
@@ -0,0 +1,219 @@
# Real-World Optimization Examples

Case studies showing prompt-architecting in action.

## Example 1: Skill Generation

### Original Request

```
User: "Create a skill for processing PDFs"
```

### Without Optimization

Claude generates:

- 1,200-line SKILL.md with 15 subsections
- Complete PDF specification history
- 8 different extraction libraries compared
- 20 edge cases documented inline
- Security considerations essay
- Performance tuning guide
- Complete API reference

### With prompt-architecting

**Subagent analysis:**

- Task: Skill creation
- Complexity: Medium (PDF parsing is known domain)
- Risk: Over-documentation of libraries, edge cases
- Optimal strategies: Constraint-Based, Template-Based, Progressive Disclosure

**Optimized prompt:**

```
Create PDF processing skill.
SKILL.md MAX 300 words: +- When to use (50w) +- Core workflow: upload → extract text/images → output (100w) +- One example showing pypdf2 usage (code only, 100w) +- Move to references/: library comparison (pypdf2 vs pdfplumber), OCR for scanned PDFs, encoding issues + +Match this style: {paste concise skill example from building-skills} +``` + +**Result:** 280-word SKILL.md, 2 reference files, actionable and maintainable + +--- + +## Example 2: Documentation Generation + +### Original Request + +``` +User: "Document the API endpoints in src/api/" +``` + +### Without Optimization + +Claude writes: + +- Comprehensive REST API guide +- HTTP protocol primer +- Authentication deep-dive +- Rate limiting theory +- Pagination best practices +- Error handling philosophy +- 40 pages of markdown + +### With prompt-architecting + +**Subagent analysis:** + +- Task: API documentation +- Complexity: Low (structured data) +- Risk: Theory instead of reference +- Optimal strategies: Output Formatting, Template-Based, Negative Prompting + +**Optimized prompt:** + +``` +Document API endpoints. Format per endpoint: +## [METHOD] /path +Purpose: {1 sentence} +Auth: {required|optional|none} +Params: {table: name, type, required, description} +Response: {JSON schema only} +Errors: {codes list} +Example: {curl + response} + +DO NOT: Include HTTP theory, auth implementation details, or pagination strategy essays. +Target: API reference, not guide. +``` + +**Result:** Clean reference docs, 8 endpoints in 4 pages, instantly usable + +--- + +## Example 3: Plan Creation + +### Original Request + +``` +User: "Plan the implementation of user notification system" +``` + +### Without Optimization + +Claude creates: + +- 15-page plan with every possible notification type +- Complete microservices architecture +- Email, SMS, push, in-app, webhook notifications +- Queue theory and message broker comparison +- Scaling strategy for 1M users +- 30 implementation tasks spanning 6 months + +### With prompt-architecting + +**Subagent analysis:** + +- Task: Implementation plan +- Complexity: High (but scope unclear) +- Risk: Over-engineering, scope creep +- Optimal strategies: Decomposition, Directive Hierarchy, Constraint-Based + +**Optimized prompt:** + +``` +Create notification system plan. Scope: Email + in-app only (MVP). + +MUST cover: +- Data model (users, notifications, preferences) +- Notification creation flow +- Delivery mechanism (async job) +- User preferences UI + +SHOULD cover: +- Read/unread state +- Basic templates + +OUT OF SCOPE: SMS, push, webhooks, scaling >10K users + +Structure: [Problem] [Phase 1 tasks] [Phase 2 tasks] [Success criteria] +Target: 2-week implementation for 2 devs +``` + +**Result:** Focused 4-page plan, 12 actionable tasks, clear scope, deliverable + +--- + +## Example 4: Code Review Request + +### Original Request + +``` +User: "Review this code and tell me what you think" +``` + +### Without Optimization + +Claude provides: + +- Line-by-line analysis +- Code style commentary +- Design pattern suggestions +- Performance speculation +- Security theoretical concerns +- Refactoring proposal (3 alternatives) +- 800 lines of feedback + +### With prompt-architecting + +**Subagent analysis:** + +- Task: Code review (but criteria unknown) +- Complexity: Medium +- Risk: Unfocused feedback overload +- Optimal strategies: Constraint-Based, Audience-Targeted, Decomposition + +**Optimized prompt:** + +``` +Review code for: (1) Bugs/errors, (2) Security issues, (3) Performance bottlenecks. 
+ +Format: +## Issues Found +- [SEVERITY] Location: Description + fix suggestion + +## Summary +- {Count by severity} +- {Top priority item} + +DO NOT: Comment on style, suggest refactorings, or discuss alternative patterns unless directly related to bugs/security/performance. + +Audience: Code works, need to ship, focused review only. +``` + +**Result:** 15-line review, 2 bugs found, 1 security fix, actionable + +--- + +**For advanced workflow and agent optimization examples, see ADVANCED-EXAMPLES.md** + +--- + +## Lessons Learned + +**Unspecified scope = maximal scope** (Examples 1-3): Without constraints, Claude assumes comprehensive coverage. Fix: Set MAX length and explicit boundaries. + +**Complexity triggers research mode** (Examples 1, 2): Unfamiliar topics trigger defensive over-documentation. Fix: Progressive Disclosure - overview now, details in references. + +**Ambiguous success = everything** (Example 3): "Help me understand" lacks definition of done. Fix: Define success concretely ("New dev deploys in <10min"). + +**Implicit = inclusion** (Examples 2, 4): Unexcluded edge cases get included. Fix: Negative Prompting to exclude known bloat. + +**Workflow patterns** (see ADVANCED-EXAMPLES.md): Numbered steps don't mandate completion after async operations. Fix: Execution Flow Control. + +**Meta-lesson**: Every optimization uses 2-3 strategies, never just one. Pair Constraint-Based with structure (Template/Format) or exclusion (Negative). For workflows with dependencies, Execution Flow Control is mandatory. diff --git a/skills/prompt-architecting/references/OPTIMIZATION-SAFETY-GUIDE.md b/skills/prompt-architecting/references/OPTIMIZATION-SAFETY-GUIDE.md new file mode 100644 index 0000000..77c911d --- /dev/null +++ b/skills/prompt-architecting/references/OPTIMIZATION-SAFETY-GUIDE.md @@ -0,0 +1,2325 @@ +# Comprehensive Optimization Spec v2: The Complete Guide + +## Executive Summary + +**Core Insight**: Optimization is bidirectional, not just compression. + +Based on empirical testing, we discovered: + +1. **Over-optimization is real**: You can make prompts worse by over-optimizing +2. **Three dimensions exist**: Verbosity (bloat), Structure (complexity mismatch), Notation (technical ceremony) +3. **Bidirectional optimization**: Both "compress bloat" AND "simplify over-technical" are valid optimizations +4. **Cognitive load is the metric**: Does it make the prompt easier or harder to understand? +5. **LLMs prefer natural language**: Technical notation often creates more problems than it solves + +This guide ensures the prompt-architecting skill is a **trustworthy asset** that makes prompts clearer, not just different. + +--- + +## Part 1: The Optimization Philosophy + +### What Is Optimization? + +**Wrong definition**: Making prompts shorter, more structured, or more technical. + +**Right definition**: **Reducing cognitive load while preserving intent.** + +Optimization succeeds when: + +- The prompt is **clearer** to understand +- The LLM can **execute more reliably** +- Intent and requirements are **fully preserved** +- Appropriate **complexity level** for the task + +### The Bidirectional Model + +Optimization operates on **three dimensions**: + +#### Dimension 1: Verbosity + +``` +BLOATED ←→ CONCISE +"comprehensive guide covering all aspects..." 
←→ "MAX 300w: [Setup][Usage][Errors]" +``` + +#### Dimension 2: Structure + +``` +WRONG COMPLEXITY ←→ RIGHT COMPLEXITY +Simple task with formal workflow ←→ Natural language +Complex task with vague prose ←→ Appropriate structure +``` + +#### Dimension 3: Notation (NEW INSIGHT) + +``` +OVER-TECHNICAL ←→ ORGANIZED NATURAL +CAPS + → + variables + warnings ←→ Clear conversational structure +``` + +### The Cognitive Load Test + +**Central principle**: Does the optimization reduce or increase cognitive load? + +**For each change, ask:** + +1. Is it easier to understand? +2. Is it easier for LLM to execute? +3. Does it preserve all requirements? +4. Is the complexity match appropriate? + +**If all YES → good optimization** +**If any NO → harmful optimization** + +### Why LLMs Prefer Natural Language + +LLMs are **language models**, not programming language parsers. + +**They excel at:** + +- Conversational instructions +- Natural connectors ("then", "if", "when", "while") +- Outcome-focused descriptions +- Organized prose + +**They struggle with:** + +- Heavy symbolic notation (CAPS, →, variables) +- Function call syntax when unnecessary +- Over-decomposition (sub-steps a/b/c) +- Defensive meta-instructions ("DO NOT STOP") + +**Exception**: Technical notation helps when it reduces ambiguity MORE than natural language would. Examples: API specs, data schemas, mathematical notation. + +**Default bias**: When cognitive load is equal, prefer natural language for LLM audiences. + +--- + +## Part 2: When NOT to Optimize + +### Recognition Criteria + +**DO NOT optimize if ANY of these are true:** + +#### 1. Already Optimal Patterns + +**Indicators:** + +- Deliverable-first framing present ("Your job:", "Goal:") +- Natural language flow with clear connectors +- Inline completion criteria +- Appropriate complexity match (simple task = simple language, complex task = structure) +- No bloat indicators +- Word count appropriate for complexity +- Notation level appropriate (natural unless precision required) + +**Example:** + +```markdown +Your job: Optimize the file at $1 and write it back. + +Read the file, check for frontmatter. If frontmatter exists, search for dependencies and ask how to proceed. Optimize the content using provided guidance. Write the result back. The file edit is the deliverable. +``` + +**Why not optimize**: Already concise, clear, outcome-focused, appropriately structured, uses natural language. Optimization would likely make it worse. + +#### 2. Natural Language at Right Complexity Level + +**Indicators:** + +- 1-2 step task in single sentence +- Sequence obvious and linear +- No approval gates, branching, or terminal states +- Enumeration would add no clarity +- Natural connectors work perfectly + +**Example:** + +```markdown +Read the configuration file, update the version number, and write it back. +``` + +**Why not optimize**: Adding numbered steps or formal structure would create unnecessary complexity. + +#### 3. Callable Entity Descriptions + +**Indicators:** + +- Content is a `description` field for front matter +- Contains both contextual "when" AND trigger phrases +- Already follows additive format: "[What] when [context]. When user says '[triggers]'" +- Delegation signals present if needed (PROACTIVELY, MUST) + +**Example:** + +```markdown +description: Explores codebases when search spans multiple locations or naming is ambiguous. When user says "how does X work?", "find files matching", or when searches need 3+ rounds. 
+``` + +**Why not optimize**: Trigger phrases are functional pattern-matching signals. Over-optimization removes invocation clarity. + +#### 4. Agent/Workflow Prompts at Appropriate Detail Level + +**Indicators:** + +- Procedural detail serves execution clarity +- Word count is 40-50% of original bloated version +- Specific qualifiers present ("ALL files", "root + subdirectories", "MANDATORY") +- No defensive meta-commentary +- Shared checklists extracted (DRY refactoring applied) + +**Example:** + +```markdown +## Research Checklist + +For ALL modes: + +- ALL CLAUDE.md files (root + subdirectories) +- Similar implementations in codebase +- # AI: comments in existing code +``` + +**Why not optimize**: Already at correct detail level for agent guidance. Further reduction creates vagueness. + +#### 5. Technical Notation Serving Clear Purpose + +**Indicators:** + +- Notation is standard convention (API specs, data schemas, math) +- Precision prevents ambiguity that natural language couldn't +- Audience expects technical format +- Visual structure aids comprehension (tables, diagrams) +- No excessive ceremony (CAPS + → + variables + warnings) + +**Example:** + +```markdown +## [METHOD] /path + +Purpose: {1 sentence} +Params: {table: name, type, required, description} +Response: {JSON schema only} +``` + +**Why not optimize**: Technical format is appropriate for API reference. Natural language would be less clear. + +#### 6. User Explicitly Requests Preservation + +**Indicators:** + +- User says "keep the style", "don't change the tone", "just fix X" +- User provides context that current form is intentional +- Content is from production system with proven effectiveness + +**Why not optimize**: User intent overrides optimization potential. + +### Decision Tree: Should I Optimize? + +``` +START: Analyze content + +Already optimal pattern? +├─ YES → NO OPTIMIZATION +└─ NO → Continue + +Does it have bloat indicators? +(comprehensive, robust, adjective-heavy, >2x ideal length, unnecessary repetition) +├─ YES → Dimension 1 problem (verbosity) → Continue to complexity check +└─ NO → Check other dimensions + +Structure mismatch? +(simple task + formal structure OR complex task + vague prose) +├─ YES → Dimension 2 problem (structure) → Continue +└─ NO → Check notation + +Over-technical notation? +(CAPS + → + variables + warnings + function syntax + sub-step enumeration) +├─ YES → Dimension 3 problem (notation) → Continue to solution +└─ NO → Check if callable entity + +Is it callable entity description? +├─ YES → Check trigger preservation +│ ├─ Has context + triggers? → MINOR optimization only +│ └─ Missing layer? → ADD missing layer +└─ NO → Determine which dimensions need optimization + +FOR EACH DIMENSION NEEDING WORK: +1. Calculate complexity score +2. Select 1-3 complementary strategies +3. Apply transformations +4. Verify cognitive load reduced +``` + +--- + +## Part 3: Three Dimensions of Optimization + +### Dimension 1: Verbosity (Bloat → Concise) + +#### Detection Criteria + +**Bloat indicators:** + +- Adjective-heavy: "comprehensive", "robust", "enterprise-grade", "production-ready" +- Scope inflation: "all aspects", "everything", "fully cover" +- Vague quality statements: "best practices", "industry standards" +- Meta-discussion instead of instructions: "I think we should probably..." 
- Defensive over-coverage: listing every possible edge case
- Embedded reasoning: explaining why instructions matter
- Conversational filler: "and stuff", "basically", "you know"
- Repetition: saying same thing multiple ways

**Example:**

```markdown
# BLOATED

I need you to create comprehensive documentation that covers all aspects of user authentication in our system. This should include detailed explanations of how the system works, what technologies we're using, best practices for implementation, common pitfalls to avoid, security considerations, edge cases, error handling strategies, and example code showing different use cases. Make sure it's thorough and covers everything a developer might need to know.
```

#### Transformation Strategies

1. **Remove adjectives and quality statements**

   - "comprehensive guide" → "guide"
   - "robust, production-ready" → (remove entirely)

2. **Set hard boundaries**

   - Add MAX word counts
   - Use template structure
   - Apply scope limits

3. **Exclude known bloat patterns**

   - "DO NOT: Include HTTP theory, framework comparisons"

4. **Define success concretely**
   - "Target: New dev can deploy in <10 min"

**Optimized:**

```markdown
Write auth docs. Structure: [Setup - 100w] [Usage - 150w] [Error handling - 100w] [One example - code only]. MAX 400 words total. Audience: Mid-level dev familiar with JWT. DO NOT: Include security theory, framework comparisons, or "best practices" sections.
```

**Strategies applied**: Constraint-Based, Output Formatting, Negative Prompting

**Reduction**: ~40% (about 65 words → 40 words)

### Dimension 2: Structure (Wrong Complexity → Right Complexity)

#### Detection Criteria

**Too much structure for simplicity:**

- 1-2 trivial steps with numbered workflow
- Formal EXECUTION RULES for obvious sequence
- CAPS labels for simple actions
- Multiple sections for straightforward task

**Too little structure for complexity:**

- 5+ sequential steps as prose paragraph
- Multiple approval gates without clear flow
- Terminal states not explicit
- Branching logic buried in narrative

**Complexity score formula:**

- 1-2 steps with obvious sequence? → **-1 point**
- 3-4 steps where sequence matters? → **+1 point**
- 5+ sequential steps? → **+2 points**
- User approval gates (WAIT, AskUserQuestion)? → **+3 points**
- 2+ terminal states (different end conditions)? → **+2 points**
- 3-way+ conditional branching? → **+2 points**
- Simple if/else conditionals only? → **+0 points**
- Skill invocation just for data? → **+0 points**
- Skill invocation affects control flow? → **+1 point**

**Score interpretation:**

- **≤ 0**: Natural language sufficient (over-structure would hurt)
- **1-2**: Light structure helpful (simple numbered steps or bullets)
- **3-4**: Moderate structure needed (organization without heavy formality)
- **≥ 5**: Full formal structure warranted (complex workflow management)

#### Transformation Strategies

**For over-structured (score ≤ 0, but has formal structure):**

```markdown
# TOO MUCH STRUCTURE

Execute this 3-step workflow completely:

1. READ: Load file → content
2. MODIFY: Update field → updated
3. WRITE: Save updated → done

EXECUTION RULES:

- Complete all steps sequentially
- Stop only at completion

# RIGHT AMOUNT (natural language)

Read the file, update the field, and write it back.
+``` + +**Strategy**: Natural Language Reframing + +**For under-structured (score ≥ 3, but vague prose):** + +```markdown +# TOO LITTLE STRUCTURE + +Read the file, check dependencies, ask the user, optimize based on choice, and write back, making sure to handle all the different cases appropriately. + +# RIGHT AMOUNT (organized structure) + +Goal: Optimize file while handling dependencies + +Tools: prompt-architecting, AskUserQuestion, grep + +Workflow: + +1. Read file and parse structure +2. Check dependencies if frontmatter exists +3. Present options and gather user preference +4. Optimize content based on selection +5. Write result with appropriate handling + +If user cancels at step 3, stop here. Otherwise complete all steps. +``` + +**Strategy**: Execution Flow Control (appropriate level for score) + +### Dimension 3: Notation (Over-Technical → Organized Natural) + +#### Detection Criteria + +**Over-technical indicators:** + +1. **CAPS labels as action markers** + + - `CHECK:`, `PARSE:`, `DETECT:`, `VALIDATE:`, `ANALYZE:` + - Makes steps feel like function names + - Creates discrete units that feel "complete" when done + +2. **→ notation for data flow** + + - `Read file → content`, `Parse input → requirement_data` + - Mimics programming assignment + - Makes steps feel like functions with return values + - Creates stopping points when "variables are populated" + +3. **Variable naming conventions** + + - `work_file_status`, `requirement_data`, `pattern_found`, `analyst_output` + - Treats prompt like code + - LLM must mentally track variable state + +4. **Function call syntax** + + - `skill({params})`, `Pass parameters: {intent: "detection"}` + - Over-specifies how to invoke tools + - LLM already knows tool invocation syntax + +5. **Sub-step enumeration** + + - `a. Pass parameters b. Extract results c. DO NOT narrate` + - Over-decomposition creates multiple boundaries + - Prose would be clearer + +6. **Defensive meta-instructions** + - "DO NOT narrate", "continue immediately to Step 4" + - Creates stop-awareness (Ironic Process Theory) + - Signals that stopping is a possibility + +**Count indicators**: If 3+ present → over-technical notation detected + +**Cognitive load test**: Does the notation make it easier or harder to understand? + +- Easier → Keep it (justified) +- Harder → Simplify it (organized natural language better) +- Same → Prefer natural (lower cognitive load) + +#### When Technical Notation Helps vs. 
Hurts + +**✅ Technical notation HELPS when:** + +- **Precision prevents ambiguity**: API specs need exact format +- **Standard convention**: JSON schema, regex patterns, mathematical notation +- **Visual structure essential**: Tables, tree diagrams (not prose) +- **Audience expects technical**: Code generation, data transformation specs +- **Natural language would be vaguer**: Template structures, format requirements + +**❌ Technical notation HURTS when:** + +- **Creates execution risk**: CAPS + → + variables = stopping points +- **Adds cognitive load**: Must "parse" syntax instead of reading +- **Over-specifies obvious things**: "Pass parameters" when that's how tools work +- **Mimics programming unnecessarily**: Variable names for implicit data flow +- **Audience is LLM**: Language models understand conversation better than symbols +- **Defensive warnings needed**: If notation requires warnings, it's wrong + +#### Transformation Strategy + +**Pattern**: Keep appropriate structure, remove ceremonial notation + +**Example transformation (complex workflow):** + +**BEFORE (over-technical, high stopping risk):** + +```markdown +Execute this 8-step workflow completely. Each step produces input for the next: + +1. CHECK: Verify no existing work file → work_file_status + + - Use Bash `git branch --show-current` → branch_name + - Check if `.claude/work/files/{branch_name}.yml` exists + - If exists → STOP and tell user: "Work file already exists" + - If not exists → continue to Step 2 + +2. PARSE: Extract requirement from $ARGUMENTS → requirement_data + + - If starts with "#": Use Bash `gh issue view` → {title, body, source} + - Otherwise: Use as-is → {title: $ARGUMENTS, body: $ARGUMENTS} + +3. DETECT: Use your finding-patterns skill with intent=detection: + a. Pass parameters: {intent: "detection", pattern: from requirement_data} + b. Extract detection results → pattern_found (boolean) + c. DO NOT narrate - continue immediately to Step 4 + +4. ANALYZE: Use your analyst subagent: + a. Pass requirement_data + pattern_found to analyst + b. Analyst returns WORK_ITEMS + requirement_summary → analyst_output + c. DO NOT narrate - continue immediately to Step 5 + +[... continues with similar pattern ...] + +EXECUTION RULES: + +- Complete steps 2-6 without stopping +- STOP only at step 6 for user approval +- DO NOT STOP at step 3 or 4 when skills return +``` + +**Analysis:** + +- Complexity score: ~8 (8 steps, approval gate, loops, skill invocations affect flow) → **Full structure IS warranted** +- But notation is over-technical: CAPS + → + variables + function syntax + sub-steps + warnings +- Cognitive load: HIGH (must parse notation while understanding workflow) + +**AFTER (organized natural language, appropriate structure):** + +```markdown +**Your job: Initialize and execute Tier 2 development workflow for the current branch.** + +## Setup and Validation + +Get current branch name and check if work file already exists at `.claude/work/files/{branch_name}.yml`. If it exists, stop and tell user to use `/dev-resume` instead. + +Parse the requirement source: + +- If starts with "#" or "issue": fetch from GitHub (`gh issue view`) +- If ends with ".md": read from file +- Otherwise: use argument as-is + +## Analysis + +Find patterns in the requirement using finding-patterns skill (intent: detection). Note whether pattern was found. + +Send requirement and pattern detection result to analyst subagent. Analyst returns work items organized by layer (backend/frontend/system) and requirement summary. 
+ +## Validation and Approval + +Check completeness: + +- MUST have frontend items if requirement mentions UI keywords (form, page, settings, button, interface, admin area) +- SHOULD have frontend if backend exists +- SHOULD have system tests if frontend exists + +If issues found, present them to user and ask how to proceed (add missing items / cancel / continue as-is). Wait for user response. + +If user cancels, stop here. If approved or no issues, continue. + +## Work File Creation + +Create work file using work-state-management skill (init operation). Pass: + +- Branch name, requirement source, title, summary +- Work items (as adjusted by user) +- Tier: "tier2" +- Pattern found flag and estimated file count + +If error occurs, stop with error message. + +## Implementation Loop + +Execute all work items in sequence: + +**For each item:** + +- Claim next item using work-state-management skill (claim operation) +- If no items remain, stop successfully +- Read work file to get full item details (id, description, tdd flag, category) + +**Route based on tdd flag:** + +- If `tdd: true`: Use tdd-workflow skill for RED-GREEN-REFACTOR cycle +- If `tdd: false`: Orchestrate implementation directly: + - Analyze changes needed and implement + - Use applicable skills/subagents when warranted + - Run affected tests via validator subagent + - Review code via check-quality subagent + - Commit changes with descriptive message + - Mark item complete using work-state-management skill + +Continue until all items done, error occurs, or context limit approaching. + +## Notes + +- Work file must exist before starting implementation loop +- Each item completion updates work file state +- Can resume interrupted work with `/dev-resume` +- Branch name from setup used consistently throughout +``` + +**What changed:** + +- ✅ Removed CAPS labels (CHECK, PARSE, DETECT → natural section headers) +- ✅ Removed → notation and variables (→ work_file_status, → requirement_data) +- ✅ Removed function call syntax (skill({params})) +- ✅ Removed sub-step enumeration (a/b/c → prose) +- ✅ Removed "DO NOT narrate" warnings +- ✅ Removed remote EXECUTION RULES section +- ✅ Added deliverable-first framing ("Your job:") +- ✅ Organized by logical phases (Setup, Analysis, Validation, etc.) +- ✅ Natural language throughout while preserving structure + +**What stayed the same:** + +- ✅ All 8 steps and their requirements +- ✅ Appropriate structure for complexity (still organized, not one paragraph) +- ✅ Execution flow and data dependencies +- ✅ Terminal conditions and branching logic +- ✅ Tool invocations and required parameters + +**Cognitive load test:** + +- Easier to understand? **YES** (read like instructions, not code) +- Easier to execute? **YES** (no stopping risk, clear flow) +- Preserves requirements? **YES** (all steps intact) +- Appropriate complexity? **YES** (structure still warranted for score ~8) + +**Result: SUCCESSFUL OPTIMIZATION via Dimension 3 (notation simplification)** + +--- + +## Part 4: Natural Language vs Technical Strategies + +### Decision Framework + +**For each optimization, ask:** + +1. **What's the complexity score?** (determines structure level needed) +2. **What's the audience?** (LLM vs. human, technical vs. non-technical) +3. **Does technical notation add precision?** (prevents ambiguity vs. just looks technical) +4. **Cognitive load test**: Which version is easier to understand? 
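
The scoring rubric behind question 1 is defined in Part 3. As a concrete illustration, here is a minimal Python sketch of that rubric; the function names, flags, and defaults are assumptions made for this example, not part of the skill:

```python
def complexity_score(steps: int,
                     sequence_matters: bool,
                     approval_gates: int = 0,
                     terminal_states: int = 1,
                     multiway_branching: bool = False,
                     skill_affects_control_flow: bool = False) -> int:
    """Apply the Part 3 complexity rubric to one prompt or workflow."""
    score = 0
    if steps <= 2 and not sequence_matters:
        score -= 1  # trivial linear task with obvious sequence
    elif 3 <= steps <= 4 and sequence_matters:
        score += 1  # short ordered sequence
    elif steps >= 5:
        score += 2  # long sequential workflow
    if approval_gates:
        score += 3  # WAIT / AskUserQuestion gates
    if terminal_states >= 2:
        score += 2  # multiple distinct end conditions
    if multiway_branching:
        score += 2  # 3-way+ conditionals (simple if/else adds nothing)
    if skill_affects_control_flow:
        score += 1  # a skill result changes what happens next
    return score


def structure_level(score: int) -> str:
    """Map a score to the recommended structure level."""
    if score <= 0:
        return "natural language"
    if score <= 2:
        return "light structure"
    if score <= 4:
        return "moderate structure"
    return "full formal structure"


# The Tier 2 workflow from Part 3: 8 steps, an approval gate,
# several end states, and skill calls that affect control flow.
score = complexity_score(8, True, approval_gates=1, terminal_states=3,
                         skill_affects_control_flow=True)
print(score, structure_level(score))  # 8 full formal structure
```

The sketch scores the Part 3 example at 8, in line with the "~8" estimate given there; question 4 still decides the notation style within that structure level.
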
+ +### When Natural Language Wins + +#### Scenario 1: Simple Linear Tasks (Score ≤ 0) + +**Characteristics:** + +- 1-2 steps with obvious sequence +- No branching, approval gates, or terminal states +- Easily expressed with connectors ("then", "if", "when") + +**Example:** + +```markdown +# Technical (unnecessary structure) + +1. Read configuration file +2. Update version number +3. Write file back + +# Natural (better) + +Read the config file, update the version number, and write it back. +``` + +**Why natural wins**: Enumeration adds no clarity. Sequence is trivial. Technical structure adds cognitive load for no benefit. + +#### Scenario 2: Outcome-Focused Jobs (Deliverable-First Framing) + +**Characteristics:** + +- Clear deliverable stated upfront +- Steps are means to outcome, not the goal +- Flow is intuitive once outcome known + +**Example:** + +```markdown +# Technical (process-focused) + +Execute this workflow: + +1. Load data +2. Transform data +3. Generate report + +# Natural (outcome-focused) + +Your job: Generate the report. + +Load the data, transform it as needed, and output the report. +``` + +**Why natural wins**: Deliverable framing + natural flow is clearer than procedural steps. Less cognitive load. + +#### Scenario 3: Context-Rich Environments + +**Characteristics:** + +- Audience already has context +- Task is familiar pattern +- Over-specification would insult intelligence + +**Example:** + +```markdown +# Technical (over-specified) + +1. Invoke Read tool with file_path parameter +2. Parse JSON content into object +3. Validate schema using validator +4. Return validated object + +# Natural (appropriate) + +Read and validate the JSON file. +``` + +**Why natural wins**: Audience knows how to read/validate JSON. Technical detail is noise. Lower cognitive load with natural version. + +#### Scenario 4: LLM Audience (Most Prompts) + +**Characteristics:** + +- Prompt is for LLM execution (not human reading) +- LLM understands conversation better than symbols +- Technical notation doesn't add precision + +**Example:** + +```markdown +# Technical (hurts LLM execution) + +1. READ: Load file → content +2. OPTIMIZE: Transform content → optimized +3. WRITE: Save optimized → done + +# Natural (helps LLM execution) + +Read the file, optimize the content, and write the result back. +``` + +**Why natural wins**: LLMs are language models. They excel at natural instructions, struggle with symbolic notation. Lower cognitive load = better execution. + +### When Technical Strategies Win + +#### Scenario 1: Complex Workflows (Score ≥ 3) + +**Characteristics:** + +- 5+ sequential steps +- Multiple approval gates or terminal states +- Control flow affects outcomes +- Skill/agent invocations affect decisions + +**Example:** + +```markdown +# Natural (insufficient structure) + +Read the file, check dependencies, ask the user, optimize based on choice, and write back. + +# Technical (appropriate structure - but natural notation) + +Goal: Optimize file while handling dependencies + +Tools: prompt-architecting, AskUserQuestion, grep + +Workflow: + +1. Read file and parse structure +2. Check dependencies if frontmatter exists +3. Present options and gather user preference +4. Optimize content based on selection +5. Write result with appropriate handling + +If user cancels at step 3, stop here. Otherwise complete all steps. +``` + +**Why technical wins**: Multiple decision points, conditional flow, terminal states need explicit structure. 
**But note**: Structure is organized, notation is still natural (no CAPS, →, variables). + +#### Scenario 2: Preventing Over-Generation + +**Characteristics:** + +- Task scope is unbounded +- Known tendency to over-elaborate +- Need hard constraints + +**Example:** + +```markdown +# Natural (insufficient constraints) + +Write documentation for the API endpoints. + +# Technical (appropriate constraints) + +Document API endpoints. Format per endpoint: + +## [METHOD] /path + +Purpose: {1 sentence} +Params: {table} +Response: {JSON schema only} +Errors: {codes list} +Example: {curl + response} + +DO NOT: Include HTTP theory, implementation details, or essays. +``` + +**Why technical wins**: Without template structure and exclusions, will generate 40-page guide instead of reference. Technical format serves clear purpose (API spec convention). + +#### Scenario 3: Ambiguous Requirements + +**Characteristics:** + +- Multiple valid interpretations +- Need to constrain scope explicitly +- Success criteria unclear + +**Example:** + +```markdown +# Natural (ambiguous) + +Help me understand the deployment process. + +# Technical (clarified) + +Document deployment process. Target: New dev deploys to staging in <10 min. + +Structure: [Prerequisites - bullets] [Steps - numbered, 1 sentence each] [Verification] [Rollback - 2 sentences] + +MAX 300 words. +``` + +**Why technical wins**: Template + constraint + success criteria clarifies what "help me understand" means. Technical structure reduces ambiguity. + +#### Scenario 4: Standard Technical Conventions + +**Characteristics:** + +- Format has established convention (API specs, data schemas) +- Technical notation is standard +- Natural language would be less precise + +**Example:** + +```markdown +# Natural (loses precision) + +Each endpoint should document the HTTP method, path, purpose, parameters with their types, responses, and errors. + +# Technical (standard convention) + +## [METHOD] /path + +Purpose: {1 sentence} +Params: {name: type, required, description} +Response: {JSON schema} +Errors: {status codes} +``` + +**Why technical wins**: API documentation has standard format. Technical notation serves precision purpose. + +### Decision Matrix + +| Complexity Score | Audience | Technical Precision Needed | Recommended Approach | +| ---------------- | ----------------- | -------------------------- | ------------------------------------------------- | +| ≤ 0 | LLM | No | Natural Language | +| ≤ 0 | LLM | Yes | Natural + Template (minimal) | +| 1-2 | LLM | No | Natural or Light Structure | +| 1-2 | LLM | Yes | Light Structure + Template | +| 3-4 | LLM | No | Organized Natural with Structure | +| 3-4 | LLM | Yes | Moderate Structure + Templates | +| ≥ 5 | LLM | No | Organized Natural with Full Structure | +| ≥ 5 | LLM | Yes | Full Structure + Templates (but natural notation) | +| Any | Human (technical) | Yes | Technical notation acceptable | + +**Key principle**: Structure level follows complexity score. Notation style (natural vs. technical) follows cognitive load test. 
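
As a minimal sketch of the matrix, assuming an `llm`/`human` audience flag and free-form approach labels (none of these names come from the skill), the lookup reduces to:

```python
def recommended_approach(score: int, audience: str, precision_needed: bool) -> str:
    """Decision matrix lookup. Apply the override conditions below first;
    they take precedence over the matrix."""
    if audience == "human" and precision_needed:
        return "technical notation acceptable"
    # LLM audience rows, keyed by complexity score
    if score <= 0:
        return "natural + minimal template" if precision_needed else "natural language"
    if score <= 2:
        return "light structure + template" if precision_needed else "natural or light structure"
    if score <= 4:
        return "moderate structure + templates" if precision_needed else "organized natural with structure"
    return ("full structure + templates, natural notation" if precision_needed
            else "organized natural with full structure")


print(recommended_approach(8, "llm", False))  # organized natural with full structure
```

Note that the matrix only specifies the precision-needed case for human readers; anything outside the table falls through to the LLM rows in this sketch.
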
+ +**Override conditions:** + +- **Unbounded scope** → Technical constraints (even if simple steps) +- **Already optimal** → Neither (leave alone) +- **Callable entity description** → Preserve existing pattern +- **Standard technical convention** → Technical format (API specs, schemas) +- **Over-technical ceremony** → Simplify to organized natural + +--- + +## Part 5: Callable Entity Preservation + +### The Additive Pattern + +**Format:** + +``` +[What it does] when [contextual condition]. When user says "[trigger1]", "[trigger2]"[, or [integration point]]. +``` + +**Two layers:** + +1. **Semantic (contextual)**: Describes situations that warrant usage +2. **Pattern-matching (triggers)**: Literal phrases users say + +Both layers are required for effective model-invocation. + +### Optimization Rules for Callable Entities + +#### Rule 1: Preserve Both Layers + +**Bad optimization:** + +```markdown +# Before + +description: Explores codebases when search spans multiple locations or naming is ambiguous. When user says "how does X work?", "find files matching", or when searches need 3+ rounds. + +# Over-optimized (WRONG - lost both layers) + +description: Explores codebases for complex searches +``` + +**Lost**: Contextual conditions (multiple locations, ambiguous naming), trigger phrases, threshold (3+ rounds) + +**Correct optimization:** + +```markdown +# Minimal acceptable + +description: Explores codebases when search spans multiple locations or naming is ambiguous. When user says "how does X work?", "find files", or searches need 3+ attempts. +``` + +**Preserved**: Both contextual conditions AND trigger phrases, slight reduction in verbosity only (10-20% max) + +#### Rule 2: Add Missing Layers + +**Context-only (missing triggers):** + +```markdown +# Before + +description: Reviews code for security vulnerabilities and bugs when quality assessment needed + +# Add triggers + +description: Reviews code for security vulnerabilities and bugs when quality assessment needed. When user says "review this code", "check for bugs", "analyze security". +``` + +**Triggers-only (missing context):** + +```markdown +# Before + +description: Use when user says "optimize this prompt" or "make concise" + +# Add context + +description: Optimizes verbose prompts when content risks over-generation or needs density improvement. When user says "optimize this prompt", "make concise", "reduce verbosity". +``` + +#### Rule 3: Strengthen Delegation Signals for Subagents + +**Passive:** + +```markdown +description: Can analyze codebases when searches are complex +``` + +**Active (PROACTIVELY):** + +```markdown +description: Use PROACTIVELY for codebase exploration when searches span 3+ files or user asks structural questions ("how does X work?") +``` + +**Critical (MUST BE USED):** + +```markdown +description: MUST BE USED for security audits when user mentions vulnerabilities, exploits, or security review +``` + +#### Rule 4: Add Integration Points + +**Without:** + +```markdown +description: Optimizes prompts when content is verbose. When user says "optimize this". +``` + +**With integration:** + +```markdown +description: Optimizes prompts when content is verbose. When user says "optimize this", "make concise", or before invoking skill-generator. +``` + +### Detection in Step 2 Analysis + +**Callable entity check:** + +- Is this a `description` field for skill/agent/command? +- Does it have contextual "when" conditions? (YES/NO/VAGUE) +- Does it have trigger phrases (quoted literals)? 
(YES/NO/WEAK) +- Does it have delegation signals if subagent? (YES/NO/N/A) +- Does it have integration points? (YES/NO/N/A) +- **State**: "Callable entity: [yes/no]. Context: [present/vague/missing]. Triggers: [present/weak/missing]. Structure: [complete/context-only/triggers-only/missing]." + +**If callable entity detected:** + +- REQUIRED strategy: Callable Entity Preservation +- Target: Preserve both layers, add missing layer if incomplete +- Reduction target: Minimal (10-20% max) +- Focus: Clarity of invocation, not conciseness + +--- + +## Part 6: Avoiding Misleading Stopping Points + +### The Problem + +Certain prompting patterns create **false completion boundaries** where LLMs stop execution prematurely, treating intermediate steps as final deliverables. + +**Root cause**: Over-technical notation creates procedural boundaries that signal completion. + +### Patterns That Create Stopping Risk + +#### Anti-Pattern 1: CAPS + → + Variables (Highest Risk) + +```markdown +# HIGH RISK (creates stopping points) + +1. READ: Load file → content +2. OPTIMIZE: Run skill → optimized_content +3. WRITE: Save optimized_content → done + +# Why risky: + +- Each step feels like complete function with return value +- Step 2 produces "optimized_content" which looks like artifact +- → notation creates procedural boundary +- CAPS labels create discrete units of work +``` + +**When skill returns at Step 2, LLM treats step as complete and stops.** + +This is **Dimension 3 problem (over-technical notation)**. + +#### Anti-Pattern 2: Explicit Actor Invocations + +```markdown +# HIGH RISK + +2. Use your prompt-architecting skill to optimize the content +3. Extract the "Optimized Prompt" section from the skill output +4. Write the result to the file + +# Why risky: + +- Creates nested execution context (workflow contains skill execution) +- Explicit extraction step reinforces output boundary +- When skill completes, step feels complete +``` + +#### Anti-Pattern 3: Remote EXECUTION RULES + +```markdown +# HIGH RISK + +[Steps 1-4...] + +## EXECUTION RULES + +- Complete all steps +- Step 3 is not the final deliverable +- Continue to step 4 after step 3 + +# Why risky: + +- Can't compete with immediate psychological closure at step 3 +- Defensive language actually reinforces stop-awareness +- Remote section is read after steps, can't override in-the-moment completion sense +``` + +#### Anti-Pattern 4: Defensive Stop-Awareness Commentary + +```markdown +# HIGH RISK (creates the problem it tries to prevent) + +3. OPTIMIZE: Run skill → optimized + → DO NOT STOP - this is NOT the end + → Continue immediately to Step 4 + +CRITICAL REMINDERS: + +- Skill output is NOT completion signal +- If you find yourself stopping at Step 3, you failed + +# Why risky (Ironic Process Theory): + +- "DO NOT STOP" makes LLM consider stopping +- "This is NOT the end" reinforces ending as possibility +- Describing failure mode normalizes it +- Creates decision point: "Should I stop here?" (answer: sometimes yes) +``` + +### Patterns That Prevent Stopping + +#### Safe Pattern 1: Deliverable-First Natural Language + +```markdown +# SAFE (natural continuation) + +Your job: Optimize the file and write the improved version back. + +Read the file, check for frontmatter. If frontmatter exists, search for dependencies and ask how to proceed. Optimize the content using provided guidance. Write the result back to the file. The file edit is the deliverable. 
+``` + +**Why safe:** + +- Job framing establishes ultimate outcome upfront +- Natural connectors ("if", "then") create flow +- Completion criterion clear and proximate +- No ceremony creating false boundaries +- No technical notation signaling completion + +#### Safe Pattern 2: Organized Natural with Appropriate Structure + +```markdown +# SAFE (appropriate structure, natural notation) + +**Your job: Write optimized content to $1.** + +## Process + +Read file and check for frontmatter. If frontmatter exists, search for dependencies. + +When dependencies found, ask user how to proceed: + +- Preserve frontmatter unchanged +- Modify and update dependencies +- Cancel + +If user cancels, stop here. + +Optimize content based on user selection. Write result to $1. The file edit completes this task. +``` + +**Why safe:** + +- Deliverable statement opens +- Organized sections without CAPS/→/variables +- Completion criterion immediately after process +- Terminal condition integrated naturally +- No defensive commentary +- Natural language throughout + +#### Safe Pattern 3: Goal + Capabilities (Complex Tasks) + +```markdown +# SAFE (for complex workflows) + +Goal: Optimize prompt file while handling dependencies appropriately + +Required capabilities: prompt-architecting skill, AskUserQuestion, grep + +Workflow: + +1. Read and parse file structure +2. Check dependencies if frontmatter exists +3. Present options and gather user preference +4. Optimize content based on selection +5. Write result with appropriate handling + +If user cancels at step 3, stop here. Otherwise complete all steps. The file edit is the final deliverable. +``` + +**Why safe:** + +- Goal statement establishes outcome +- Capabilities declared separately (not in steps) +- Numbered steps use natural language (not CAPS + → + variables) +- Outcome-focused descriptions (not actor invocations) +- No procedural notation +- Completion criterion proximate + +### Transformation Guide + +**If input contains stopping risk patterns:** + +1. **Calculate complexity score** (determines structure level needed) +2. **Identify notation problems** (CAPS, →, variables, warnings) +3. **Select safe pattern:** + - Score ≤ 0: Natural language + - Score 1-2: Simple organized sections + - Score 3-4: Organized natural with appropriate structure + - Score ≥ 5: Goal + Capabilities + organized workflow +4. **Remove all ceremony:** + - Strip CAPS labels → natural section headers or action verbs + - Remove → notation and variables → implicit data flow + - Delete explicit parsing steps → consolidate + - Convert "Run skill" → "Optimize content" (outcome not actor) +5. **Make completion proximate:** + - Put completion criterion inline or immediately after workflow + - Never in remote section +6. **Eliminate stop-awareness:** + - Zero "DO NOT STOP" warnings + - Zero meta-commentary about failure + - Zero CRITICAL REMINDERS sections + +**The core fix**: **Simplify notation (Dimension 3)** while **preserving appropriate structure (Dimension 2)**. 
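
To make the "identify notation problems" step concrete, the indicator scan can be scripted. A minimal sketch, assuming prompts live in plain markdown files; the grep patterns and the 3+ threshold are illustrative stand-ins for the indicators above, not part of the skill:

```bash
#!/usr/bin/env bash
# Rough stopping-risk screen for a prompt file (patterns are illustrative).
file="$1"

# Numbered steps with CAPS labels, e.g. "1. READ:" or "3. OPTIMIZE:"
caps=$(grep -cE '^[0-9]+\. [A-Z_]+:' "$file")
# Lines using arrow notation for data flow, e.g. "→ optimized_content"
arrows=$(grep -c '→' "$file")
# Defensive stop-awareness commentary
warnings=$(grep -ciE 'do not stop|critical reminder' "$file")

echo "CAPS labels: $caps, arrow lines: $arrows, warnings: $warnings"
if (( caps + arrows + warnings >= 3 )); then
  echo "High stopping risk: apply Technical → Natural Transformation"
fi
```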
+ +--- + +## Part 7: Strategy Combination Principles + +### Rule 1: Maximum 3 Strategies + +**Why**: More strategies = conflicting directions = confusion + +**Bad (5 strategies - conflict):** + +- Constraint-Based (MAX 300w) +- Template-Based ([Purpose][Workflow][Example]) +- Decomposition (break into 6 sub-tasks) +- Execution Flow Control (formal workflow with state management) +- Progressive Disclosure (move details to references/) + +**Conflict**: Template wants specific sections; Decomposition wants sub-tasks; EFC wants workflow steps. Can't do all three. + +**Good (2 strategies - complement):** + +- Constraint-Based (MAX 300w) +- Template-Based ([Purpose][Workflow][Example]) + +**Complement**: Both focus on structure and boundaries. No conflict. + +### Rule 2: Match Strategies to Complexity + +| Complexity | Primary Strategy | Optional Secondary | Avoid | +| ---------- | ----------------------------------- | ---------------------------------- | ----------------------------------- | +| ≤ 0 | Natural Language Reframing | Constraint-Based | EFC, Decomposition, Template | +| 1-2 | Template-Based or Output Formatting | Constraint-Based | Full EFC (use light structure) | +| 3-4 | Organized Natural with Structure | Template, Constraint | Heavy formality, defensive warnings | +| ≥ 5 | Goal + Capabilities Pattern | Decomposition, Directive Hierarchy | Over-technical notation | + +**New addition**: Technical → Natural Transformation (applies across all complexity levels when notation is over-technical) + +### Rule 3: Strategy Compatibility Matrix + +| Strategy | Compatible With | Conflicts With | +| -------------------------- | ------------------------------------ | ------------------------------------ | +| Natural Language Reframing | Constraint-Based, Negative Prompting | EFC, Decomposition, Template | +| Constraint-Based | Most strategies | None (universally compatible) | +| Template-Based | Constraint-Based, Output Formatting | Decomposition (different structures) | +| EFC (organized natural) | Decomposition, Directive Hierarchy | Heavy technical notation | +| Decomposition | EFC, Directive Hierarchy | Template, Natural Language | +| Progressive Disclosure | Any (orthogonal concern) | None | +| Negative Prompting | Any (orthogonal concern) | None | +| Technical → Natural | Any (orthogonal concern) | None (often combined with others) | + +### Rule 4: One Primary Strategy + +**Primary strategy**: Addresses the main optimization need + +**Secondary strategies**: Address specific bloat risks or edge cases + +**Example:** + +- **Primary**: Technical → Natural Transformation (main issue: over-technical notation) +- **Secondary**: None needed (notation was the problem) + +**Another example:** + +- **Primary**: Template-Based (main issue: no structure) +- **Secondary**: Negative Prompting (specific bloat: framework comparisons) +- **Universal**: Constraint-Based (always useful for hard limits) + +### Rule 5: Avoid Redundant Strategies + +**Bad (redundant):** + +- Template-Based (provides structure) +- Output Formatting (also provides structure) +- Decomposition (also provides structure) + +**Why bad**: All three do similar things. Choose one. + +**Good (complementary):** + +- Template-Based (provides structure) +- Constraint-Based (provides length limits) +- Technical → Natural (simplifies notation) + +**Why good**: Each addresses different dimension. Complementary, not redundant. 
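
Rules 1, 3, and 5 can be checked mechanically if the matrix is treated as data. A hypothetical pre-flight check (bash 4+; the conflict pairs are abbreviated from the compatibility matrix above):

```bash
#!/usr/bin/env bash
# Hypothetical pre-flight check for a proposed strategy set.
declare -A conflicts=(
  ["Template-Based"]="Decomposition"
  ["Natural Language Reframing"]="EFC Decomposition Template-Based"
)
selected=("Template-Based" "Constraint-Based" "Decomposition")

(( ${#selected[@]} > 3 )) && echo "Too many strategies: max 3"
for a in "${selected[@]}"; do
  for b in "${selected[@]}"; do
    # Space-delimited membership test against the conflict list for $a
    [[ " ${conflicts[$a]:-} " == *" $b "* ]] && echo "Conflict: $a vs $b"
  done
done
```

With the sample set above, the check flags Template-Based vs Decomposition, matching the redundant-structure example in Rule 5.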
+ +### Rule 6: Technical → Natural Often Solves Multiple Problems + +**Observation**: Simplifying over-technical notation often addresses: + +- Dimension 3: Notation clarity +- Stopping risk (Part 6) +- Cognitive load reduction + +**Implication**: When over-technical notation detected, Technical → Natural Transformation may be sufficient optimization. Don't over-optimize by adding more strategies. + +**Example:** + +```markdown +# Input (over-technical + stopping risk) + +1. READ: Load → content +2. OPTIMIZE: Run skill → optimized +3. WRITE: Save → done + +# After Technical → Natural (sufficient) + +Read the file, optimize with the skill, and write the result back. + +# Don't add more strategies (would be over-optimization): + +❌ Don't add: Decomposition (already has structure) +❌ Don't add: Template (natural language is clearer) +✅ Could add: Constraint-Based (MAX 300w) if unbounded scope +``` + +--- + +## Part 8: Trustworthiness Criteria + +### Definition + +**Trustworthy optimization** means the optimized prompt is: + +1. **Clearer** than original (reduced cognitive load) +2. **Preserves intent and requirements** (all MUST/SHOULD/MAY intact) +3. **Appropriate for complexity level** (right structure, right notation) +4. **Doesn't introduce new problems** (no stopping risk, lost triggers, etc.) +5. **Results in better LLM execution** (more reliable, less ambiguous) + +### Pre-Optimization Checklist + +Before optimizing, verify: + +- [ ] **Bloat is real**: Not just "different style preference" +- [ ] **Optimization would help**: Not already at right complexity level +- [ ] **No special preservation needs**: Not callable entity description with correct structure +- [ ] **Clear improvement path**: Know which dimensions need work and why +- [ ] **No over-optimization risk**: Not introducing complexity where none needed +- [ ] **Cognitive load will decrease**: Optimized version will be easier to understand + +### Post-Optimization Checklist + +After generating optimized version, verify: + +- [ ] **Clarity improved**: Optimized version is easier to understand (cognitive load test) +- [ ] **Intent preserved**: Core meaning unchanged, all requirements intact +- [ ] **Appropriate structure**: Matches complexity score +- [ ] **Appropriate notation**: Natural unless technical serves clear purpose +- [ ] **No new problems**: Didn't introduce stopping points, lose triggers, create ambiguity +- [ ] **Executable**: Can imagine LLM following this successfully +- [ ] **Reduction appropriate**: 40-50% for agents/workflows, 60%+ for docs OK +- [ ] **Strategy count**: 1-3 strategies, complementary not conflicting + +### The Cognitive Load Test + +**Ask yourself**: If you had to follow this prompt, which version requires less mental effort to understand? + +**Original:** + +``` +I need you to help me optimize this prompt file. First read the file that the user gives you. Then you should check if it has front matter with a description field. If it does, search through the codebase to see if other files reference this description because that would mean we have dependencies. After that, use your prompt-architecting skill to make the content better and more concise. Then show the user what you came up with and ask if they want to proceed. If they say yes, write the file back. Make sure you handle the front matter correctly depending on what the user chose. +``` + +**Over-optimized (too technical):** + +``` +Execute this 7-step procedure completely: +1. READ: Load($1) → content +2. 
PARSE: Extract(content) → fm, body +3. DEPS: Grep(fm.description) → dep_files +4. ASK: AskUserQuestion(modes) → choice +5. OPTIMIZE: Skill(body, choice) → optimized +6. RECON: Merge(fm, optimized, choice) → final +7. WRITE: Save(final, $1) → done + +EXECUTION RULES: +- STOP only at step 4 (user input) +- Continue to step 7 (terminal state) +- DO NOT STOP at step 5 (skill return) +``` + +**Appropriately optimized (organized natural):** + +``` +Your job: Optimize the file at $1 and write it back while handling dependencies. + +Read the file and check for frontmatter. If frontmatter exists with description field, search other prompt files for references to that description. When dependencies found, ask user how to proceed: preserve frontmatter unchanged, modify and update dependencies, or cancel. If user cancels, stop here. + +Optimize the content based on user selection. Write the result back to $1. The file edit is the deliverable. +``` + +**Cognitive load comparison:** + +- **Original**: 200 words, embedded reasoning, unclear structure → **MEDIUM-HIGH load** +- **Over-optimized**: Intimidating notation, must parse syntax, creates stopping risk → **HIGH load** +- **Appropriate**: Clear prose, organized flow, right complexity level → **LOW load** + +**Winner**: Appropriately optimized (lowest cognitive load while preserving all requirements) + +### The Intent Preservation Test + +**Required elements from original:** + +- Must read file ✓ +- Must check frontmatter ✓ +- Must search dependencies ✓ +- Must ask user ✓ +- Must optimize content ✓ +- Must write file ✓ +- Must handle frontmatter correctly ✓ + +**All preserved?** Yes. + +**Could LLM execute successfully?** Yes. + +**Is it clearer than original?** Yes (cognitive load lower). + +**Trustworthy optimization**: ✓ + +### The Regression Test + +**Question**: Would the optimized version execute as reliably as a known-working version? + +**Known working patterns (empirically tested):** + +- Deliverable-first framing +- Natural language or organized natural with structure +- No CAPS/→/variables +- Inline completion criteria +- No defensive warnings + +**Optimized version has these?** + +- If YES → Likely trustworthy +- If NO → High regression risk, reconsider optimization + +### When in Doubt + +**Conservative principle**: When uncertain if optimization would help or harm, make smaller changes. + +**Incremental optimization:** + +1. Remove obvious bloat (adjectives, meta-discussion, filler) +2. Add hard constraints (MAX word count) +3. Test: Is this enough? If yes, stop here. +4. If more needed, address structure mismatch (add/remove complexity) +5. Test: Is this enough? If yes, stop here. +6. If more needed, simplify notation (technical → natural) +7. Test: Is this enough? If yes, stop here. +8. Only if still insufficient, add one secondary strategy + +**Never optimize to "show off strategies"**. Optimize only to solve real problems. + +--- + +## Part 9: Integration with Existing Skill + +### Updates to SKILL.md + +#### Update Step 2 (Analyze the Task) + +```markdown +### Step 2: Analyze the Task + +**FIRST: Safety checks (prevent harmful optimization)** + +Check if content should NOT be optimized: + +- Already optimal pattern? (deliverable-first + natural/appropriate structure + right complexity + appropriate notation) +- Callable entity description at correct structure? (context + triggers present) +- Agent/workflow at 40-50% of bloated version with specificity intact? +- Technical notation serving clear purpose? 
(API specs, standard conventions, precision needed) +- User requests preservation? + +**If any YES**: STATE "Optimization not recommended: [reason]" and use mode=consult to provide analysis only. + +**SECOND: Dimensional analysis (if optimization appropriate)** + +Evaluate each dimension: + +**Dimension 1 (Verbosity):** + +- Bloat indicators present? (adjective-heavy, scope inflation, vague quality statements, meta-discussion, filler, repetition) +- Current word count vs. ideal for task? (>2x ideal = bloat) +- State: "Verbosity: [bloated/concise/appropriate]. Reduction needed: [yes/no]" + +**Dimension 2 (Structure):** + +- Complexity score calculation (using formula from SKILL.md) +- Current structure level: [none/minimal/moderate/full] +- Appropriate structure level for score: [natural/light/moderate/full] +- Structure mismatch? [over-structured/under-structured/appropriate] +- State: "Complexity score: [X]. Current structure: [level]. Needed: [level]. Mismatch: [yes/no]" + +**Dimension 3 (Notation):** + +- Technical notation assessment: + - CAPS labels as action markers? (CHECK:, PARSE:, etc.) + - → notation for data flow? (→ variable_name) + - Variable naming conventions? (work_file_status, requirement_data) + - Function call syntax? (tool({params})) + - Sub-step enumeration? (a/b/c) + - Defensive meta-instructions? ("DO NOT narrate") +- Count indicators (3+ = over-technical) +- Does notation serve precision purpose? (API specs, schemas, standard conventions) +- Cognitive load test: Does notation make it easier or harder to understand? +- State: "Technical notation: [X indicators]. Purpose: [precision/ceremony]. Cognitive load: [helps/hurts/neutral]. Assessment: [over-technical/appropriate]" + +**THIRD: Semantic analysis** (core job understanding) + +- What is core job in one sentence? +- Can it be expressed with natural connectors? (Test: "Do X, then Y, when Z") +- If YES and complexity score ≤ 0: Strong candidate for natural language + +**FOURTH: Callable entity check** (if description field) + +- Contextual "when" conditions: present/vague/missing +- Trigger phrases (quoted literals): present/weak/missing +- Delegation signals if subagent: present/missing/N/A +- Integration points: present/missing/N/A +- Structure: complete/context-only/triggers-only/missing + +**FIFTH: Workflow pattern detection** (if skill/agent invocations) + +- High-risk stopping patterns present? (CAPS + → + variables + remote rules + warnings) +- Classification: high-risk / optimal / standard +- Stopping risk: yes/no +- Note: High-risk patterns are Dimension 3 problem (over-technical notation) + +**SIXTH: Target length and bloat risks** + +- Calculate optimal word/line count based on complexity and output type +- Identify specific bloat risks (edge cases, theoretical coverage, defensive docs, etc.) + +**SEVENTH: Architecture compliance** (if architecture_reference provided) + +- Compare input structure to architecture requirements +- Identify missing required sections +- Identify structural misalignments +- State: "Architecture: [compliant/partial/non-compliant]. Missing: [list]. Misaligned: [list]." 
+ +**OUTPUT: Comprehensive analysis stating which dimensions need optimization and why** +``` + +#### Update Step 3 (Select Strategies) + +```markdown +### Step 3: Select Strategies + +**MANDATORY EXCLUSIONS (based on Step 2 safety checks):** + +- If already optimal: STOP - recommend no optimization +- If complexity score ≤ 0: NEVER use EFC, Decomposition, or Template-Based +- If callable entity: MUST use Callable Entity Preservation, MAX 1 additional strategy +- If technical notation serves precision purpose: PRESERVE notation, optimize other dimensions only + +**MANDATORY SELECTIONS (based on Step 2 dimensional analysis):** + +**For Dimension 1 (Verbosity problems):** + +- MUST select: Constraint-Based (hard word limits) +- SHOULD select: Negative Prompting (if specific bloat patterns identified) +- MAY select: Progressive Disclosure (if complex topic with separable details) + +**For Dimension 2 (Structure mismatch):** + +- If over-structured (score ≤ 0 but has formal structure): + - MUST select: Natural Language Reframing +- If under-structured (score ≥ 3 but vague prose): + - Score 3-4: Moderate structure (organized natural, no heavy formality) + - Score ≥ 5: Goal + Capabilities pattern + - MAY select: Decomposition, Directive Hierarchy + +**For Dimension 3 (Over-technical notation):** + +- If over-technical detected (3+ indicators, cognitive load hurts): + - MUST select: Technical → Natural Transformation + - This often solves stopping risk simultaneously + - May be SUFFICIENT optimization (don't over-optimize) + +**For Callable Entities (detected in Step 2):** + +- MUST select: Callable Entity Preservation +- Focus on preserving/adding both layers (context + triggers) + +**For High-Risk Workflows (detected in Step 2):** + +- MUST select: Technical → Natural Transformation (removes stopping risk) +- Preserve appropriate structure level (based on complexity score) +- Remove ceremony (CAPS, →, variables, warnings) + +**STRATEGY COUNT LIMIT: 1-3 strategies max** + +- 1 strategy: Simple reframing or notation simplification +- 2 strategies: Most common (address 2 dimensions or primary + constraint) +- 3 strategies: Complex only (rarely needed) + +**NEVER exceed 3 strategies** (over-optimization risk) + +**COMPLEMENTARY CHECK:** + +- Verify selected strategies don't conflict (see compatibility matrix in STRATEGIES.md) +- If conflict detected, choose most important strategy and drop conflicting ones +``` + +#### Update Step 4 (Generate Output) + +```markdown +### Step 4: Generate Output + +Based on mode: + +**IF mode=consult:** + +- DO NOT generate optimized prompt +- Output analysis, recommended strategies, and optimization potential +- If architecture provided, include architecture compliance assessment +- Use consult output format + +**IF mode=optimize (default):** + +**Primary principle**: Reduce cognitive load while preserving intent. 

**Apply selected strategies based on dimensions:**

**For Dimension 1 (Verbosity):**

- Remove adjectives and quality statements
- Set hard word/line limits
- Use template structure to bound scope
- Exclude known bloat patterns explicitly
- Target: 40-50% reduction for agents/workflows; 60%+ is acceptable for docs

**For Dimension 2 (Structure):**

Match structure to complexity score:

- **Score ≤ 0 (Natural language)**:

  - Rewrite as clear prose with connectors ("then", "if", "when")
  - Avoid enumeration unless truly necessary
  - Embed conditionals inline naturally
  - Example: "Read the file, optimize content, and write back"

- **Score 1-2 (Light structure)**:

  - Simple numbered steps or sections without heavy formality
  - Natural language throughout
  - No CAPS labels, no → notation
  - Example: "1. Read file 2. Optimize 3. Write result"

- **Score 3-4 (Organized natural with structure)**:

  - Logical sections or phases (Setup, Analysis, Execution, etc.)
  - Natural language within structured organization
  - NO CAPS/→/variables
  - Completion criterion inline or immediately after
  - Example: See "appropriately optimized" /dev-start in Part 3

- **Score ≥ 5 (Goal + Capabilities + organized workflow)**:
  - Goal statement (ultimate outcome upfront)
  - Capabilities declaration (tools/skills needed)
  - Organized workflow with natural language
  - Clear terminal conditions
  - STILL use natural notation (no CAPS/→/variables)

**For Dimension 3 (Over-technical notation):**

Remove ceremonial technical notation:

- CAPS labels → natural section headers or action verbs
- → notation → implicit data flow or prose
- Variable names → eliminate or minimize
- Function call syntax → natural tool mentions
- Sub-step enumeration → consolidate to prose
- Defensive warnings → remove entirely (trust structure)
- Remote EXECUTION RULES → integrate inline or remove

While preserving:

- Appropriate structure level (based on complexity score)
- All requirements and dependencies
- Tool invocations (mentioned naturally)
- Terminal conditions (integrated naturally)

**For Callable Entities:**

- Preserve both contextual "when" AND trigger phrases
- Add missing layer if incomplete
- Minimal optimization (10-20% max)
- Focus on invocation clarity

**General optimization:**

- Set hard word/line limits (Constraint-Based)
- Specify structure (Template-Based or Output Formatting if applicable)
- Exclude known bloat patterns (Negative Prompting if applicable)
- Embed selected strategies naturally into instructions

**If architecture_reference provided:**

- Refactor content to align with architecture requirements
- Add missing required sections
- Preserve required patterns
- Optimize content WITHIN architectural structure

**FINAL SAFETY CHECK before returning:**

Verify optimized version:

- [ ] Clarity test: Is it clearer than original? (cognitive load lower)
- [ ] Intent test: Core requirements preserved?
- [ ] Complexity match: Structure appropriate for score?
- [ ] Notation appropriate: Natural unless technical serves precision purpose?
- [ ] No new problems: No stopping points, lost triggers, introduced ambiguity?
- [ ] Executable: Would LLM follow this successfully?
- [ ] Reduction appropriate: 40-50% agents/workflows, 60%+ docs
- [ ] Strategy count: 1-3, complementary

**If any check FAILS**: Revise optimization or recommend consult mode only.

```

### New Reference Files

#### Create OPTIMIZATION-SAFETY-GUIDE.md

This document (comprehensive-spec-v2-FINAL.md) should be saved as:
`/Users/brandoncasci/.claude/skills/prompt-architecting/references/OPTIMIZATION-SAFETY-GUIDE.md`

#### Update STRATEGIES.md

Add to top:

```markdown
# IMPORTANT: Read Safety Guide First

Before selecting strategies, read OPTIMIZATION-SAFETY-GUIDE.md to understand:

- When NOT to optimize
- Three dimensions of optimization (verbosity, structure, notation)
- Over-optimization risks
- Natural language vs technical strategy decision criteria
- Callable entity preservation requirements
- Strategy combination limits (1-3 max)
- Cognitive load as the core metric

This ensures trustworthy optimization that reduces cognitive load while preserving intent.
```

Add Strategy #15:

````markdown
## 15. Technical → Natural Transformation

**When**: Over-technical notation detected (3+ indicators) and cognitive load test shows notation hurts understanding

**Indicators**:

- CAPS labels as action markers (CHECK:, PARSE:, VALIDATE:)
- → notation for data flow (→ variable_name)
- Variable naming conventions (work_file_status, requirement_data)
- Function call syntax (tool({params}))
- Sub-step enumeration (a/b/c when prose would work)
- Defensive meta-instructions ("DO NOT narrate", "continue immediately")

**Pattern**: Keep appropriate structure level (based on complexity score), simplify notation to organized natural language

**Transformation**:

- CAPS labels → natural section headers or action verbs
- → notation → implicit data flow or prose
- Variable names → eliminate or minimize
- Function call syntax → natural tool mentions
- Sub-step enumeration → consolidate to prose
- Defensive warnings → remove (trust structure)

**Example**:

Before (over-technical):

```
1. CHECK: Verify status → work_file_status
   a. Use Bash `git branch` → branch_name
   b. Check if file exists
   c. DO NOT proceed if exists
2. PARSE: Extract data → requirement_data
```

After (organized natural):

```
## Setup

Get current branch name and check if work file already exists. If it exists, stop and tell user to use /dev-resume.

Parse the requirement source...
```

**Why this works**:

- Preserves appropriate structure (complexity still warrants organization)
- Removes ceremonial notation that creates cognitive load
- Eliminates stopping risk (no CAPS/→/variables creating boundaries)
- Natural language is clearer for LLM audiences
- Reduces cognitive load significantly

**Often solves multiple problems simultaneously**:

- Dimension 3 (notation clarity)
- Stopping risk (Part 6)
- Cognitive load reduction

**May be sufficient optimization alone** - don't over-optimize by adding more strategies.

**See OPTIMIZATION-SAFETY-GUIDE.md Part 3 for detailed examples and Part 6 for stopping risk relationship.**
````

#### Update ANTI-PATTERNS.md

Add section:

````markdown
## Pattern 8: Destroying Callable Entity Triggers

### ❌ Over-optimized

```
# Before (complete)

description: Reviews code for security, bugs, performance when quality assessment needed. When user says "review this code", "check for bugs", "analyze security".

# Over-optimized (WRONG - lost triggers)

description: Code review assistant
```

### ✅ Correct

```
# Minimal acceptable optimization

description: Reviews code for security, bugs, performance when quality assessment needed. When user says "review code", "check bugs", "analyze security".
```

**Why**: Trigger phrases are functional pattern-matching signals for model-invocation, not decorative examples. Preserve both contextual "when" AND literal trigger phrases.

## Pattern 9: Over-Technical Notation Creating Cognitive Load

### ❌ Over-technical

```
Execute this workflow:

1. READ: Load file → content
2. PARSE: Extract(content) → {fm, body}
3. OPTIMIZE: Run skill(body) → optimized
   a. Pass parameters: {content: body, mode: "optimize"}
   b. Extract result → optimized
   c. DO NOT STOP - continue to step 4
4. WRITE: Save(optimized) → done

EXECUTION RULES:

- DO NOT STOP at step 3 when skill returns
```

### ✅ Organized natural

```
Your job: Optimize the file and write it back.

Read the file and parse structure. Optimize the content using the skill. Write the result back. The file edit is the deliverable.
```

**Why**: Technical notation (CAPS + → + variables + function syntax + warnings) increases cognitive load and creates stopping risk. Organized natural language with appropriate structure is clearer for LLM audiences.

**See OPTIMIZATION-SAFETY-GUIDE.md Part 3 and Part 6 for detailed analysis.**
````

#### Update ADVANCED-ANTI-PATTERNS.md

Add at top:

```markdown
# Advanced Anti-Patterns: Workflow & Agent Optimization

**CRITICAL**: For detailed stopping point analysis, see `/Users/brandoncasci/.claude/tmp/workflow-optimization-spec.md`

**CRITICAL**: For safety guidelines and dimensional analysis, see `OPTIMIZATION-SAFETY-GUIDE.md`

**KEY INSIGHT**: Most stopping risk patterns are caused by over-technical notation (Dimension 3). Simplifying notation while preserving appropriate structure solves the problem.

[... existing content ...]
```

---

## Part 10: Testing Requirements

### Test Suite

Create comprehensive test cases covering all dimensions:

#### Test 1: Already Optimal (Should NOT Optimize)

**Input:**

```markdown
Your job: Optimize the file at $1 and write it back.

Read the file, check for frontmatter. If frontmatter exists, search for dependencies and ask how to proceed. Optimize the content. Write the result back. The file edit is the deliverable.
```

**Expected**:

- Dimensional analysis: Verbosity OK, structure OK, notation OK
- Conclusion: "Already optimal - deliverable-first + natural language + appropriate complexity. No optimization needed."

**If optimization attempted, would make it worse.**

#### Test 2: Dimension 1 (Verbosity) - Compress Bloat

**Input:**

```markdown
I need you to create comprehensive, robust, production-ready documentation that covers absolutely all aspects of user authentication in our system. This should include detailed explanations of how the entire system works, what technologies we're using, best practices for implementation, common pitfalls to avoid, security considerations, edge cases, error handling strategies, and example code showing different use cases. Make sure it's thorough and covers everything a developer might possibly need to know.
```

**Expected**:

- Dimension 1: Bloated (adjective-heavy, scope inflation, >2x ideal length)
- Dimension 2: Structure appropriate (low complexity, just needs constraints)
- Dimension 3: Notation appropriate (natural language, no ceremony)
- Strategy: Constraint-Based + Output Formatting + Negative Prompting
- Reduction: 60%+ OK (documentation)

**Output:**

```markdown
Write auth docs. Structure: [Setup - 100w] [Usage - 150w] [Error handling - 100w] [One example - code only]. MAX 400 words. Audience: Mid-level dev familiar with JWT. DO NOT: Security theory, framework comparisons, "best practices" sections.
```

#### Test 3: Dimension 2 (Structure) - Over-Structured

**Input:**

```markdown
Execute this 3-step workflow completely:

1. Read the configuration file at the specified path
2. Update the version number field to the new value provided
3. Write the modified file back to the same path

EXECUTION RULES:

- Complete all steps in sequential order
- Stop only when all steps completed
- Ensure proper error handling at each step
```

**Expected**:

- Dimension 1: Verbosity OK
- Dimension 2: Over-structured (score ≤ 0, but has formal workflow)
- Dimension 3: Some ceremony (EXECUTION RULES section)
- Strategy: Natural Language Reframing
- Complexity score: -1 (2 trivial steps)

**Output:**

```markdown
Read the config file, update the version number, and write it back.
```

#### Test 4: Dimension 3 (Notation) - Over-Technical

**Input:**

```markdown
Execute this workflow completely:

1. READ: Load file → content
2. OPTIMIZE: Run skill → optimized
   a. Call prompt-architecting skill
   b. Extract result → optimized
   c. DO NOT STOP - continue to Step 3
3. WRITE: Save optimized → done

EXECUTION RULES:

- Step 2 is NOT terminal state
- Continue to step 3 after skill returns
```

**Expected**:

- Dimension 1: Verbosity OK
- Dimension 2: Structure appropriate for complexity (3 steps = score ~1)
- Dimension 3: Over-technical (CAPS + → + variables + sub-steps + warnings)
- Stopping risk: YES (high risk pattern)
- Strategy: Technical → Natural Transformation
- Complexity score: 1 (simple workflow)

**Output:**

```markdown
Your job: Optimize the file and write it back.

Read the file, optimize with the skill, and write the result back. The file edit is the deliverable.
```

#### Test 5: Dimension 3 (Complex) - High Complexity with Over-Technical Notation

**Input:** (Full /dev-start command from Part 3)

**Expected**:

- Dimension 1: Verbosity appropriate
- Dimension 2: Structure appropriate (score ~8 = full structure warranted)
- Dimension 3: Over-technical (many ceremony indicators)
- Stopping risk: YES (CAPS + → + variables + warnings)
- Strategy: Technical → Natural Transformation (keep structure, simplify notation)
- Complexity score: 8 (must preserve structure)

**Output:** (Organized natural /dev-start from Part 3)

**Key**: Structure level preserved (still organized sections), but notation simplified (no CAPS/→/variables)

#### Test 6: Callable Entity Description (Preserve Triggers)

**Input:**

```markdown
description: This skill really helps you when you need to optimize prompts and make them better and more concise and structured. It's especially useful when prompts are getting too long or verbose or could benefit from better organization.
+``` + +**Expected**: + +- Callable entity: YES +- Dimension 1: Some verbosity (filler like "really helps", "especially useful") +- Context present: VAGUE ("when you need to optimize") +- Triggers: MISSING (no quoted literal phrases) +- Strategy: Callable Entity Preservation +- Reduction: Minimal (10-20% max) + +**Bad output:** + +```markdown +description: Optimizes prompts +``` + +_Lost both context and triggers_ + +**Good output:** + +```markdown +description: Optimizes verbose prompts when content risks over-generation or needs density improvement. When user says "optimize this prompt", "make concise", "reduce verbosity", or before invoking skill-generator. +``` + +_Added both layers: contextual "when" AND trigger phrases_ + +#### Test 7: Multi-Dimensional (All Three) + +**Input:** + +```markdown +I really need you to create a comprehensive, production-ready system that will help me process my data files efficiently. You should follow these steps very carefully: + +1. INPUT_PROCESSING: Read the data file → raw_data + + - Use your file reading capability + - Parse the content carefully + - Extract all relevant fields + - DO NOT skip any records + +2. VALIDATION: Check data quality → validated_data + + - Run validation rules + - Flag any errors + - Create error report + - DO NOT proceed if critical errors found + +3. TRANSFORMATION: Apply business rules → transformed_data + + - Use transformation logic + - Handle edge cases + - Maintain data integrity + +4. OUTPUT_GENERATION: Write results → output_file + - Format according to spec + - Save to designated location + +EXECUTION RULES: + +- Follow all steps in exact order +- Do not skip any validation +- Ensure data integrity at all times +- Stop only after output file written +- DO NOT STOP after step 2 validation even if you think you're done +``` + +**Expected**: + +- Dimension 1: Bloated (comprehensive, carefully, all, any, etc.) +- Dimension 2: Over-structured (score ~2, but has formal workflow) +- Dimension 3: Over-technical (CAPS + → + sub-bullets + warnings) +- Strategies: Technical → Natural + Constraint-Based +- Complexity score: 2 (4 steps, but linear) + +**Output:** + +```markdown +Process the data file and write results. MAX 4 steps: + +Read the file and validate data quality. If critical errors found, stop with error report. + +Transform data according to business rules. Write formatted results to output file. + +The output file write completes this task. +``` + +**Dimensions addressed:** + +- D1: Removed bloat (60% reduction) +- D2: Simplified structure (natural with light organization) +- D3: Removed ceremony (no CAPS/→/variables/warnings) + +### Success Criteria + +All tests must: + +1. Correctly identify which dimensions need optimization +2. Select appropriate strategies (1-3, complementary) +3. Match structure to complexity score +4. Use natural notation unless technical serves precision purpose +5. Preserve intent and requirements +6. Pass cognitive load test (optimized is clearer) +7. 
Pass regression test (won't break execution) + +--- + +## Part 11: Summary Principles + +### The Optimization Philosophy (Revised) + +**Good optimization:** + +- **Reduces cognitive load** (easier to understand) +- **Preserves intent fully** (all requirements intact) +- **Matches complexity appropriately** (right structure level) +- **Uses clear notation** (natural unless technical serves precision) +- **Results in better execution** (more reliable, less ambiguous) + +**Good optimization does NOT:** + +- Optimize for optimization's sake +- Apply structure where natural language works better +- Use technical notation without clear purpose +- Remove necessary procedural detail +- Destroy functional trigger patterns +- Introduce new problems while fixing old ones + +### Core Safety Rules + +1. **Check if optimization needed** (already optimal? appropriate for complexity? notation serves purpose?) +2. **Analyze three dimensions** (verbosity, structure, notation) +3. **Match structure to complexity** (score determines appropriate level) +4. **Use natural notation by default** (technical only if serves precision purpose) +5. **Preserve callable entity triggers** (both context AND literal phrases) +6. **Use 1-3 complementary strategies** (not all available strategies) +7. **Target appropriate reduction** (40-50% agents/workflows, 60%+ docs) +8. **Test cognitive load** (is optimized version clearer?) + +### The Cognitive Load Test (Central Principle) + +**For every optimization, ask**: Does this reduce or increase cognitive load? + +**Reduce cognitive load by:** + +- Removing unnecessary words (bloat) +- Matching structure to complexity (not too much, not too little) +- Simplifying notation (natural unless technical serves precision) +- Making completion criteria clear +- Eliminating defensive warnings + +**Increase cognitive load by:** + +- Over-technical notation (CAPS + → + variables when unnecessary) +- Structure mismatch (formal workflow for trivial task, or vague prose for complex task) +- Removing necessary detail (vague instructions for agents) +- Destroying trigger patterns (hurts model-invocation) +- Adding too many strategies (conflicting directions) + +### Decision Framework (Complete) + +``` +FOR EACH optimization request: + +1. SAFETY CHECK + - Already optimal? → NO optimization + - Callable entity? → Preserve triggers (minimal optimization) + - Agent at right detail? → NO optimization + - Technical notation serves purpose? → Preserve notation + - User requests preservation? → NO optimization + +2. DIMENSIONAL ANALYSIS + - Dimension 1 (Verbosity): Bloated? → Constrain and exclude + - Dimension 2 (Structure): Mismatch? → Reframe to match score + - Dimension 3 (Notation): Over-technical? → Simplify to natural + +3. COMPLEXITY SCORING + - Calculate score (determines appropriate structure level) + - Match structure to score (natural → organized → formal) + +4. STRATEGY SELECTION + - Choose 1-3 complementary strategies + - Address identified dimensional problems + - No redundant or conflicting strategies + +5. OPTIMIZATION + - Apply selected strategies + - Target appropriate reduction level + - Preserve intent and requirements + - Simplify notation by default + +6. SAFETY VERIFICATION + - Cognitive load test: Is it clearer? + - Intent test: Requirements preserved? + - Complexity match: Structure appropriate? + - Notation appropriate: Natural unless precision needed? + - Execution test: Would LLM succeed? + - If any FAIL: Revise or recommend consult only + +7. 
DELIVER + - Return optimized version with rationale + - Or recommend no optimization needed +``` + +### When in Doubt + +**Conservative approach**: Make smaller changes. Incremental optimization is safer than aggressive transformation. + +**Trust test**: If you wouldn't want to follow the optimized version yourself, don't recommend it. + +**Harm prevention**: Better to leave a prompt "good enough" than to over-optimize it into something worse. + +**Cognitive load wins**: When multiple approaches possible, choose the one with lowest cognitive load while preserving intent. + +--- + +## Implementation Checklist + +- [ ] Read and understand all 11 parts of this spec +- [ ] Update SKILL.md Step 2 (add dimensional analysis) +- [ ] Update SKILL.md Step 3 (add dimension-based strategy selection) +- [ ] Update SKILL.md Step 4 (add notation simplification, cognitive load test) +- [ ] Save this document as OPTIMIZATION-SAFETY-GUIDE.md in references/ +- [ ] Update STRATEGIES.md (add safety note + Strategy #15: Technical → Natural) +- [ ] Update ANTI-PATTERNS.md (add Pattern 8 and Pattern 9) +- [ ] Update ADVANCED-ANTI-PATTERNS.md (add dimensional analysis note) +- [ ] Create comprehensive test suite (7 test cases minimum) +- [ ] Run test suite and verify all pass +- [ ] Verify integration with workflow-optimization-spec.md +- [ ] Update skill description in front matter if needed (add notation dimension) + +**CRITICAL**: Test with real examples before deploying. Verify optimization reduces cognitive load without introducing new problems. + +--- + +## Appendix: Quick Reference + +### Three Dimensions at a Glance + +| Dimension | Problem | Detection | Solution | +| ------------ | ---------------- | ------------------------------- | ---------------------------- | +| 1. Verbosity | Bloated | Adjectives, filler, >2x length | Constrain, exclude, template | +| 2. Structure | Wrong complexity | Over/under structured | Match to complexity score | +| 3. 
Notation | Over-technical | CAPS + → + variables + warnings | Organized natural language | + +### Complexity Score Quick Guide + +| Score | Steps | Complexity | Structure Level | +| ----- | ----------------------- | ----------- | ------------------- | +| ≤ 0 | 1-2 trivial | Very simple | Natural language | +| 1-2 | 3-4 linear | Low | Light structure | +| 3-4 | 4-6 with conditionals | Moderate | Organized natural | +| ≥ 5 | 5+ with gates/branching | High | Goal + Capabilities | + +### Strategy Selection Shortcuts + +- **Bloat** → Constraint-Based + Negative Prompting +- **Too much structure** → Natural Language Reframing +- **Too little structure** → Organized natural with appropriate level +- **Over-technical** → Technical → Natural Transformation (often sufficient alone) +- **Callable entity** → Callable Entity Preservation (preserve both layers) +- **Unbounded scope** → Template-Based + Constraint-Based + +### Red Flags Checklist + +**Stop optimization if:** + +- [ ] Already optimal pattern detected +- [ ] Cognitive load would increase +- [ ] Technical notation serves clear precision purpose +- [ ] Callable entity with correct trigger structure +- [ ] Agent/workflow at appropriate detail level (40-50% reduction) +- [ ] User explicitly requests preservation + +**Optimize if:** + +- [ ] Bloat indicators present (Dimension 1) +- [ ] Structure mismatch detected (Dimension 2) +- [ ] Over-technical notation hurts clarity (Dimension 3) +- [ ] Cognitive load test shows improvement possible +- [ ] Intent preservation verified + +--- + +END OF COMPREHENSIVE SPECIFICATION V2 diff --git a/skills/prompt-architecting/references/STRATEGIES.md b/skills/prompt-architecting/references/STRATEGIES.md new file mode 100644 index 0000000..01cd953 --- /dev/null +++ b/skills/prompt-architecting/references/STRATEGIES.md @@ -0,0 +1,249 @@ +# Prompting Strategies Catalog + +Reference for prompt-architect. Each strategy includes when to use and example pattern. + +# IMPORTANT: Read Safety Guide First + +Before selecting strategies, read OPTIMIZATION-SAFETY-GUIDE.md to understand: +- When NOT to optimize +- Three dimensions of optimization (verbosity, structure, notation) +- Over-optimization risks +- Natural language vs technical strategy decision criteria +- Callable entity preservation requirements +- Strategy combination limits (1-3 max) +- Cognitive load as the core metric + +This ensures trustworthy optimization that reduces cognitive load while preserving intent. + +--- + +## 1. Constraint-Based Prompting + +**When**: Task scope clear but tends toward over-generation +**Pattern**: Set hard boundaries on length/scope +**Example**: `Generate auth docs. MAX 300 words. Cover only: setup, usage, errors.` + +## 2. Progressive Disclosure + +**When**: Complex topics where details can be separated +**Pattern**: Overview in main doc, details in references +**Example**: `Write skill overview (100w), then separate reference docs for: API specs, edge cases, examples.` + +## 3. Template-Based + +**When**: Output needs consistent structure +**Pattern**: Provide fill-in-the-blank format +**Example**: `Follow: [Problem] [Solution in 3 steps] [One example] [Common pitfall]` + +## 4. Directive Hierarchy + +**When**: Mixed priority requirements +**Pattern**: Use MUST/SHOULD/MAY tiers +**Example**: `MUST: Cover errors. SHOULD: Include 1 example. MAY: Reference advanced patterns.` + +## 5. 
Negative Prompting + +**When**: Known tendency to add unwanted content +**Pattern**: Explicitly exclude behaviors +**Example**: `Write deploy guide. DO NOT: framework comparisons, history, "best practices" essays.` + +## 6. Few-Shot Learning + +**When**: Abstract requirements but concrete examples exist +**Pattern**: Show 2-3 examples of desired output +**Example**: `Good doc: [150w example]. Bad doc: [verbose example]. Follow "good" pattern.` + +## 7. Decomposition + +**When**: Complex multi-step tasks +**Pattern**: Break into numbered discrete subtasks +**Example**: `Step 1: Identify 3 use cases. Step 2: 50w description each. Step 3: 1 code example each.` + +## 8. Comparative/Contrastive + +**When**: Need to show difference between good/bad +**Pattern**: Side-by-side ❌/✅ examples +**Example**: `❌ "Comprehensive guide covering everything..." ✅ "Setup: npm install. Use: auth.login()."` + +## 9. Anchoring + +**When**: Have reference standard to match +**Pattern**: Provide example to emulate +**Example**: `Match style/length of this: [paste 200w reference doc]` + +## 10. Output Formatting + +**When**: Structure more important than content discovery +**Pattern**: Specify exact section structure +**Example**: `Format: ## Problem (50w) ## Solution (100w) ## Example (code only)` + +## 11. Density Optimization + +**When**: Content tends toward fluff/filler +**Pattern**: Maximize information per word +**Example**: `Write as Hemingway: short sentences, concrete nouns, active voice. Every sentence advances understanding.` + +## 12. Audience-Targeted + +**When**: Reader expertise level known +**Pattern**: Specify what to skip based on audience +**Example**: `Audience: Senior dev who knows React. Skip basics, focus on gotchas and our implementation.` + +## 13. Execution Flow Control + +**When**: Complex workflows requiring state management, branching control, or approval gates +**Pattern**: Mandate complete execution with explicit flow control and dependencies +**Example**: + +```markdown +Execute this workflow completely: +1. READ: Use Read tool on $1 → content +2. PARSE: Extract front matter + body → {front_matter, body} +3. OPTIMIZE: Use prompt-architecting skill → optimized_body +4. PRESENT: Show optimized_body → STOP, WAIT for approval + +EXECUTION RULES: +- Stop only at step 4 (user approval required) +- Task incomplete until approval received +``` + +**Indicators**: +- REQUIRED: User approval gates, multiple terminal states, 3-way+ branching, complex state tracking +- NOT REQUIRED: Simple sequential tasks, linear flow, skill invocations for data only + +**Anti-pattern**: Using EFC for simple tasks that can be expressed as "Do X, then Y, then Z" + +See OPTIMIZATION-GUIDE.md for complete Execution Flow Control pattern, language guidelines, and agent/workflow optimization standards. + +## 14. Natural Language Reframing + +**When**: 1-2 step tasks where sequence is obvious or trivial +**Pattern**: Rewrite as clear prose when enumeration adds no clarity +**Example**: + +Input (over-enumerated): +```markdown +1. Read the file at the provided path +2. Write it back with modifications +``` + +Output (natural language): +```markdown +Read the file and write it back with modifications. +``` + +**Research findings**: Enumeration helps for 3+ steps (improves thoroughness, reduces ambiguity, provides cognitive anchors). 
Only skip enumeration when: +- 1-2 very simple steps +- Sequence is completely obvious +- Structure would add no clarity + +**Indicators for natural language**: +- Task is genuinely 1-2 steps (not 3+ steps disguised as one job) +- Sequence is trivial/obvious +- No need for LLM to address each point thoroughly +- Enumeration would be redundant + +**Why research matters**: Studies show prompt formatting impacts performance by up to 40%. Numbered lists help LLMs: +- Understand sequential steps clearly +- Address each point thoroughly and in order +- Reduce task sequence ambiguity +- Provide cognitive anchors that reduce hallucination + +**Anti-pattern**: Avoiding enumeration for 3+ step tasks. Research shows structure helps more than it hurts for multi-step instructions. + +**Revised guidance**: Default to enumeration for 3+ steps. Use natural language only when complexity truly doesn't justify structure. + +--- + +## 15. Technical → Natural Transformation + +**When**: Over-technical notation detected (3+ indicators) and cognitive load test shows notation hurts understanding + +**Indicators**: +- CAPS labels as action markers (CHECK:, PARSE:, VALIDATE:) +- → notation for data flow (→ variable_name) +- Variable naming conventions (work_file_status, requirement_data) +- Function call syntax (tool({params})) +- Sub-step enumeration (a/b/c when prose would work) +- Defensive meta-instructions ("DO NOT narrate", "continue immediately") + +**Pattern**: Keep appropriate structure level (based on complexity score), simplify notation to organized natural language + +**Transformation**: +- CAPS labels → natural section headers or action verbs +- → notation → implicit data flow or prose +- Variable names → eliminate or minimize +- Function call syntax → natural tool mentions +- Sub-step enumeration → consolidate to prose +- Defensive warnings → remove (trust structure) + +**Example**: + +Before (over-technical): +``` +1. CHECK: Verify status → work_file_status + a. Use Bash `git branch` → branch_name + b. Check if file exists + c. DO NOT proceed if exists +2. PARSE: Extract data → requirement_data +``` + +After (organized natural): +``` +## Setup + +Get current branch name and check if work file already exists. If it exists, stop and tell user to use /dev-resume. + +Parse the requirement source... +``` + +**Why this works**: +- Preserves appropriate structure (complexity still warrants organization) +- Removes ceremonial notation that creates cognitive load +- Eliminates stopping risk (no CAPS/→/variables creating boundaries) +- Natural language is clearer for LLM audiences +- Reduces cognitive load significantly + +**Often solves multiple problems simultaneously**: +- Dimension 3 (notation clarity) +- Stopping risk (no false completion boundaries) +- Cognitive load reduction + +**May be sufficient optimization alone** - don't over-optimize by adding more strategies. + +**See OPTIMIZATION-SAFETY-GUIDE.md Part 3 for detailed examples and Part 6 for stopping risk relationship.** + +--- + +## Strategy Selection Guide + +**FIRST**: Calculate complexity score (see SKILL.md Step 2). Let score guide structure level. + +**New addition**: Technical → Natural Transformation (applies across all complexity levels when notation is over-technical) + +**By complexity score** (research-informed): + +- **Score ≤ 0**: Natural Language Reframing acceptable (1-2 trivial steps). Add Constraint-Based if word limits needed. +- **Score 1-2**: Use numbered enumeration (research: 3+ steps benefit from structure). 
Add Template-Based or Constraint-Based. Avoid heavy EFC. +- **Score 3-4**: Moderate structure (enumeration + opening mandate). Add Decomposition or Template-Based. No EXECUTION RULES yet. +- **Score ≥ 5**: Full EFC pattern (mandate + EXECUTION RULES). Add Decomposition + Directive Hierarchy. + +**By output type**: + +**For skills**: Constraint-Based + Template-Based primary. Add Progressive Disclosure (move details to references/). + +**For documentation**: Output Formatting + Density Optimization primary. Add Audience-Targeted or Negative Prompting conditionally. + +**For plans**: Template-Based + Decomposition primary. Add Directive Hierarchy for priority tiers. + +**For simple workflows** (can be described as single job): Natural Language Reframing primary. Avoid enumeration and formal structure. + +**For complex workflows** (approval gates, multiple terminal states): Execution Flow Control (appropriate level based on score) + Decomposition. Apply agent/workflow optimization guidelines (40-50% reduction, preserve procedural detail). See OPTIMIZATION-GUIDE.md for specifics. + +**General complexity-based**: + +- Low: 1-2 strategies (Natural Language Reframing or Constraint-Based + Output Formatting) +- Medium: 2 strategies (Template-Based + Constraint-Based or light EFC) +- High: 2-3 strategies max (full EFC + Decomposition, or Natural Language + Progressive Disclosure) + +**Rule**: 1-3 strategies optimal. More than 3 = over-optimization risk. diff --git a/skills/publish-github-issues/SKILL.md b/skills/publish-github-issues/SKILL.md new file mode 100644 index 0000000..e466e61 --- /dev/null +++ b/skills/publish-github-issues/SKILL.md @@ -0,0 +1,114 @@ +--- +name: publish-github-issues +description: Publishes GitHub issues from YAML files using gh CLI. Use when publishing draft issues to GitHub or user provides YAML file path in tmp/issues/. Needs YAML file with valid issue definitions and gh CLI authenticated. Trigger with phrases like 'publish issues [file-path]', 'create github issues from [file-path]', 'publish to github'. +allowed-tools: "Read, Bash(gh:*), AskUserQuestion" +--- + +Base directory for this skill: {baseDir} + +## Workflow + +1. Get YAML file path (ask if not provided) +2. Read and validate YAML structure +3. Create issues in GitHub (parents before children) +4. Link child issues to parents using parent_ref +5. Add issues to project (if specified) +6. Ask user to archive YAML to processed/ + + + +## YAML Structure + +**Required fields:** + +- `repository` (format: `owner/repo`) +- `issues` (array with at least one) +- Each issue: `ref`, `title`, `body` + +**Optional fields:** + +- Top-level: `project` (integer), `defaults` (labels/milestone) +- Per-issue: `parent_ref`, `milestone`, `labels` (array) + + + + + +## Creating Issues + +**Order:** Create parent issues first, store their numbers, then create children. 
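
For reference, a minimal file matching the structure above might look like this (repository, refs, and titles are illustrative):

```yaml
repository: owner/repo
project: 5
defaults:
  labels: [enhancement]
issues:
  - ref: parent-1
    title: Implement billing integration
    body: Top-level tracking issue.
  - ref: child-1
    parent_ref: parent-1
    title: Add webhook handlers
    body: Covers provider webhooks.
    labels: [backend]
```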
+ +**For each issue:** + +```bash +gh issue create \ + --repo {repository} \ + --title "{title}" \ + --body "{body}" \ + [--milestone "{milestone}"] \ + [--label "label1,label2"] +``` + +**Parent-child linking:** +When issue has `parent_ref`, look up parent issue number and add to body: + +``` +Depends on: #{parent_number} +``` + +**Output:** Report each created issue: `✓ #{number}: {title}` + + + + + +## Adding to Project (Optional) + +If YAML has `project` field (project number), add each created issue: + +```bash +gh project item-add {project_number} \ + --owner {org} \ + --url {issue_url} +``` + + + + + +## Archive YAML + +**Important:** Only archive if ALL issues created successfully. + +1. Ask user: "Move YAML to processed/? (Y/n)" +2. If yes: Create `tmp/issues/processed/` if needed, move file +3. On partial failure: Keep file in tmp/issues/ for retry + + + + + +## Error Handling + +**gh not found:** `which gh` fails → Install gh CLI +**Not authenticated:** `gh auth status` fails → Run `gh auth login` +**API rate limit:** Wait ~1 hour, check `gh api rate_limit` +**Partial failure:** Report succeeded/failed issues, do not archive +**YAML parse error:** Report line number and field + + + +## Example Output + +``` +Creating 3 issues in owner/repo... + +✓ #188: Implement manual trial subscription management +✓ #189: Integrate Paddle billing system (child of #188) +✓ #190: Migrate manual trial tenants to Paddle (child of #188, #189) + +All issues created successfully. +Added 3 issues to project "My Project". + +Move YAML to processed/? (Y/n) +``` diff --git a/skills/saas-pricing-strategy/SKILL.md b/skills/saas-pricing-strategy/SKILL.md new file mode 100644 index 0000000..55f0d05 --- /dev/null +++ b/skills/saas-pricing-strategy/SKILL.md @@ -0,0 +1,313 @@ +--- +name: saas-pricing-strategy +description: Advises on SaaS pricing strategy using Daniel Priestley's oversubscription principles and Patrick Campbell's value-based framework. Use when defining pricing tiers, selecting value metrics, positioning against competitors, or creating pricing page copy for any SaaS product. +--- + +# SaaS Pricing Strategy + +Apply proven pricing frameworks from Daniel Priestley (demand generation) and Patrick Campbell (value-based pricing) to optimize SaaS pricing strategy. + +## When to Use This Skill + +- Defining or revising pricing tiers +- Selecting value metrics (per-seat, usage-based, flat-rate, hybrid) +- Competitive positioning and market analysis +- Writing pricing page copy +- Planning pricing experiments or A/B tests +- Evaluating pricing model changes + +## Core Pricing Philosophy + +### Daniel Priestley's Oversubscription Principle + +#### Demand > Supply = Pricing Power + +DO NOT compete on price. Compete on demand generation. + +Key tenets: + +- Transparent capacity constraints create urgency +- Waiting lists signal value and scarcity +- Price reflects perceived value, not just costs +- Market positioning matters more than feature comparison + +### Patrick Campbell's Value-Based Framework + +#### Price on value delivered, not cost incurred + +Key principles: + +1. **Value metric alignment**: What you charge for should match what customers value +2. **Buyer persona intimacy**: Different segments have different willingness to pay +3. **Continuous iteration**: Pricing is ongoing optimization, not one-time decision +4. 
**3-tier sweet spot**: Too few tiers = not enough choice, too many = analysis paralysis and choice overload
+
+## Value Metric Selection
+
+### Common Value Metrics
+
+**Per-Seat (Per-User)**
+
+- Best for: Collaboration tools, team software, platforms where value scales with team size
+- Pros: Predictable, simple, aligns with organizational growth
+- Cons: Can discourage adding users, revenue ceiling with small teams
+
+**Usage-Based (Consumption)**
+
+- Best for: Infrastructure, APIs, data processing, services with variable usage
+- Pros: Fair pricing ("pay for what you use"), no ceiling on revenue
+- Cons: Unpredictable billing, complex to explain, requires usage tracking
+
+**Flat-Rate (All-You-Can-Eat)**
+
+- Best for: Simple products, low variance in usage, commoditized markets
+- Pros: Simplest to communicate, no metering overhead
+- Cons: Leaves money on the table with power users, doesn't scale with value
+
+**Feature-Based (Good-Better-Best)**
+
+- Best for: Products with clear feature differentiation, tiered capabilities
+- Pros: Upgrade path is clear, captures different willingness to pay
+- Cons: Feature bloat temptation, can feel arbitrary
+
+**Hybrid (Combination)**
+
+- Best for: Complex products where multiple dimensions drive value
+- Pros: Captures more value, serves diverse segments
+- Cons: More complex to communicate and implement
+
+### Decision Framework for Value Metric
+
+Ask these questions:
+
+1. What metric correlates most strongly with customer value received?
+2. What's simple enough for customers to predict their costs?
+3. What aligns incentives (not penalizing desired behavior)?
+4. What grows naturally as customer success grows?
+5. What can you reliably measure and bill for?
+
+## Tiered Pricing Structure
+
+### Optimal Tier Count
+
+**3-4 tiers is ideal**
+
+- 2 tiers: Not enough choice, hard to capture variance
+- 3-4 tiers: Sweet spot for conversion
+- 5+ tiers: Analysis paralysis, decision fatigue
+
+### Tier Differentiation Strategies
+
+**Capacity Limits** (quantity-based)
+
+- Users, seats, projects, API calls, storage, transactions
+- Example: "Up to 5 users" vs "Up to 25 users"
+
+**Feature Access** (capability-based)
+
+- Advanced features, integrations, customization, priority support
+- Example: "Basic reports" vs "Custom dashboards + API access"
+
+**Service Level** (support-based)
+
+- Response time, dedicated support, onboarding, account management
+- Example: "Email support" vs "24/7 phone + dedicated CSM"
+
+**Usage Rights** (commercial terms)
+
+- Commercial use, white-labeling, resale rights, SLA guarantees
+- Example: "Personal use" vs "Commercial use + SLA"
+
+### Pricing Tier Psychology
+
+**Anchor with highest price**: Show Enterprise tier first or prominently to make mid-tier seem reasonable
+
+**Highlight recommended tier**: Use "Most Popular" or "Best Value" badge on target tier (usually middle)
+
+**Price gaps should increase**: $30 → $60 → $120 feels better than $30 → $50 → $70
+
+**Round numbers for simplicity**: $99/mo feels gimmicky for B2B; use $100/mo
+
+## Pricing Page Best Practices
+
+### Page Structure
+
+1. **Lead with value, not features**
+   - Wrong: "Unlimited clients, 5GB storage, custom fields"
+   - Right: "Save 10 hours per week on reporting"
+
+2. **Show annual savings option**
+   - Offer 2 months free for annual billing (17% discount)
+   - Improves cash flow and reduces churn
+
+3. 
**Transparent tier comparison**
+   - Feature comparison table with clear differentiators
+   - Use checkmarks, not excessive text
+   - Highlight recommended tier
+
+4. **Social proof by tier**
+   - Include customer counts, testimonials, use case examples per tier
+   - "Perfect for teams of 5-10" or "Used by 500+ companies like yours"
+
+5. **Remove friction**
+   - Free trial (14-30 days)
+   - No credit card required for trial (increases signups)
+   - Easy upgrade/downgrade path
+   - Money-back guarantee if appropriate
+
+### Copy Framework
+
+**Headline**: Focus on outcome transformation
+
+- Good: "Close deals faster with intelligent CRM"
+- Bad: "Affordable CRM software"
+
+**Subhead**: Address the primary objection
+
+- "Simple enough to start today. Powerful enough to scale with you."
+
+**CTA Language**:
+
+- Entry tier: "Start Free Trial"
+- Mid tier: "Start Free Trial" or "Get Started"
+- Top tier: "Start Free Trial" or "Schedule Demo"
+- Enterprise: "Contact Sales" or "Let's Talk"
+
+### Priestley-Aligned Demand Tactics
+
+**Capacity Signaling**:
+
+- "We're currently onboarding X new customers per month"
+- "Join X+ companies already using [Product]"
+- "Limited spots available for [special program]"
+
+**Demand Indicators**:
+
+- Show number of customers/users
+- Display recent signups (with permission)
+- Highlight waitlist count or assessment completions
+
+## Competitive Positioning
+
+### Market Research Checklist
+
+Before setting prices, research:
+
+1. **Direct competitors**: What do they charge? How do they tier?
+2. **Adjacent solutions**: What alternatives exist? (spreadsheets, consultants, etc.)
+3. **Customer budget**: What's typical spend for this category?
+4. **Switching costs**: How hard is it to leave the current solution?
+5. **Perceived value gap**: How much better are you, quantifiably?
+
+### Positioning Strategies
+
+**Price leadership** (lowest price)
+
+- Only if cost advantage is sustainable
+- Race-to-the-bottom risk
+- Attracts price-sensitive, high-churn customers
+
+**Value leadership** (best value)
+
+- Sweet spot for most SaaS
+- Middle-market pricing with superior product/service
+- "We're not the cheapest, but we're worth it"
+
+**Premium positioning** (highest price)
+
+- Requires defensible differentiation
+- Attracts best customers, lower churn
+- "You get what you pay for"
+
+## Pricing Experiments & Iteration
+
+### What to Test
+
+1. **Tier names**: Functional vs. aspirational (Starter vs. Essential)
+2. **Anchor pricing**: Show Enterprise price to make Professional seem reasonable
+3. **Feature bundling**: Which features drive upgrades?
+4. **Annual vs. monthly default**: Does showing annual first increase LTV?
+5. **Trial length**: 14-day vs. 30-day trial conversion rates
+6. **CTA copy**: "Start Free Trial" vs. "Get Started Free"
+
+### Metrics to Track
+
+- **Conversion rate by tier**: Which tier converts best from trial?
+- **Time to upgrade**: How long before customers outgrow their tier?
+- **Churn by tier**: Do certain tiers retain better?
+- **Revenue per customer by tier**: LTV analysis (see the worked example after this list)
+- **Failed payment recovery rate**: Billing issue resolution
+- **Price sensitivity**: At what price point do signups drop?
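+
+For the LTV analysis above, a quick worked example using the common approximation LTV ≈ monthly revenue per customer ÷ monthly churn rate (the figures are hypothetical):
+
+```
+$60/mo tier,  3%/mo churn   → LTV ≈ 60 / 0.03   = $2,000
+$120/mo tier, 1.5%/mo churn → LTV ≈ 120 / 0.015 = $8,000
+```
+
+Double the price with half the churn quadruples LTV, which is why churn by tier and revenue by tier belong in the same review.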
+ +### Iteration Cadence + +- Review pricing metrics: Monthly +- Minor adjustments (copy, positioning): Quarterly +- Major structural changes (tiers, value metric): Annually +- Always communicate changes in advance and grandfather existing customers when appropriate + +## Common Pitfalls to Avoid + +❌ **Competing on price alone**: Race to bottom, attracts worst customers + +✓ **Compete on demand and positioning**: Create scarcity, demonstrate value + +❌ **Too many tiers**: Analysis paralysis kills conversions + +✓ **3-4 tiers maximum**: Clear upgrade path, easy decision + +❌ **Grandfathering forever**: Prevents necessary price increases + +✓ **Communicate value, migrate gradually**: Give notice, explain benefits + +❌ **Feature-based differentiation only**: Customers don't buy features + +✓ **Outcome-based positioning**: "Save X hours/week" or "Increase revenue by Y%" + +❌ **Hiding pricing**: "Contact us" for all tiers reduces trust + +✓ **Transparent pricing**: Builds trust, qualifies leads naturally + +❌ **Set and forget**: Pricing is not a one-time decision + +✓ **Continuous optimization**: Treat pricing like product development + +## Decision Framework + +When evaluating pricing changes, ask: + +1. **Does this align with customer value perception?** (Campbell principle) +2. **Does this create healthy demand/supply tension?** (Priestley principle) +3. **Is it simple to understand and predict?** (Complexity kills conversion) +4. **Does it scale with customer success?** (Value metric alignment) +5. **Can we test it without disrupting existing customers?** (Iteration safety) + +If yes to all five, proceed with experiment. If no to any, revisit approach. + +## Workflow + +When asked to help with pricing: + +1. **Understand the context** + - What does the product do? + - Who are the customers? + - What competitors exist and how do they price? + - What constraints exist (market, budget, positioning)? + +2. **Apply the frameworks** + - Recommend value metric using decision framework + - Suggest tier structure (3-4 tiers) + - Position against competition using Priestley's demand principles + - Apply Campbell's value-based approach + +3. **Deliverable options** + - Pricing strategy document + - Pricing page copy + - Competitive analysis + - A/B test plan + - Pricing tier structure with rationale + +4. **Next steps** + - Set up tracking and metrics + - Plan communication strategy + - Schedule quarterly pricing review
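+
+As an example of the "pricing tier structure with rationale" deliverable, a hypothetical sketch applying the rules above (3 tiers, widening price gaps, a highlighted middle tier, round B2B numbers); the figures are illustrative, not recommendations:
+
+```
+Starter       $30/mo    Up to 5 users, email support
+Professional  $60/mo    Up to 25 users, integrations, priority support   ← "Most Popular"
+Enterprise    $120/mo   Unlimited users, SSO, dedicated CSM, SLA
+```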