commit 99971760403b23371de96eb954132047a5b87059 Author: Zhongwei Li Date: Sat Nov 29 18:32:48 2025 +0800 Initial commit diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..6da9c9b --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,18 @@ +{ + "name": "productivity-toolkit", + "description": "Essential productivity suite with planning and workflow tools: 1 documentation agent + 11 powerful commands (ultra-think, planning, PRPs, containerize, etc.) + 2 utility skills (PRP generator, timezone tools)", + "version": "1.0.0", + "author": { + "name": "Henrik Soederlund", + "email": "whom-wealthy.2z@icloud.com" + }, + "skills": [ + "./skills" + ], + "agents": [ + "./agents" + ], + "commands": [ + "./commands" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..3dae453 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# productivity-toolkit + +Essential productivity suite with planning and workflow tools: 1 documentation agent + 11 powerful commands (ultra-think, planning, PRPs, containerize, etc.) + 2 utility skills (PRP generator, timezone tools) diff --git a/agents/documentation-manager.md b/agents/documentation-manager.md new file mode 100644 index 0000000..a0728c0 --- /dev/null +++ b/agents/documentation-manager.md @@ -0,0 +1,72 @@ +--- +name: documentation-manager +description: "Expert documentation specialist. Proactively updates documentation when code changes are made, ensures README accuracy, and maintains comprehensive technical documentation. Be sure to give this subagent information on the files that were changed so it knows where to look to document changes. Always call this agent after there are code changes." +tools: Read, Write, Edit, MultiEdit, Grep, Glob, ls +--- + +You are a documentation management specialist focused on maintaining high-quality, accurate, and comprehensive documentation for software projects. 
Your primary responsibility is ensuring that all documentation stays synchronized with code changes and remains helpful for developers. + +## Core Responsibilities + +### 1. Documentation Synchronization +- When code changes are made, proactively check if related documentation needs updates +- Ensure README.md accurately reflects current project state, dependencies, and setup instructions +- Update API documentation when endpoints or interfaces change +- Maintain consistency between code comments and external documentation + +### 2. Documentation Structure +- Organize documentation following best practices: + - README.md for project overview and quick start + - docs/ folder for detailed documentation + - API.md for endpoint documentation + - ARCHITECTURE.md for system design + - CONTRIBUTING.md for contribution guidelines +- Ensure clear navigation between documentation files + +### 3. Documentation Quality Standards +- Write clear, concise explanations that a mid-level developer can understand +- Include code examples for complex concepts +- Add diagrams or ASCII art where visual representation helps +- Ensure all commands and code snippets are tested and accurate +- Use consistent formatting and markdown conventions + +### 4. Proactive Documentation Tasks +When you notice: +- New features added → Update feature documentation +- Dependencies changed → Update installation/setup docs +- API changes → Update API documentation and examples +- Configuration changes → Update configuration guides +- Breaking changes → Add migration guides + +### 5. Documentation Validation +- Check that all links in documentation are valid +- Verify that code examples compile/run correctly +- Ensure setup instructions work on fresh installations +- Validate that documented commands produce expected results + +## Working Process + +1. **Analyze Changes**: When code modifications occur, analyze what was changed +2. **Identify Impact**: Determine which documentation might be affected +3. 
**Update Systematically**: Update all affected documentation files +4. **Validate Changes**: Ensure documentation remains accurate and helpful +5. **Cross-Reference**: Make sure all related docs are consistent + +## Key Principles + +- Documentation is as important as code +- Out-of-date documentation is worse than no documentation +- Examples are worth a thousand words +- Always consider the reader's perspective +- Test everything you document + +## Output Standards + +When updating documentation: +- Use clear headings and subheadings +- Include table of contents for long documents +- Add timestamps or version numbers when relevant +- Provide both simple and advanced examples +- Link to related documentation sections + +Remember: Good documentation reduces support burden, accelerates onboarding, and makes projects more maintainable. Always strive for clarity, accuracy, and completeness. \ No newline at end of file diff --git a/commands/containerize.md b/commands/containerize.md new file mode 100644 index 0000000..97d7659 --- /dev/null +++ b/commands/containerize.md @@ -0,0 +1,93 @@ +--- +allowed-tools: Read, Write, Edit, Bash +argument-hint: [application-type] | --node | --python | --java | --go | --multi-stage +description: Containerize application with optimized Docker configuration, security, and multi-stage builds +model: claude-sonnet-4-5 +--- + +# Application Containerization + +Containerize application for deployment: $ARGUMENTS + +## Current Application Analysis + +- Application type: @package.json or @setup.py or @go.mod or @pom.xml (detect runtime) +- Existing Docker: @Dockerfile or @docker-compose.yml or @compose.yaml (if exists) +- Dependencies: !find . 
-name "*requirements*.txt" -o -name "package*.json" -o -name "go.mod" | head -3 +- Port configuration: !grep -r "PORT\|listen\|bind" src/ 2>/dev/null | head -3 || echo "Port detection needed" +- Build tools: @Makefile or build scripts detection + +## Task + +Implement production-ready containerization strategy: + +1. **Application Analysis and Containerization Strategy** + - Analyze application architecture and runtime requirements + - Identify application dependencies and external services + - Determine optimal base image and runtime environment + - Plan multi-stage build strategy for optimization + - Assess security requirements and compliance needs + +2. **Dockerfile Creation and Optimization** + - Create comprehensive Dockerfile with multi-stage builds + - Select minimal base images (Alpine, distroless, or slim variants) + - Configure proper layer caching and build optimization + - Implement security best practices (non-root user, minimal attack surface) + - Set up proper file permissions and ownership + +3. **Build Process Configuration** + - Configure .dockerignore file to exclude unnecessary files + - Set up build arguments and environment variables + - Implement build-time dependency installation and cleanup + - Configure application bundling and asset optimization + - Set up proper build context and file structure + +4. **Runtime Configuration** + - Configure application startup and health checks + - Set up proper signal handling and graceful shutdown + - Configure logging and output redirection + - Set up environment-specific configuration management + - Configure resource limits and performance tuning + +5. **Security Hardening** + - Run application as non-root user with minimal privileges + - Configure security scanning and vulnerability assessment + - Implement secrets management and secure credential handling + - Set up network security and firewall rules + - Configure security policies and access controls + +6. 
**Docker Compose Configuration** + - Create compose.yaml for local development + - Configure service dependencies and networking + - Set up volume mounting and data persistence + - Configure environment variables and secrets + - Set up development vs production configurations + +7. **Container Orchestration Preparation** + - Prepare configurations for Kubernetes deployment + - Create deployment manifests and service definitions + - Configure ingress and load balancing + - Set up persistent volumes and storage classes + - Configure auto-scaling and resource management + +8. **Monitoring and Observability** + - Configure application metrics and health endpoints + - Set up logging aggregation and centralized logging + - Configure distributed tracing and monitoring + - Set up alerting and notification systems + - Configure performance monitoring and profiling + +9. **CI/CD Integration** + - Configure automated Docker image building + - Set up image scanning and security validation + - Configure image registry and artifact management + - Set up automated deployment pipelines + - Configure rollback and blue-green deployment strategies + +10. **Testing and Validation** + - Test container builds and functionality + - Validate security configurations and compliance + - Test deployment in different environments + - Validate performance and resource utilization + - Test backup and disaster recovery procedures + - Create documentation for container deployment and management diff --git a/commands/execute-parallel.md b/commands/execute-parallel.md new file mode 100644 index 0000000..4d522b6 --- /dev/null +++ b/commands/execute-parallel.md @@ -0,0 +1,27 @@ +# Parallel Task Version Execution + +## Variables +FEATURE_NAME: $ARGUMENTS +PLAN_TO_EXECUTE: $ARGUMENTS +NUMBER_OF_PARALLEL_WORKTREES: $ARGUMENTS + +## Instructions + +We're going to create NUMBER_OF_PARALLEL_WORKTREES new subagents that use the Task tool to create N versions of the same feature in parallel. 
+ +Be sure to read PLAN_TO_EXECUTE. + +This enables us to concurrently build the same feature in parallel so we can test and validate each subagent's changes in isolation then pick the best changes. + +The first agent will run in trees/-1/ +The second agent will run in trees/-2/ +... +The last agent will run in trees/-/ + +The code in trees/-/ will be identical to the code in the current branch. It will be set up and ready for you to build the feature end to end. + +Each agent will independently implement the engineering plan detailed in PLAN_TO_EXECUTE in their respective workspace. + +When the subagent completes its work, have the subagent report their final changes made in a comprehensive `RESULTS.md` file at the root of their respective workspace. + +Make sure agents don't run any tests or other code - focus on the code changes only. \ No newline at end of file diff --git a/commands/execute-prp.md new file mode 100644 index 0000000..81fb8ea --- /dev/null +++ b/commands/execute-prp.md @@ -0,0 +1,40 @@ +# Execute BASE PRP + +Implement a feature using the PRP file. + +## PRP File: $ARGUMENTS + +## Execution Process + +1. **Load PRP** + - Read the specified PRP file + - Understand all context and requirements + - Follow all instructions in the PRP and extend the research if needed + - Ensure you have all needed context to implement the PRP fully + - Do more web searches and codebase exploration as needed + +2. **ULTRATHINK** + - Think hard before you execute the plan. Create a comprehensive plan addressing all requirements. + - Break down complex tasks into smaller, manageable steps using your todos tools. + - Use the TodoWrite tool to create and track your implementation plan. + - Identify implementation patterns from existing code to follow. + +3. **Execute the plan** + - Execute the PRP + - Implement all the code + +4. **Validate** + - Run each validation command + - Fix any failures + - Re-run until all pass + +5. 
**Complete** + - Ensure all checklist items done + - Run final validation suite + - Report completion status + - Read the PRP again to ensure you have implemented everything + +6. **Reference the PRP** + - You can always reference the PRP again if needed + +Note: If validation fails, use error patterns in PRP to fix and retry. \ No newline at end of file diff --git a/commands/generate-prp.md new file mode 100644 index 0000000..9191d9b --- /dev/null +++ b/commands/generate-prp.md @@ -0,0 +1,69 @@ +# Create PRP + +## Feature file: $ARGUMENTS + +Generate a complete PRP for general feature implementation with thorough research. Ensure context is passed to the AI agent to enable self-validation and iterative refinement. Read the feature file first to understand what needs to be created, how the examples provided help, and any other considerations. + +The AI agent only gets the context you are appending to the PRP and training data. Assume the AI agent has access to the codebase and the same knowledge cutoff as you, so it's important that your research findings are included or referenced in the PRP. The Agent has Websearch capabilities, so pass URLs to documentation and examples. + +## Research Process + +1. **Codebase Analysis** + - Search for similar features/patterns in the codebase + - Identify files to reference in PRP + - Note existing conventions to follow + - Check test patterns for validation approach + +2. **External Research** + - Search for similar features/patterns online + - Library documentation (include specific URLs) + - Implementation examples (GitHub/StackOverflow/blogs) + - Best practices and common pitfalls + +3. **User Clarification** (if needed) + - Specific patterns to mirror and where to find them? + - Integration requirements and where to find them? 
+ +## PRP Generation + +Using templates/prp_base.md as template: + +### Critical Context to Include and pass to the AI agent as part of the PRP +- **Documentation**: URLs with specific sections +- **Code Examples**: Real snippets from codebase +- **Gotchas**: Library quirks, version issues +- **Patterns**: Existing approaches to follow + +### Implementation Blueprint +- Start with pseudocode showing approach +- Reference real files for patterns +- Include error handling strategy +- list tasks to be completed to fulfill the PRP in the order they should be completed + +### Validation Gates (Must be Executable) eg for python +```bash +# Syntax/Style +ruff check --fix && mypy . + +# Unit Tests +uv run pytest tests/ -v + +``` + +*** CRITICAL AFTER YOU ARE DONE RESEARCHING AND EXPLORING THE CODEBASE BEFORE YOU START WRITING THE PRP *** + +*** ULTRATHINK ABOUT THE PRP AND PLAN YOUR APPROACH THEN START WRITING THE PRP *** + +## Output +Save as: `PRPs/{feature-name}.md` + +## Quality Checklist +- [ ] All necessary context included +- [ ] Validation gates are executable by AI +- [ ] References existing patterns +- [ ] Clear implementation path +- [ ] Error handling documented + +Score the PRP on a scale of 1-10 (confidence level to succeed in one-pass implementation using Claude Code) + +Remember: The goal is one-pass implementation success through comprehensive context. \ No newline at end of file diff --git a/commands/infinite.md new file mode 100644 index 0000000..9cace59 --- /dev/null +++ b/commands/infinite.md @@ -0,0 +1,202 @@ +**INFINITE AGENTIC LOOP COMMAND** + +Think deeply about this infinite generation task. You are about to embark on a sophisticated iterative creation process. + +**Variables:** + +spec_file: $ARGUMENTS +output_dir: $ARGUMENTS +count: $ARGUMENTS + +**ARGUMENTS PARSING:** +Parse the following arguments from "$ARGUMENTS": + +1. `spec_file` - Path to the markdown specification file +2. 
`output_dir` - Directory where iterations will be saved +3. `count` - Number of iterations (1-N or "infinite") + +**PHASE 1: SPECIFICATION ANALYSIS** +Read and deeply understand the specification file at `spec_file`. This file defines: + +- What type of content to generate +- The format and structure requirements +- Any specific parameters or constraints +- The intended evolution pattern between iterations + +Think carefully about the spec's intent and how each iteration should build upon previous work. + +**PHASE 2: OUTPUT DIRECTORY RECONNAISSANCE** +Thoroughly analyze the `output_dir` to understand the current state: + +- List all existing files and their naming patterns +- Identify the highest iteration number currently present +- Analyze the content evolution across existing iterations +- Understand the trajectory of previous generations +- Determine what gaps or opportunities exist for new iterations + +**PHASE 3: ITERATION STRATEGY** +Based on the spec analysis and existing iterations: + +- Determine the starting iteration number (highest existing + 1) +- Plan how each new iteration will be unique and evolutionary +- Consider how to build upon previous iterations while maintaining novelty +- If count is "infinite", prepare for continuous generation until context limits + +**PHASE 4: PARALLEL AGENT COORDINATION** +Deploy multiple Sub Agents to generate iterations in parallel for maximum efficiency and creative diversity: + +**Sub-Agent Distribution Strategy:** + +- For count 1-5: Launch all agents simultaneously +- For count 6-20: Launch in batches of 5 agents to manage coordination +- For "infinite": Launch waves of 3-5 agents, monitoring context and spawning new waves + +**Agent Assignment Protocol:** +Each Sub Agent receives: + +1. **Spec Context**: Complete specification file analysis +2. **Directory Snapshot**: Current state of output_dir at launch time +3. **Iteration Assignment**: Specific iteration number (starting_number + agent_index) +4. 
**Uniqueness Directive**: Explicit instruction to avoid duplicating concepts from existing iterations +5. **Quality Standards**: Detailed requirements from the specification + +**Agent Task Specification:** + +``` +TASK: Generate iteration [NUMBER] for [SPEC_FILE] in [OUTPUT_DIR] + +You are Sub Agent [X] generating iteration [NUMBER]. + +CONTEXT: +- Specification: [Full spec analysis] +- Existing iterations: [Summary of current output_dir contents] +- Your iteration number: [NUMBER] +- Assigned creative direction: [Specific innovation dimension to explore] + +REQUIREMENTS: +1. Read and understand the specification completely +2. Analyze existing iterations to ensure your output is unique +3. Generate content following the spec format exactly +4. Focus on [assigned innovation dimension] while maintaining spec compliance +5. Create file with exact name pattern specified +6. Ensure your iteration adds genuine value and novelty + +DELIVERABLE: Single file as specified, with unique innovative content +``` + +**Parallel Execution Management:** + +- Launch all assigned Sub Agents simultaneously using Task tool +- Monitor agent progress and completion +- Handle any agent failures by reassigning iteration numbers +- Ensure no duplicate iteration numbers are generated +- Collect and validate all completed iterations + +**PHASE 5: INFINITE MODE ORCHESTRATION** +For infinite generation mode, orchestrate continuous parallel waves: + +**Wave-Based Generation:** + +1. **Wave Planning**: Determine next wave size (3-5 agents) based on context capacity +2. **Agent Preparation**: Prepare fresh context snapshots for each new wave +3. **Progressive Sophistication**: Each wave should explore more advanced innovation dimensions +4. **Context Monitoring**: Track total context usage across all agents and main orchestrator +5. 
**Graceful Conclusion**: When approaching context limits, complete current wave and summarize + +**Infinite Execution Cycle:** + +``` +WHILE context_capacity > threshold: + 1. Assess current output_dir state + 2. Plan next wave of agents (size based on remaining context) + 3. Assign increasingly sophisticated creative directions + 4. Launch parallel Sub Agent wave + 5. Monitor wave completion + 6. Update directory state snapshot + 7. Evaluate context capacity remaining + 8. If sufficient capacity: Continue to next wave + 9. If approaching limits: Complete final wave and summarize +``` + +**Progressive Sophistication Strategy:** + +- **Wave 1**: Basic functional replacements with single innovation dimension +- **Wave 2**: Multi-dimensional innovations with enhanced interactions +- **Wave 3**: Complex paradigm combinations with adaptive behaviors +- **Wave N**: Revolutionary concepts pushing the boundaries of the specification + +**Context Optimization:** + +- Each wave uses fresh agent instances to avoid context accumulation +- Main orchestrator maintains lightweight state tracking +- Progressive summarization of completed iterations to manage context +- Strategic pruning of less essential details in later waves + +**EXECUTION PRINCIPLES:** + +**Quality & Uniqueness:** + +- Each iteration must be genuinely unique and valuable +- Build upon previous work while introducing novel elements +- Maintain consistency with the original specification +- Ensure proper file organization and naming + +**Parallel Coordination:** + +- Deploy Sub Agents strategically to maximize creative diversity +- Assign distinct innovation dimensions to each agent to avoid overlap +- Coordinate timing to prevent file naming conflicts +- Monitor all agents for successful completion and quality + +**Scalability & Efficiency:** + +- Think deeply about the evolution trajectory across parallel streams +- For infinite mode, optimize for maximum valuable output before context exhaustion +- Use 
wave-based generation to manage context limits intelligently +- Balance parallel speed with quality and coordination overhead + +**Agent Management:** + +- Provide each Sub Agent with complete context and clear assignments +- Handle agent failures gracefully with iteration reassignment +- Ensure all parallel outputs integrate cohesively with the overall progression + +**ULTRA-THINKING DIRECTIVE:** +Before beginning generation, engage in extended thinking about: + +**Specification & Evolution:** + +- The deeper implications of the specification +- How to create meaningful progression across iterations +- What makes each iteration valuable and unique +- How to balance consistency with innovation + +**Parallel Strategy:** + +- Optimal Sub Agent distribution for the requested count +- How to assign distinct creative directions to maximize diversity +- Wave sizing and timing for infinite mode +- Context management across multiple parallel agents + +**Coordination Challenges:** + +- How to prevent duplicate concepts across parallel streams +- Strategies for ensuring each agent produces genuinely unique output +- Managing file naming and directory organization with concurrent writes +- Quality control mechanisms for parallel outputs + +**Infinite Mode Optimization:** + +- Wave-based generation patterns for sustained output +- Progressive sophistication strategies across multiple waves +- Context capacity monitoring and graceful conclusion planning +- Balancing speed of parallel generation with depth of innovation + +**Risk Mitigation:** + +- Handling agent failures and iteration reassignment +- Ensuring coherent overall progression despite parallel execution +- Managing context window limits across the entire system +- Maintaining specification compliance across all parallel outputs + +Begin execution with deep analysis of these parallel coordination challenges and proceed systematically through each phase, leveraging Sub Agents for maximum creative output and efficiency. 
diff --git a/commands/planning.md new file mode 100644 index 0000000..577e3d1 --- /dev/null +++ b/commands/planning.md @@ -0,0 +1,8 @@ +Please create a detailed plan to implement a project objective or feature according to the user's task which is as follows: $ARGUMENTS. + +- You will ask followup questions and clarification prompts to the user until you are clear how to approach the plan. +- You are to collect as much information as you can around the project to maximise its chances of success. +- You will now ULTRATHINK about the plan and figure out if you are missing anything, spin the project around from a different perspective and challenge your assumptions. +- You will research online, to find peer reviewed solutions, best practices or other inspiration sources that help increase the chances of success. +- Once your research is completed, you will create a clearly described task list and present that to the user. +- You will NOT write any code or start building anything until the user has confirmed your plan and task list, so you will collaborate with the user until this has been completed and the plan is confirmed. 
\ No newline at end of file diff --git a/commands/prep-parallel.md b/commands/prep-parallel.md new file mode 100644 index 0000000..25004ed --- /dev/null +++ b/commands/prep-parallel.md @@ -0,0 +1,14 @@ +# Initialize parallel git worktree directories for parallel Claude Code agents + +## Variables +FEATURE_NAME: $ARGUMENTS +NUMBER_OF_PARALLEL_WORKTREES: $ARGUMENTS + +## Execute these commands +> Execute the loop in parallel with the Batch and Task tool + +- create a new dir `trees/` +- for i in NUMBER_OF_PARALLEL_WORKTREES + - RUN `git worktree add -b FEATURE_NAME-i ./trees/FEATURE_NAME-i` + - RUN `cd trees/FEATURE_NAME-i`, `git ls-files` to validate +- RUN `git worktree list` to verify all trees were created properly \ No newline at end of file diff --git a/commands/primer.md b/commands/primer.md new file mode 100644 index 0000000..9ad0366 --- /dev/null +++ b/commands/primer.md @@ -0,0 +1,16 @@ +# Prime Context for Claude Code + +Use the command `tree` to get an understanding of the project structure. + +Start with reading the CLAUDE.md file if it exists to get an understanding of the project. + +Read the README.md file to get an understanding of the project. + +Read key files in the src/ or root directory + +Explain back to me: +- Project structure +- Project purpose and goals +- Key files and their purposes +- Any important dependencies +- Any important configuration files \ No newline at end of file diff --git a/commands/prompt_writer.md b/commands/prompt_writer.md new file mode 100644 index 0000000..f9929de --- /dev/null +++ b/commands/prompt_writer.md @@ -0,0 +1,18 @@ +--- +allowed-tools: Read, Write, Edit, +argument-hint: [Your initial draft idea of a prompt to be improved] +description: Helps to craft an improved prompt based on your brief, following Anthropic's latest prompt engineering guides +model: claude-sonnet-4-5 +--- +You're a Principal prompt engineer with equity stake in the company you're being asked to write prompts for. 
+ +READ: +https://docs.claude.com/en/docs/build-with-claude/prompt-engineering/claude-4-best-practices.md + +You are to improve and think hard about best practice prompt engineering, and take the user's prompt brief and improve it. + +User's Brief/Initial Prompt: +$ARGUMENTS + +Output: +Improved prompt validated against the documentation article shared with you diff --git a/commands/reflection.md new file mode 100644 index 0000000..ef05d2f --- /dev/null +++ b/commands/reflection.md @@ -0,0 +1,50 @@ +You are an expert in prompt engineering, specializing in optimizing AI code assistant instructions. Your task is to analyze and improve the instructions for Claude Code found in u/CLAUDE.md. Follow these steps carefully: + +1. Analysis Phase: + Review the chat history in your context window. + +Then, examine the current Claude instructions: + +u/CLAUDE.md + + +Analyze the chat history and instructions to identify areas that could be improved. Look for: + +- Inconsistencies in Claude's responses +- Misunderstandings of user requests +- Areas where Claude could provide more detailed or accurate information +- Opportunities to enhance Claude's ability to handle specific types of queries or tasks + +2. Interaction Phase: + Present your findings and improvement ideas to the human. For each suggestion: + a) Explain the current issue you've identified + b) Propose a specific change or addition to the instructions + c) Describe how this change would improve Claude's performance + +Wait for feedback from the human on each suggestion before proceeding. If the human approves a change, move it to the implementation phase. If not, refine your suggestion or move on to the next idea. + +3. Implementation Phase: + For each approved change: + a) Clearly state the section of the instructions you're modifying + b) Present the new or modified text for that section + c) Explain how this change addresses the issue identified in the analysis phase + +4. 
Output Format: + Present your final output in the following structure: + + +[List the issues identified and potential improvements] + + + +[For each approved improvement: +1. Section being modified +2. New or modified instruction text +3. Explanation of how this addresses the identified issue] + + + +[Present the complete, updated set of instructions for Claude, incorporating all approved changes] + + +Remember, your goal is to enhance Claude's performance and consistency while maintaining the core functionality and purpose of the AI assistant. Be thorough in your analysis, clear in your explanations, and precise in your implementations. diff --git a/commands/ultra-think.md b/commands/ultra-think.md new file mode 100644 index 0000000..4fc3159 --- /dev/null +++ b/commands/ultra-think.md @@ -0,0 +1,169 @@ +# Deep Analysis and Problem Solving Mode + +Deep analysis and problem solving mode + +## Instructions + +1. **Initialize Ultra Think Mode** + - Acknowledge the request for enhanced analytical thinking + - Set context for deep, systematic reasoning + - Prepare to explore the problem space comprehensively + +2. **Parse the Problem or Question** + - Extract the core challenge from: **$ARGUMENTS** + - Identify all stakeholders and constraints + - Recognize implicit requirements and hidden complexities + - Question assumptions and surface unknowns + +3. **Multi-Dimensional Analysis** + Approach the problem from multiple angles: + + ### Technical Perspective + + - Analyze technical feasibility and constraints + - Consider scalability, performance, and maintainability + - Evaluate security implications + - Assess technical debt and future-proofing + + ### Business Perspective + + - Understand business value and ROI + - Consider time-to-market pressures + - Evaluate competitive advantages + - Assess risk vs. 
reward trade-offs + + ### User Perspective + + - Analyze user needs and pain points + - Consider usability and accessibility + - Evaluate user experience implications + - Think about edge cases and user journeys + + ### System Perspective + + - Consider system-wide impacts + - Analyze integration points + - Evaluate dependencies and coupling + - Think about emergent behaviors + +4. **Generate Multiple Solutions** + - Brainstorm at least 3-5 different approaches + - For each approach, consider: + - Pros and cons + - Implementation complexity + - Resource requirements + - Potential risks + - Long-term implications + - Include both conventional and creative solutions + - Consider hybrid approaches + +5. **Deep Dive Analysis** + For the most promising solutions: + - Create detailed implementation plans + - Identify potential pitfalls and mitigation strategies + - Consider phased approaches and MVPs + - Analyze second and third-order effects + - Think through failure modes and recovery + +6. **Cross-Domain Thinking** + - Draw parallels from other industries or domains + - Apply design patterns from different contexts + - Consider biological or natural system analogies + - Look for innovative combinations of existing solutions + +7. **Challenge and Refine** + - Play devil's advocate with each solution + - Identify weaknesses and blind spots + - Consider "what if" scenarios + - Stress-test assumptions + - Look for unintended consequences + +8. **Synthesize Insights** + - Combine insights from all perspectives + - Identify key decision factors + - Highlight critical trade-offs + - Summarize innovative discoveries + - Present a nuanced view of the problem space + +9. 
**Provide Structured Recommendations** + Present findings in a clear structure: + ``` + + ## Problem Analysis + + - Core challenge + - Key constraints + - Critical success factors + + ## Solution Options + + ### Option 1: [Name] + + - Description + - Pros/Cons + - Implementation approach + - Risk assessment + + ### Option 2: [Name] + + [Similar structure] + + ## Recommendation + + - Recommended approach + - Rationale + - Implementation roadmap + - Success metrics + - Risk mitigation plan + + ## Alternative Perspectives + + - Contrarian view + - Future considerations + - Areas for further research + ``` + +10. **Meta-Analysis** + - Reflect on the thinking process itself + - Identify areas of uncertainty + - Acknowledge biases or limitations + - Suggest additional expertise needed + - Provide confidence levels for recommendations + +## Usage Examples + +```bash + +### Architectural decision + +/project:ultra-think Should we migrate to microservices or improve our monolith? + +### Complex problem solving + +/project:ultra-think How do we scale our system to handle 10x traffic while reducing costs? + +### Strategic planning + +/project:ultra-think What technology stack should we choose for our next-gen platform? + +### Design challenge + +/project:ultra-think How can we improve our API to be more developer-friendly while maintaining backward compatibility? 
+``` + +## Key Principles + +- **First Principles Thinking**: Break down to fundamental truths +- **Systems Thinking**: Consider interconnections and feedback loops +- **Probabilistic Thinking**: Work with uncertainties and ranges +- **Inversion**: Consider what to avoid, not just what to do +- **Second-Order Thinking**: Consider consequences of consequences + +## Output Expectations + +- Comprehensive analysis (typically 2-4 pages of insights) +- Multiple viable solutions with trade-offs +- Clear reasoning chains +- Acknowledgment of uncertainties +- Actionable recommendations +- Novel insights or perspectives diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..ddb0259 --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,125 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:henkisdabro/wookstar-claude-code-plugins:productivity-toolkit", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "284b1e195f0292128154a82d6509201c397170c1", + "treeHash": "abfbe5b7dc93f5c07177106574d4f09664c4d8be7887b5e28288ac0743cf1df4", + "generatedAt": "2025-11-28T10:17:23.923726Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "productivity-toolkit", + "description": "Essential productivity suite with planning and workflow tools: 1 documentation agent + 11 powerful commands (ultra-think, planning, PRPs, containerize, etc.) 
+ 2 utility skills (PRP generator, timezone tools)", + "version": "1.0.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "e47c695547790ba2b2245c0bc9db3b3da3f9fd5202f59fe11c3b4ef649c4b8dd" + }, + { + "path": "agents/documentation-manager.md", + "sha256": "5e3d7424935259c70b3e71bcaf1bb7472dd0ac2515136d992bf5b79ffcf8e4b9" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "6f28cfa6cf1d39dabaa83cdd30f08a3009dbfa4d9ac3b56432f65b21fdfbefb6" + }, + { + "path": "commands/planning.md", + "sha256": "99339b014d9388a9eb03c819fe1718c0e97cbcb2d60804a8ed568584129bee67" + }, + { + "path": "commands/execute-parallel.md", + "sha256": "9d20b321bd9178ec5a49090ca4d38bbbe1653ce855d1be9dd7907de7fb5bfd66" + }, + { + "path": "commands/prompt_writer.md", + "sha256": "33dc817e264251f982fc38c1b2c6ceb7bf0b89d324edd426180cc90e4dd254fe" + }, + { + "path": "commands/prep-parallel.md", + "sha256": "2b63fed5ab0aa096476c86fa905f75b0451476104905204b811aca1e89707af3" + }, + { + "path": "commands/infinite.md", + "sha256": "ba249b6bd175247a92ef48b764575f423e4c1a045f7d3e55cf7e189355f3991d" + }, + { + "path": "commands/primer.md", + "sha256": "48466e604c9b169d2aa8c0a27eaba1061f64d852491241de6b708654fbe6623b" + }, + { + "path": "commands/generate-prp.md", + "sha256": "e1b4d14196a2491c53e19b6f2fe18c79ff52f4e45e8d9580436dd76b2a0c507d" + }, + { + "path": "commands/ultra-think.md", + "sha256": "736363906eb8c3c060277e110030edf26af3cca20ba3ad8f0e67c5a2da90e505" + }, + { + "path": "commands/execute-prp.md", + "sha256": "3a74ea61c0d61047c1df8d7a6a0900bedeb76771482cdc6e6569528430e019b3" + }, + { + "path": "commands/reflection.md", + "sha256": "de5e2eb202c43bf80f195981d7574641465ad5825c5e06e47b312a1bece4e79d" + }, + { + "path": "commands/containerize.md", + "sha256": "feb7151ca44fe505c049b2c76698ae465d4adf5ce080e026de0582b8b82067ef" + }, + { + "path": "skills/timezone-tools/SKILL.md", + "sha256": "e2a086ad30c600040fb48e184334f746962204e4748d6cd9eb860dc0b13d6ab0" + }, + { + "path": 
"skills/timezone-tools/scripts/requirements.txt", + "sha256": "b69dad8965f75b18347743c9090542c7e003d2ce3e0d98e46284b20b56769ab0" + }, + { + "path": "skills/timezone-tools/scripts/get_time.py", + "sha256": "207161f1e15b4b79ed979edc81ad40d5fa7faca1b82dc6622a838b95db32e0e7" + }, + { + "path": "skills/timezone-tools/scripts/convert_time.py", + "sha256": "2a85bb3a78846067ef4bdb0b3b7f384abd977d1fd4fdc094da63e5fed6b662e0" + }, + { + "path": "skills/timezone-tools/scripts/list_timezones.py", + "sha256": "72057c46ff22b2952c165cb84d476640ed0ae796959bf3996c9859e8ac7ec5e2" + }, + { + "path": "skills/timezone-tools/data/common_timezones.json", + "sha256": "5d9774c1ba774e761e6039e23225da59b9ac4549c90c888f1389e489dcdd0ab7" + }, + { + "path": "skills/prp-generator/SKILL.md", + "sha256": "ece7d046b6e84c41d650b7cd1b3dc10703907955dca42d0eb2477e24f1a85b5a" + }, + { + "path": "skills/prp-generator/references/research_methodology.md", + "sha256": "387d8201021da027df3180611833c0fe231654077d60a5104bd51aece22b9db3" + }, + { + "path": "skills/prp-generator/assets/prp_template.md", + "sha256": "0c7fae2b7befd8c5981e2ae539cf2f37c43174b047b87bd94337ed12eb591e0b" + } + ], + "dirSha256": "abfbe5b7dc93f5c07177106574d4f09664c4d8be7887b5e28288ac0743cf1df4" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/prp-generator/SKILL.md b/skills/prp-generator/SKILL.md new file mode 100644 index 0000000..36800db --- /dev/null +++ b/skills/prp-generator/SKILL.md @@ -0,0 +1,400 @@ +--- +name: prp-generator +description: Generate comprehensive Product Requirement Plans (PRPs) for feature implementation with thorough codebase analysis and external research. Use when the user requests a PRP, PRD, or detailed implementation plan for a new feature. Conducts systematic research, identifies patterns, and creates executable validation gates for one-pass implementation success. 
+--- + +# PRP Generator + +## Overview + +This skill generates comprehensive Product Requirement Plans (PRPs) that enable AI agents to implement features in a single pass with high success rates. The skill combines systematic codebase analysis with external research to create detailed, context-rich implementation blueprints. + +## When to Use This Skill + +Invoke this skill when: +- User requests a PRP or PRD (Product Requirement Plan/Document) +- User wants a detailed implementation plan for a new feature +- User asks to "plan out" or "design" a complex feature +- Beginning a significant feature development that would benefit from structured planning +- User provides a feature description file and asks for implementation guidance + +## Core Principle + +**Context is Everything**: The AI agent implementing your PRP only receives: +1. The PRP content you create +2. Training data knowledge +3. Access to the codebase +4. WebSearch capabilities + +Therefore, your PRP must be self-contained with all necessary context, specific references, and executable validation gates. + +## Workflow + +### Phase 1: Understanding the Feature + +1. **Read the Feature Request** + - If user provides a feature file path, read it completely + - If user provides verbal description, clarify requirements by asking: + - What is the user trying to accomplish? + - What are the acceptance criteria? + - Are there any specific constraints or requirements? + - Identify the core problem being solved + +2. **Clarify Ambiguities** + - Use AskUserQuestion tool for any unclear requirements + - Confirm technology stack assumptions + - Verify integration points + - Ask about specific patterns to follow if not obvious + +### Phase 2: Codebase Analysis (Mandatory) + +**Goal**: Understand existing patterns, conventions, and integration points + +Refer to `references/research_methodology.md` for detailed guidance, but the core steps are: + +1. 
**Search for Similar Features** + ``` + Use Grep to search for: + - Similar component names + - Similar functionality keywords + - Similar UI patterns + - Similar API endpoints + ``` + + Document findings with: + - Exact file paths and line numbers + - Code snippets showing patterns + - Relevance to new feature + - Necessary adaptations + +2. **Identify Architectural Patterns** + - Directory structure conventions + - Component organization patterns + - State management approach + - API structure patterns + - Routing patterns (if applicable) + + Example findings: + ``` + Pattern: Feature-based directory structure + Location: src/features/ + Application: Create src/features/[new-feature]/ + ``` + +3. **Document Coding Conventions** + - TypeScript usage patterns (interfaces vs types, strict mode) + - Component patterns (FC vs function, default vs named exports) + - Styling approach (CSS modules, styled-components, Tailwind) + - Import ordering and organization + - Function and variable naming + - Comment style + + Example: + ``` + Convention: Named exports for all components + Example: export function UserProfile() { ... } + Found in: src/components/*.tsx + ``` + +4. **Study Test Patterns** + - Test framework and version + - Test file naming and location + - Mock strategies + - Coverage expectations + - Example test to mirror + + Document: + ``` + Framework: Vitest + @testing-library/react + Pattern: Co-located tests with *.test.tsx + Example: src/components/Button/Button.test.tsx + Mock Strategy: Use vi.fn() for functions, MSW for HTTP + ``` + +5. **Check Project Configuration** + - Review `package.json` for dependencies and scripts + - Check `tsconfig.json` for TypeScript settings + - Review build configuration (vite.config.ts, etc.) 
+ - Note path aliases and special configurations + + Document: + ``` + Build Tool: Vite 5.x + Path Aliases: '@/' → 'src/', '@components/' → 'src/components/' + TypeScript: Strict mode enabled + ``` + +### Phase 3: External Research (Mandatory) + +**Goal**: Find best practices, documentation, examples, and gotchas + +Refer to `references/research_methodology.md` for detailed guidance, but the core steps are: + +1. **Search for Library Documentation** + - Go to official documentation for any libraries being used + - Find the SPECIFIC version in package.json + - Document exact URLs to relevant sections + - Note version-specific features or changes + + Example output: + ``` + Library: @tanstack/react-query + Version: 5.28.0 (from package.json) + Docs: https://tanstack.com/query/latest/docs/react/overview + Key Sections: + - Queries: https://tanstack.com/query/latest/docs/react/guides/queries + - Mutations: https://tanstack.com/query/latest/docs/react/guides/mutations + Gotchas: + - Query keys must be arrays + - Automatic refetching on window focus + - Default staleTime is 0 + ``` + +2. **Find Implementation Examples** + - Search GitHub for similar implementations + - Look for StackOverflow solutions (recent, highly-voted) + - Find blog posts from reputable sources + - Check official example repositories + + Document: + ``` + Example: Form validation with React Hook Form + Zod + Source: https://github.com/react-hook-form/react-hook-form/tree/master/examples/V7/zodResolver + Relevance: Shows exact integration pattern needed + Key Takeaway: Use zodResolver from @hookform/resolvers + ``` + +3. 
**Research Best Practices** + - Search for "[technology] best practices [current year]" + - Look for common pitfalls and gotchas + - Research performance considerations + - Check security implications (OWASP guidelines) + + Document: + ``` + Practice: Input sanitization for user content + Why: Prevent XSS attacks + How: Use DOMPurify before rendering HTML + Reference: https://owasp.org/www-community/attacks/xss/ + Warning: NEVER use dangerouslySetInnerHTML without sanitization + ``` + +4. **Performance & Security Research** + - Bundle size implications of new dependencies + - Runtime performance patterns + - Security vulnerabilities to avoid + - Accessibility considerations + + Document specific URLs and recommendations + +### Phase 4: Ultra-Thinking (Critical) + +**STOP AND THINK DEEPLY BEFORE WRITING THE PRP** + +This is the most important phase. Spend significant time analyzing: + +1. **Integration Analysis** + - How does the new feature connect to existing code? + - What existing patterns should be followed? + - Where might conflicts arise? + - What files will need to be created vs modified? + +2. **Implementation Path Planning** + - What is the logical order of implementation steps? + - What are the dependencies between steps? + - Where are the potential roadblocks? + - What edge cases need handling? + +3. **Validation Strategy** + - What can be validated automatically? + - What requires manual testing? + - How can the implementer verify each step? + - What are the success criteria? + +4. **Context Completeness Check** + Ask yourself: + - Could an AI agent implement this without asking questions? + - Are all integration points documented? + - Are all necessary examples included? + - Are gotchas and warnings clearly stated? + - Are validation gates executable? + - Is the implementation path clear and logical? + +5. **Quality Assessment** + - Is this PRP comprehensive enough for one-pass implementation? + - What could cause the implementation to fail? 
+ - What additional context would be helpful? + - Are all assumptions documented? + +### Phase 5: Generate the PRP + +Use the template from `assets/prp_template.md` as the base structure, and populate it with: + +1. **Metadata Section** + - Feature name + - Timeline estimate + - Confidence score (1-10) + - Creation date + +2. **Executive Summary** + - 2-3 sentences describing the feature + - Core value proposition + +3. **Research Findings** + - Codebase analysis results (with file:line references) + - External research (with specific URLs and sections) + - Document EVERYTHING discovered in Phase 2 and 3 + +4. **Technical Specification** + - Architecture overview + - Component breakdown + - Data models + - API endpoints (if applicable) + +5. **Implementation Blueprint** + - Prerequisites + - Step-by-step implementation (with pseudocode) + - File-by-file changes + - Reference patterns from codebase + - Error handling strategy + - Edge cases + +6. **Testing Strategy** + - Unit test approach + - Integration test approach + - Manual testing checklist + +7. **Validation Gates** + Must be EXECUTABLE commands: + ```bash + # Type checking + npm run type-check + + # Linting + npm run lint + + # Tests + npm run test + + # Build + npm run build + ``` + +8. **Success Criteria** + - Clear, measurable completion criteria + - Checklist format + +### Phase 6: Quality Scoring + +Score the PRP on a scale of 1-10 for one-pass implementation success: + +**Scoring Criteria**: +- **9-10**: Exceptionally detailed, all context included, clear path, executable gates +- **7-8**: Very good, minor gaps, mostly clear implementation path +- **5-6**: Adequate, some ambiguity, may require clarification +- **3-4**: Incomplete research, missing context, unclear path +- **1-2**: Insufficient for implementation + +**If score is below 7**: Go back and improve the PRP before delivering it. + +### Phase 7: Save and Deliver + +1. 
**Determine Feature Name** + - Use kebab-case + - Be descriptive but concise + - Example: "user-authentication", "dark-mode-toggle", "data-export" + +2. **Save the PRP** + ``` + Save to: PRPs/[feature-name].md + ``` + + If PRPs directory doesn't exist, create it: + ```bash + mkdir -p PRPs + ``` + +3. **Deliver Summary to User** + Provide: + - Brief summary of the feature + - Location of saved PRP + - Confidence score and rationale + - Next steps recommendation + +## Quality Checklist + +Before delivering the PRP, verify: + +- [ ] Feature requirements fully understood +- [ ] Codebase analysis completed with specific file references +- [ ] External research completed with URLs and versions +- [ ] All similar patterns identified and documented +- [ ] Coding conventions documented +- [ ] Test patterns identified +- [ ] Implementation steps clearly defined +- [ ] Validation gates are executable (not pseudo-code) +- [ ] Error handling strategy documented +- [ ] Edge cases identified +- [ ] Success criteria defined +- [ ] Confidence score 7+ (if not, improve the PRP) +- [ ] No assumptions left undocumented +- [ ] Integration points clearly identified +- [ ] PRP saved to correct location + +## Common Pitfalls to Avoid + +1. **Vague References** + - ❌ "There's a similar component somewhere" + - ✅ "See UserProfile at src/components/UserProfile.tsx:45-67" + +2. **Missing Version Information** + - ❌ "Use React Query" + - ✅ "Use @tanstack/react-query v5.28.0" + +3. **Non-Executable Validation Gates** + - ❌ "Run tests and make sure they pass" + - ✅ "npm run test && npm run build" + +4. **Generic Best Practices** + - ❌ "Follow React best practices" + - ✅ "Use named exports (see src/components/Button.tsx:1)" + +5. **Incomplete Research** + - ❌ Skipping codebase analysis + - ✅ Thoroughly document existing patterns + +6. 
**Missing Gotchas** + - ❌ Assuming smooth implementation + - ✅ Document known issues and edge cases + +## Example Usage + +**User Request**: +> "Create a PRP for adding dark mode support to the application" + +**Your Response**: +1. Clarify: "Should dark mode preference persist across sessions? Should it respect system preferences?" +2. Research codebase for theme-related code +3. Research external resources (dark mode best practices, library options) +4. Ultra-think about implementation approach +5. Generate comprehensive PRP using template +6. Score the PRP +7. Save to `PRPs/dark-mode-support.md` +8. Deliver summary with confidence score + +## Resources + +### Template +- `assets/prp_template.md` - Base template for all PRPs + +### References +- `references/research_methodology.md` - Detailed research guidance and best practices + +## Notes + +- **Research is mandatory**: Never skip codebase or external research +- **Be specific**: Always include file paths, line numbers, URLs, versions +- **Think deeply**: Phase 4 (Ultra-Thinking) is critical for success +- **Validate everything**: All validation gates must be executable +- **Score honestly**: If confidence is below 7, improve the PRP +- **Context is king**: The implementer only has what you put in the PRP diff --git a/skills/prp-generator/assets/prp_template.md b/skills/prp-generator/assets/prp_template.md new file mode 100644 index 0000000..6276861 --- /dev/null +++ b/skills/prp-generator/assets/prp_template.md @@ -0,0 +1,316 @@ +# Product Requirement Plan: [Feature Name] + +## Metadata + +- **Feature**: [Feature name] +- **Target Completion**: [Timeline estimate] +- **Confidence Score**: [1-10] - Likelihood of one-pass implementation success +- **Created**: [Date] + +## Executive Summary + +[2-3 sentences describing what this feature does and why it's valuable] + +## Research Findings + +### Codebase Analysis + +#### Similar Patterns Found + +[List similar features or patterns discovered in the codebase with 
file references] + +- **Pattern**: [Pattern name] + - **Location**: `path/to/file.ts:line` + - **Description**: [What this pattern does] + - **Relevance**: [Why this is useful for the current feature] + +#### Existing Conventions + +[List coding conventions, architectural patterns, and style guidelines to follow] + +- **Convention**: [Convention name] + - **Example**: [Code snippet or file reference] + - **Application**: [How to apply this to the new feature] + +#### Test Patterns + +[Document existing test patterns and validation approaches] + +- **Test Framework**: [Framework name and version] +- **Pattern**: [Test pattern to follow] +- **Location**: `path/to/test.spec.ts` + +### External Research + +#### Documentation References + +[List all relevant documentation with specific URLs and sections] + +- **Resource**: [Library/Framework name] + - **URL**: [Specific URL to docs] + - **Key Sections**: [Relevant sections to read] + - **Version**: [Specific version] + - **Gotchas**: [Known issues or quirks] + +#### Implementation Examples + +[List real-world examples and references] + +- **Example**: [Title/Description] + - **Source**: [GitHub/StackOverflow/Blog URL] + - **Relevance**: [What to learn from this] + - **Cautions**: [What to avoid] + +#### Best Practices + +[Document industry best practices and common pitfalls] + +- **Practice**: [Best practice name] + - **Why**: [Rationale] + - **How**: [Implementation approach] + - **Warning**: [What to avoid] + +## Technical Specification + +### Architecture Overview + +[High-level architecture diagram or description] + +``` +[ASCII diagram or description of component interactions] +``` + +### Component Breakdown + +#### Component 1: [Name] + +- **Purpose**: [What this component does] +- **Location**: `path/to/component` +- **Dependencies**: [List dependencies] +- **Interface**: [API or props interface] + +#### Component 2: [Name] + +[Repeat for each major component] + +### Data Models + +[Define all data structures, 
types, and schemas] + +```typescript +// Example: Define interfaces/types +interface FeatureData { + // ... +} +``` + +### API Endpoints (if applicable) + +[Document any new API endpoints] + +- **Endpoint**: `POST /api/feature` + - **Purpose**: [What it does] + - **Request**: [Request schema] + - **Response**: [Response schema] + - **Authentication**: [Auth requirements] + +## Implementation Blueprint + +### Prerequisites + +[List any setup steps, dependencies to install, or environment configuration needed] + +1. [Prerequisite 1] +2. [Prerequisite 2] + +### Implementation Steps (in order) + +#### Step 1: [Step Name] + +**Goal**: [What this step accomplishes] + +**Pseudocode Approach**: +``` +// High-level pseudocode showing the approach +function stepOne() { + // ... +} +``` + +**Files to Create/Modify**: +- `path/to/file1.ts` - [What changes] +- `path/to/file2.ts` - [What changes] + +**Reference Pattern**: See `existing/pattern/file.ts:123` for similar implementation + +**Validation**: [How to verify this step works] + +#### Step 2: [Step Name] + +[Repeat for each implementation step] + +### Error Handling Strategy + +[Document how errors should be handled] + +- **Client-side errors**: [Approach] +- **Server-side errors**: [Approach] +- **Validation errors**: [Approach] +- **Network errors**: [Approach] + +### Edge Cases + +[List all edge cases to handle] + +1. 
**Edge Case**: [Description] + - **Solution**: [How to handle] + +## Testing Strategy + +### Unit Tests + +[Describe unit test approach] + +- **Coverage Target**: [Percentage or scope] +- **Key Test Cases**: [List critical test scenarios] +- **Mock Strategy**: [What to mock and why] + +### Integration Tests + +[Describe integration test approach] + +- **Test Scenarios**: [List integration test cases] +- **Setup Required**: [Test environment setup] + +### Manual Testing Checklist + +- [ ] [Test scenario 1] +- [ ] [Test scenario 2] +- [ ] [Edge case 1] +- [ ] [Edge case 2] + +## Validation Gates + +### Pre-Implementation Validation + +```bash +# Ensure development environment is ready +[Commands to verify environment setup] +``` + +### During Implementation Validation + +```bash +# Type checking +npm run type-check + +# Linting +npm run lint + +# Unit tests (watch mode during development) +npm run test:watch +``` + +### Post-Implementation Validation + +```bash +# Full test suite +npm run test + +# Type checking +npm run type-check + +# Linting +npm run lint + +# Build verification +npm run build + +# E2E tests (if applicable) +npm run test:e2e +``` + +### Manual Validation Steps + +1. [Manual test step 1] +2. [Manual test step 2] +3. [Verify in browser/UI] + +## Dependencies + +### New Dependencies (if any) + +```json +{ + "dependencies": { + "package-name": "^version" + }, + "devDependencies": { + "test-package": "^version" + } +} +``` + +**Justification**: [Why each dependency is needed] + +### Version Compatibility + +- **Node**: [Version requirement] +- **Framework**: [Version requirement] +- **Other**: [Version requirements] + +## Migration & Rollout + +### Database Migrations (if applicable) + +[Document any database schema changes] + +### Feature Flags (if applicable) + +[Document feature flag strategy] + +### Rollout Plan + +1. [Rollout step 1] +2. 
[Rollout step 2] + +## Success Criteria + +- [ ] All validation gates pass +- [ ] All test cases pass (unit, integration, manual) +- [ ] No TypeScript errors +- [ ] No linting errors +- [ ] Build succeeds +- [ ] Feature works as specified +- [ ] Edge cases handled +- [ ] Error handling implemented +- [ ] Code follows existing conventions +- [ ] Documentation updated + +## Known Limitations + +[Document any known limitations or future enhancements] + +## References + +### Internal Documentation + +- [Link to internal docs] + +### External Resources + +- [Link to external resources used during research] + +## Appendix + +### Code Snippets from Research + +[Include any useful code snippets discovered during research] + +```typescript +// Example from existing codebase +``` + +### Additional Notes + +[Any additional context that doesn't fit elsewhere] diff --git a/skills/prp-generator/references/research_methodology.md b/skills/prp-generator/references/research_methodology.md new file mode 100644 index 0000000..66c5d24 --- /dev/null +++ b/skills/prp-generator/references/research_methodology.md @@ -0,0 +1,334 @@ +# Research Methodology for PRP Generation + +This document provides detailed guidance on conducting thorough research for creating comprehensive Product Requirement Plans. + +## Research Philosophy + +The AI agent implementing the PRP only receives: +1. The context you include in the PRP +2. Their training data knowledge +3. Access to the codebase +4. WebSearch capabilities + +Therefore, your research findings MUST be: +- **Comprehensive**: Cover all aspects of implementation +- **Specific**: Include exact URLs, file paths, line numbers +- **Actionable**: Provide concrete examples and patterns +- **Complete**: Assume the implementer won't have your conversation context + +## Codebase Analysis Process + +### 1. 
Find Similar Features + +**Goal**: Identify existing implementations that solve similar problems + +**Approach**: +```bash +# Search for similar feature keywords +# Use Grep tool with relevant patterns + +# Look for: +- Similar UI components +- Similar API endpoints +- Similar data models +- Similar business logic +``` + +**What to Document**: +- Exact file paths with line numbers (e.g., `src/components/UserProfile.tsx:45-67`) +- Code snippets showing the pattern +- Why this pattern is relevant +- Any modifications needed + +**Example**: +``` +Found: User authentication flow in `src/auth/AuthProvider.tsx:23-89` +Pattern: Context provider with useAuth hook +Relevance: Similar state management approach needed for feature X +Adaptation: Will need to add Y and Z properties +``` + +### 2. Identify Architectural Patterns + +**Goal**: Understand how the codebase is structured + +**Look for**: +- Directory structure conventions +- File naming patterns +- Component organization +- State management approach (Redux, Context, Zustand, etc.) +- API structure patterns +- Database access patterns +- Error handling patterns + +**What to Document**: +``` +Pattern: Feature-based directory structure +Example: src/features/authentication/ +Application: Create src/features/[new-feature]/ with: + - components/ + - hooks/ + - types/ + - api/ + - tests/ +``` + +### 3. Analyze Coding Conventions + +**Goal**: Ensure consistency with existing codebase + +**Check**: +- TypeScript usage (strict mode? interfaces vs types?) +- Component patterns (FC vs function? default vs named exports?) +- Styling approach (CSS modules, styled-components, Tailwind?) +- Import ordering +- Comment style +- Function naming (camelCase, descriptive names) + +**What to Document**: +``` +Convention: Named exports for all components +Example: export function UserProfile() { ... } +Reasoning: Easier refactoring and better IDE support +``` + +### 4. 
Study Test Patterns + +**Goal**: Write tests that match existing patterns + +**Investigate**: +- Test framework (Jest, Vitest, etc.) +- Testing library usage (@testing-library/react?) +- Test file naming (`*.test.ts` or `*.spec.ts`?) +- Test organization (co-located or separate test directory?) +- Mock patterns +- Test coverage expectations + +**What to Document**: +``` +Pattern: Co-located tests with *.test.tsx suffix +Framework: Vitest + @testing-library/react +Example: src/components/Button.test.tsx +Approach: Test user interactions, not implementation details +Mock Strategy: Use vi.fn() for callbacks, MSW for API calls +``` + +### 5. Check Configuration Files + +**Goal**: Understand build, lint, and tooling setup + +**Review**: +- `package.json` - scripts and dependencies +- `tsconfig.json` - TypeScript configuration +- `vite.config.ts` or `webpack.config.js` - build setup +- `.eslintrc` - linting rules +- `.prettierrc` - formatting rules + +**What to Document**: +``` +TypeScript: Strict mode enabled +Build: Vite with React plugin +Path Aliases: '@/' maps to 'src/' +Must use: Import type syntax for type-only imports +``` + +## External Research Process + +### 1. Library Documentation + +**When to Search**: +- Using a new library or framework feature +- Integrating third-party services +- Implementing complex functionality + +**How to Search**: +1. Go directly to official documentation +2. Search for the specific version being used +3. 
Look for: + - Getting started guides + - API references + - Examples + - Migration guides + - Known issues + +**What to Document**: +``` +Library: @tanstack/react-query v5 +URL: https://tanstack.com/query/latest/docs/react/guides/queries +Key Sections: + - Queries: https://tanstack.com/query/latest/docs/react/guides/queries + - Mutations: https://tanstack.com/query/latest/docs/react/guides/mutations +Version: 5.28.0 (check package.json) +Gotchas: + - Query keys must be arrays + - Automatic refetching on window focus (may want to disable) + - Stale time defaults to 0 +``` + +### 2. Implementation Examples + +**Where to Search**: +- GitHub repositories (search: "language:typescript [feature]") +- StackOverflow (recent answers) +- Official example repositories +- Blog posts from reputable sources + +**What to Look For**: +- Production-grade code (not quick hacks) +- Recent examples (check dates) +- Well-explained implementations +- Edge case handling + +**What to Document**: +``` +Example: Form validation with Zod and React Hook Form +Source: https://github.com/react-hook-form/react-hook-form/tree/master/examples/V7/zodResolver +Relevance: Shows integration pattern we need +Key Takeaway: Use zodResolver for seamless integration +Caution: Needs @hookform/resolvers package +``` + +### 3. Best Practices Research + +**Search Queries**: +- "[Technology] best practices 2024" +- "[Feature] common pitfalls" +- "[Library] performance optimization" +- "[Pattern] security considerations" + +**What to Document**: +``` +Practice: Input sanitization for user-generated content +Why: Prevent XSS attacks +How: Use DOMPurify library before rendering HTML +Reference: https://owasp.org/www-community/attacks/xss/ +Warning: Never use dangerouslySetInnerHTML without sanitization +``` + +### 4. 
Performance Considerations + +**Research**: +- Bundle size implications +- Runtime performance patterns +- Common optimization techniques +- Lazy loading opportunities + +**What to Document**: +``` +Performance: Large data table rendering +Solution: Use virtualization (@tanstack/react-virtual) +Reference: https://tanstack.com/virtual/latest +Benefit: Render only visible rows (handles 100k+ items) +Tradeoff: Adds 15KB to bundle +``` + +### 5. Security Research + +**Check**: +- Common vulnerabilities (OWASP Top 10) +- Authentication/authorization patterns +- Data validation requirements +- Secure defaults + +**What to Document**: +``` +Security: API authentication +Pattern: Use HTTP-only cookies for tokens +Reference: https://owasp.org/www-community/HttpOnly +Implementation: Configure in Cloudflare Workers +Warning: Don't store tokens in localStorage +``` + +## Combining Research Findings + +### Integration Analysis + +After completing both codebase and external research: + +1. **Match external patterns to codebase conventions** + - "Library X recommends pattern Y, but codebase uses pattern Z" + - Document adaptations needed + +2. **Identify conflicts or gaps** + - "Existing auth pattern doesn't support OAuth" + - Document how to extend existing patterns + +3. **Plan integration points** + - "New feature will integrate with existing [Component] at [Location]" + - Document all touch points + +### Context Completeness Check + +Before finalizing research, verify: + +- [ ] Can implementer understand the codebase structure from your findings? +- [ ] Are all external dependencies documented with versions and URLs? +- [ ] Are code examples specific enough to follow? +- [ ] Are gotchas and warnings clearly stated? +- [ ] Are all integration points identified? +- [ ] Can implementer validate their work with the gates you've defined? + +## Research Output Format + +Organize findings into these categories for the PRP: + +1. 
**Codebase Analysis** + - Similar patterns (with file:line references) + - Existing conventions (with examples) + - Test patterns (with framework details) + +2. **External Research** + - Documentation references (with specific URLs and sections) + - Implementation examples (with source links and relevance) + - Best practices (with rationale and warnings) + +3. **Integration Points** + - Where new code connects to existing code + - What patterns to follow + - What conventions to maintain + +## Common Research Pitfalls to Avoid + +1. **Vague references**: "There's a similar component somewhere" ❌ + - Should be: "See UserProfile component at src/components/UserProfile.tsx:45" ✅ + +2. **Outdated examples**: Linking to old blog posts or deprecated APIs ❌ + - Should be: Check dates, verify current best practices ✅ + +3. **Missing version info**: "Use React Query" ❌ + - Should be: "Use @tanstack/react-query v5.28.0" ✅ + +4. **No gotchas**: Assuming smooth implementation ❌ + - Should be: Document known issues, common mistakes, edge cases ✅ + +5. 
**Too generic**: "Follow React best practices" ❌ + - Should be: Specific patterns with code examples ✅ + +## Research Time Allocation + +**Codebase Analysis**: 40% of research time +- Critical for maintaining consistency +- Most valuable for implementation + +**External Research**: 35% of research time +- Essential for new technologies +- Validation of approaches + +**Integration Planning**: 15% of research time +- Connecting the dots +- Identifying conflicts + +**Documentation**: 10% of research time +- Organizing findings +- Creating clear references + +## Quality Indicators + +Your research is complete when: +- ✅ An implementer could start coding immediately +- ✅ All necessary context is documented +- ✅ Integration points are clear +- ✅ Validation approach is defined +- ✅ Edge cases are identified +- ✅ Error handling strategy is outlined +- ✅ No assumptions are left undocumented diff --git a/skills/timezone-tools/SKILL.md b/skills/timezone-tools/SKILL.md new file mode 100644 index 0000000..920ca2a --- /dev/null +++ b/skills/timezone-tools/SKILL.md @@ -0,0 +1,136 @@ +--- +name: timezone-tools +description: Get current time in any timezone and convert times between timezones. Use when working with time, dates, timezones, scheduling across regions, or when user mentions specific cities/regions for time queries. Supports IANA timezone names. +--- + +# Timezone Tools + +Get current time in any timezone and convert times between different timezones using IANA timezone database. + +## Quick Start + +### Get current time in a timezone + +```bash +python scripts/get_time.py "America/New_York" +``` + +### Convert time between timezones + +```bash +python scripts/convert_time.py "America/New_York" "14:30" "Australia/Perth" +``` + +### Search for timezone names + +```bash +python scripts/list_timezones.py "perth" +``` + +## Instructions + +When the user asks about time or timezones: + +1. 
**For current time queries** (e.g., "What time is it in Tokyo?"):
   - Use `get_time.py` with IANA timezone name
   - If unsure of timezone name, search first with `list_timezones.py`
   - Script outputs: timezone, datetime, day of week, DST status

2. **For time conversions** (e.g., "What's 2pm EST in Perth time?"):
   - Use `convert_time.py` with source timezone, time (HH:MM 24-hour), target timezone
   - Script shows source time, target time, and time difference
   - Automatically handles DST changes

3. **For timezone searches**:
   - Use `list_timezones.py` with city/country name
   - Returns matching IANA timezone names

## Common Timezones Reference

For quick reference, see [data/common_timezones.json](data/common_timezones.json) which includes major cities worldwide, with Perth prominently featured.

**User's local timezone**: The scripts automatically detect your local timezone using `tzlocal`.

## Examples

### Example 1: Current time query

User: "What time is it in Perth?"

```bash
python scripts/list_timezones.py "perth"
# Output: Australia/Perth

python scripts/get_time.py "Australia/Perth"
# Output:
# Timezone: Australia/Perth
# Current time: 2025-11-07T15:30:45
# Day: Friday
# DST: No
```

### Example 2: Time conversion

User: "I have a meeting at 2pm New York time, what time is that in Perth?"

```bash
python scripts/convert_time.py "America/New_York" "14:00" "Australia/Perth"
# Output:
# Source: America/New_York - 2025-11-07T14:00:00 (Friday, DST: No)
# Target: Australia/Perth - 2025-11-08T03:00:00 (Saturday, DST: No)
# Time difference: +13.0h
```

### Example 3: Multiple timezone search

User: "What are the timezone codes for London, Tokyo, and Sydney?"
+ +```bash +python scripts/list_timezones.py "london" +python scripts/list_timezones.py "tokyo" +python scripts/list_timezones.py "sydney" +# Outputs: +# Europe/London +# Asia/Tokyo +# Australia/Sydney +``` + +## Time Format + +- All times use **24-hour format** (HH:MM): `14:30` not `2:30 PM` +- ISO 8601 datetime format for output: `2025-11-07T14:30:45` +- IANA timezone names (e.g., `America/New_York`, not `EST`) + +## Troubleshooting + +### "Invalid timezone" error + +- Use IANA timezone names: `America/New_York` not `EST` or `Eastern` +- Search with `list_timezones.py` if unsure +- Check [data/common_timezones.json](data/common_timezones.json) for reference + +### "Invalid time format" error + +- Use 24-hour format: `14:30` not `2:30 PM` +- Format must be `HH:MM` with colon separator + +### Missing dependencies + +Install required Python packages: + +```bash +pip install tzlocal +``` + +## Dependencies + +- Python 3.9+ +- `tzlocal>=5.0` - for local timezone detection +- `zoneinfo` - built-in Python 3.9+ (IANA timezone database) + +## Notes + +- Scripts automatically handle Daylight Saving Time (DST) +- Local timezone is auto-detected from system +- All timezone data uses IANA Time Zone Database +- Perth, Australia timezone: `Australia/Perth` (UTC+8, no DST) diff --git a/skills/timezone-tools/data/common_timezones.json b/skills/timezone-tools/data/common_timezones.json new file mode 100644 index 0000000..770f8f4 --- /dev/null +++ b/skills/timezone-tools/data/common_timezones.json @@ -0,0 +1,67 @@ +{ + "australia": { + "Perth": "Australia/Perth", + "Sydney": "Australia/Sydney", + "Melbourne": "Australia/Melbourne", + "Brisbane": "Australia/Brisbane", + "Adelaide": "Australia/Adelaide", + "Darwin": "Australia/Darwin", + "Hobart": "Australia/Hobart" + }, + "north_america": { + "New York": "America/New_York", + "Los Angeles": "America/Los_Angeles", + "Chicago": "America/Chicago", + "Denver": "America/Denver", + "Toronto": "America/Toronto", + "Vancouver": 
"America/Vancouver", + "Mexico City": "America/Mexico_City" + }, + "europe": { + "London": "Europe/London", + "Paris": "Europe/Paris", + "Berlin": "Europe/Berlin", + "Madrid": "Europe/Madrid", + "Rome": "Europe/Rome", + "Amsterdam": "Europe/Amsterdam", + "Stockholm": "Europe/Stockholm", + "Moscow": "Europe/Moscow" + }, + "asia": { + "Tokyo": "Asia/Tokyo", + "Hong Kong": "Asia/Hong_Kong", + "Singapore": "Asia/Singapore", + "Beijing": "Asia/Shanghai", + "Mumbai": "Asia/Kolkata", + "Dubai": "Asia/Dubai", + "Seoul": "Asia/Seoul", + "Bangkok": "Asia/Bangkok" + }, + "south_america": { + "Sao Paulo": "America/Sao_Paulo", + "Buenos Aires": "America/Argentina/Buenos_Aires", + "Lima": "America/Lima", + "Bogota": "America/Bogota", + "Santiago": "America/Santiago" + }, + "africa": { + "Cairo": "Africa/Cairo", + "Johannesburg": "Africa/Johannesburg", + "Lagos": "Africa/Lagos", + "Nairobi": "Africa/Nairobi" + }, + "pacific": { + "Auckland": "Pacific/Auckland", + "Fiji": "Pacific/Fiji", + "Honolulu": "Pacific/Honolulu", + "Guam": "Pacific/Guam" + }, + "utc": { + "UTC": "UTC" + }, + "_notes": { + "format": "City Name -> IANA Timezone", + "perth_details": "Perth: UTC+8, no DST, Western Australia", + "usage": "Use list_timezones.py to search for additional cities" + } +} diff --git a/skills/timezone-tools/scripts/convert_time.py b/skills/timezone-tools/scripts/convert_time.py new file mode 100755 index 0000000..4f5b3e7 --- /dev/null +++ b/skills/timezone-tools/scripts/convert_time.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +"""Convert time between timezones.""" + +import sys +from datetime import datetime, timedelta +from zoneinfo import ZoneInfo + + +def convert_time(source_tz: str, time_str: str, target_tz: str) -> None: + """Convert time from source timezone to target timezone. 
+ + Args: + source_tz: Source IANA timezone name (e.g., 'America/New_York') + time_str: Time to convert in 24-hour format (HH:MM) + target_tz: Target IANA timezone name (e.g., 'Australia/Perth') + """ + try: + # Parse timezone names + source_timezone = ZoneInfo(source_tz) + target_timezone = ZoneInfo(target_tz) + + # Parse time string + try: + parsed_time = datetime.strptime(time_str, "%H:%M").time() + except ValueError: + print(f"Error: Invalid time format '{time_str}'", file=sys.stderr) + print("Expected format: HH:MM (24-hour, e.g., '14:30')", file=sys.stderr) + sys.exit(1) + + # Create datetime for today in source timezone + now = datetime.now(source_timezone) + source_time = datetime( + now.year, + now.month, + now.day, + parsed_time.hour, + parsed_time.minute, + tzinfo=source_timezone, + ) + + # Convert to target timezone + target_time = source_time.astimezone(target_timezone) + + # Calculate time difference + source_offset = source_time.utcoffset() or timedelta() + target_offset = target_time.utcoffset() or timedelta() + hours_difference = (target_offset - source_offset).total_seconds() / 3600 + + # Format time difference (handle fractional hours like Nepal's +5:45) + if hours_difference.is_integer(): + time_diff_str = f"{hours_difference:+.1f}h" + else: + time_diff_str = f"{hours_difference:+.2f}".rstrip("0").rstrip(".") + "h" + + # Check DST status + source_dst = source_time.dst() is not None and source_time.dst().total_seconds() != 0 + target_dst = target_time.dst() is not None and target_time.dst().total_seconds() != 0 + + # Output formatted information + print(f"Source: {source_tz} - {source_time.isoformat(timespec='seconds')} " + f"({source_time.strftime('%A')}, DST: {'Yes' if source_dst else 'No'})") + print(f"Target: {target_tz} - {target_time.isoformat(timespec='seconds')} " + f"({target_time.strftime('%A')}, DST: {'Yes' if target_dst else 'No'})") + print(f"Time difference: {time_diff_str}") + + except Exception as e: + if "ZoneInfo" in 
str(type(e).__name__): + print(f"Error: Invalid timezone", file=sys.stderr) + else: + print(f"Error: {str(e)}", file=sys.stderr) + sys.exit(1) + + +def main(): + if len(sys.argv) != 4: + print("Usage: python convert_time.py