From f0e7f0e603f66c0e8a726632e89a74687730136e Mon Sep 17 00:00:00 2001 From: Zhongwei Li Date: Sat, 29 Nov 2025 17:50:56 +0800 Subject: [PATCH] Initial commit --- .claude-plugin/plugin.json | 12 + README.md | 3 + SKILL.md | 842 ++++++++++++++++++ assets/requirements-template.md | 83 ++ plugin.json | 15 + plugin.lock.json | 65 ++ references/domain-templates.md | 1379 ++++++++++++++++++++++++++++++ scripts/generate_project_docs.py | 801 +++++++++++++++++ scripts/validate_documents.py | 325 +++++++ 9 files changed, 3525 insertions(+) create mode 100644 .claude-plugin/plugin.json create mode 100644 README.md create mode 100644 SKILL.md create mode 100644 assets/requirements-template.md create mode 100644 plugin.json create mode 100644 plugin.lock.json create mode 100644 references/domain-templates.md create mode 100644 scripts/generate_project_docs.py create mode 100644 scripts/validate_documents.py diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..991ca0b --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,12 @@ +{ + "name": "project-planner-skill", + "description": "Comprehensive project planning and documentation generator for software projects. Creates structured requirements documents, system design documents, and task breakdown plans with implementation tracking.", + "version": "1.0.0", + "author": { + "name": "George A Puiu", + "email": "puiu.adrian@gmail.com" + }, + "skills": [ + "./" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..a115e0a --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# project-planner-skill + +Comprehensive project planning and documentation generator for software projects. Creates structured requirements documents, system design documents, and task breakdown plans with implementation tracking. diff --git a/SKILL.md b/SKILL.md new file mode 100644 index 0000000..a7a8fab --- /dev/null +++ b/SKILL.md @@ -0,0 +1,842 @@ +--- +name: project-planner +description: Comprehensive project planning and documentation generator for software projects. Creates structured requirements documents, system design documents, and task breakdown plans with implementation tracking. Use when starting a new project, defining specifications, creating technical designs, or breaking down complex systems into implementable tasks. Supports user story format, acceptance criteria, component design, API specifications, and hierarchical task decomposition with requirement traceability. +--- + +# Project Planner Skill + +This skill provides templates and guidance for generating comprehensive project planning documents that serve as blueprints for AI-assisted implementation. + +## Quick Start + +When a user wants to start a new project, generate three core documents: +1. **Requirements Document** - User stories with acceptance criteria +2. **Design Document** - Technical architecture and component specifications +3. **Implementation Plan** - Hierarchical task breakdown with requirement tracing + +## Why Explicit Architectural Planning Works + +Setting clear roles, responsibilities, and deliverables upfront dramatically improves project outcomes: + +### Benefits of Upfront Definition + +1. **Component Clarity** - Defining all system components first prevents scope creep and ensures complete coverage +2. **Data Flow Visibility** - Mapping data movement early reveals integration complexities and performance bottlenecks +3. 
**Integration Planning** - Identifying all touchpoints upfront prevents surprise dependencies during implementation +4. **Clear Boundaries** - Explicitly stating what's in/out of scope focuses effort and prevents feature drift +5. **Measurable Success** - Specific goals and constraints enable objective progress tracking + +### The Architect Mindset + +When acting as a **Project Architect**, approach planning with: +- **Systems Thinking** - See the whole before diving into parts +- **Interface-First Design** - Define contracts between components before internals +- **Traceability Focus** - Every requirement maps to design elements and tasks +- **Constraint Awareness** - Acknowledge limitations upfront to guide decisions +- **Deliverable Orientation** - Know exactly what artifacts you're producing + +## Document Generation Workflow + +### 1. Project Architect Role Definition + +When starting a project, explicitly establish Claude as the **Project Architect** with clear responsibilities: + +**Role:** System Architect and Planning Specialist +**Responsibilities:** +- Define complete system architecture with all components +- Map data flow between system elements +- Identify all integration points and interfaces +- Establish clear project boundaries and constraints +- Create traceable requirements to implementation tasks + +### 2. Initial Project Understanding + +Before generating documents, gather key information and architectural elements: + +``` +Required Project Information: +- Project name and purpose +- Target users (single-user local, multi-tenant SaaS, etc.) +- Core functionality (3-5 main features) +- Technical preferences (languages, frameworks, deployment) +- Non-functional requirements (performance, security, scalability) + +Required Architectural Elements (define upfront): +- System Components: All major modules/services and their purposes +- Data Flow: How data moves through the entire system +- Integration Points: All external APIs, services, databases +- System Boundaries: What's in scope vs out of scope +- Constraints: Technical, business, and resource limitations +- Success Metrics: Clear, measurable goals for the system +``` + +### 3. Deliverable Definition (Set Upfront) + +Define all deliverables explicitly before starting documentation: + +``` +Standard Deliverables Package: +1. Requirements Document + - User stories with measurable acceptance criteria + - Complete glossary of terms + - Traceable requirement IDs + +2. System Design Document + - Component architecture diagram + - Data flow diagrams for all major processes + - Integration point specifications + - API/Interface contracts + - Performance and scaling targets + +3. Implementation Plan + - Hierarchical task breakdown + - Requirement-to-task mapping + - Dependency graph + - Phase-based delivery schedule + +Optional Deliverables (specify if needed): +- API Documentation +- Database Schema Design +- Security Threat Model +- Deployment Guide +- Testing Strategy Document +``` + +### 4. 
Generate Requirements Document + +Use the requirements template to create user-focused specifications: + +```python +# Execute this to generate requirements structure +requirements = { + "introduction": "System purpose and scope", + "glossary": "Domain-specific terms", + "requirements": [ + { + "id": "REQ-X", + "user_story": "As a [role], I want [feature], so that [benefit]", + "acceptance_criteria": [ + "WHEN [condition], THE system SHALL [behavior]", + "WHERE [context], THE system SHALL [behavior]", + "IF [condition], THEN THE system SHALL [behavior]" + ] + } + ] +} +``` + +### 5. Generate Design Document + +Create technical specifications with explicit architectural elements: + +```python +# Execute this to generate comprehensive design structure +design = { + "overview": "High-level system description", + "architecture": { + "diagram": "ASCII or visual representation of all components", + "components": [ + { + "id": "COMP-1", + "name": "Component Name", + "type": "Frontend/Backend/Service/Database", + "responsibility": "Single clear purpose", + "boundaries": "What it does and doesn't do" + } + ] + }, + "data_flow": { + "primary_flows": [ + { + "name": "User Registration Flow", + "steps": [ + "1. User submits form → Frontend", + "2. Frontend validates → API Gateway", + "3. API Gateway → Auth Service", + "4. Auth Service → User Database", + "5. Response flows back" + ], + "data_transformations": "How data changes at each step" + } + ] + }, + "integration_points": [ + { + "name": "External Payment API", + "type": "REST/GraphQL/WebSocket/Database", + "purpose": "Process payments", + "interface": "API contract definition", + "authentication": "Method used", + "error_handling": "Retry/fallback strategy" + } + ], + "components_detail": [ + { + "name": "Component Name", + "responsibility": "What it does", + "key_classes": ["Class descriptions"], + "interfaces": "API/method signatures", + "dependencies": "What it needs to function", + "performance": "Targets and constraints" + } + ], + "data_models": "Entity definitions with relationships", + "system_boundaries": { + "in_scope": ["What the system handles"], + "out_of_scope": ["What it delegates or ignores"], + "assumptions": ["External dependencies assumed available"] + }, + "error_handling": "Strategies for failures", + "testing_strategy": "Unit, integration, performance", + "deployment": "Docker, environment, configuration" +} +``` + +### 6. Generate Implementation Plan + +Break down the project into executable tasks with clear scope boundaries: + +```python +# Execute this to generate task structure with boundaries +tasks = { + "project_boundaries": { + "must_have": ["Core features for MVP"], + "nice_to_have": ["Enhancement features"], + "out_of_scope": ["Features explicitly excluded"], + "technical_constraints": ["Framework/library limitations"] + }, + "phases": [ + { + "id": 1, + "name": "Infrastructure Setup", + "deliverables": ["What this phase produces"], + "tasks": [ + { + "id": "1.1", + "description": "Task description", + "subtasks": ["Specific actions"], + "requirements_fulfilled": ["REQ-1.1", "REQ-2.3"], + "components_involved": ["COMP-1", "COMP-3"], + "dependencies": [], + "estimated_hours": 4, + "success_criteria": "How to verify completion" + } + ] + } + ] +} +``` + +## Requirements Document Template + +```markdown +# Requirements Document + +## Introduction + +[System description in 2-3 sentences. Target user and deployment model.] 
+ +## Glossary + +- **Term**: Definition specific to this system +- **Component**: Major system module or service +[Add all domain-specific terms] + +## Requirements + +### Requirement [NUMBER] + +**User Story:** As a [user type], I want [capability], so that [benefit] + +#### Acceptance Criteria + +1. WHEN [trigger/condition], THE [component] SHALL [action/behavior] +2. WHERE [mode/context], THE [component] SHALL [action/behavior] +3. IF [condition], THEN THE [component] SHALL [action/behavior] +4. THE [component] SHALL [capability with measurable target] + +[Repeat for each requirement] +``` + +### Requirements Best Practices + +1. **One capability per requirement** - Each requirement should address a single feature +2. **Testable criteria** - Every criterion must be verifiable +3. **Use SHALL for mandatory** - Consistent RFC 2119 keywords +4. **Include performance targets** - "within X milliseconds/seconds" +5. **Specify all states** - Success, failure, edge cases +6. **Number systematically** - REQ-1, REQ-2 for traceability + +### Acceptance Criteria Patterns + +``` +Behavior criteria: +- WHEN [event occurs], THE system SHALL [respond] +- THE system SHALL [provide capability] +- THE system SHALL [enforce rule/limit] + +Conditional criteria: +- IF [condition], THEN THE system SHALL [action] +- WHERE [mode is active], THE system SHALL [behavior] + +Performance criteria: +- THE system SHALL [complete action] within [time] +- THE system SHALL support [number] concurrent [operations] +- THE system SHALL maintain [metric] above/below [threshold] + +Data criteria: +- THE system SHALL persist [data type] with [attributes] +- THE system SHALL validate [input] against [rules] +- THE system SHALL return [data] in [format] +``` + +## Design Document Template + +```markdown +# Design Document + +## Overview + +[System architecture summary in 3-4 sentences. Key design decisions and priorities.] + +## System Architecture + +### Component Map + +| Component ID | Name | Type | Responsibility | Interfaces With | +|-------------|------|------|----------------|-----------------| +| COMP-1 | Web Frontend | UI | User interface | COMP-2 | +| COMP-2 | API Gateway | Service | Request routing | COMP-3, COMP-4 | +| COMP-3 | Business Logic | Service | Core processing | COMP-5 | +[Complete component inventory] + +### High-Level Architecture Diagram + +[ASCII diagram showing all components and their relationships] + +## Data Flow Specifications + +### Primary Data Flows + +#### 1. [Flow Name] (e.g., User Authentication) + +``` +1. [Source] → [Component]: [Data description] +2. [Component] → [Component]: [Transformation applied] +3. [Component] → [Destination]: [Final data format] +``` + +**Data Transformations:** +- Step 2: [How data changes] +- Step 3: [Validation/Processing applied] + +[Repeat for each major data flow] + +## Integration Points + +### Internal Integration Points + +| Source | Target | Protocol | Data Format | Purpose | +|--------|--------|----------|-------------|---------| +| Frontend | API Gateway | HTTPS/REST | JSON | API calls | +| API Gateway | Auth Service | gRPC | Protobuf | Authentication | +[All internal integrations] + +### External Integration Points + +#### [External System Name] + +**Type:** REST API / Database / Message Queue / etc. +**Purpose:** [What this integration provides] +**Endpoint:** [URL/Connection string pattern] +**Authentication:** [Method - OAuth2, API Key, etc.] 
+**Rate Limits:** [Any constraints] + +**Interface Contract:** +```language +// Request format +POST /api/endpoint +{ + "field": "type" +} + +// Response format +{ + "result": "type" +} +``` + +**Error Handling:** +- Retry strategy: [Exponential backoff, circuit breaker] +- Fallback: [What happens if unavailable] +- Monitoring: [How to detect issues] + +[Repeat for each external integration] + +## Components and Interfaces + +### 1. [Component Name] + +**Responsibility:** [Single sentence description] + +**Key Classes:** +- `ClassName`: [Purpose and main methods] +- `ServiceName`: [What it manages] + +**Interfaces:** +```language +class InterfaceName: + def method_name(params) -> ReturnType + # Core methods only +``` + +**Data Flow:** +- Receives [input] from [source] +- Processes by [algorithm/logic] +- Outputs [result] to [destination] + +**Performance:** +- Target: [metric and value] +- Constraints: [limitations] + +[Repeat for each major component] + +## Data Models + +### [Entity Name] +```language +@dataclass +class EntityName: + field: Type + field: Optional[Type] + # Core fields only +``` + +## Error Handling + +### [Error Category] +**Types:** [List of error scenarios] +**Handling:** [Strategy and recovery] + +## Testing Strategy + +### Unit Tests +- [Component]: Test [aspects] +- Coverage target: 80% + +### Integration Tests +- [Flow]: Test [end-to-end scenario] + +### Performance Tests +- [Operation]: Target [metric] + +## Deployment + +### Docker Configuration +```yaml +# Essential service definitions only +``` + +### Environment Variables +``` +CATEGORY_VAR=description +``` + +## Performance Targets + +- [Operation]: <[time] +- [Throughput]: >[rate] +- [Resource]: <[limit] + +## Security Considerations + +- [Authentication method if applicable] +- [Data protection approach] +- [Access control model] +``` + +### Design Best Practices + +1. **Component responsibilities** - Single, clear purpose per component +2. **Interface first** - Define contracts before implementation +3. **Data flow clarity** - Show how data moves through system +4. **Error categories** - Group related failures with consistent handling +5. **Performance targets** - Specific, measurable goals +6. **Deployment ready** - Include Docker and configuration + +## Implementation Plan Template + +```markdown +# Implementation Plan + +- [x] 1. [Phase Name] + + - [x] 1.1 [Task name] + - [Subtask description] + - [Subtask description] + - _Requirements: [REQ-X.Y, REQ-A.B]_ + + - [ ] 1.2 [Task name] + - [Subtask description] + - _Requirements: [REQ-X.Y]_ + - _Dependencies: Task 1.1_ + +- [ ] 2. [Phase Name] + + - [ ] 2.1 [Task name] + - [Detailed steps or subtasks] + - _Requirements: [REQ-X.Y]_ + - _Dependencies: Phase 1_ + +[Continue for all phases] +``` + +### Task Planning Best Practices + +1. **Hierarchical structure** - Phases > Tasks > Subtasks +2. **Requirement tracing** - Link each task to requirements +3. **Dependency marking** - Identify blockers and prerequisites +4. **Checkbox format** - [x] for complete, [ ] for pending +5. **Atomic tasks** - Each task independently completable +6. **Progressive implementation** - Infrastructure → Core → Features → Polish + +### Common Implementation Phases + +```markdown +1. **Infrastructure Setup** + - Project structure + - Database schema + - Docker configuration + - Core dependencies + +2. **Data Layer** + - Models/entities + - Database operations + - Migrations + +3. **Business Logic** + - Core algorithms + - Service classes + - Validation rules + +4. 
**API/Interface Layer** + - REST/GraphQL endpoints + - WebSocket handlers + - Authentication + +5. **Frontend/UI** + - Component structure + - State management + - API integration + - Responsive design + +6. **Integration** + - External services + - Third-party APIs + - Message queues + +7. **Testing** + - Unit tests + - Integration tests + - End-to-end tests + +8. **DevOps** + - CI/CD pipeline + - Monitoring + - Logging + - Deployment scripts + +9. **Documentation** + - API documentation + - User guides + - Deployment guide + - README +``` + +## Document Patterns by Project Type + +### Web Application (Full-Stack) + +Requirements focus: +- User authentication and authorization +- CRUD operations for entities +- Real-time updates +- Responsive UI +- API design + +Design focus: +- 3-tier architecture (Frontend, Backend, Database) +- REST/GraphQL API design +- State management strategy +- Component hierarchy +- Database schema + +Tasks focus: +1. Database and backend setup +2. API implementation +3. Frontend components +4. Integration and testing + +### Microservices System + +Requirements focus: +- Service boundaries +- Inter-service communication +- Data consistency +- Service discovery +- Fault tolerance + +Design focus: +- Service decomposition +- API contracts between services +- Message queue/event bus +- Distributed tracing +- Container orchestration + +Tasks focus: +1. Service scaffolding +2. Shared libraries/contracts +3. Individual service implementation +4. Integration layer +5. Orchestration setup + +### Data Pipeline/ETL + +Requirements focus: +- Data sources and formats +- Transformation rules +- Data quality checks +- Schedule/triggers +- Error handling and retry + +Design focus: +- Pipeline stages +- Data flow diagram +- Schema evolution +- Monitoring and alerting +- Storage strategy + +Tasks focus: +1. Data source connectors +2. Transformation logic +3. Validation and quality checks +4. Scheduling setup +5. Monitoring implementation + +### CLI Tool/Library + +Requirements focus: +- Command structure +- Input/output formats +- Configuration options +- Error messages +- Performance requirements + +Design focus: +- Command parser architecture +- Plugin system (if applicable) +- Configuration management +- Output formatters +- Testing strategy + +Tasks focus: +1. Core command structure +2. Business logic implementation +3. Input/output handlers +4. Configuration system +5. 
Documentation and examples + +## Generating Documents for Specific Domains + +### Trading/Financial Systems + +Additional requirements: +- Risk management rules +- Order execution logic +- Market data handling +- Compliance requirements +- Audit trail + +Additional design: +- High-frequency data handling +- Position tracking +- Risk calculations +- Order routing +- Failover strategies + +### Real-time Systems (Chat, Gaming, IoT) + +Additional requirements: +- Latency targets +- Connection handling +- State synchronization +- Offline support +- Push notifications + +Additional design: +- WebSocket/SSE architecture +- State management +- Caching strategy +- Message queuing +- Horizontal scaling + +### Machine Learning Systems + +Additional requirements: +- Model training pipeline +- Feature engineering +- Model versioning +- Prediction API +- Monitoring/drift detection + +Additional design: +- Training infrastructure +- Feature store +- Model registry +- Serving architecture +- A/B testing framework + +## Quality Checklist + +Before finalizing documents, verify: + +### Requirements Document +- [ ] Every requirement has a clear user story +- [ ] All acceptance criteria are testable +- [ ] Non-functional requirements specified (performance, security) +- [ ] Glossary covers all domain terms +- [ ] Requirements are numbered for tracing + +### Design Document +- [ ] Architecture diagram included +- [ ] All components have clear responsibilities +- [ ] Interfaces defined for integration points +- [ ] Error handling strategy documented +- [ ] Performance targets specified +- [ ] Deployment configuration included + +### Implementation Plan +- [ ] Tasks grouped into logical phases +- [ ] Dependencies identified between tasks +- [ ] Requirements traced to tasks +- [ ] Tasks are atomic and completable +- [ ] Checkbox format for progress tracking + +## Example Usage Patterns + +### Starting a New Project (With Explicit Architecture Focus) + +``` +User: "I want to build an order flow trading automation system" + +Response: "I'll act as your Project Architect to create comprehensive planning documents. + +First, let me establish the architectural foundation: + +**System Components I need to define:** +- Market data ingestion service +- Order flow analysis engine +- Trading signal generator +- Risk management module +- Order execution service +- Monitoring dashboard + +**Data Flows to map:** +- Market data → Analysis → Signals → Execution +- Position tracking → Risk calculation → Trading limits +- All events → Audit trail + +**Integration Points to specify:** +- Exchange APIs (which ones?) +- Database system (time-series data needs?) +- Notification services (alerts/reporting?) + +**Key Questions:** +1. Is this for personal use (single-user) or a platform (multi-user)? +2. Which specific exchanges/brokers will it connect to? +3. What are your latency requirements (milliseconds matter)? +4. What's your risk tolerance and position sizing approach? +5. Do you need backtesting capabilities? + +**Deliverables I'll create:** +1. Requirements document with traceable user stories +2. System design with complete component map and data flows +3. Implementation plan with phased delivery milestones + +This architectural blueprint will ensure nothing is overlooked during implementation." +``` + +### Starting a New Project (Simple Approach) + +``` +User: "I want to build an order flow trading automation system" + +Response: "I'll help you create comprehensive planning documents for your trading system. 
Let me gather some key information: + +1. Is this for personal use (single-user) or a multi-user platform? +2. Which markets/exchanges will it connect to? +3. What are the core trading strategies or patterns? +4. Any specific performance requirements (latency, throughput)? + +Based on this, I'll generate: +- Requirements document with user stories +- System design with components and data flow +- Implementation plan with phases and tasks" +``` + +### Expanding Existing Requirements + +``` +User: "Add real-time monitoring dashboard to the requirements" + +Response: "I'll add dashboard requirements to your document: + +### Requirement 15 +**User Story:** As a trader, I want a real-time dashboard, so that I can monitor system performance and trading activity + +#### Acceptance Criteria +1. THE Dashboard SHALL display active positions with current P&L +2. THE Dashboard SHALL update market data within 1 second +3. THE Dashboard SHALL provide filtering by symbol, timeframe, and date range +4. WHEN a trade executes, THE Dashboard SHALL reflect it within 100ms" +``` + +## Common Pitfalls to Avoid + +### Planning Pitfalls +1. **Skipping architectural planning** - Jumping to requirements without mapping components first +2. **Vague role definition** - Not establishing the architect role leads to unfocused documentation +3. **Hidden integration points** - Discovering external dependencies during implementation +4. **Undefined boundaries** - No clear scope leads to feature creep and timeline slippage +5. **Missing data flow analysis** - Not mapping how data moves reveals issues late + +### Requirements Pitfalls +1. **Over-specifying implementation** - Requirements should define "what" not "how" +2. **Vague acceptance criteria** - Avoid "user-friendly" or "fast" without metrics +3. **Missing error cases** - Include failure scenarios in requirements +4. **Untraceable requirements** - Every requirement should map to tasks + +### Design Pitfalls +1. **Monolithic components** - Break down large components into focused services +2. **Circular dependencies** - Ensure task dependencies form a DAG +3. **Missing data models** - Define core entities early +4. **Ignoring deployment** - Include Docker/deployment from the start +5. **Unclear component boundaries** - Each component needs explicit responsibilities + +## Output Format + +Generate documents in Markdown format for easy editing and version control. Use: +- Clear hierarchical headings (##, ###, ####) +- Code blocks with language hints +- Bulleted and numbered lists +- Tables for structured data +- Checkboxes for task tracking +- Bold for emphasis on key terms +- Inline code for technical terms + +Save documents as: +- `requirements.md` - Requirements document +- `design.md` - Design document +- `tasks.md` - Implementation plan + +These documents serve as the foundation for AI-assisted implementation, providing clear specifications that can be referenced throughout development. diff --git a/assets/requirements-template.md b/assets/requirements-template.md new file mode 100644 index 0000000..16b4233 --- /dev/null +++ b/assets/requirements-template.md @@ -0,0 +1,83 @@ +# Requirements Document Template + +## Introduction + +[PROJECT NAME] is a [SYSTEM TYPE] designed for [TARGET USERS]. The system [PRIMARY PURPOSE]. 
+ +## System Context + +### Architectural Overview +- **Components:** [List major system components] +- **Data Flow:** [High-level data movement] +- **Integration Points:** [External systems/APIs] +- **Deployment Model:** [Cloud/On-premise/Hybrid] + +## Glossary + +- **[Term]**: [Definition specific to this system] +- **Component**: Major system module or service +- **Integration Point**: Connection to external system or API + +## Functional Requirements + +### REQ-1: [Feature Name] + +**User Story:** As a [user role], I want [feature], so that [benefit] + +**Acceptance Criteria:** +1. WHEN [condition], THE system SHALL [behavior] +2. THE system SHALL [requirement] within [time constraint] +3. IF [error condition], THEN THE system SHALL [error handling] + +**Components Involved:** [COMP-1, COMP-2] +**Data Flow:** [How data moves for this requirement] + +### REQ-2: [Feature Name] + +**User Story:** As a [user role], I want [feature], so that [benefit] + +**Acceptance Criteria:** +1. WHEN [condition], THE system SHALL [behavior] +2. WHERE [context], THE system SHALL [behavior] +3. THE system SHALL persist [data] with [attributes] + +**Components Involved:** [COMP-3, COMP-4] +**Integration Points:** [External systems used] + +## Non-Functional Requirements + +### Performance Requirements +- Response time: THE system SHALL respond to user requests within [X] milliseconds +- Throughput: THE system SHALL handle [X] concurrent users +- Data processing: THE system SHALL process [X] records per second + +### Security Requirements +- Authentication: THE system SHALL implement [auth method] +- Authorization: THE system SHALL enforce role-based access control +- Data protection: THE system SHALL encrypt sensitive data at rest and in transit + +### Reliability Requirements +- Availability: THE system SHALL maintain 99.9% uptime +- Recovery: THE system SHALL recover from failures within [X] minutes +- Data integrity: THE system SHALL ensure ACID compliance for transactions + +### Scalability Requirements +- THE system SHALL support horizontal scaling +- THE system SHALL handle [X]% growth in users annually +- THE system SHALL support database sharding for data volumes exceeding [X] + +## Constraints and Boundaries + +### Technical Constraints +- Technology: [Programming languages, frameworks, databases] +- Infrastructure: [Cloud provider, hardware limitations] + +### Business Constraints +- Budget: [Cost limitations] +- Timeline: [Delivery deadlines] +- Compliance: [Regulatory requirements] + +### Scope Boundaries +- **In Scope:** [What's included] +- **Out of Scope:** [What's explicitly excluded] +- **Future Considerations:** [Deferred features] diff --git a/plugin.json b/plugin.json new file mode 100644 index 0000000..8a236a4 --- /dev/null +++ b/plugin.json @@ -0,0 +1,15 @@ +{ + "name": "project-planner-skill", + "description": "Comprehensive project planning and documentation generator for software projects. 
Creates structured requirements documents, system design documents, and task breakdown plans with implementation tracking.", + "version": "1.0.0", + "author": { + "name": "George A Puiu", + "email": "puiu.adrian@gmail.com" + }, + "homepage": "https://github.com/adrianpuiu/claude-skills-marketplace", + "repository": "https://github.com/adrianpuiu/claude-skills-marketplace", + "license": "MIT", + "keywords": ["project-planning", "documentation", "requirements", "design", "implementation"], + "category": "productivity", + "strict": false +} \ No newline at end of file diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..eaa94fc --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,65 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:adrianpuiu/claude-skills-marketplace:project-planner-skill", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "81b81b946e042f6a789b143d5c09482792d375e6", + "treeHash": "bfc5ab6b0bcaab2ba4730052c02142568ae8c7fc29525ab89ec055870ff1f845", + "generatedAt": "2025-11-28T10:13:01.449491Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "project-planner-skill", + "description": "Comprehensive project planning and documentation generator for software projects. Creates structured requirements documents, system design documents, and task breakdown plans with implementation tracking.", + "version": "1.0.0" + }, + "content": { + "files": [ + { + "path": "plugin.json", + "sha256": "7b291017ca833dff4b02dc0397b4f5cb072995317c0c79fd3b60ca0a3e5b2bab" + }, + { + "path": "README.md", + "sha256": "bdc7011a3d0f58068b96f817b5e25fae61818b9791024219808f2f7d5eb61050" + }, + { + "path": "SKILL.md", + "sha256": "f28924ef91365ba9ee0e877be3429c5880d993c4ec0af6df664c1fb74d398c66" + }, + { + "path": "references/domain-templates.md", + "sha256": "c2db709e4805a561121739f1ca420ff306155054af0ea6e517aded7a83879a2d" + }, + { + "path": "scripts/validate_documents.py", + "sha256": "8e8c6b6e80b9bcf7d11afcf997295b726857521d40220f27fe51567b2c8df620" + }, + { + "path": "scripts/generate_project_docs.py", + "sha256": "06ffa88d983ae9cca5f82f8275b1a2d6097fae73a69911b0a5b630fcaa2935b4" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "0b5e032fcf74465badaa9d3bacfa4d0e1c696d0c4628b86dd908339c26a44662" + }, + { + "path": "assets/requirements-template.md", + "sha256": "baae55f4e5e305f269e1d08cbd6d483d1127a17916ddce594423480046d0ab23" + } + ], + "dirSha256": "bfc5ab6b0bcaab2ba4730052c02142568ae8c7fc29525ab89ec055870ff1f845" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/references/domain-templates.md b/references/domain-templates.md new file mode 100644 index 0000000..4e32a08 --- /dev/null +++ b/references/domain-templates.md @@ -0,0 +1,1379 @@ +# Domain-Specific Templates Reference + +This reference provides specialized templates and patterns for different types of systems. 
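+
+For consumers who want to pull a single domain's templates out of this file programmatically, here is a minimal sketch. It assumes this reference lives at `references/domain-templates.md` and that every domain begins with a `## ` heading, as listed in the table of contents below.
+
+```python
+import re
+from pathlib import Path
+
+def extract_domain_section(markdown: str, heading: str) -> str:
+    """Return the body of one '## <heading>' section, up to the next '## ' heading."""
+    pattern = rf"^## {re.escape(heading)}\n(.*?)(?=^## |\Z)"
+    match = re.search(pattern, markdown, flags=re.MULTILINE | re.DOTALL)
+    return match.group(1).strip() if match else ""
+
+text = Path("references/domain-templates.md").read_text(encoding="utf-8")
+print(extract_domain_section(text, "Real-time Systems")[:300])
+```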
+ +## Table of Contents + +- [Trading and Financial Systems](#trading-and-financial-systems) +- [Real-time Systems](#real-time-systems) +- [E-commerce](#e-commerce) +- [Content Management Systems](#content-management-systems) +- [IoT](#iot) +- [Machine Learning Pipelines](#machine-learning-pipelines) +- [Developer Tools](#developer-tools) +- [SaaS](#saas) +- [Data Lakehouse and Analytics Systems](#data-lakehouse-and-analytics-systems) +- [AI Agent and Orchestration Systems](#ai-agent-and-orchestration-systems) +- [Enterprise Integration Platforms](#enterprise-integration-platforms) +- [Common Non-Functional Requirements](#common-non-functional-requirements) +- [Cross-Cutting Concerns](#cross-cutting-concerns) +- [Task Breakdown Patterns by Domain](#task-breakdown-patterns-by-domain) +- [Testing Strategies by Domain](#testing-strategies-by-domain) +- [Deployment Patterns](#deployment-patterns) + +## Trading and Financial Systems + +### Specific Requirements Patterns + +```markdown +### Market Data Requirements +- THE system SHALL process market data streams and orders with sub-millisecond latency. +- THE system SHALL integrate with external financial exchanges and market data feeds using standard protocols (e.g. FIX, REST APIs). +- THE system SHALL support large volumes of transactions concurrently (thousands to millions per second). +- THE system SHALL provide time synchronization mechanisms for ordering of events across components. + +### Risk Management Requirements +- THE system SHALL include real-time risk management to reject or flag risky orders based on configurable rules. +- THE system SHALL log all transactions and data changes for auditing and traceability. +- THE system SHALL generate regulatory reports (e.g. trade confirmations, position reports) in required formats on scheduled intervals. +- THE system SHALL ensure ACID compliance for financial transactions to maintain consistency. + +### Order Execution Requirements +- THE system SHALL support connection to trading user interfaces and algorithmic trading bots through well-defined APIs. +- THE system SHALL provide high availability (99.999%) with active-active clustering and automated failover. +- THE system SHALL implement stringent security controls, encryption in transit and at rest, and authentication/authorization for all components. +``` + +### Architecture Components + +```markdown +### Market Data Ingestion +- Captures real-time price feeds from exchanges and data vendors. +- Protocol adapters (e.g. FIX engines, API gateways) for external integration. +- Network optimization for low-latency data delivery. + +### Order Entry Gateway +- Receives and validates incoming orders from trading applications or clients. +- Authentication and authorization of trading requests. +- Order validation and pre-trade risk checks. + +### Order Matching Engine +- Matches buy and sell orders in an order book and confirms trades. +- High-performance order book management. +- Trade execution and confirmation logic. + +### Risk Management Service +- Evaluates orders and positions against risk rules (e.g. credit limits, market risk). +- Real-time position monitoring and limit checking. +- Risk alert generation and order rejection. + +### Trade Repository +- Stores executed trades, order history, and market data for reconciliation and analytics. +- Audit trail maintenance for regulatory compliance. +- Historical data storage and retrieval. 
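+
+An illustrative sketch of the kind of append-only record a trade repository might persist (field names and types here are assumptions, not a prescribed schema):
+
+    from dataclasses import dataclass
+    from datetime import datetime, timezone
+    from decimal import Decimal
+
+    @dataclass(frozen=True)  # frozen: audit records must be immutable once written
+    class TradeRecord:
+        trade_id: str
+        order_id: str
+        symbol: str
+        side: str              # "buy" or "sell"
+        quantity: int
+        price: Decimal         # Decimal avoids float rounding in financial data
+        executed_at: datetime
+
+    record = TradeRecord("T-1001", "O-2001", "EURUSD", "buy",
+                         100_000, Decimal("1.0842"), datetime.now(timezone.utc))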
+ +### Settlement and Clearing Module +- Handles post-trade processing, settlements with clearing houses, and ledger updates. +- Trade confirmation and settlement workflows. +- Financial ledger management. + +### User Interface +- Web/mobile applications or trader workstations for order entry and monitoring. +- Real-time market data visualization. +- Trading dashboard and analytics. + +### Security Layer +- Infrastructure for encryption, secure network zones, identity management, and audit logging. +- Network segmentation and access controls. +- Security monitoring and incident response. + +### Analytics Engine +- Provides real-time analytics, reporting, and historical trend analysis for traders and compliance teams. +- Performance metrics calculation. +- Custom report generation. +``` + +## Real-time Systems + +### Specific Requirements Patterns + +```markdown +### Event Processing +- THE system SHALL process and respond to incoming events within a defined real-time threshold (e.g. milliseconds to seconds) to meet business needs. +- THE system SHALL support streaming data ingestion and event processing at high throughput (e.g. thousands of events per second). +- THE system SHALL guarantee the order of events when required (in-order processing). +- THE system SHALL maintain state consistency across distributed components for stateful real-time operations. + +### Reliability and Scaling +- THE system SHALL be resilient to failures and able to recover quickly without data loss. +- THE system SHALL provide real-time monitoring and alerting on processing delays and system health. +- THE system SHALL support horizontal scaling to meet increasing volumes of events. +- THE system SHALL provide time synchronization mechanisms (e.g. NTP or PTP) for ordering of events across components. + +### Integration and Security +- THE system SHALL integrate with real-time analytics and dashboarding tools to display live data. +- THE system SHALL include security measures for data in transit and at rest, as real-time systems often handle sensitive data. +``` + +### Architecture Components + +```markdown +### Event Sources +- Sensors, user interfaces, or external services generating real-time events. +- Event format standardization. +- Source authentication and validation. + +### Ingestion Layer (Message Broker) +- High-throughput messaging system (e.g. Apache Kafka, RabbitMQ) to buffer and distribute events. +- Message partitioning and routing. +- Durability and replay capabilities. + +### Stream Processing Engine +- Processes streams in real time (e.g. Apache Flink, Spark Streaming, or microservices) performing filtering, aggregation, and event correlation. +- State management for stream operations. +- Windowing and time-based operations. + +### State Store +- Distributed in-memory or fast storage (e.g. Redis, Cassandra) to maintain application state for stream processing. +- State backup and recovery. +- Consistent state across processing nodes. + +### Real-time Databases +- Databases optimized for real-time read/write (e.g. in-memory DBs) for low-latency queries. +- Data indexing for fast access. +- Query optimization for real-time workloads. + +### API Gateway/WebSockets +- Interfaces to push real-time updates to clients (web/mobile) via WebSockets or Server-Sent Events. +- Connection management and authentication. +- Message routing to clients. + +### Monitoring & Alerting +- Systems (e.g. Prometheus, Grafana) to track latency, throughput, and health of components. +- Real-time dashboard creation. 
+- Alert configuration and notification. + +### Configuration Service +- Centralized service to manage and distribute real-time system configurations and thresholds. +- Dynamic configuration updates. +- Environment-specific settings management. + +### Security Layer +- Encryption and authentication for data sources, brokers, and processing nodes. +- Access control for event streams. +- Security monitoring for real-time systems. + +### Scalable Compute Cluster +- Container orchestration (e.g. Kubernetes) or real-time optimized servers to manage deployment of processing services. +- Resource allocation and scaling. +- Load balancing for processing nodes. +``` + +## E-commerce + +### Specific Requirements Patterns + +```markdown +### Product Catalog +- THE system SHALL support a large catalog of products and enable fast search and filtering. +- THE system SHALL implement caching (e.g. CDN, distributed cache) to accelerate delivery of static assets and frequently accessed data. +- THE system SHALL support personalization features, such as product recommendations and targeted promotions. +- THE system SHALL provide analytics and reporting on sales, traffic, and user behavior. + +### Shopping Cart and Checkout +- THE system SHALL allow users to browse products, add items to a shopping cart, and proceed through a checkout process seamlessly. +- THE system SHALL integrate securely with multiple payment gateways (e.g. credit card, digital wallets) to process orders. +- THE system SHALL maintain inventory counts and prevent overselling by updating stock in real-time during purchases. +- THE system SHALL ensure high availability and scalability to handle peak loads (e.g. sales events). + +### User Management +- THE system SHALL provide a secure user account management for registration, login, and profile management. +- THE system SHALL support order tracking and status updates for customers and administrators. +``` + +### Architecture Components + +```markdown +### Web and Mobile Frontend +- Customer-facing applications (websites or mobile apps) presenting product catalogs and user interfaces. +- Responsive design for multiple devices. +- Progressive web app capabilities. + +### API Gateway +- Secure entry point for client applications to interact with backend services. +- Rate limiting and request validation. +- API versioning and documentation. + +### Product Catalog Service +- Manages product data, categories, pricing, and availability. +- Search and filtering functionality. +- Product recommendation engine. + +### Shopping Cart Service +- Maintains user cart state and manages cart operations. +- Session management for anonymous and logged-in users. +- Cart persistence across sessions. + +### Order Management Service +- Orchestrates order placement, validation, and status tracking. +- Order workflow automation. +- Integration with fulfillment systems. + +### Inventory Service +- Tracks stock levels across warehouses and updates quantities. +- Inventory reservation during checkout. +- Low stock alerts and reordering. + +### Payment Service +- Handles payment processing through integration with external payment gateways. +- Payment method tokenization. +- Transaction reconciliation and dispute handling. + +### User Management Service +- Manages user authentication, authorization, and profiles (can integrate with Identity provider). +- Social login integration. +- User preference and history tracking. 
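+
+For the authentication responsibility above, a minimal stdlib-only sketch of salted password hashing (illustrative; a production service would normally delegate this to a vetted library such as bcrypt or argon2):
+
+    import hashlib
+    import hmac
+    import os
+
+    def hash_password(password: str, salt: bytes = b"") -> tuple[bytes, bytes]:
+        salt = salt or os.urandom(16)  # fresh random salt per user
+        digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, 600_000)
+        return salt, digest
+
+    def verify_password(password: str, salt: bytes, expected: bytes) -> bool:
+        # constant-time comparison to avoid timing side channels
+        return hmac.compare_digest(hash_password(password, salt)[1], expected)
+
+    salt, digest = hash_password("s3cret")
+    assert verify_password("s3cret", salt, digest)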
+ +### Search and Recommendation Engine +- Provides full-text search and personalized product recommendations (e.g. Elasticsearch, Machine Learning). +- Search analytics and optimization. +- Recommendation algorithm tuning. + +### Content Delivery Network (CDN) +- Distributes static content (images, scripts, CSS) globally for fast access. +- Edge caching and optimization. +- Dynamic content acceleration. + +### Analytics and Reporting +- Aggregates sales, customer behavior, and performance data for dashboards. +- Business intelligence and insights. +- Custom report generation. + +### Logging and Monitoring Tools +- Tracks system health, errors, and performance (e.g. ELK stack, Prometheus). +- Application performance monitoring. +- Error tracking and alerting. +``` + +## Content Management Systems + +### Specific Requirements Patterns + +```markdown +### Content Creation and Management +- THE system SHALL allow content authors to create, edit, and schedule publishing of content with version control. +- THE system SHALL support multiple content types (text, images, video, documents, etc.) and metadata for each content item. +- THE system SHALL provide a user-friendly content editor interface (WYSIWYG or markdown) and workflows for review/approval. + +### Content Delivery +- THE system SHALL deliver content via APIs or templates to multiple channels (web, mobile, social). +- THE system SHALL implement role-based access control so that only authorized users can publish or modify content. +- THE system SHALL support full-text search and indexing of content for fast retrieval. + +### Performance and Reliability +- THE system SHALL allow for content preview in different templates or layouts before publishing. +- THE system SHALL integrate a caching layer (e.g. CDN, reverse proxy) to improve performance of content delivery. +- THE system SHALL provide audit logs of content changes for accountability. +- THE system SHALL ensure high availability to avoid content downtime. +``` + +### Architecture Components + +```markdown +### Content Repository +- Central database or storage (e.g. MySQL, MongoDB, Blob storage) that stores content and metadata. +- Content versioning and history tracking. +- Binary asset management. + +### Authoring Interface +- Web-based UI for content creators to author, edit, and manage content. +- Rich text editor and media management. +- Collaboration features for content teams. + +### Delivery API +- REST or GraphQL APIs that serve content to front-end applications. +- Content transformation and formatting. +- API access controls and rate limiting. + +### Front-end Delivery Layer +- Rendered website or mobile app that displays the content to end-users. +- Template engine integration. +- Multi-channel content adaptation. + +### Template Engine +- (if not headless) Generates HTML views from content and templates. +- Template inheritance and composition. +- Dynamic content rendering. + +### Search Index +- Engine (e.g. Elasticsearch) that indexes content for search queries. +- Full-text search capabilities. +- Search relevance optimization. + +### Cache/CDN +- Caching proxy or content delivery network to store and serve static content and pages. +- Cache invalidation strategies. +- Performance optimization. + +### Workflow Engine +- Manages content publishing workflows (draft, review, publish states). +- Approval process configuration. +- Notification and escalation rules. + +### Authentication/Authorization Service +- Manages user identities, roles, and permissions. 
+- Integration with enterprise identity systems. +- Fine-grained access control. + +### Analytics Dashboard +- Tracks content performance (views, engagement) and provides reporting. +- User behavior analysis. +- Content effectiveness metrics. +``` + +## IoT + +### Specific Requirements Patterns + +```markdown +### Device Management +- THE system SHALL support secure onboarding and provisioning of a large number of IoT devices. +- THE system SHALL allow over-the-air (OTA) firmware or configuration updates to devices. +- THE system SHALL implement device identity management and authentication to prevent unauthorized devices. +- THE system SHALL scale horizontally to support millions of concurrent device connections. + +### Data Ingestion and Processing +- THE system SHALL use lightweight protocols (e.g. MQTT, CoAP) to handle unreliable networks and constrained devices. +- THE system SHALL ingest telemetry data from devices at high volume and in near-real-time. +- THE system SHALL provide mechanisms for batching or edge processing to reduce cloud communication costs. +- THE system SHALL ensure data durability by storing raw telemetry in a fault-tolerant data lake or time-series database. + +### Monitoring and Security +- THE system SHALL enable real-time monitoring and alerts based on streaming data (e.g. temperature thresholds). +- THE system SHALL comply with security standards (e.g. encryption of data in transit and at rest, secure key storage). +``` + +### Architecture Components + +```markdown +### Edge or Gateway Services +- Local bridges or gateways that aggregate device connections and preprocess data. +- Protocol translation and normalization. +- Local data processing and filtering. + +### Device Registry +- Catalog of devices and metadata for management and authentication. +- Device lifecycle management. +- Device grouping and organization. + +### Message Broker/Hub +- Middleware (e.g. AWS IoT Core, Azure IoT Hub, MQTT broker) to receive and route device telemetry. +- Message filtering and routing rules. +- Quality of service management. + +### Stream Ingestion Service +- Processes incoming data streams for real-time handling (e.g. AWS Kinesis, Azure Event Hubs). +- Data validation and enrichment. +- Stream partitioning and scaling. + +### Data Processing/Analytics +- Real-time (e.g. stream analytics) and batch processing for insights (e.g. Spark, Flink). +- Anomaly detection and alerting. +- Data transformation and aggregation. + +### Time-series Database/Data Lake +- Storage optimized for time-series data (e.g. InfluxDB, IoTDB) or scalable data lake (S3, HDFS). +- High-volume data retention. +- Efficient time-based queries. + +### Device Management Service +- Handles OTA updates, configuration, and device health monitoring. +- Firmware deployment and rollback. +- Device diagnostics and troubleshooting. + +### Security Service +- Certificate/key management for device authentication and encryption. +- Device identity verification. +- Security policy enforcement. + +### Visualization/Dashboard +- Front-end or service to display device data and analytics. +- Real-time monitoring dashboards. +- Historical data visualization. + +### Alerting & Notification +- Generates alerts/notifications (email, SMS, push) based on rules. +- Alert escalation policies. +- Notification history and tracking. 
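+
+A minimal sketch of the rule evaluation at the core of such an alerting component (metric names and thresholds are illustrative):
+
+    from dataclasses import dataclass
+
+    @dataclass
+    class AlertRule:
+        metric: str
+        threshold: float
+        direction: str  # "above" or "below"
+
+    def should_alert(rule: AlertRule, reading: dict) -> bool:
+        value = reading.get(rule.metric)
+        if value is None:
+            return False  # missing telemetry never fires a threshold alert
+        return value > rule.threshold if rule.direction == "above" else value < rule.threshold
+
+    rule = AlertRule(metric="temperature_c", threshold=75.0, direction="above")
+    assert should_alert(rule, {"device_id": "sensor-42", "temperature_c": 81.2})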
+``` + +## Machine Learning Pipelines + +### Specific Requirements Patterns + +```markdown +### Data Pipeline +- THE system SHALL ingest and preprocess data from multiple sources (databases, logs, streams) into a central storage or data lake. +- THE system SHALL version control datasets, features, and models to ensure reproducibility. +- THE system SHALL include evaluation metrics tracking and validation steps in the pipeline. + +### Model Training and Deployment +- THE system SHALL support scalable model training on large datasets using distributed compute (e.g. GPU clusters). +- THE system SHALL automate retraining of models on new data or when performance degrades. +- THE system SHALL support continuous deployment of validated models to production serving environments. +- THE system SHALL provide model serving endpoints (REST/gRPC) for inference with low latency. + +### Monitoring and Governance +- THE system SHALL monitor model performance in production and trigger alerts on data drift or accuracy drop. +- THE system SHALL allow A/B testing of models and rollback to previous versions if needed. +- THE system SHALL ensure data privacy and compliance (e.g. anonymization, encryption) during processing and model training. +``` + +### Architecture Components + +```markdown +### Data Ingestion Layer +- ETL pipelines (e.g. Apache NiFi, AWS Glue, Kafka Connect) to bring raw data into storage. +- Data validation and quality checks. +- Schema management and evolution. + +### Data Storage +- Scalable data lake (e.g. S3, HDFS) or data warehouse for raw and processed data. +- Data partitioning and organization. +- Access controls and governance. + +### Feature Store +- Centralized storage of engineered features for reuse in training and serving. +- Feature computation pipelines. +- Online and offline feature serving. + +### Workflow Orchestration +- Pipeline management tools (e.g. Kubeflow Pipelines, Apache Airflow) to coordinate steps. +- Dependency management. +- Pipeline execution monitoring. + +### Training Environment +- Compute cluster (e.g. Kubernetes with GPU nodes or Spark clusters) for model training. +- Distributed training frameworks. +- Resource optimization and scheduling. + +### Model Registry +- Repository (e.g. MLflow, SageMaker Model Registry) to store and version trained models and metadata. +- Model lineage tracking. +- Model artifact storage. + +### Model Serving Infrastructure +- Services or platforms (e.g. TensorFlow Serving, AWS SageMaker Endpoint) for online inference. +- Request routing and load balancing. +- Model version management. + +### Monitoring and Logging +- Services to monitor pipeline runs, track metrics, logs (e.g. Prometheus, ELK, ML monitoring platforms). +- Performance metrics collection. +- Alert configuration and notification. + +### Experimental Tracking +- Tools (e.g. MLflow Tracking) to log model parameters, metrics, and results. +- Experiment comparison. +- Hyperparameter optimization. + +### Security and Compliance +- Data encryption services, access controls, and audit logs for the ML pipeline. +- Privacy-preserving ML techniques. +- Regulatory compliance tools. +``` + +## Developer Tools + +### Specific Requirements Patterns + +```markdown +### Version Control and Collaboration +- THE system SHALL provide a centralized version control repository (e.g. Git) for source code management. +- THE system SHALL facilitate collaboration features such as code reviews, merge requests, and documentation wikis. 
+- THE system SHALL include role-based access control to restrict who can merge or deploy code. + +### CI/CD and Build Process +- THE system SHALL automate builds, tests, and deployments through CI/CD pipelines upon code commits or pull requests. +- THE system SHALL include a package or artifact repository (e.g. Nexus, Artifactory) to store build outputs. +- THE system SHALL enforce code quality checks (linting, static analysis, security scans) in the pipeline. + +### Development Environment +- THE system SHALL offer environments (e.g. containers, VMs) that mimic production for testing and validation. +- THE system SHALL provide environment provisioning (infrastructure as code) to spin up test or staging environments on demand. +- THE system SHALL offer container and image registries for Docker or OCI artifacts. +``` + +### Architecture Components + +```markdown +### Version Control System +- Git-based repository (e.g. GitLab, GitHub) for source code and branching. +- Code hosting and collaboration. +- Pull request workflow. + +### CI/CD Platform +- Tools (e.g. Jenkins, GitHub Actions, GitLab CI) that define pipelines for build/test/deploy. +- Pipeline configuration and management. +- Build artifact storage. + +### Artifact Repository +- Central storage (e.g. Nexus, Artifactory) for binaries and libraries. +- Dependency management. +- Version control for artifacts. + +### Container Registry +- Storage for container images (e.g. Docker Hub, private registry). +- Image vulnerability scanning. +- Image promotion workflows. + +### Issue Tracker / Project Management +- Tool (e.g. Jira, GitHub Issues) to track tasks and bugs. +- Project planning and tracking. +- Team collaboration features. + +### Test Automation Framework +- Automated test suites (unit, integration, UI tests) integrated into CI. +- Test result reporting. +- Test environment provisioning. + +### Infrastructure as Code +- Tools (e.g. Terraform, Ansible) to define and provision infrastructure. +- Environment configuration management. +- Infrastructure version control. + +### Collaboration Platform +- Wiki or documentation site (e.g. Confluence, GitHub Pages) for team knowledge sharing. +- Documentation versioning. +- Knowledge base management. + +### Security Scanning Tools +- Static code analysis, vulnerability scanning (e.g. SonarQube, Snyk). +- Security policy enforcement. +- Vulnerability management. + +### Notification/ChatOps +- Integration with communication tools (e.g. Slack, Teams) for build/deploy notifications. +- Automated alerts and notifications. +- Chat-based operations. +``` + +## SaaS + +### Specific Requirements Patterns + +```markdown +### Tenant Management +- THE system SHALL support tenant isolation so that customer data is logically separated (e.g. separate databases or partitioned schema). +- THE system SHALL allow per-tenant configuration of features (feature flags or settings) without affecting others. +- THE system SHALL provide a self-service portal for onboarding new tenants and managing account settings. +- THE system SHALL include tenant-specific branding or theming if required. + +### Subscription and Billing +- THE system SHALL implement subscription management and usage metering for billing. +- THE system SHALL scale resources (compute, storage) elastically based on overall usage across tenants. +- THE system SHALL ensure strict data security and privacy between tenants. 
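+
+A minimal sketch of the per-tenant usage metering these criteria imply (event fields and rates are illustrative assumptions):
+
+    from collections import defaultdict
+
+    RATE_PER_UNIT = {"api_calls": 0.0004, "storage_gb_hours": 0.02}
+
+    def meter_usage(events: list[dict]) -> dict[str, float]:
+        """Aggregate billable charges per tenant from raw usage events."""
+        totals: dict[str, float] = defaultdict(float)
+        for event in events:
+            rate = RATE_PER_UNIT.get(event["kind"], 0.0)
+            totals[event["tenant_id"]] += event["quantity"] * rate
+        return dict(totals)
+
+    charges = meter_usage([
+        {"tenant_id": "acme", "kind": "api_calls", "quantity": 10_000},
+        {"tenant_id": "acme", "kind": "storage_gb_hours", "quantity": 720},
+    ])  # {"acme": 18.4}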
+ +### Customization and Integration +- THE system SHALL support multi-region deployment for disaster recovery or latency requirements. +- THE system SHALL offer customization hooks (APIs or plugins) to allow integration with tenant systems. +- THE system SHALL provide centralized monitoring and logging across all tenants with filtering by tenant. +``` + +### Architecture Components + +```markdown +### Tenant Management Service +- Manages tenant lifecycle (onboarding, offboarding, subscriptions). +- Tenant configuration management. +- Tenant billing and usage tracking. + +### Authentication & Authorization +- Identity service (e.g. IAM) that supports multi-tenant logins (often through OAuth/OpenID). +- Single sign-on integration. +- Per-tenant access control. + +### Multi-tenant Data Layer +- Databases or data warehouses with logical separation (schemas or tags) or separate instances per tenant. +- Data isolation strategies. +- Tenant-specific data optimization. + +### Application Service +- The core application codebase, scaled horizontally (e.g. containerized services). +- Multi-tenant aware business logic. +- Tenant context management. + +### Configuration Service +- Handles per-tenant configuration and feature toggles. +- Dynamic configuration updates. +- Tenant-specific settings management. + +### Billing and Usage Service +- Tracks resource usage per tenant and generates billing records. +- Subscription management. +- Payment processing integration. + +### Logging/Monitoring +- Centralized logging (e.g. ELK, Splunk) and monitoring (e.g. Prometheus) that isolates metrics per tenant. +- Tenant-specific dashboards. +- Cross-tenant analytics. + +### API Gateway +- Routes tenant requests to appropriate services, often handling rate limiting and quotas per tenant. +- Tenant identification and routing. +- API access control per tenant. + +### Self-Service Portal +- Web application or console where tenants manage their account and settings. +- Tenant onboarding workflow. +- Account management interface. + +### Notification Service +- Sends emails or alerts to tenants for events like billing, outages, or updates. +- Tenant-specific notifications. +- Notification preference management. +``` + +## Data Lakehouse and Analytics Systems + +### Specific Requirements Patterns + +```markdown +### Data Storage and Management +- THE system SHALL store all raw and processed data in a centralized, scalable data lake (e.g. cloud object storage). +- THE system SHALL maintain a unified metadata catalog for datasets (data catalog) to enable discoverability. +- THE system SHALL support ACID transactions on data (e.g. through Delta Lake or Iceberg) for reliability. +- THE system SHALL automate data lifecycle management (e.g. partitioning, aging, archiving). + +### Data Processing and Analytics +- THE system SHALL enable both batch and streaming ingestion pipelines for diverse data sources. +- THE system SHALL allow SQL-based analytics on data with low query latency (e.g. using a lakehouse query engine). +- THE system SHALL integrate with BI and visualization tools (e.g. Tableau, Power BI) for dashboards. +- THE system SHALL support large-scale machine learning directly on the lakehouse data. + +### Governance and Security +- THE system SHALL implement data governance (data quality checks, access controls, lineage tracking). +- THE system SHALL provide role-based access control and encryption to secure sensitive data. 
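+
+A minimal sketch of the role-based access check behind the access-control criterion (role names and data classifications are illustrative):
+
+    ROLE_GRANTS = {
+        "analyst": {"public", "internal"},
+        "data_engineer": {"public", "internal", "pii"},
+    }
+
+    def can_read(role: str, classification: str) -> bool:
+        """Allow a read only if the role is granted the dataset's classification."""
+        return classification in ROLE_GRANTS.get(role, set())
+
+    assert can_read("data_engineer", "pii")
+    assert not can_read("analyst", "pii")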
+``` + +### Architecture Components + +```markdown +### Data Ingestion Framework +- Tools (e.g. Apache NiFi, Kafka, AWS Glue) to bring batch and streaming data into the lakehouse. +- Data validation and quality checks. +- Schema detection and evolution. + +### Data Lake Storage +- Scalable object storage (e.g. S3, ADLS) or distributed file system for raw and curated data. +- Data organization and partitioning. +- Storage optimization and tiering. + +### Metadata Catalog +- Service (e.g. AWS Glue Catalog, Hive Metastore, Databricks Unity Catalog) that maintains schemas and table definitions. +- Data lineage tracking. +- Data discovery and documentation. + +### Lakehouse Engine +- Query engines (e.g. Apache Spark, Trino/Presto, Databricks, Snowflake) that support ACID transactions and various workloads. +- Query optimization. +- Workload isolation. + +### ETL/ELT Tools +- Platforms (e.g. dbt, Talend) to transform and load data into analytics-ready tables. +- Data transformation pipelines. +- Data quality monitoring. + +### Data Warehouse Layer +- Structured tables optimized for BI (could be part of lakehouse or external warehouse). +- Aggregate table management. +- Performance optimization. + +### Business Intelligence Tools +- Front-end tools (e.g. Tableau, Power BI) for dashboarding and reports. +- Self-service analytics. +- Interactive visualization. + +### Streaming Analytics +- Components (e.g. Spark Streaming, Apache Flink) for real-time analytics on event streams. +- Stream processing logic. +- Real-time dashboard updates. + +### Data Governance and Security +- Data quality tools, auditing, encryption, and IAM to manage policies. +- Data classification and tagging. +- Compliance reporting. + +### Machine Learning Platform +- Integration (e.g. MLflow, AWS SageMaker) for training models on data lakehouse datasets. +- Feature engineering pipelines. +- Model deployment and monitoring. +``` + +## AI Agent and Orchestration Systems + +### Specific Requirements Patterns + +```markdown +### Agent Integration and Orchestration +- THE system SHALL allow modular integration of AI components (e.g. LLMs, vision models, NLP modules) into workflows. +- THE system SHALL orchestrate sequences of actions (prompt chains) among AI agents and external tools. +- THE system SHALL manage conversational or task state across multiple interactions and agents. +- THE system SHALL provide a mechanism for human-in-the-loop intervention or correction. + +### Monitoring and Learning +- THE system SHALL log all AI queries and responses for auditing and iterative improvement. +- THE system SHALL monitor model performance in production and trigger alerts on data drift or accuracy drop. +- THE system SHALL enable continuous learning by feeding back usage data into retraining pipelines. +- THE system SHALL provide explainability logs or traces of decision paths taken by agents. + +### Security and Scalability +- THE system SHALL secure integration with external data sources and APIs (e.g. databases, web APIs). +- THE system SHALL allow dynamic addition or removal of agents without downtime. +- THE system SHALL support parallel or ensemble execution of multiple AI models and combine results. +``` + +### Architecture Components + +```markdown +### Agent Orchestrator +- Coordinates the workflow of multiple AI agents and tools (could be a custom microservice). +- Task scheduling and routing. +- Workflow definition and execution. + +### Large Language Model API +- Connection to LLM services (e.g. 
OpenAI, Anthropic, local LLM cluster) for natural language tasks. +- Prompt engineering and management. +- Response parsing and validation. + +### Specialized AI Modules +- Additional AI services (e.g. image recognition, speech-to-text, custom NLP models) for specific subtasks. +- Model hosting and serving. +- Model versioning and A/B testing. + +### Conversation or State Manager +- Tracks the state of dialogs or multi-step tasks across interactions. +- Context preservation. +- Session management. + +### Tool Integration Layer +- Connectors or APIs for external tools (databases, search, calculators, web services). +- API authentication and management. +- Tool result processing. + +### Agent Registry +- Catalog of available agents and capabilities with metadata. +- Agent discovery and selection. +- Capability matching. + +### Feedback Loop and Learning +- Pipeline that collects feedback and performance metrics for retraining models. +- User feedback collection. +- Model improvement workflows. + +### Logging & Telemetry +- Centralized logs of all queries, responses, and agent decisions for monitoring and debugging. +- Performance metrics collection. +- Audit trail maintenance. + +### Security and Privacy Controls +- Ensures sensitive data is anonymized or protected in model interactions. +- Data masking and filtering. +- Privacy policy enforcement. + +### User Interface / API +- Front-end for human users or APIs for other systems to interact with the agent platform. +- Conversation interface design. +- API documentation and testing. +``` + +## Enterprise Integration Platforms + +### Specific Requirements Patterns + +```markdown +### Connectivity and Integration +- THE system SHALL support a variety of communication protocols (HTTP/REST, SOAP, AMQP, MQTT, JMS, etc.). +- THE system SHALL provide centralized API management, including routing, security, and throttling. +- THE system SHALL enable orchestration of message flows and business processes across multiple systems. +- THE system SHALL implement message transformation and enrichment (e.g. XML/JSON conversion, data mapping). + +### Reliability and Monitoring +- THE system SHALL ensure reliable message delivery with transaction support and retry policies. +- THE system SHALL provide a schema registry or contracts management for message formats. +- THE system SHALL include monitoring and logging for all integration flows. +- THE system SHALL allow decentralized deployment (local gateways) or centralized bus depending on needs. + +### Security and Scalability +- THE system SHALL integrate with enterprise identity and access control systems (e.g. LDAP, SSO). +- THE system SHALL support high throughput and scalability for large volumes of messages. +``` + +### Architecture Components + +```markdown +### API Gateway +- Manages and secures API calls, routes requests to backend services, handles rate limiting. +- API documentation and testing. +- API versioning and lifecycle management. + +### Message Broker/ESB +- Central messaging infrastructure (e.g. Kafka, RabbitMQ, Mule ESB) for asynchronous communication. +- Message routing and transformation. +- Queue management and monitoring. + +### Connector/Adapter Library +- Pre-built connectors for common systems (ERP, databases, SaaS platforms) to simplify integration. +- Connector configuration and customization. +- Connector lifecycle management. + +### Transformation Engine +- Component (e.g. Apache Camel, XSLT) to map and convert message formats between systems. +- Data mapping rules. 
- Transformation validation.

### Integration Server / Orchestrator
- Coordinates complex workflows or service orchestrations (e.g. Camunda, Azure Logic Apps).
- Process modeling and execution.
- Workflow monitoring and management.

### Monitoring Dashboard
- Tracks integration flows, message queue depth, error rates, and system health.
- Performance metrics visualization.
- Alert configuration and notification.

### Configuration Repository
- Stores integration flow definitions and transformation rules (could be code or XML/JSON configs).
- Version control for configurations.
- Configuration deployment.

### Security Layer
- Encryption, token management, and certificate handling for inter-system communication.
- Authentication and authorization.
- Security policy enforcement.

### Registry & Discovery
- Service registry (e.g. Consul, etcd) for discovering endpoints of various integrated services.
- Service health monitoring.
- Dynamic endpoint resolution.

### Logging & Auditing
- Centralized logging for integration transactions and change tracking.
- Audit trail maintenance.
- Compliance reporting.
```

## Common Non-Functional Requirements

### Performance
```markdown
- Response time: p95 < 200ms, p99 < 500ms
- Throughput: >1000 requests per second
- Concurrent users: >10,000
- Database queries: <50ms
- Cache hit rate: >90%
```

### Scalability
```markdown
- Horizontal scaling capability
- Auto-scaling based on metrics
- Database sharding strategy
- Stateless service design
- Load balancer configuration
```

### Reliability
```markdown
- Uptime: 99.9% availability
- RTO: <1 hour
- RPO: <5 minutes
- Automated failover
- Data replication strategy
```

### Security
```markdown
- TLS 1.3 for all communications
- OAuth 2.0/JWT authentication
- Role-based access control
- Audit logging
- Encryption at rest
- Input validation
- SQL injection prevention
- XSS protection
- Rate limiting
- DDoS protection
```

### Monitoring
```markdown
- Application metrics (Prometheus)
- Distributed tracing (Jaeger/Zipkin)
- Centralized logging (ELK stack)
- Error tracking (Sentry)
- Uptime monitoring
- Custom dashboards
- Alert configuration
- SLA tracking
```

## Cross-Cutting Concerns

### DevSecOps
- Implement CI/CD with automated security scans
- Enforce SBOM (Software Bill of Materials) for all builds
- Enable policy-as-code (OPA, Conftest)
- Integrate secrets management (Vault, SSM)
- Implement GitOps for infrastructure deployment

### Data Governance
- Maintain centralized metadata catalog (DataHub, Amundsen)
- Apply data classification and retention policies
- Automate lineage tracking
- Enforce PII masking and anonymization
- Support regulatory compliance (GDPR, HIPAA)

### Observability Maturity
- Level 1: Metrics only
- Level 2: Metrics + Centralized Logs
- Level 3: Metrics + Logs + Traces
- Level 4: Business KPIs + SLO Dashboards
- Level 5: Autonomous Remediation (AIOps)

### High Availability Blueprint
- Active-active regional clusters
- Read replicas for critical databases
- Zero-downtime deployments
- Circuit breaker patterns for dependencies
- Stateful failover validation testing

### API Governance
- Consistent naming and versioning (v1, v2)
- Schema validation and contract testing
- Rate limit and quota enforcement
- Consumer onboarding workflow
- Deprecation policy automation

## Task Breakdown Patterns by Domain

### Trading System Tasks
```markdown
1. 
Market Data Integration + - Exchange API setup + - WebSocket implementation + - Data normalization + - Storage optimization + +2. Strategy Development + - Indicator calculation + - Pattern detection + - Signal generation + - Backtesting framework + +3. Execution System + - Order management + - Position tracking + - Risk controls + - Performance analytics +``` + +### Real-time System Tasks +```markdown +1. Connection Layer + - WebSocket server + - Session management + - Load balancing + - Failover handling + +2. Message Processing + - Message routing + - Persistence layer + - Delivery guarantees + - Presence tracking + +3. Client SDKs + - JavaScript SDK + - Mobile SDKs + - Reconnection logic + - Offline support +``` + +### E-commerce Tasks +```markdown +1. Product Management + - Catalog setup + - Search implementation + - Inventory system + - Media handling + +2. Purchase Flow + - Cart implementation + - Checkout process + - Payment integration + - Order processing + +3. Customer Experience + - User accounts + - Recommendations + - Reviews/ratings + - Customer service +``` + +### CMS Tasks +```markdown +1. Content Creation + - Editor implementation + - Media management + - Version control + - Workflow automation + +2. Content Delivery + - API development + - Template engine + - Caching layer + - CDN integration + +3. Content Management + - User permissions + - Content scheduling + - Search functionality + - Analytics integration +``` + +### IoT Tasks +```markdown +1. Device Management + - Device onboarding + - Firmware updates + - Device authentication + - Device monitoring + +2. Data Pipeline + - Data ingestion + - Stream processing + - Data storage + - Data analytics + +3. Edge Computing + - Edge deployment + - Local processing + - Connectivity management + - Synchronization +``` + +### Machine Learning Tasks +```markdown +1. Data Pipeline + - Data ingestion + - Feature engineering + - Data validation + - Data versioning + +2. Model Development + - Experiment tracking + - Model training + - Model evaluation + - Model versioning + +3. Model Deployment + - Model serving + - Performance monitoring + - A/B testing + - Model retraining +``` + +### Developer Tools Tasks +```markdown +1. Core Infrastructure + - Version control setup + - CI/CD pipeline + - Artifact repository + - Build automation + +2. Development Environment + - IDE integration + - Testing framework + - Debugging tools + - Documentation system + +3. Collaboration + - Code review process + - Issue tracking + - Communication tools + - Knowledge sharing +``` + +### SaaS Tasks +```markdown +1. Tenant Management + - Tenant onboarding + - Data isolation + - Configuration management + - Billing integration + +2. Application Development + - Multi-tenant architecture + - Feature flags + - Customization framework + - API development + +3. Operations + - Monitoring + - Scaling + - Backup/Recovery + - Security compliance +``` + +### Data Lakehouse Tasks +```markdown +1. Data Infrastructure + - Storage setup + - Metadata catalog + - Query engine + - Data governance + +2. Data Processing + - Ingestion pipelines + - ETL/ELT processes + - Data transformation + - Quality checks + +3. Analytics + - BI integration + - Dashboard development + - ML pipeline + - Reporting automation +``` + +### AI Agent Tasks +```markdown +1. Agent Development + - Model integration + - Prompt engineering + - Workflow design + - Tool integration + +2. Orchestration + - Agent coordination + - State management + - Error handling + - Performance optimization + +3. 
Operations + - Monitoring + - Logging + - Feedback collection + - Model updates +``` + +### Enterprise Integration Tasks +```markdown +1. Connectivity + - API development + - Message broker setup + - Connector development + - Protocol handling + +2. Integration Logic + - Transformation rules + - Workflow design + - Error handling + - Transaction management + +3. Operations + - Monitoring + - Logging + - Security + - Performance tuning +``` + +## Testing Strategies by Domain + +### Financial Systems +- Market data replay testing +- Strategy backtesting +- Risk scenario testing +- Regulatory compliance testing +- Latency benchmarking + +### Real-time Systems +- Connection stress testing +- Message ordering verification +- Failover testing +- Network partition testing +- Client compatibility testing + +### E-commerce +- Load testing (Black Friday simulation) +- Payment gateway testing +- Inventory accuracy testing +- Cart abandonment testing +- Cross-browser testing + +### CMS +- Content workflow testing +- Permission testing +- Search functionality testing +- Template rendering testing +- Multi-channel delivery testing + +### IoT +- Device scalability testing +- Network reliability testing +- Data integrity testing +- Edge computing testing +- Security vulnerability testing + +### Machine Learning +- Model accuracy testing +- Data drift detection testing +- Performance benchmarking +- A/B testing validation +- Bias and fairness testing + +### Developer Tools +- Build pipeline testing +- Integration testing +- Performance testing +- Security scanning +- Usability testing + +### SaaS +- Tenant isolation testing +- Multi-tenancy performance testing +- Subscription billing testing +- Customization testing +- Security compliance testing + +### Data Lakehouse +- Data quality testing +- Query performance testing +- Schema evolution testing +- Governance compliance testing +- Security access testing + +### AI Agent +- Conversation flow testing +- Integration testing +- Performance testing +- Safety and bias testing +- User experience testing + +### Enterprise Integration +- End-to-end flow testing +- Message transformation testing +- Error handling testing +- Performance testing +- Security testing + +## Deployment Patterns + +### High-Frequency Trading +```markdown +# Colocation deployment +- Bare metal servers +- Kernel bypass networking +- CPU isolation +- NUMA optimization +- Dedicated network paths +``` + +### Real-time Systems +```markdown +# Low-latency deployment +- Edge computing locations +- WebSocket optimization +- Connection pooling +- Geographic distribution +- Real-time monitoring +``` + +### E-commerce +```yaml +# Scalable web deployment +- Auto-scaling groups +- CDN configuration +- Database sharding +- Cache layers +- Payment gateway integration +``` + +### CMS +```markdown +# Content delivery deployment +- Headless architecture +- CDN integration +- Multi-region deployment +- Content replication +- Preview environments +``` + +### IoT +```markdown +# Edge + Cloud hybrid +- Edge gateway deployment +- Cloud orchestration +- Message queue setup +- Time-series database +- Analytics pipeline +``` + +### Machine Learning +```markdown +# ML platform deployment +- GPU clusters +- Model serving infrastructure +- Feature store +- Experiment tracking +- Model registry +``` + +### Developer Tools +```markdown +# DevOps platform deployment +- Container orchestration +- CI/CD pipeline +- Artifact repository +- Monitoring stack +- Self-service environments +``` + +### SaaS +```markdown +# 
Multi-region deployment
- Geographic load balancing
- Regional data residency
- CDN configuration
- Database replication
- Disaster recovery
```

### Data Lakehouse
```markdown
# Analytics platform deployment
- Data lake storage
- Query engine cluster
- Metadata catalog
- BI tools integration
- Security controls
```

### AI Agent
```markdown
# AI platform deployment
- Model serving infrastructure
- Orchestration engine
- Monitoring and logging
- Feedback pipeline
- Security controls
```

### Enterprise Integration
```markdown
# Integration platform deployment
- API gateway cluster
- Message broker
- Integration runtime
- Monitoring dashboard
- Security infrastructure
```
diff --git a/scripts/generate_project_docs.py b/scripts/generate_project_docs.py
new file mode 100644
index 0000000..3a5e0d6
--- /dev/null
+++ b/scripts/generate_project_docs.py
@@ -0,0 +1,801 @@
+#!/usr/bin/env python3
+"""
+Project Document Generator
+Generates structured requirements, design, and task documents for new projects
+"""
+
+import argparse
+from datetime import datetime
+from typing import Dict, List
+import os
+
+class ProjectDocumentGenerator:
+    def __init__(self, project_name: str, project_type: str = "web-app"):
+        self.project_name = project_name
+        self.project_type = project_type
+        self.timestamp = datetime.now().strftime("%Y-%m-%d")
+
+    def generate_requirements_template(self, features: List[str]) -> str:
+        """Generate requirements document template"""
+
+        template = f"""# Requirements Document
+
+## Introduction
+
+{self.project_name} is a [DESCRIPTION OF SYSTEM PURPOSE]. The system is designed for [TARGET USERS] and will be deployed as [DEPLOYMENT MODEL].
+
+## Glossary
+
+- **[Term]**: [Definition specific to this system]
+- **User**: [Define user types]
+- **System**: The {self.project_name} platform
+
+## Requirements
+"""
+
+        for i, feature in enumerate(features, 1):
+            template += f"""
+### Requirement {i}
+
+**User Story:** As a [USER TYPE], I want {feature}, so that [BENEFIT]
+
+#### Acceptance Criteria
+
+1. WHEN [trigger/condition], THE system SHALL [behavior]
+2. WHERE [context applies], THE system SHALL [behavior]
+3. THE system SHALL [capability] within [time limit]
+4. IF [error condition], THEN THE system SHALL [handle gracefully]
+5. THE system SHALL persist [data] with [attributes]
+"""
+
+        return template
+
+    def generate_design_template(self, components: List[str]) -> str:
+        """Generate design document template with comprehensive architecture"""
+
+        template = f"""# Design Document
+
+## Overview
+
+The {self.project_name} system is built as a [ARCHITECTURE PATTERN] with [KEY COMPONENTS]. The design prioritizes [KEY PRIORITIES]. 
+ +## System Architecture + +### Component Map + +| Component ID | Name | Type | Responsibility | Interfaces With | +|-------------|------|------|----------------|-----------------| +| COMP-1 | Frontend | UI | User interface and interaction | COMP-2 | +| COMP-2 | API Gateway | Service | Request routing and authentication | COMP-3, COMP-4 |""" + + for i, component in enumerate(components, 3): + template += f""" +| COMP-{i} | {component} | Service | [Responsibility] | [Components] |""" + + template += """ + +### High-Level Architecture Diagram + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Frontend Layer │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ [UI Framework] Application │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + │ + API (REST/GraphQL/WebSocket) + │ +┌─────────────────────────────────────────────────────────────┐ +│ Backend Layer │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ [Backend Framework] Application │ │ +│ │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ │ +│ │ │ Service │ │ Service │ │ Service │ │ │ +│ │ └──────────┘ └──────────┘ └──────────┘ │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + │ + Database Access + │ +┌─────────────────────────────────────────────────────────────┐ +│ Data Layer │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ [Database Type] │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Data Flow Specifications + +### Primary Data Flows + +#### 1. User Authentication Flow + +``` +1. User → Frontend: Login credentials +2. Frontend → API Gateway: Encrypted credentials +3. API Gateway → Auth Service: Validation request +4. Auth Service → User Database: Query user record +5. User Database → Auth Service: User data +6. Auth Service → API Gateway: JWT token +7. API Gateway → Frontend: Auth response with token +``` + +**Data Transformations:** +- Step 2: Credentials encrypted with HTTPS +- Step 3: Rate limiting applied +- Step 6: JWT token generated with claims + +[Add other critical data flows] + +## Integration Points + +### Internal Integration Points + +| Source | Target | Protocol | Data Format | Purpose | +|--------|--------|----------|-------------|---------| +| Frontend | API Gateway | HTTPS/REST | JSON | API calls | +| API Gateway | Services | HTTP/gRPC | JSON/Protobuf | Service calls | +| Services | Database | TCP | SQL | Data persistence | + +### External Integration Points + +#### [External Service Name] + +**Type:** REST API / Database / Message Queue +**Purpose:** [What this integration provides] +**Endpoint:** [URL pattern or connection details] +**Authentication:** [OAuth2, API Key, etc.] 
+**Rate Limits:** [Any constraints] + +**Interface Contract:** +``` +POST /api/endpoint +Headers: { "Authorization": "Bearer token" } +Body: { "field": "value" } +Response: { "result": "value" } +``` + +**Error Handling:** +- Retry strategy: Exponential backoff with jitter +- Circuit breaker: Opens after 5 consecutive failures +- Fallback: [Degraded functionality or cached response] + +## System Boundaries + +### In Scope +- [Core functionality included] +- [Features to be implemented] + +### Out of Scope +- [Features not included] +- [Delegated to external systems] + +### Assumptions +- [External services available] +- [Infrastructure provided] + +## Components and Interfaces +""" + + for component in components: + template += f""" +### {component} + +**Responsibility:** [Single sentence description of what this component does] + +**Key Classes:** +- `{component}Service`: Main service class for {component.lower()} operations +- `{component}Controller`: Handles API requests for {component.lower()} +- `{component}Repository`: Data access layer for {component.lower()} + +**Interfaces:** +```python +class {component}Service: + async def create(self, data: Dict) -> {component} + async def get(self, id: str) -> Optional[{component}] + async def update(self, id: str, data: Dict) -> {component} + async def delete(self, id: str) -> bool + async def list(self, filters: Dict) -> List[{component}] +``` + +**Data Flow:** +- Receives requests from [API layer/other service] +- Validates input using [validation rules] +- Processes business logic +- Persists to database +- Returns response + +**Performance:** +- Target response time: <200ms for queries +- Target response time: <500ms for mutations +- Maximum concurrent operations: 100 +""" + + template += """ +## Data Models + +### User +```python +@dataclass +class User: + id: str + email: str + name: str + created_at: datetime + updated_at: datetime +``` + +[Add other data models] + +## Error Handling + +### API Errors +**Types:** +- 400 Bad Request - Invalid input +- 401 Unauthorized - Missing/invalid authentication +- 403 Forbidden - Insufficient permissions +- 404 Not Found - Resource doesn't exist +- 500 Internal Server Error - Unexpected error + +**Handling:** +- Return consistent error format with code, message, and details +- Log all errors with context +- Implement retry logic for transient failures + +### Database Errors +**Types:** +- Connection failures +- Query timeouts +- Constraint violations + +**Handling:** +- Retry with exponential backoff +- Graceful degradation where possible +- Transaction rollback on failure + +## Testing Strategy + +### Unit Tests +- Service layer: Test business logic with mocked dependencies +- Repository layer: Test database operations +- API layer: Test request/response handling +- Coverage target: 80% + +### Integration Tests +- End-to-end API tests +- Database integration tests +- External service integration tests + +### Performance Tests +- Load testing: 100 concurrent users +- Response time: p95 < 500ms +- Throughput: >100 requests/second + +## Deployment + +### Docker Configuration +```yaml +version: '3.8' + +services: + app: + build: . 
+ ports: + - "3000:3000" + environment: + - DATABASE_URL=${DATABASE_URL} + depends_on: + - database + + database: + image: postgres:15 + volumes: + - db_data:/var/lib/postgresql/data + environment: + - POSTGRES_PASSWORD=${DB_PASSWORD} + +volumes: + db_data: +``` + +### Environment Variables +``` +DATABASE_URL=postgresql://user:pass@localhost/dbname +API_KEY=your-api-key +JWT_SECRET=your-secret-key +NODE_ENV=production +``` + +## Performance Targets + +- API response time: <200ms (p95) +- Database query time: <50ms (p95) +- Frontend load time: <2s +- Time to interactive: <3s +- Memory usage: <512MB per instance + +## Security Considerations + +- JWT-based authentication +- Rate limiting on all endpoints +- Input validation and sanitization +- SQL injection prevention via parameterized queries +- XSS prevention via output encoding +- HTTPS only in production +""" + + return template + + def generate_tasks_template(self, phases: List[Dict]) -> str: + """Generate implementation plan template with boundaries and deliverables""" + + template = f"""# Implementation Plan + +Generated: {self.timestamp} +Project: {self.project_name} +Type: {self.project_type} + +## Project Boundaries + +### Must Have (MVP) +- [Core feature 1] +- [Core feature 2] +- [Core feature 3] + +### Nice to Have (Enhancements) +- [Enhancement feature 1] +- [Enhancement feature 2] + +### Out of Scope +- [Explicitly excluded feature 1] +- [Deferred to future phase] + +### Technical Constraints +- [Framework limitations] +- [Resource constraints] + +## Deliverables by Phase + +| Phase | Deliverables | Success Criteria | +|-------|-------------|------------------| +| 1. Infrastructure | Working development environment | All developers can run locally | +| 2. Data Layer | Database schema, models | CRUD operations functional | +| 3. Business Logic | Core services implemented | All requirements fulfilled | +| 4. API Layer | REST/GraphQL endpoints | API tests passing | +| 5. Frontend | User interface | End-to-end workflows complete | +| 6. Testing | Test coverage >80% | All tests passing | +| 7. Deployment | Production environment | System accessible and stable | + +## Task Breakdown + +""" + + for phase_num, phase in enumerate(phases, 1): + template += f"- [ ] {phase_num}. 
{phase['name']}\n\n" + + for task_num, task in enumerate(phase.get('tasks', []), 1): + template += f" - [ ] {phase_num}.{task_num} {task['name']}\n" + + if 'subtasks' in task: + for subtask in task['subtasks']: + template += f" - {subtask}\n" + + if 'requirements' in task: + template += f" - _Requirements: {', '.join(task['requirements'])}_\n" + + if 'dependencies' in task and task['dependencies']: + template += f" - _Dependencies: {', '.join(task['dependencies'])}_\n" + + template += "\n" + + return template + + def get_default_phases(self) -> List[Dict]: + """Get default phases based on project type""" + + if self.project_type == "web-app": + return [ + { + "name": "Infrastructure Setup", + "tasks": [ + { + "name": "Initialize project structure", + "subtasks": [ + "Create directory structure", + "Initialize package managers", + "Set up version control" + ], + "requirements": ["REQ-12.1"] + }, + { + "name": "Set up database", + "subtasks": [ + "Create database schema", + "Write migrations", + "Set up connection pooling" + ], + "requirements": ["REQ-9.1", "REQ-9.2"] + }, + { + "name": "Configure Docker", + "subtasks": [ + "Create Dockerfiles", + "Write docker-compose.yml", + "Set up volumes and networks" + ], + "requirements": ["REQ-12.2", "REQ-12.3"] + } + ] + }, + { + "name": "Backend Implementation", + "tasks": [ + { + "name": "Create data models", + "subtasks": [ + "Define entities", + "Create validation schemas", + "Implement serialization" + ], + "requirements": ["REQ-3.1"], + "dependencies": ["1.2"] + }, + { + "name": "Implement service layer", + "subtasks": [ + "Create business logic services", + "Implement validation rules", + "Add error handling" + ], + "requirements": ["REQ-4.1"], + "dependencies": ["2.1"] + }, + { + "name": "Build API endpoints", + "subtasks": [ + "Create REST/GraphQL routes", + "Add authentication middleware", + "Implement request validation" + ], + "requirements": ["REQ-5.1"], + "dependencies": ["2.2"] + } + ] + }, + { + "name": "Frontend Implementation", + "tasks": [ + { + "name": "Set up frontend framework", + "subtasks": [ + "Initialize React/Vue/Angular app", + "Configure build tools", + "Set up routing" + ], + "requirements": ["REQ-7.1"] + }, + { + "name": "Create UI components", + "subtasks": [ + "Build reusable components", + "Implement responsive design", + "Add styling/theming" + ], + "requirements": ["REQ-7.2"], + "dependencies": ["3.1"] + }, + { + "name": "Integrate with backend", + "subtasks": [ + "Set up API client", + "Implement state management", + "Add error handling" + ], + "requirements": ["REQ-7.3"], + "dependencies": ["2.3", "3.2"] + } + ] + }, + { + "name": "Testing and Quality Assurance", + "tasks": [ + { + "name": "Write unit tests", + "subtasks": [ + "Test services", + "Test components", + "Test utilities" + ], + "requirements": ["REQ-13.1"], + "dependencies": ["2.2", "3.2"] + }, + { + "name": "Create integration tests", + "subtasks": [ + "Test API endpoints", + "Test database operations", + "Test external integrations" + ], + "requirements": ["REQ-13.2"], + "dependencies": ["4.1"] + }, + { + "name": "Perform end-to-end testing", + "subtasks": [ + "Test user workflows", + "Test error scenarios", + "Performance testing" + ], + "requirements": ["REQ-13.3"], + "dependencies": ["4.2"] + } + ] + }, + { + "name": "Deployment and Documentation", + "tasks": [ + { + "name": "Set up CI/CD pipeline", + "subtasks": [ + "Configure build automation", + "Set up test automation", + "Configure deployment" + ], + "requirements": ["REQ-14.1"], + 
"dependencies": ["4.3"] + }, + { + "name": "Write documentation", + "subtasks": [ + "API documentation", + "User guide", + "Deployment guide" + ], + "requirements": ["REQ-15.1"], + "dependencies": ["5.1"] + }, + { + "name": "Deploy to production", + "subtasks": [ + "Set up production environment", + "Configure monitoring", + "Perform deployment" + ], + "requirements": ["REQ-14.2"], + "dependencies": ["5.2"] + } + ] + } + ] + + elif self.project_type == "cli-tool": + return [ + { + "name": "Project Setup", + "tasks": [ + { + "name": "Initialize project", + "subtasks": [ + "Set up package structure", + "Configure build system", + "Add dependencies" + ] + }, + { + "name": "Design command structure", + "subtasks": [ + "Define commands and subcommands", + "Plan argument parsing", + "Design configuration schema" + ] + } + ] + }, + { + "name": "Core Implementation", + "tasks": [ + { + "name": "Implement command parser", + "subtasks": [ + "Create argument parser", + "Add command handlers", + "Implement help system" + ], + "dependencies": ["1.2"] + }, + { + "name": "Build core logic", + "subtasks": [ + "Implement business logic", + "Add validation", + "Handle errors" + ], + "dependencies": ["2.1"] + } + ] + }, + { + "name": "Testing and Packaging", + "tasks": [ + { + "name": "Write tests", + "subtasks": [ + "Unit tests", + "Integration tests", + "CLI tests" + ], + "dependencies": ["2.2"] + }, + { + "name": "Package and distribute", + "subtasks": [ + "Create package", + "Write documentation", + "Publish" + ], + "dependencies": ["3.1"] + } + ] + } + ] + + elif self.project_type == "api-service": + return [ + { + "name": "Service Setup", + "tasks": [ + { + "name": "Initialize API project", + "subtasks": [ + "Set up framework", + "Configure database", + "Add middleware" + ] + }, + { + "name": "Design API schema", + "subtasks": [ + "Define endpoints", + "Create OpenAPI spec", + "Plan authentication" + ] + } + ] + }, + { + "name": "API Implementation", + "tasks": [ + { + "name": "Create endpoints", + "subtasks": [ + "Implement routes", + "Add validation", + "Handle errors" + ], + "dependencies": ["1.2"] + }, + { + "name": "Add authentication", + "subtasks": [ + "Implement auth middleware", + "Add JWT/OAuth", + "Set up permissions" + ], + "dependencies": ["2.1"] + } + ] + } + ] + + else: # Generic project + return [ + { + "name": "Project Setup", + "tasks": [ + { + "name": "Initialize project", + "subtasks": ["Create structure", "Set up tools"] + } + ] + }, + { + "name": "Implementation", + "tasks": [ + { + "name": "Build core features", + "subtasks": ["Implement logic", "Add tests"] + } + ] + }, + { + "name": "Deployment", + "tasks": [ + { + "name": "Prepare for production", + "subtasks": ["Test", "Document", "Deploy"] + } + ] + } + ] + + def generate_all_documents(self, + features: List[str] = None, + components: List[str] = None, + output_dir: str = ".") -> Dict[str, str]: + """Generate all three documents""" + + # Use defaults if not provided + if not features: + features = [ + "to authenticate and manage my account", + "to create and manage resources", + "to view analytics and reports", + "to configure system settings", + "to receive notifications" + ] + + if not components: + components = [ + "Authentication Service", + "User Management", + "Resource Manager", + "Analytics Engine", + "Notification Service" + ] + + # Generate documents + docs = { + "requirements.md": self.generate_requirements_template(features), + "design.md": self.generate_design_template(components), + "tasks.md": 
self.generate_tasks_template(self.get_default_phases()) + } + + # Save to files + os.makedirs(output_dir, exist_ok=True) + + for filename, content in docs.items(): + filepath = os.path.join(output_dir, filename) + with open(filepath, 'w') as f: + f.write(content) + print(f"Generated: {filepath}") + + return docs + + +def main(): + parser = argparse.ArgumentParser(description="Generate project planning documents") + parser.add_argument("project_name", help="Name of the project") + parser.add_argument("--type", default="web-app", + choices=["web-app", "cli-tool", "api-service", "generic"], + help="Type of project") + parser.add_argument("--features", nargs="+", + help="List of features for requirements") + parser.add_argument("--components", nargs="+", + help="List of components for design") + parser.add_argument("--output", default=".", + help="Output directory for documents") + + args = parser.parse_args() + + generator = ProjectDocumentGenerator(args.project_name, args.type) + generator.generate_all_documents( + features=args.features, + components=args.components, + output_dir=args.output + ) + + print(f"\n✅ Successfully generated project documents for '{args.project_name}'") + print(f" Type: {args.type}") + print(f" Location: {args.output}/") + print("\nNext steps:") + print("1. Review and customize the generated documents") + print("2. Fill in the [PLACEHOLDER] sections") + print("3. Add project-specific requirements and design details") + print("4. Use these documents as input for AI-assisted implementation") + + +if __name__ == "__main__": + main() diff --git a/scripts/validate_documents.py b/scripts/validate_documents.py new file mode 100644 index 0000000..9ae29ed --- /dev/null +++ b/scripts/validate_documents.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python3 +""" +Document Validator +Validates project planning documents for completeness and consistency +""" + +import re +import argparse +from typing import List, Dict, Tuple +import os + +class DocumentValidator: + def __init__(self): + self.errors = [] + self.warnings = [] + + def validate_requirements(self, content: str) -> Tuple[List[str], List[str]]: + """Validate requirements document structure and content""" + errors = [] + warnings = [] + + # Check required sections + required_sections = [ + "## Introduction", + "## Glossary", + "## Requirements" + ] + + for section in required_sections: + if section not in content: + errors.append(f"Missing required section: {section}") + + # Check for user stories + user_story_pattern = r"\*\*User Story:\*\*.*As a.*I want.*so that" + if not re.search(user_story_pattern, content, re.DOTALL): + warnings.append("No user stories found in requirements") + + # Check for acceptance criteria + if "Acceptance Criteria" not in content: + errors.append("No acceptance criteria found") + + # Check for SHALL statements + shall_count = content.count("SHALL") + if shall_count < 5: + warnings.append(f"Only {shall_count} SHALL statements found (recommend at least 5)") + + # Check for requirement numbering + req_pattern = r"### Requirement \d+|### REQ-\d+" + req_matches = re.findall(req_pattern, content) + if len(req_matches) < 3: + warnings.append(f"Only {len(req_matches)} numbered requirements found") + + # Check for placeholders + placeholder_pattern = r"\[.*?\]" + placeholders = re.findall(placeholder_pattern, content) + if len(placeholders) > 10: + warnings.append(f"Found {len(placeholders)} placeholders - remember to fill them in") + + return errors, warnings + + def validate_design(self, content: str) -> 
Tuple[List[str], List[str]]:
+        """Validate design document structure and content"""
+        errors = []
+        warnings = []
+
+        # Check required sections
+        required_sections = [
+            "## Overview",
+            "## System Architecture",
+            "## Data Flow",
+            "## Integration Points",
+            "## Components",
+            "## Data Models",
+            "## Deployment"
+        ]
+
+        for section in required_sections:
+            if section not in content:
+                errors.append(f"Missing required section: {section}")
+
+        # Check for component map
+        if "Component Map" not in content and "| Component ID |" not in content:
+            errors.append("Missing Component Map table")
+
+        # Check for data flow specifications
+        if "Data Flow" not in content:
+            errors.append("Missing Data Flow specifications")
+
+        # Check for integration points
+        if "Integration Points" not in content:
+            errors.append("Missing Integration Points section")
+
+        # Check for system boundaries
+        if "System Boundaries" not in content and "In Scope" not in content:
+            warnings.append("Missing System Boundaries definition")
+
+        # Check for architecture diagram
+        if "```" not in content and "┌" not in content:
+            warnings.append("No architecture diagram found")
+
+        # Check for interfaces
+        if "class" not in content and "interface" not in content.lower():
+            warnings.append("No interface definitions found")
+
+        # Check for error handling (case-insensitive)
+        if "error handling" not in content.lower():
+            warnings.append("No error handling section found")
+
+        # Check for performance targets (case-insensitive)
+        if "performance" not in content.lower():
+            warnings.append("No performance targets specified")
+
+        # Check for Docker configuration (case-insensitive)
+        if "docker" not in content.lower():
+            warnings.append("No Docker configuration found")
+
+        return errors, warnings
+
+    def validate_tasks(self, content: str) -> Tuple[List[str], List[str]]:
+        """Validate implementation plan structure and content"""
+        errors = []
+        warnings = []
+
+        # Check for project boundaries
+        if "## Project Boundaries" not in content:
+            errors.append("Missing Project Boundaries section")
+
+        if "Must Have" not in content:
+            warnings.append("Missing 'Must Have' scope definition")
+
+        if "Out of Scope" not in content:
+            warnings.append("Missing 'Out of Scope' definition")
+
+        # Check for deliverables
+        if "## Deliverables" not in content and "Deliverables by Phase" not in content:
+            warnings.append("Missing Deliverables section")
+
+        # Check for success criteria
+        if "Success Criteria" not in content:
+            warnings.append("Missing Success Criteria for deliverables")
+
+        # Check for task structure
+        phase_pattern = r"(?m)^- \[[ x]\] \d+\."  # anchored to line start so indented tasks aren't counted as phases
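+        # With the (?m)^ anchor this matches a top-level phase entry such as
+        # "- [ ] 1. Infrastructure Setup" from the generated plan, while skipping
+        # indented task lines like "  - [ ] 1.1 Initialize project structure".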
+ phases = re.findall(phase_pattern, content) + + if len(phases) == 0: + errors.append("No phases found in task list") + elif len(phases) < 3: + warnings.append(f"Only {len(phases)} phases found (recommend at least 3)") + + # Check for subtasks + task_pattern = r" - \[[ x]\] \d+\.\d+" + tasks = re.findall(task_pattern, content) + + if len(tasks) == 0: + errors.append("No tasks found in implementation plan") + elif len(tasks) < 10: + warnings.append(f"Only {len(tasks)} tasks found (recommend at least 10)") + + # Check for requirement tracing + req_pattern = r"_Requirements:.*REQ-\d+|_Requirements:.*\d+\.\d+" + req_traces = re.findall(req_pattern, content) + + if len(req_traces) == 0: + warnings.append("No requirement tracing found in tasks") + elif len(req_traces) < len(tasks) / 2: + warnings.append(f"Only {len(req_traces)} tasks have requirement tracing") + + # Check for component involvement + comp_pattern = r"_Components:.*COMP-\d+" + comp_traces = re.findall(comp_pattern, content) + + if len(comp_traces) == 0: + warnings.append("No component mapping found in tasks") + + # Check for dependencies + dep_pattern = r"_Dependencies:" + dependencies = re.findall(dep_pattern, content) + + if len(dependencies) == 0: + warnings.append("No task dependencies defined") + + # Check completion status + completed_pattern = r"- \[x\]" + pending_pattern = r"- \[ \]" + + completed = len(re.findall(completed_pattern, content)) + pending = len(re.findall(pending_pattern, content)) + + if completed + pending > 0: + completion_rate = (completed / (completed + pending)) * 100 + print(f"Task completion: {completed}/{completed + pending} ({completion_rate:.1f}%)") + + return errors, warnings + + def validate_consistency(self, req_content: str, design_content: str, + task_content: str) -> Tuple[List[str], List[str]]: + """Check consistency across documents""" + errors = [] + warnings = [] + + # Extract requirement IDs from requirements doc + req_ids = set() + req_pattern = r"### Requirement (\d+)|### REQ-(\d+)" + for match in re.finditer(req_pattern, req_content): + req_id = match.group(1) or match.group(2) + req_ids.add(f"REQ-{req_id}") + + # Check if requirements are referenced in tasks + for req_id in req_ids: + if req_id not in task_content: + warnings.append(f"{req_id} not referenced in any tasks") + + # Extract components from design + component_pattern = r"### .*(?:Service|Component|Manager|Engine|Handler)" + components = re.findall(component_pattern, design_content) + + # Check if major components have corresponding tasks + for component in components: + component_name = component.replace("### ", "").strip() + if component_name.lower() not in task_content.lower(): + warnings.append(f"Component '{component_name}' not mentioned in tasks") + + return errors, warnings + + def validate_all(self, req_file: str, design_file: str, + task_file: str) -> Dict[str, Tuple[List[str], List[str]]]: + """Validate all three documents""" + results = {} + + # Read files + with open(req_file, 'r') as f: + req_content = f.read() + with open(design_file, 'r') as f: + design_content = f.read() + with open(task_file, 'r') as f: + task_content = f.read() + + # Validate individual documents + results['requirements'] = self.validate_requirements(req_content) + results['design'] = self.validate_design(design_content) + results['tasks'] = self.validate_tasks(task_content) + + # Validate consistency + results['consistency'] = self.validate_consistency( + req_content, design_content, task_content + ) + + return results + +def 
print_validation_results(results: Dict[str, Tuple[List[str], List[str]]]): + """Print validation results in a formatted way""" + + total_errors = 0 + total_warnings = 0 + + for doc_name, (errors, warnings) in results.items(): + print(f"\n{'='*50}") + print(f"Validation Results: {doc_name.upper()}") + print('='*50) + + if errors: + print(f"\n❌ Errors ({len(errors)}):") + for error in errors: + print(f" - {error}") + total_errors += len(errors) + else: + print("\n✅ No errors found") + + if warnings: + print(f"\n⚠️ Warnings ({len(warnings)}):") + for warning in warnings: + print(f" - {warning}") + total_warnings += len(warnings) + else: + print("\n✅ No warnings found") + + # Summary + print(f"\n{'='*50}") + print("SUMMARY") + print('='*50) + + if total_errors == 0 and total_warnings == 0: + print("✅ All documents are valid and complete!") + else: + print(f"Total Errors: {total_errors}") + print(f"Total Warnings: {total_warnings}") + + if total_errors > 0: + print("\n⚠️ Please fix errors before using these documents") + else: + print("\n📝 Review warnings to improve document quality") + +def main(): + parser = argparse.ArgumentParser(description="Validate project planning documents") + parser.add_argument("--requirements", "-r", default="requirements.md", + help="Path to requirements document") + parser.add_argument("--design", "-d", default="design.md", + help="Path to design document") + parser.add_argument("--tasks", "-t", default="tasks.md", + help="Path to tasks/implementation plan") + + args = parser.parse_args() + + # Check if files exist + for filepath, name in [(args.requirements, "Requirements"), + (args.design, "Design"), + (args.tasks, "Tasks")]: + if not os.path.exists(filepath): + print(f"❌ {name} file not found: {filepath}") + return 1 + + # Validate documents + validator = DocumentValidator() + results = validator.validate_all(args.requirements, args.design, args.tasks) + + # Print results + print_validation_results(results) + + # Return exit code based on errors + total_errors = sum(len(errors) for errors, _ in results.values()) + return 1 if total_errors > 0 else 0 + +if __name__ == "__main__": + exit(main())
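+
+# Example usage (illustrative paths, run from the repository root):
+#
+#   python scripts/generate_project_docs.py "Acme Portal" --type web-app --output docs
+#   python scripts/validate_documents.py -r docs/requirements.md -d docs/design.md -t docs/tasks.md
+#
+# main() returns 1 when any document has validation errors, so the validator
+# can gate a CI step on planning-document quality.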