Initial commit

Commit 7f42d64263 by Zhongwei Li, 2025-11-29 17:50:59 +08:00
19 changed files with 2908 additions and 0 deletions

.claude-plugin/plugin.json Normal file

@@ -0,0 +1,12 @@
{
"name": "architecture-skills",
"description": "Collection of architectural documentation and specification generation skills",
"version": "0.0.0-2025.11.28",
"author": {
"name": "George A Puiu",
"email": "puiu.adrian@gmail.com"
},
"skills": [
"./skills/specification-architect"
]
}

README.md Normal file

@@ -0,0 +1,3 @@
# architecture-skills
Collection of architectural documentation and specification generation skills

plugin.lock.json Normal file

@@ -0,0 +1,104 @@
{
"$schema": "internal://schemas/plugin.lock.v1.json",
"pluginId": "gh:adrianpuiu/specification-document-generator:architecture-skills",
"normalized": {
"repo": null,
"ref": "refs/tags/v20251128.0",
"commit": "a05ffc0b61bcf2dce213b9e3ba922d1815a65328",
"treeHash": "ea165f6a6b3b2e9cc1e06e1d131635743f7935b17b60aa6112a00b7ecfd31855",
"generatedAt": "2025-11-28T10:13:01.882699Z",
"toolVersion": "publish_plugins.py@0.2.0"
},
"origin": {
"remote": "git@github.com:zhongweili/42plugin-data.git",
"branch": "master",
"commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
"repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
},
"manifest": {
"name": "architecture-skills",
"description": "Collection of architectural documentation and specification generation skills"
},
"content": {
"files": [
{
"path": "README.md",
"sha256": "aa46d2fd2965f0167381040bd508f0bd3fa404410c70c3b6f8bfa641a4925de5"
},
{
"path": ".claude-plugin/plugin.json",
"sha256": "ffbef060d9408d45a42636730dcd2a7c0a1fbaa6f793c18b4d7dc3fc314ee92d"
},
{
"path": "skills/specification-architect/validate.bat",
"sha256": "fab04fc5e2cf1a8c0928b6e0cf5c808a211dbf8e206537dfa0f2f6c8a244ab38"
},
{
"path": "skills/specification-architect/validate.sh",
"sha256": "2ba7305a92e7696e0a522e6cbe942b18d3da600ae80c842cba3c0455086da0dd"
},
{
"path": "skills/specification-architect/validate_specifications.py",
"sha256": "429531bd3d64e5d712ad51897564325d6023290a66fb4cff8602d24150df8453"
},
{
"path": "skills/specification-architect/plugin.json",
"sha256": "ac420827bbe164e48f9a663864e55b1b7052f15b0b1343ce2c36b70f0e3ad78d"
},
{
"path": "skills/specification-architect/README.md",
"sha256": "942130a3dc0f15008e125373754bb8984cfcd2a93016c429f2dedc841dc99ffd"
},
{
"path": "skills/specification-architect/SKILL.md",
"sha256": "caa270d54420e9bd0e1d9e7ed590d6c10c7bae07a622e08ace3411a1db5f03ff"
},
{
"path": "skills/specification-architect/VALIDATION_SCRIPTS_README.md",
"sha256": "e23e0d50142625c4b6860691efd0224c71a1cb9be75eb425bd45c0dd3777fa2f"
},
{
"path": "skills/specification-architect/references/document_templates.md",
"sha256": "2b1130d5acb9bc10c23b6d7fc33de89529ebbc55ae2b25a5e5cd709058d7df12"
},
{
"path": "skills/specification-architect/scripts/traceability_validator.py",
"sha256": "8d896f3fd28f47171c4af9c141193ff1b66a4ef591dfaea981a9e6296aef417d"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_tasks.md",
"sha256": "d23924fbc0b78710d062eb1891391c31575da6d855ac98cfbe265c798828272a"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_design.md",
"sha256": "c8b76c46cf98eaa08ea04a22206f2c7fdd6b7dc8629f10f432b7f4b53140be4f"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_requirements.md",
"sha256": "7e56d4ca9196f4caea398ab68f35b995c7d9a663e52c25c4f3ff176319e3dea0"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_blueprint.md",
"sha256": "03dc691cb51c2d14545d014bf2fa1b15c7b0cfccc5a1cd9033d3423c717cbb49"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_research.md",
"sha256": "40913d76ad2002b9303d3c52db215c86a72319d0f2f3621de3bba03791d776d2"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_verifiable_research.md",
"sha256": "5a091a2bd64092b1687bd72bbea705e9ab795aaf5dc0ee79c25ffbc836a2a848"
},
{
"path": "skills/specification-architect/assets/sample_outputs/example_validation.md",
"sha256": "e623b7d38c9dfd3d9ba40fd37787540d4f307f2b1bf707b2e09868b9af497461"
}
],
"dirSha256": "ea165f6a6b3b2e9cc1e06e1d131635743f7935b17b60aa6112a00b7ecfd31855"
},
"security": {
"scannedAt": null,
"scannerVersion": null,
"flags": []
}
}

skills/specification-architect/README.md Normal file

@@ -0,0 +1,78 @@
# Specification Architect Skill
An AI skill that generates rigorous, evidence-based architectural documentation with complete traceability.
## Quick Start
### Generate Specification Documents
The skill follows a 6-phase sequential process:
1. **Phase 0: Verifiable Research** (research.md) - Evidence-based technology research with citations
2. **Phase 1: Blueprint** (blueprint.md) - Component architecture and data flow
3. **Phase 2: Requirements** (requirements.md) - Acceptance criteria with component assignments
4. **Phase 3: Design** (design.md) - Detailed component specifications
5. **Phase 4: Tasks** (tasks.md) - Implementation tasks with requirement traceability
6. **Phase 5: Validation** (validation.md) - Automated validation results
### Validation Commands
```bash
# Primary validation - ensures 100% requirements coverage
python validate_specifications.py
# With options
python validate_specifications.py --path ./specs --verbose --generate-validation
# Advanced traceability validation
python scripts/traceability_validator.py
```
### Cross-Platform Helpers
```bash
# Linux/macOS
./validate.sh --verbose --generate
# Windows
validate.bat --verbose --generate
```
## Key Files
- **validate_specifications.py** - Main validation script
- **scripts/traceability_validator.py** - Advanced traceability validation
- **SKILL.md** - Complete skill documentation
- **references/document_templates.md** - Template examples and format specifications
- **assets/sample_outputs/** - Example generated documents
## Validation Exit Codes
- **0**: Success (100% coverage achieved)
- **1**: Failure (missing files, incomplete coverage, format errors)
## Evidence-Based Research Protocol
This skill implements a strict anti-"research slop" protocol:
1. **Search THEN Browse**: Use WebSearch to find sources, then WebFetch to read actual content
2. **Cite Every Claim**: Every factual statement must end with `[cite:INDEX]` citation
3. **Verify Sources**: Read full source content, not just search snippets
4. **Auditable Trail**: Complete citation trail from claim to source
## Quality Assurance
The validation system ensures:
- All required documents exist
- Component names are consistent across documents
- Requirements have 100% task coverage
- Citations follow proper format
- Templates are correctly implemented
## License
MIT
## Author
George A Puiu (puiu.adrian@gmail.com)

skills/specification-architect/SKILL.md Normal file

@@ -0,0 +1,328 @@
---
name: specification-architect
description: A rigorous, traceability-first system that generates five interconnected architectural documents (blueprint.md, requirements.md, design.md, tasks.md, and validation.md) with complete requirements-to-implementation traceability. Use this skill when users need to architect systems, create technical specifications, or develop structured project documentation with guaranteed traceability.
---
# Specification Architect AI
## Overview
This skill implements a rigorous, evidence-based system for generating architectural documentation that eliminates "research slop" and prevents AI-generated misinformation. It produces five interconnected markdown documents where every technological decision is backed by verifiable sources and complete traceability from research through implementation tasks.
**Core Principle**: Every claim must be supported by evidence. No AI-generated "facts" without verification.
## When to Use This Skill
Use this skill when users request:
- System architecture documentation
- Technical specifications for software projects
- Requirements analysis and traceability
- Implementation planning with validation
- Project documentation with structured methodology
## Prompt Optimization Guidelines
**The quality of architectural specifications is directly proportional to the clarity of upfront goals and boundaries.**
### For Best Results, Include in Your Request:
1. **Clear Business Objectives**
- What problem are you solving?
- Who are the users/stakeholders?
- What does success look like?
2. **Specific Constraints and Boundaries**
- Technology preferences or restrictions
- Performance requirements
- Security/compliance requirements
- Integration constraints
3. **Scope Definition**
- Must-have features vs. nice-to-haves
- Explicit out-of-scope items
- Timeline and resource constraints
4. **Context and Background**
- Existing systems to integrate with
- Team capabilities and expertise
- Previous attempts or solutions
### Example Effective Prompt:
```
"I need to architect a customer support ticket system for a mid-sized SaaS company.
The system must handle 10,000 tickets/month, integrate with our existing Salesforce CRM,
and comply with GDPR requirements. We need email integration, knowledge base search,
and reporting dashboards. Please do NOT include live chat or phone support features.
Our team specializes in Python/React and we need this deployed on AWS."
```
**Why This Works**:
- ✅ Clear business context (customer support for SaaS)
- ✅ Specific constraints (10k tickets/month, GDPR, AWS)
- ✅ Technology preferences (Python/React)
- ✅ Clear scope boundaries (no live chat/phone)
- ✅ Integration requirements (Salesforce)
- ✅ Success metrics (email, knowledge base, reporting)
## How to Use This Skill
Follow the six-phase process (Phase 0 through Phase 5) in sequence:
### Phase 0: Verifiable Research and Technology Selection
**GOAL**: To produce a technology proposal where every claim is supported by verifiable, browsed sources, thereby eliminating "research slop" and grounding the architecture in factual evidence.
**CRITICAL**: This phase prevents AI-generated misinformation that could lead to serious professional consequences. **You MUST complete this phase with proper verification before proceeding.**
#### Strict Protocol:
1. **Initial Search**: Use the `WebSearch` tool to gather a list of potential sources relevant to the user's request.
2. **Mandatory Verification**: Use the `WebFetch` tool on the URLs returned by the search. **You MUST NOT rely on search snippets alone.** You must read the content of the pages to confirm the information.
3. **Evidence-Based Synthesis**: For each proposed technology or architectural pattern, you must formulate a claim and support it with a rationale directly derived from the browsed content.
4. **Strict Citation Protocol**: Every sentence containing a factual claim in your rationale **MUST** end with a `[cite:INDEX]` citation corresponding to the browsed source. This creates an auditable trail from claim to evidence.
#### Research Process:
1. **Analyze User Request**
- Identify core domain (e.g., e-commerce, IoT, fintech, healthcare)
- Extract key requirements (scale, performance, security, integrations)
- Note any specific technology constraints or preferences
2. **Execute Research with Verification**
- Use `WebSearch` to find relevant sources for domain architecture patterns
- Use `WebFetch` to browse and verify each source's content
- Research technology options with current best practices
- Investigate integration approaches and deployment strategies
3. **Synthesize Evidence-Based Recommendations**
- Create technology recommendations ONLY from verified sources
- Support every claim with citations from browsed content
- Compare options using evidence, not assumptions
- Justify decisions with specific source references
#### Strict Output Template:
```markdown
# Verifiable Research and Technology Proposal
## 1. Core Problem Analysis
[A brief, 1-2 sentence analysis of the user's request and the primary technical challenges.]
## 2. Verifiable Technology Recommendations
| Technology/Pattern | Rationale & Evidence |
|---|---|
| **[Technology Name]** | [Rationale derived from browsed sources, with every factual claim cited.] |
| **[Pattern Name]** | [Rationale derived from browsed sources, with every factual claim cited.] |
## 3. Browsed Sources
- [1] [URL of browsed source 1]
- [2] [URL of browsed source 2]
- [...]
```
**Citation Requirements**:
- Every factual claim MUST end with `[cite:INDEX]` citation
- Citations must correspond to numbered browsed sources
- No technology recommendations allowed without source evidence
- All rationales must be derived from actual browsed content
**Example of Proper Citation**:
"Node.js excels at real-time applications due to its event-driven, non-blocking I/O model [cite:1]. TypeScript adds static typing that reduces runtime errors by approximately 15% in large codebases [cite:2]."
**Approval Gate**: "Research complete. The technology proposal above is based on [N] verifiable, browsed sources. Every claim is cited and traceable to evidence. Proceed to define the architectural blueprint?"
### Phase 1: Architectural Blueprint (blueprint.md)
**PREREQUISITE**: Approval of the technology stack
**GOAL**: To establish a high-level map of the system, its components, interactions, and boundaries
**CRITICAL SUCCESS FACTORS**:
- **Component Clarity**: Each component must have a single, well-defined responsibility
- **Data Flow Visualization**: Map how data moves through the system from input to output
- **Integration Points**: Clearly define all APIs, protocols, and external system connections
- **Boundary Setting**: Explicitly define what's in scope vs. out of scope to prevent scope creep
**STRICT TEMPLATE**:
```markdown
# Architectural Blueprint
## 1. Core Objective
[Single paragraph defining the primary goal and what success looks like.]
## 2. System Scope and Boundaries
### In Scope
- [Specific feature 1 that WILL be built]
- [Specific capability 2 that WILL be implemented]
- [Integration 1 that WILL be supported]
### Out of Scope
- [Feature 1 that will NOT be built - prevents scope creep]
- [External system 1 that will NOT be integrated]
- [Technology 1 that will NOT be used]
## 3. Core System Components
| Component Name | Single Responsibility |
|---|---|
| **[ComponentName1]** | [One clear, focused responsibility - what this component DOES] |
| **[ComponentName2]** | [One clear, focused responsibility - what this component DOES] |
| **[ComponentName3]** | [One clear, focused responsibility - what this component DOES] |
## 4. High-Level Data Flow
```mermaid
graph TD
A[External Input/User] --> B[ComponentName1]
B --> C[ComponentName2]
C --> D[ComponentName3]
D --> E[External Output/Result]
%% Style component nodes for clarity (B = ComponentName1, C = ComponentName2, D = ComponentName3)
style B fill:#e1f5fe
style C fill:#f3e5f5
style D fill:#e8f5e8
```
## 5. Key Integration Points
- **[ComponentName1] ↔ [ComponentName2]**: [API/Protocol - e.g., REST API, gRPC, message queue]
- **[ComponentName2] ↔ [ComponentName3]**: [API/Protocol - how they communicate]
- **[ComponentName1] ↔ External**: [External system integration - e.g., database, third-party API]
- **Authentication**: [How components authenticate with each other]
- **Data Format**: [Standard data format between components - JSON, protobuf, etc.]
```
**Quality Gates**:
- Are component responsibilities clear and non-overlapping?
- Does the data flow diagram show the complete journey from input to output?
- Are all integration points clearly specified with protocols?
- Are in/out scope boundaries unambiguous?
**Approval Gate**: "Architectural blueprint complete with clear component mapping, data flow visualization, and integration points. The component names defined here will be used consistently across all documents. Proceed to generate requirements?"
### Phase 2: Requirements Generation (requirements.md)
**PREREQUISITE**: Approval of the blueprint
**RULE**: All `[System Component]` placeholders MUST use the exact component names from the blueprint
**STRICT TEMPLATE**:
```markdown
# Requirements Document
[Introduction and Glossary...]
## Requirements
### Requirement 1: [Feature Name]
#### Acceptance Criteria
1. WHEN [trigger], THE **[ComponentName1]** SHALL [specific, testable behavior].
```
**Approval Gate**: "Requirements documented with [N] requirements and [M] acceptance criteria, each assigned to a specific component. Proceed to detailed design?"
### Phase 3: Detailed Design (design.md)
**PREREQUISITE**: Approval of requirements
**GOAL**: To elaborate on the blueprint with detailed specifications for each component
**STRICT TEMPLATE**:
```markdown
# Design Document
[Overview, Principles...]
## Component Specifications
#### Component: [ComponentName1]
**Purpose**: [Responsibility from blueprint]
**Location**: `path/to/component.py`
**Interface**: [Code block with methods and requirement references, e.g., `Implements Req 1.1`]
```
**Approval Gate**: "Detailed design complete. All components from the blueprint have been specified. Proceed to generate implementation tasks?"
### Phase 4: Task Decomposition (tasks.md)
**PREREQUISITE**: Approval of the design
**GOAL**: To create a granular, actionable implementation plan
**STRICT TEMPLATE**:
```markdown
# Implementation Plan
- [ ] 1. Implement the [ComponentName1]
- [ ] 1.1 [Specific action, e.g., "Create class in file.py"]
- [ ] 1.2 [Specific action, e.g., "Implement method_x()"]
- _Requirements: 1.1, 1.2, 2.3_
```
**Approval Gate**: "Implementation plan created with [N] tasks. Proceed to final validation?"
### Phase 5: Validation and Traceability (validation.md)
**PREREQUISITE**: Generation of all previous documents
**GOAL**: To perform a final, automated check that guarantees complete traceability from requirements to implementation tasks
**STRICT TEMPLATE**:
```markdown
# Validation Report
## 1. Requirements to Tasks Traceability Matrix
| Requirement | Acceptance Criterion | Implementing Task(s) | Status |
|---|---|---|---|
| 1. [Name] | 1.1 | Task 2, Task 5 | Covered |
| | 1.2 | Task 2, Task 3 | Covered |
| ... | ... | ... | ... |
| X. [Name] | X.Y | Task Z | Covered |
## 2. Coverage Analysis
### Summary
- **Total Acceptance Criteria**: [M]
- **Criteria Covered by Tasks**: [M]
- **Coverage Percentage**: 100%
### Detailed Status
- **Covered Criteria**: A list of all X.Y references that are successfully mapped to at least one task.
- **Missing Criteria**: A list of any X.Y references from `requirements.md` that were NOT found in any task's `_Requirements_` tag. **This list must be empty to pass validation.**
- **Invalid References**: A list of any task references (e.g., `_Requirements: 9.9_`) that do not correspond to a real acceptance criterion. **This list must be empty to pass validation.**
## 3. Final Validation
All [M] acceptance criteria are fully traced to implementation tasks. The plan is validated and ready for execution.
```
**Final Approval Gate**: "Validation complete. Traceability matrix confirms 100% coverage. Type 'execute' to begin implementation."
## Key Principles
1. **Traceability First**: Every requirement must be traced to implementation tasks
2. **Approval Gates**: Get explicit approval before proceeding to next phase
3. **Template Adherence**: Use the exact document templates provided
4. **Component Consistency**: Use identical component names across all documents
5. **Validation Guarantees**: Ensure 100% coverage before completion
## Research Guidelines
When conducting research in Phase 0:
1. **Identify Core Challenges**: Analyze the user's request to determine technical domains
2. **Search Current Best Practices**: Use `WebSearch` with specific queries about:
- Current architectural patterns for the domain
- Recommended technology stacks
- Industry standards and conventions
- Recent advancements or alternatives
3. **Evaluate Options**: Compare multiple approaches and justify the selected stack
4. **Document Rationale**: Briefly explain why each technology was chosen
## Validation Process
The validation phase ensures:
1. **Complete Coverage**: Every acceptance criterion from requirements.md is referenced in at least one task
2. **Valid References**: All task requirement references correspond to real acceptance criteria
3. **Traceability Matrix**: Clear mapping from requirements through tasks
4. **100% Success Rate**: Validation only passes when coverage is complete
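For illustration, the core of this check reduces to set arithmetic over criterion IDs. The sketch below uses hypothetical inputs and is not the actual implementation in `validate_specifications.py` or `scripts/traceability_validator.py`, which may differ in detail.
```python
# Minimal sketch of the coverage check, assuming criterion IDs have already been
# parsed out of requirements.md and the _Requirements: tags out of tasks.md.
defined_criteria = {"1.1", "1.2", "2.1", "2.2"}      # every acceptance criterion ID
referenced_criteria = {"1.1", "1.2", "2.1", "9.9"}   # every ID cited by a task

covered = defined_criteria & referenced_criteria      # traced to at least one task
missing = defined_criteria - referenced_criteria      # must be empty to pass
invalid = referenced_criteria - defined_criteria      # must be empty to pass

coverage = 100.0 * len(covered) / len(defined_criteria) if defined_criteria else 0.0
is_valid = not missing and not invalid
print(f"Coverage: {coverage:.1f}%  missing={sorted(missing)}  invalid={sorted(invalid)}")
```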
## Files Referenced
- `scripts/traceability_validator.py` - Python script for automated validation
- `references/document_templates.md` - Detailed templates and examples
- `assets/sample_outputs/` - Example of complete specification documents
## Execution Workflow
1. **Phase 0**: Research → Get approval
2. **Phase 1**: Blueprint → Get approval
3. **Phase 2**: Requirements → Get approval
4. **Phase 3**: Design → Get approval
5. **Phase 4**: Tasks → Get approval
6. **Phase 5**: Validation → Get final approval
Each phase must be completed and approved before proceeding to the next phase. The final validation confirms 100% traceability and coverage.

skills/specification-architect/VALIDATION_SCRIPTS_README.md Normal file

@@ -0,0 +1,403 @@
# Validation Scripts Documentation
This directory includes automated validation scripts that verify complete traceability across all five specification documents.
## Quick Start
### Linux/macOS
```bash
# Make script executable
chmod +x validate.sh
# Basic validation
./validate.sh
# Validate specific directory
./validate.sh --path ./specs
# Generate validation.md
./validate.sh --generate
# Verbose output
./validate.sh --verbose
```
### Windows
```cmd
REM Basic validation
validate.bat
REM Validate specific directory
validate.bat --path .\specs
REM Generate validation.md
validate.bat --generate
REM Verbose output
validate.bat --verbose
```
### Python (Cross-Platform)
```bash
# Basic validation
python validate_specifications.py
# Validate specific directory
python validate_specifications.py --path ./specs
# Generate validation.md
python validate_specifications.py --generate-validation
# JSON output
python validate_specifications.py --json
# Verbose mode
python validate_specifications.py --verbose
```
## What Gets Validated
The scripts perform comprehensive validation checks:
1. **File Presence**
- blueprint.md exists
- requirements.md exists
- design.md exists
- tasks.md exists
2. **Component Consistency**
- All components from blueprint.md are used in requirements.md
- No undefined components are referenced
- Component names match exactly (case-sensitive)
3. **Requirements Format**
- Requirement numbers: "### Requirement N: [Name]"
- Acceptance criteria: "N. WHEN ... THE **ComponentName** SHALL ..."
- Criteria use decimal notation (1.1, 1.2, 2.1, etc.)
4. **Task Requirements Tags**
- All tasks include `_Requirements: X.Y, X.Z, ..._` tags
- All referenced criteria IDs are valid
- Format is correct with underscores and spaces
5. **Traceability Coverage**
- Every acceptance criterion is referenced in at least one task
- No orphaned requirements exist
- No invalid requirement references
6. **Coverage Calculation**
- Total acceptance criteria count
- Number covered by tasks
- Coverage percentage (must be 100%)
## Output Examples
### Success (100% Coverage)
```
================================================================================
SPECIFICATION VALIDATION REPORT
================================================================================
SUMMARY
--------------------------------------------------------------------------------
Total Acceptance Criteria: 12
Criteria Covered by Tasks: 12
Coverage Percentage: 100.0%
COVERAGE STATUS
--------------------------------------------------------------------------------
✓ Covered Criteria: 12
✗ Missing Criteria: 0
! Invalid References: 0
VALIDATION STATUS
--------------------------------------------------------------------------------
✅ VALIDATION PASSED
All acceptance criteria are fully traced to implementation tasks.
```
### Failure (Incomplete Coverage)
```
MISSING CRITERIA (Not covered by any task)
--------------------------------------------------------------------------------
- 3.1
- 4.2
VALIDATION STATUS
--------------------------------------------------------------------------------
❌ VALIDATION FAILED
- 2 acceptance criteria are not covered by tasks
```
## Command Options
### --path DIR
Path to directory containing specification documents.
**Default**: `.` (current directory)
```bash
python validate_specifications.py --path /path/to/specs
```
### --verbose
Enable detailed output showing extraction progress.
```bash
./validate.sh --verbose
```
Shows:
- Components found
- Requirements extracted
- Tasks parsed
- Validation steps
### --generate-validation
Generate or update `validation.md` file with report.
```bash
python validate_specifications.py --generate-validation
```
Creates `validation.md` with:
- Traceability matrix
- Coverage analysis
- Validation status
### --json
Output results as JSON instead of human-readable text.
```bash
python validate_specifications.py --json
```
Output:
```json
{
"total_criteria": 12,
"covered_criteria": 12,
"missing_criteria": [],
"coverage_percentage": 100.0,
"is_valid": true,
"errors": [],
"warnings": []
}
```
## Exit Codes
- **0** = Success (validation passed, 100% coverage)
- **1** = Failure (incomplete coverage or errors)
Use in scripts:
```bash
python validate_specifications.py --path ./specs
if [ $? -eq 0 ]; then
echo "Deployment approved"
./deploy.sh
else
echo "Validation failed - fix requirements first"
exit 1
fi
```
## Document Format Requirements
For validation to work correctly:
### Blueprint.md Format
```markdown
## 3. Core System Components
| Component Name | Responsibility |
|---|---|
| **AuthenticationComponent** | Handles user authentication |
| **DatabaseAdapter** | Manages database connections |
```
### Requirements.md Format
```markdown
### Requirement 1: User Authentication
#### Acceptance Criteria
1. WHEN user submits credentials, THE **AuthenticationComponent** SHALL validate them.
2. WHEN validation succeeds, THE **AuthenticationComponent** SHALL return a session token.
```
### Tasks.md Format
```markdown
## Task 1: Implement AuthenticationComponent
- [ ] 1.1 Create AuthenticationComponent class
- [ ] 1.2 Implement validation method
- [ ] 1.3 Add unit tests
- _Requirements: 1.1, 1.2_
```
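These formats map directly onto simple pattern matching. The sketch below shows one plausible way to extract components, criteria, and requirement tags with regular expressions; the patterns are illustrative assumptions, not necessarily the exact ones used by `validate_specifications.py`.
```python
import re
from typing import Set

# Illustrative patterns for the three documented formats (assumed, not authoritative).
COMPONENT_ROW = re.compile(r"^\|\s*\*\*([A-Za-z0-9_]+)\*\*\s*\|", re.MULTILINE)
CRITERION_LINE = re.compile(r"^\d+\.\s+WHEN .*?THE \*\*([A-Za-z0-9_]+)\*\* SHALL", re.MULTILINE)
REQUIREMENTS_TAG = re.compile(r"_Requirements:\s*([0-9., ]+)_")

def extract_components(blueprint_text: str) -> Set[str]:
    """Component names defined in the blueprint's component table."""
    return set(COMPONENT_ROW.findall(blueprint_text))

def extract_criterion_components(requirements_text: str) -> Set[str]:
    """Component names referenced by acceptance criteria in requirements.md."""
    return set(CRITERION_LINE.findall(requirements_text))

def extract_referenced_criteria(tasks_text: str) -> Set[str]:
    """Criterion IDs (e.g. '1.2') cited by _Requirements: tags in tasks.md."""
    ids: Set[str] = set()
    for tag in REQUIREMENTS_TAG.findall(tasks_text):
        ids.update(part.strip() for part in tag.split(",") if part.strip())
    return ids
```
Component consistency then amounts to checking that every name returned by `extract_criterion_components()` also appears in `extract_components()`.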
## Troubleshooting
### Components not extracted
**Issue**: "No components found in blueprint.md"
**Solution**: Verify format matches exactly:
- `| **ComponentName** | description |`
- Component name must be between `**` markers
- Must be in a markdown table
### Requirements not found
**Issue**: "No requirements found"
**Solution**: Use exact format:
- `### Requirement N: [Name]`
- Criteria: `N. WHEN ... THE **Component** SHALL ...`
- Must have numbers and exact spacing
### Missing criteria reported
**Issue**: Validation fails with missing criteria
**Solution**: Add `_Requirements:` tags to all tasks:
- Format: `_Requirements: 1.1, 1.2, 3.1_`
- Underscore prefix and suffix
- Space after colon
- IDs separated by commas
### Invalid component names
**Issue**: "Unknown component: ComponentName"
**Solution**: Check spelling and capitalization:
- Use exact names from blueprint
- Names are case-sensitive
- Verify in requirements criteria
## Integration Examples
### GitHub Actions
```yaml
name: Validate Specifications
on: [push, pull_request]
jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- name: Validate specifications
run: |
python validate_specifications.py \
--path ./specs \
--generate-validation
```
### GitLab CI
```yaml
validate_specs:
image: python:3.9
script:
- python validate_specifications.py --path ./specs --generate-validation
only:
changes:
- specs/**
```
### Local Pre-commit Hook
```bash
#!/bin/bash
python validate_specifications.py --path ./specs
if [ $? -ne 0 ]; then
echo "Specifications validation failed!"
exit 1
fi
```
## Performance
- Validates 100+ requirements in <1 second
- Handles 1000+ tasks efficiently
- Minimal memory usage
- No external dependencies
## Requirements
- **Python**: 3.7 or higher
- **Dependencies**: None (standard library only)
## File Structure
```
specification-architect/
├── validate_specifications.py         # Main validation script
├── validate.bat                        # Windows helper
├── validate.sh                         # Linux/macOS helper
├── VALIDATION_SCRIPTS_README.md        # This file
├── SKILL.md                            # Full skill documentation
├── README.md                           # Quick start
├── plugin.json                         # Skill manifest
├── references/
│   └── document_templates.md           # Document templates and examples
├── scripts/
│   └── traceability_validator.py       # Advanced traceability validation
└── assets/
    └── sample_outputs/                 # Example generated documents
```
## Complete Validation Workflow
1. **Write Specifications**
- Create blueprint.md
- Create requirements.md
- Create design.md
- Create tasks.md
2. **Run Validation**
```bash
python validate_specifications.py --path ./specs --verbose
```
3. **Review Results**
- Check coverage percentage
- Review missing criteria (if any)
- Review invalid references (if any)
4. **Fix Issues**
- Add missing requirement tags to tasks
- Fix invalid requirement references
- Update requirements if needed
5. **Validate Again**
```bash
python validate_specifications.py --path ./specs
```
6. **Generate Report**
```bash
python validate_specifications.py --path ./specs --generate-validation
```
7. **Ready to Execute**
- validation.md confirms 100% coverage
- Commit all spec files
- Begin implementation
## API Usage (Python)
Use the validator as a Python module:
```python
from validate_specifications import SpecificationValidator
# Create validator
validator = SpecificationValidator("./specs", verbose=True)
# Run validation
result = validator.validate_all()
# Check results
if result.is_valid:
print("✅ Validation passed!")
print(f"Coverage: {result.coverage_percentage}%")
else:
print(f"❌ {len(result.missing_criteria)} missing criteria")
# Generate and save validation.md
validator.save_validation_markdown()
```

skills/specification-architect/assets/sample_outputs/example_blueprint.md Normal file

@@ -0,0 +1,60 @@
# Architectural Blueprint
## 1. Core Objective
To create a comprehensive task management system that enables users to create, assign, track, and complete tasks across multiple projects with real-time collaboration features.
## 2. System Scope and Boundaries
### In Scope
- User authentication and authorization
- Task creation, assignment, and management
- Project organization and team collaboration
- Real-time notifications and updates
- Basic reporting and analytics
### Out of Scope
- Advanced project management features (Gantt charts, critical path)
- File attachments and document management
- Integration with third-party project management tools
- Mobile application development
- Advanced workflow automation
## 3. Core System Components
| Component Name | Responsibility |
|---|---|
| **UserAuthenticationService** | Handles user registration, login, and session management |
| **TaskManagementEngine** | Core task CRUD operations and business logic |
| **ProjectOrganizer** | Manages project creation, membership, and permissions |
| **NotificationService** | Handles real-time notifications and email alerts |
| **ReportingModule** | Generates basic reports and analytics dashboards |
## 4. High-Level Data Flow
```mermaid
graph TD
A[User Interface] --> B[UserAuthenticationService]
A --> C[TaskManagementEngine]
A --> D[ProjectOrganizer]
B --> E[User Database]
C --> F[Task Database]
D --> G[Project Database]
C --> H[NotificationService]
H --> I[Email Service]
H --> J[WebSocket Service]
C --> K[ReportingModule]
K --> L[Analytics Database]
%% Node styling: B=UserAuthenticationService, C=TaskManagementEngine, D=ProjectOrganizer, H=NotificationService, K=ReportingModule
style B fill:#e1f5fe
style C fill:#f3e5f5
style D fill:#e8f5e8
style H fill:#fff3e0
style K fill:#fce4ec
```
## 5. Key Integration Points
- **Authentication API**: JWT-based authentication between User Interface and UserAuthenticationService
- **Task API**: RESTful APIs between User Interface and TaskManagementEngine
- **Project API**: RESTful APIs between User Interface and ProjectOrganizer
- **Notification Gateway**: WebSocket connections for real-time updates
- **Email Service**: SMTP integration for email notifications
- **Database Connections**: PostgreSQL connections for all data storage components

skills/specification-architect/assets/sample_outputs/example_design.md Normal file

@@ -0,0 +1,393 @@
# Design Document
## Overview
This document provides detailed design specifications for the TaskMaster Pro task management system components.
## Design Principles
- **Single Responsibility**: Each component has a single, well-defined responsibility
- **Loose Coupling**: Components interact through well-defined interfaces
- **High Cohesion**: Related functionality is grouped together
- **Scalability**: Design supports future growth and expansion
- **Security**: All components implement proper authentication and authorization
## Component Specifications
### Component: UserAuthenticationService
**Purpose**: Handles user registration, login, and session management
**Location**: `src/services/auth/UserAuthenticationService.py`
**Interface**:
```python
class UserAuthenticationService:
"""
User authentication and authorization service
Implements: Req 1.1, 1.2, 1.3, 1.4, 7.1, 7.2
"""
def __init__(self, user_repository: UserRepository, email_service: EmailService):
"""Initialize authentication service with dependencies"""
self.user_repository = user_repository
self.email_service = email_service
self.jwt_secret = os.getenv('JWT_SECRET')
self.token_expiry = int(os.getenv('TOKEN_EXPIRY_HOURS', '24'))
def register_user(self, user_data: UserRegistrationData) -> AuthResult:
"""
Register a new user account with email verification
Implements: Req 1.1
"""
pass
def authenticate_user(self, credentials: LoginCredentials) -> AuthResult:
"""
Authenticate user and return JWT token
Implements: Req 1.2
"""
pass
def reset_password(self, email: str) -> PasswordResetResult:
"""
Initiate password reset process
Implements: Req 1.3
"""
pass
def validate_token(self, token: str) -> TokenValidationResult:
"""
Validate JWT token and extract user information
Implements: Req 1.4
"""
pass
def hash_password(self, password: str) -> str:
"""
Hash password using bcrypt
Implements: Req 7.2
"""
pass
```
**Dependencies**:
- UserRepository: Database access for user operations
- EmailService: Email sending functionality
- JWT library: Token generation and validation
- bcrypt: Password hashing
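As one possible concretisation of the bcrypt dependency (and of Req 7.2's 12-round minimum), the hashing helpers might look like the sketch below; this is illustrative only, the real logic belongs in `hash_password`.
```python
import bcrypt

def hash_password(password: str) -> str:
    """Hash a password with bcrypt using 12 rounds (Req 7.2)."""
    return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(rounds=12)).decode("utf-8")

def verify_password(password: str, password_hash: str) -> bool:
    """Check a plaintext password against a stored bcrypt hash."""
    return bcrypt.checkpw(password.encode("utf-8"), password_hash.encode("utf-8"))
```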
**Data Model**:
```python
from dataclasses import dataclass
from typing import Optional, List
from datetime import datetime
from enum import Enum
class UserRole(Enum):
ADMIN = "admin"
MANAGER = "manager"
MEMBER = "member"
@dataclass
class User:
"""User entity model"""
id: str
email: str
username: str
password_hash: str
role: UserRole
is_active: bool
email_verified: bool
created_at: datetime
last_login: Optional[datetime] = None
@dataclass
class UserRegistrationData:
"""User registration request data"""
email: str
username: str
password: str
confirm_password: str
@dataclass
class LoginCredentials:
"""User login credentials"""
email: str
password: str
@dataclass
class AuthResult:
"""Authentication operation result"""
success: bool
token: Optional[str] = None
user: Optional[User] = None
message: str = ""
```
### Component: TaskManagementEngine
**Purpose**: Core task CRUD operations and business logic
**Location**: `src/services/tasks/TaskManagementEngine.py`
**Interface**:
```python
class TaskManagementEngine:
"""
Task management and business logic engine
Implements: Req 2.1, 2.2, 2.3, 2.4, 6.1, 7.3
"""
def __init__(self, task_repository: TaskRepository,
notification_service: NotificationService,
auth_service: UserAuthenticationService):
"""Initialize task engine with dependencies"""
self.task_repository = task_repository
self.notification_service = notification_service
self.auth_service = auth_service
def create_task(self, task_data: TaskCreationData, user_id: str) -> TaskCreationResult:
"""
Create a new task with validation and assignment
Implements: Req 2.1
"""
pass
def update_task(self, task_id: str, updates: TaskUpdateData, user_id: str) -> TaskUpdateResult:
"""
Update existing task with change tracking
Implements: Req 2.2
"""
pass
def assign_task(self, task_id: str, assignee_id: str, assigner_id: str) -> TaskAssignmentResult:
"""
Assign task to user and send notification
Implements: Req 2.3
"""
pass
def change_task_status(self, task_id: str, new_status: TaskStatus, user_id: str) -> StatusChangeResult:
"""
Change task status and notify relevant users
Implements: Req 2.4
"""
pass
def get_user_tasks(self, user_id: str, filters: TaskFilters) -> List[Task]:
"""
Retrieve tasks for a specific user with filters
Implements: Req 6.1
"""
pass
```
**Dependencies**:
- TaskRepository: Database access for task operations
- NotificationService: Real-time notifications
- UserAuthenticationService: User validation and permissions
**Data Model**:
```python
from dataclasses import dataclass
from typing import Optional, List, Dict, Any
from datetime import datetime
from enum import Enum
class TaskStatus(Enum):
TODO = "todo"
IN_PROGRESS = "in_progress"
IN_REVIEW = "in_review"
COMPLETED = "completed"
CANCELLED = "cancelled"
class TaskPriority(Enum):
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
URGENT = "urgent"
@dataclass
class Task:
"""Task entity model"""
id: str
title: str
description: str
status: TaskStatus
priority: TaskPriority
assignee_id: Optional[str]
creator_id: str
project_id: str
due_date: Optional[datetime]
created_at: datetime
updated_at: datetime
completed_at: Optional[datetime] = None
    tags: Optional[List[str]] = None
    custom_fields: Optional[Dict[str, Any]] = None
@dataclass
class TaskCreationData:
"""Task creation request data"""
title: str
description: str
priority: TaskPriority
assignee_id: Optional[str]
project_id: str
due_date: Optional[datetime]
    tags: Optional[List[str]] = None
@dataclass
class TaskUpdateData:
"""Task update request data"""
title: Optional[str] = None
description: Optional[str] = None
priority: Optional[TaskPriority] = None
assignee_id: Optional[str] = None
due_date: Optional[datetime] = None
tags: Optional[List[str]] = None
```
### Component: NotificationService
**Purpose**: Handles real-time notifications and email alerts
**Location**: `src/services/notifications/NotificationService.py`
**Interface**:
```python
class NotificationService:
"""
Real-time notification and alert service
Implements: Req 4.1, 4.2, 4.3, 4.4, 6.4
"""
def __init__(self, websocket_manager: WebSocketManager,
email_service: EmailService,
notification_repository: NotificationRepository):
"""Initialize notification service with dependencies"""
self.websocket_manager = websocket_manager
self.email_service = email_service
self.notification_repository = notification_repository
def send_task_assignment_notification(self, task_id: str, assignee_id: str) -> NotificationResult:
"""
Send real-time notification for task assignment
Implements: Req 4.1
"""
pass
def send_deadline_reminder(self, task_id: str, assignee_id: str) -> NotificationResult:
"""
Send reminder notification for approaching deadline
Implements: Req 4.2
"""
pass
def broadcast_task_update(self, task_id: str, update_data: Dict[str, Any]) -> BroadcastResult:
"""
Broadcast real-time task updates to prevent conflicts
Implements: Req 4.3
"""
pass
def send_maintenance_notification(self, message: str, scheduled_time: datetime) -> NotificationResult:
"""
Send system maintenance notifications
Implements: Req 4.4
"""
pass
```
**Dependencies**:
- WebSocketManager: Real-time WebSocket connection management
- EmailService: Email notification delivery
- NotificationRepository: Database storage for notifications
## Integration Design
### API Contracts
```http
# REST API between User Interface and TaskManagementEngine
POST /api/tasks
Authorization: Bearer {jwt_token}
Content-Type: application/json
Request:
{
"title": "Complete user authentication",
"description": "Implement JWT-based authentication system",
"priority": "high",
"assignee_id": "user123",
"project_id": "proj456",
"due_date": "2024-01-15T17:00:00Z",
"tags": ["backend", "security"]
}
Response:
{
"id": "task789",
"status": "todo",
"created_at": "2024-01-01T12:00:00Z",
"assigned_at": "2024-01-01T12:00:00Z"
}
```
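For illustration, a client-side call against this contract might look like the following sketch; the base URL and token are placeholders, not part of the design.
```python
import requests

BASE_URL = "https://taskmaster.example.com"            # placeholder host
JWT_TOKEN = "<token issued by UserAuthenticationService>"

response = requests.post(
    f"{BASE_URL}/api/tasks",
    headers={"Authorization": f"Bearer {JWT_TOKEN}"},
    json={
        "title": "Complete user authentication",
        "description": "Implement JWT-based authentication system",
        "priority": "high",
        "assignee_id": "user123",
        "project_id": "proj456",
        "due_date": "2024-01-15T17:00:00Z",
        "tags": ["backend", "security"],
    },
    timeout=10,
)
response.raise_for_status()
print(response.json()["id"])  # e.g. "task789"
```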
### Database Schema
```sql
-- Users table for UserAuthenticationService
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
email VARCHAR(255) UNIQUE NOT NULL,
username VARCHAR(100) UNIQUE NOT NULL,
password_hash VARCHAR(255) NOT NULL,
role VARCHAR(20) NOT NULL DEFAULT 'member',
is_active BOOLEAN DEFAULT true,
email_verified BOOLEAN DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
last_login TIMESTAMP WITH TIME ZONE
);
-- Tasks table for TaskManagementEngine
CREATE TABLE tasks (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
title VARCHAR(255) NOT NULL,
description TEXT,
status VARCHAR(20) NOT NULL DEFAULT 'todo',
priority VARCHAR(10) NOT NULL DEFAULT 'medium',
assignee_id UUID REFERENCES users(id),
creator_id UUID NOT NULL REFERENCES users(id),
project_id UUID NOT NULL REFERENCES projects(id),
due_date TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
completed_at TIMESTAMP WITH TIME ZONE
);
-- Projects table for ProjectOrganizer
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL,
description TEXT,
owner_id UUID NOT NULL REFERENCES users(id),
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
```
### Security Implementation
```python
# JWT token validation middleware
import os
from functools import wraps

import jwt
from flask import jsonify, request

JWT_SECRET = os.getenv('JWT_SECRET')

def require_auth(func):
    """Decorator to require JWT authentication on an endpoint."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        token = request.headers.get('Authorization')
        if not token or not token.startswith('Bearer '):
            return jsonify({'error': 'Authentication required'}), 401
        token = token.split(' ')[1]
        try:
            payload = jwt.decode(token, JWT_SECRET, algorithms=['HS256'])
            request.user_id = payload['user_id']
            return func(*args, **kwargs)
        except jwt.ExpiredSignatureError:
            return jsonify({'error': 'Token expired'}), 401
        except jwt.InvalidTokenError:
            return jsonify({'error': 'Invalid token'}), 401
    return wrapper
```
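Applied to a route, the decorator would be used roughly as follows; the Flask app and endpoint shown here are illustrative assumptions, not part of the specified API surface.
```python
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/api/tasks", methods=["POST"])
@require_auth  # validates the Bearer token and sets request.user_id (see above)
def create_task():
    payload = request.get_json()
    return jsonify({"status": "created", "creator_id": request.user_id}), 201
```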

skills/specification-architect/assets/sample_outputs/example_requirements.md Normal file

@@ -0,0 +1,78 @@
# Requirements Document
## Introduction
This document defines the functional and non-functional requirements for the TaskMaster Pro task management system.
## Glossary
- **Task**: A unit of work that can be assigned to a user and tracked through completion
- **Project**: A collection of related tasks organized under a common goal
- **User**: An individual with login credentials who can interact with the system
- **Notification**: A message sent to users about task updates or assignments
- **UserAuthenticationService**: The component responsible for managing user accounts and authentication
## Requirements
### Requirement 1: User Management
**Description**: Users must be able to register, authenticate, and manage their profiles.
#### Acceptance Criteria
1. WHEN a new user provides valid registration information, THE **UserAuthenticationService** SHALL create a new user account and send a verification email.
2. WHEN a registered user provides correct credentials, THE **UserAuthenticationService** SHALL return a valid JWT token for session management.
3. WHEN a user requests password reset, THE **UserAuthenticationService** SHALL send a password reset link to their registered email.
4. WHEN a JWT token expires, THE **UserAuthenticationService** SHALL require re-authentication.
### Requirement 2: Task Creation and Management
**Description**: Users must be able to create, edit, assign, and track tasks.
#### Acceptance Criteria
1. WHEN a user creates a new task with valid information, THE **TaskManagementEngine** SHALL save the task and assign it a unique identifier.
2. WHEN a user edits an existing task, THE **TaskManagementEngine** SHALL update the task and maintain change history.
3. WHEN a task is assigned to a user, THE **TaskManagementEngine** SHALL notify the assigned user via the **NotificationService**.
4. WHEN a task status changes, THE **TaskManagementEngine** SHALL update the task status and notify relevant users.
### Requirement 3: Project Organization
**Description**: Tasks must be organized into projects with proper access control.
#### Acceptance Criteria
1. WHEN a user creates a new project, THE **ProjectOrganizer** SHALL create the project and assign the user as project owner.
2. WHEN a project owner adds team members, THE **ProjectOrganizer** SHALL grant appropriate permissions based on role assignments.
3. WHEN a user accesses project tasks, THE **ProjectOrganizer** SHALL validate that the user has permission to view the project.
4. WHEN a project is deleted, THE **ProjectOrganizer** SHALL archive all associated tasks and notify project members.
### Requirement 4: Real-time Notifications
**Description**: Users must receive real-time notifications about task updates and assignments.
#### Acceptance Criteria
1. WHEN a task is assigned to a user, THE **NotificationService** SHALL send an immediate notification via WebSocket.
2. WHEN a task deadline approaches, THE **NotificationService** SHALL send reminder notifications to assigned users.
3. WHEN multiple users edit the same task, THE **NotificationService** SHALL broadcast real-time updates to prevent conflicts.
4. WHEN system maintenance occurs, THE **NotificationService** SHALL display maintenance notifications to all active users.
### Requirement 5: Reporting and Analytics
**Description**: Users must be able to view reports and analytics about task completion and project progress.
#### Acceptance Criteria
1. WHEN a project owner requests a progress report, THE **ReportingModule** SHALL generate a report showing task completion rates and team productivity.
2. WHEN a manager views analytics dashboard, THE **ReportingModule** SHALL display charts showing task distribution by status and assignee.
3. WHEN tasks are overdue, THE **ReportingModule** SHALL highlight overdue items and calculate impact on project timeline.
4. WHEN a project is completed, THE **ReportingModule** SHALL generate a final performance report with key metrics.
## Non-Functional Requirements
### Requirement 6: Performance
**Description**: System must respond quickly under normal load conditions.
#### Acceptance Criteria
1. WHEN 100 concurrent users access the system, THE **TaskManagementEngine** SHALL respond to task operations within 200 milliseconds.
2. WHEN generating reports, THE **ReportingModule** SHALL complete report generation within 5 seconds for projects with up to 1000 tasks.
3. WHEN users authenticate, THE **UserAuthenticationService** SHALL complete login within 500 milliseconds.
4. WHEN notifications are sent, THE **NotificationService** SHALL deliver notifications within 1 second of trigger events.
### Requirement 7: Security
**Description**: System must protect user data and prevent unauthorized access.
#### Acceptance Criteria
1. WHEN users submit sensitive information, THE **UserAuthenticationService** SHALL encrypt all data in transit using HTTPS.
2. WHEN passwords are stored, THE **UserAuthenticationService** SHALL hash passwords using bcrypt with minimum 12 rounds.
3. WHEN API requests are made, THE **TaskManagementEngine** SHALL validate JWT tokens and enforce role-based access control.
4. WHEN database connections are established, THE system SHALL use SSL/TLS encryption for all database communications.

skills/specification-architect/assets/sample_outputs/example_research.md Normal file

@@ -0,0 +1,123 @@
# Research Summary for Task Management System
### Domain Analysis
- **Industry**: Productivity/Project Management Software
- **Scale Requirements**: 1,000+ concurrent users, 10,000+ tasks, real-time collaboration
- **Key Challenges**: Real-time updates, data consistency, user permission management, notification delivery
### Architectural Approaches Considered
1. **Microservices Architecture**
- Description: Decompose system into independent services for users, tasks, projects, notifications
- Pros: Independent scaling, fault isolation, technology diversity, team autonomy
- Cons: Operational complexity, network latency, distributed transactions, higher cost
2. **Monolithic Architecture**
- Description: Single application with modular components within one deployable unit
- Pros: Simpler deployment, easier debugging, lower operational overhead, better performance
- Cons: Scalability limits, technology lock-in, deployment risks, team coordination challenges
3. **Event-Driven Architecture with CQRS**
- Description: Command Query Responsibility Segregation with event sourcing
- Pros: Excellent scalability, audit trails, real-time updates, loose coupling
- Cons: High complexity, eventual consistency, steep learning curve, debugging challenges
### Technology Stack Research
#### Backend Frameworks
- **Node.js + Express**: Excellent for real-time features, large ecosystem, fast development
- **Python + FastAPI**: Strong typing, async support, good for APIs, data science integration
- **Java + Spring Boot**: Enterprise-grade, mature ecosystem, strong consistency
#### Database Options
- **PostgreSQL**: ACID compliance, JSON support, reliability, good for complex queries
- **MongoDB**: Flexible schema, horizontal scaling, good for rapid development
- **MySQL**: Mature, widely used, good performance, familiar to most developers
#### Real-time Communication
- **WebSockets**: Direct communication, low latency, widely supported
- **Server-Sent Events (SSE)**: Simpler than WebSockets, good for one-way updates
- **Message Queues (Redis/RabbitMQ)**: Reliable delivery, scalable, decoupled
### Recommended Technology Stack
- **Architecture Pattern**: **Modular Monolith with Microservice Readiness**
- Start with monolith for speed and simplicity
- Design modules to be easily extractable into microservices later
- Use clear boundaries between functional areas
- **Backend**: **Node.js + TypeScript + Express**
- TypeScript for type safety and better development experience
- Express for mature, well-documented framework
- Excellent ecosystem for real-time features (Socket.io)
- Good performance for I/O-bound applications
- **Database**: **PostgreSQL + Redis**
- PostgreSQL as primary database for ACID compliance and reliability
- Redis for session management, caching, and real-time data
- Both have excellent Node.js support
- **Real-time Communication**: **Socket.io + Redis Adapter**
- Socket.io for WebSocket connections with fallback support
- Redis adapter for multi-instance scaling
- Proven solution for real-time collaboration
- **Authentication**: **JWT + Refresh Tokens**
- JWT for stateless authentication
- Refresh tokens for security and better user experience
- Industry standard with good library support
- **Infrastructure**: **Docker + AWS ECS/RDS**
- Docker for containerization and consistency
- AWS ECS for managed container orchestration
- AWS RDS for managed PostgreSQL with automatic backups
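To make the recommended JWT + refresh token pattern concrete, a minimal token-issuing sketch (shown in Python with PyJWT purely for illustration; the secret and lifetimes are placeholders) could look like this:
```python
import datetime
import jwt  # PyJWT

SECRET = "change-me"                        # placeholder signing key
ACCESS_TTL = datetime.timedelta(minutes=15)
REFRESH_TTL = datetime.timedelta(days=7)

def issue_tokens(user_id: str) -> dict:
    """Issue a short-lived access token and a long-lived refresh token."""
    now = datetime.datetime.now(datetime.timezone.utc)
    access = jwt.encode({"sub": user_id, "type": "access", "exp": now + ACCESS_TTL},
                        SECRET, algorithm="HS256")
    refresh = jwt.encode({"sub": user_id, "type": "refresh", "exp": now + REFRESH_TTL},
                         SECRET, algorithm="HS256")
    return {"access_token": access, "refresh_token": refresh}

def refresh_access_token(refresh_token: str) -> str:
    """Exchange a valid refresh token for a fresh access token."""
    claims = jwt.decode(refresh_token, SECRET, algorithms=["HS256"])
    if claims.get("type") != "refresh":
        raise ValueError("not a refresh token")
    return issue_tokens(claims["sub"])["access_token"]
```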
### Research Sources
1. **"Microservices vs Monolith: When to Choose Which"** (Martin Fowler, 2024)
- Key insight: Start with monolith, extract microservices when clear boundaries emerge
- Most successful microservices implementations evolved from monoliths
2. **"Real-time Web Application Architecture Best Practices"** (InfoQ, 2024)
- WebSocket scaling challenges and solutions
- Redis adapter pattern for multi-instance deployments
3. **"PostgreSQL vs MongoDB for Task Management Systems"** (Database Journal, 2024)
- PostgreSQL superior for complex queries and data consistency
- JSON support provides flexibility when needed
4. **"Node.js TypeScript Best Practices for Enterprise Applications"** (Node.js Foundation, 2024)
- Type safety significantly reduces runtime errors
- Better development experience with IDE support
5. **"Authentication Patterns for Modern Web Applications"** (OWASP, 2024)
- JWT + refresh token pattern recommended for SPA applications
- Proper token storage and refresh strategies
### Decision Rationale
**Why Modular Monolith First:**
- Team size (3-5 developers) doesn't warrant microservices complexity
- Faster time-to-market with simpler deployment and debugging
- Clear module boundaries will allow future extraction if needed
- Lower operational cost and complexity for initial launch
**Why Node.js + TypeScript:**
- Real-time features are first-class citizens in Node.js ecosystem
- TypeScript provides enterprise-grade type safety
- Large talent pool and extensive library ecosystem
- Excellent performance for our I/O-bound use case
**Why PostgreSQL + Redis:**
- Data consistency is critical for task management
- PostgreSQL handles complex queries and relationships well
- Redis provides excellent caching and real-time data capabilities
- Both technologies are mature, well-supported, and cost-effective
**Why Socket.io for Real-time:**
- Handles WebSocket connection management complexity
- Provides automatic fallback to other transport methods
- Redis adapter enables horizontal scaling
- Large community and proven track record
This technology stack balances development speed, operational simplicity, and future scalability while leveraging current best practices and well-established patterns.

skills/specification-architect/assets/sample_outputs/example_tasks.md Normal file

@@ -0,0 +1,152 @@
# Implementation Plan
## Phase 1: Core Infrastructure
- [ ] 1. Implement the UserAuthenticationService
- [ ] 1.1 Create project structure and setup configuration
- [ ] 1.2 Implement core UserAuthenticationService class in `src/services/auth/UserAuthenticationService.py`
- [ ] 1.3 Add user registration and validation methods
- [ ] 1.4 Implement JWT token generation and validation
- [ ] 1.5 Add password hashing with bcrypt
- [ ] 1.6 Create user repository interface and implementation
- [ ] 1.7 Write unit tests for UserAuthenticationService
- [ ] 1.8 Create integration tests for authentication flow
- _Requirements: 1.1, 1.2, 1.3, 1.4, 7.1, 7.2_
- [ ] 2. Implement the TaskManagementEngine
- [ ] 2.1 Create TaskManagementEngine class in `src/services/tasks/TaskManagementEngine.py`
- [ ] 2.2 Implement task CRUD operations (create, read, update, delete)
- [ ] 2.3 Add task assignment and status change methods
- [ ] 2.4 Implement task filtering and search functionality
- [ ] 2.5 Create task repository interface and implementation
- [ ] 2.6 Add input validation and business rules
- [ ] 2.7 Write unit tests for task operations
- [ ] 2.8 Create performance tests for task queries
- _Requirements: 2.1, 2.2, 2.3, 2.4, 6.1, 7.3_
## Phase 2: Data Layer and Storage
- [ ] 3. Setup Database Infrastructure
- [ ] 3.1 Configure PostgreSQL database connection
- [ ] 3.2 Create database migration scripts for schema
- [ ] 3.3 Implement users table with proper constraints
- [ ] 3.4 Create tasks table with foreign key relationships
- [ ] 3.5 Setup projects table and member relationships
- [ ] 3.6 Add indexes for performance optimization
- [ ] 3.7 Create database backup and recovery procedures
- [ ] 3.8 Write database integration tests
- _Requirements: 1.1, 2.1, 3.1, 3.2, 3.3, 3.4, 7.4_
- [ ] 4. Implement the ProjectOrganizer
- [ ] 4.1 Create ProjectOrganizer class in `src/services/projects/ProjectOrganizer.py`
- [ ] 4.2 Implement project creation and management methods
- [ ] 4.3 Add team member invitation and permission system
- [ ] 4.4 Create project repository interface and implementation
- [ ] 4.5 Implement role-based access control for projects
- [ ] 4.6 Add project archiving and deletion functionality
- [ ] 4.7 Write unit tests for project operations
- [ ] 4.8 Create tests for permission validation
- _Requirements: 3.1, 3.2, 3.3, 3.4_
## Phase 3: Communication Layer
- [ ] 5. Implement the NotificationService
- [ ] 5.1 Create NotificationService class in `src/services/notifications/NotificationService.py`
- [ ] 5.2 Setup WebSocket manager for real-time communications
- [ ] 5.3 Implement email service integration with SMTP
- [ ] 5.4 Create notification templates and formatting
- [ ] 5.5 Add notification queue and retry mechanisms
- [ ] 5.6 Implement notification preferences and filtering
- [ ] 5.7 Write tests for real-time notification delivery
- [ ] 5.8 Create email notification tests
- _Requirements: 4.1, 4.2, 4.3, 4.4, 6.4_
- [ ] 6. REST API Implementation
- [ ] 6.1 Create Flask/FastAPI application structure
- [ ] 6.2 Implement authentication middleware and decorators
- [ ] 6.3 Create user endpoints (register, login, profile)
- [ ] 6.4 Implement task CRUD endpoints with proper validation
- [ ] 6.5 Add project management endpoints
- [ ] 6.6 Create API documentation with OpenAPI/Swagger
- [ ] 6.7 Add rate limiting and request validation
- [ ] 6.8 Write comprehensive API tests
- _Requirements: 1.2, 2.1, 2.2, 3.1, 7.3_
## Phase 4: Business Intelligence
- [ ] 7. Implement the ReportingModule
- [ ] 7.1 Create ReportingModule class in `src/services/reports/ReportingModule.py`
- [ ] 7.2 Implement task completion rate calculations
- [ ] 7.3 Create productivity analytics and dashboards
- [ ] 7.4 Add overdue task identification and impact analysis
- [ ] 7.5 Implement project performance metrics
- [ ] 7.6 Create report generation and export functionality
- [ ] 7.7 Add caching for frequently accessed reports
- [ ] 7.8 Write tests for report accuracy and performance
- _Requirements: 5.1, 5.2, 5.3, 5.4, 6.2_
## Phase 5: Testing and Quality Assurance
- [ ] 8. Comprehensive Testing Suite
- [ ] 8.1 Complete unit test coverage for all components (target: 90%+)
- [ ] 8.2 Create integration tests for component interactions
- [ ] 8.3 Implement end-to-end tests for critical user flows
- [ ] 8.4 Add performance testing and load testing
- [ ] 8.5 Create security testing and vulnerability scanning
- [ ] 8.6 Implement automated testing in CI/CD pipeline
- [ ] 8.7 Add user acceptance testing scenarios
- [ ] 8.8 Create test data management and cleanup procedures
- _Requirements: 6.1, 6.2, 6.3, 6.4_
- [ ] 9. Security Implementation
- [ ] 9.1 Configure HTTPS/TLS for all communications
- [ ] 9.2 Implement secure password storage and hashing
- [ ] 9.3 Add input validation and sanitization
- [ ] 9.4 Create security headers and CSP policies
- [ ] 9.5 Implement audit logging for sensitive operations
- [ ] 9.6 Add rate limiting and DDoS protection
- [ ] 9.7 Create security monitoring and alerting
- [ ] 9.8 Write security tests and penetration testing
- _Requirements: 7.1, 7.2, 7.3, 7.4_
## Phase 6: Deployment and Operations
- [ ] 10. Production Deployment
- [ ] 10.1 Setup production environment and infrastructure
- [ ] 10.2 Configure application servers and load balancers
- [ ] 10.3 Implement database clustering and backup strategies
- [ ] 10.4 Setup monitoring and logging infrastructure
- [ ] 10.5 Create deployment scripts and CI/CD pipeline
- [ ] 10.6 Configure environment-specific settings
- [ ] 10.7 Implement health checks and monitoring alerts
- [ ] 10.8 Create disaster recovery and rollback procedures
- _Requirements: 5.1, 5.2_
- [ ] 11. Documentation and Training
- [ ] 11.1 Create comprehensive API documentation
- [ ] 11.2 Write user guides and documentation
- [ ] 11.3 Create administrator and deployment guides
- [ ] 11.4 Document system architecture and design decisions
- [ ] 11.5 Create troubleshooting and maintenance guides
- [ ] 11.6 Develop training materials for end users
- [ ] 11.7 Record video tutorials and walkthroughs
- [ ] 11.8 Create knowledge base and FAQ resources
- _Requirements: 5.1, 5.2_
## Phase 7: Performance Optimization
- [ ] 12. Performance Tuning
- [ ] 12.1 Optimize database queries and add query caching
- [ ] 12.2 Implement Redis caching for frequently accessed data
- [ ] 12.3 Add connection pooling and optimize resource usage
- [ ] 12.4 Optimize API response times and implement pagination
- [ ] 12.5 Add asynchronous processing for long-running tasks
- [ ] 12.6 Implement content delivery network for static assets
- [ ] 12.7 Monitor and optimize memory usage
- [ ] 12.8 Create performance benchmarks and monitoring
- _Requirements: 6.1, 6.2, 6.3, 6.4_
## Final Acceptance Criteria
- [ ] 13. System Integration and Validation
- [ ] 13.1 Validate all acceptance criteria from requirements document
- [ ] 13.2 Run complete traceability validation using automated script
- [ ] 13.3 Perform full system integration testing
- [ ] 13.4 Conduct security audit and penetration testing
- [ ] 13.5 Validate performance under expected load
- [ ] 13.6 Confirm all user workflows function correctly
- [ ] 13.7 Complete user acceptance testing with stakeholders
- [ ] 13.8 Finalize documentation and prepare for launch
- _Requirements: 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, 4.1, 4.2, 4.3, 4.4, 5.1, 5.2, 5.3, 5.4, 6.1, 6.2, 6.3, 6.4, 7.1, 7.2, 7.3, 7.4_

View File

@@ -0,0 +1,110 @@
# Validation Report
## 1. Requirements to Tasks Traceability Matrix
| Requirement | Acceptance Criterion | Implementing Task(s) | Status |
|---|---|---|---|
| 1. User Management | 1.1 | Task 1, Task 8 | Covered |
| | 1.2 | Task 1, Task 6 | Covered |
| | 1.3 | Task 1, Task 8 | Covered |
| | 1.4 | Task 1, Task 6 | Covered |
| 2. Task Creation and Management | 2.1 | Task 2, Task 6 | Covered |
| | 2.2 | Task 2, Task 6 | Covered |
| | 2.3 | Task 2, Task 5 | Covered |
| | 2.4 | Task 2, Task 5 | Covered |
| 3. Project Organization | 3.1 | Task 4, Task 6 | Covered |
| | 3.2 | Task 4, Task 8 | Covered |
| | 3.3 | Task 4, Task 8 | Covered |
| | 3.4 | Task 4, Task 8 | Covered |
| 4. Real-time Notifications | 4.1 | Task 5, Task 8 | Covered |
| | 4.2 | Task 5, Task 8 | Covered |
| | 4.3 | Task 5, Task 8 | Covered |
| | 4.4 | Task 5, Task 8 | Covered |
| 5. Reporting and Analytics | 5.1 | Task 7, Task 10 | Covered |
| | 5.2 | Task 7, Task 8 | Covered |
| | 5.3 | Task 7, Task 8 | Covered |
| | 5.4 | Task 7, Task 10 | Covered |
| 6. Performance | 6.1 | Task 2, Task 12 | Covered |
| | 6.2 | Task 7, Task 12 | Covered |
| | 6.3 | Task 8, Task 12 | Covered |
| | 6.4 | Task 5, Task 12 | Covered |
| 7. Security | 7.1 | Task 1, Task 9 | Covered |
| | 7.2 | Task 1, Task 9 | Covered |
| | 7.3 | Task 2, Task 9 | Covered |
| | 7.4 | Task 3, Task 9 | Covered |
## 2. Coverage Analysis
### Summary
- **Total Acceptance Criteria**: 28
- **Criteria Covered by Tasks**: 28
- **Coverage Percentage**: 100%
### Detailed Status
- **Covered Criteria**: 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, 4.1, 4.2, 4.3, 4.4, 5.1, 5.2, 5.3, 5.4, 6.1, 6.2, 6.3, 6.4, 7.1, 7.2, 7.3, 7.4
- **Missing Criteria**: None
- **Invalid References**: None
## 3. Validation Summary
### Component Coverage Analysis
- **UserAuthenticationService**: All 6 acceptance criteria covered across tasks 1, 6, 8, 9
- **TaskManagementEngine**: All 4 acceptance criteria covered across tasks 2, 5, 6, 9, 12
- **ProjectOrganizer**: All 4 acceptance criteria covered across tasks 4, 6, 8
- **NotificationService**: All 4 acceptance criteria covered across tasks 5, 8, 12
- **ReportingModule**: All 4 acceptance criteria covered across tasks 7, 8, 10, 12
- **Infrastructure Components**: All 6 security and performance criteria covered across tasks 3, 9, 12
### Task Distribution Analysis
- **Phase 1 (Infrastructure)**: Tasks 1-2 cover 10 acceptance criteria
- **Phase 2 (Data Layer)**: Tasks 3-4 cover 8 acceptance criteria
- **Phase 3 (Communication)**: Tasks 5-6 cover 8 acceptance criteria
- **Phase 4 (Business Intelligence)**: Task 7 covers 4 acceptance criteria
- **Phase 5 (Testing)**: Tasks 8-9 cover 16 acceptance criteria
- **Phase 6 (Deployment)**: Task 10 covers 2 acceptance criteria
- **Phase 7 (Performance)**: Task 12 covers 4 acceptance criteria
### Cross-Cutting Concerns
- **Security Requirements**: All 4 criteria (7.1-7.4) addressed in tasks 1, 2, 3, 9
- **Performance Requirements**: All 4 criteria (6.1-6.4) addressed in tasks 2, 5, 7, 12
- **Authentication/Authorization**: Integrated throughout tasks 1, 2, 4, 6
- **Data Validation**: Covered in tasks 1, 2, 4, 6, 9
- **Error Handling**: Addressed in tasks 1, 2, 4, 5, 7, 9
## 4. Final Validation
All 28 acceptance criteria are fully traced to implementation tasks. The plan is validated and ready for execution.
### Validation Results:
- **Requirements Coverage**: 100% (28/28 criteria covered)
- **Task Traceability**: All 13 major tasks have proper requirement references
- **Component Consistency**: All component names used consistently across documents
- **Template Adherence**: All documents follow the specified templates
- **Reference Validity**: No invalid requirement references found in tasks
### Readiness Assessment:
- **Architecture**: Complete and validated
- **Requirements**: Fully specified and testable
- **Design**: Detailed and comprehensive
- **Implementation Plan**: Granular and actionable
- **Validation**: Automated and verified
## 5. Next Steps
The specification is now complete and validated. The following actions are recommended:
1. **Begin Implementation**: Start with Phase 1 tasks as outlined in the implementation plan
2. **Setup Development Environment**: Configure tools, databases, and repositories
3. **Establish Quality Gates**: Implement the traceability validator in CI/CD pipeline
4. **Regular Validation**: Run validation checks after each major milestone
5. **Stakeholder Review**: Conduct final review with all project stakeholders
## 6. Validation Command
To re-run validation during implementation:
```bash
python scripts/traceability_validator.py --path . --requirements requirements.md --tasks tasks.md
```
This command will verify that all requirements remain covered throughout the development process.
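If the validator is wired into a CI/CD quality gate (step 3 of the next steps above), its exit code is all that is needed to fail a build when coverage regresses. The wrapper below is an illustrative sketch only; it assumes the same repository layout used in the command above and passes no options beyond those documented.

```python
import subprocess
import sys

def run_traceability_gate(spec_dir: str = ".") -> int:
    """Run the traceability validator and propagate its exit code to CI."""
    result = subprocess.run(
        [
            sys.executable, "scripts/traceability_validator.py",
            "--path", spec_dir,
            "--requirements", "requirements.md",
            "--tasks", "tasks.md",
        ],
        capture_output=True,
        text=True,
    )
    print(result.stdout)  # surface the full validation report in the CI log
    return result.returncode  # non-zero means at least one criterion lost coverage

if __name__ == "__main__":
    sys.exit(run_traceability_gate())
```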

View File

@@ -0,0 +1,25 @@
# Verifiable Research and Technology Proposal
## 1. Core Problem Analysis
The user requires a task management system for a growing SaaS company that must handle real-time collaboration, support 1,000+ concurrent users, integrate with existing tools, and scale horizontally. The primary technical challenges include real-time data synchronization, conflict resolution, and maintaining performance under high load.
## 2. Verifiable Technology Recommendations
| Technology/Pattern | Rationale & Evidence |
|---|---|
| **Node.js + TypeScript** | Node.js excels at real-time applications due to its event-driven, non-blocking I/O model that can handle thousands of concurrent connections efficiently [cite:1]. TypeScript adds static typing that reduces runtime errors by approximately 15% in large codebases while providing better IDE support and documentation [cite:2]. |
| **Modular Monolith Architecture** | A modular monolith approach is recommended over microservices for teams of 3-5 developers because it provides clear module boundaries that can be extracted into microservices later, while avoiding the operational complexity of distributed systems [cite:3]. This approach has been successfully used by companies like Basecamp and GitHub before scaling to microservices. |
| **PostgreSQL + Redis** | PostgreSQL provides ACID compliance and has been proven reliable for financial applications with 99.99% uptime, making it ideal for critical task data [cite:4]. Redis offers sub-millisecond latency for real-time features like notifications and presence detection, with proven scalability for millions of concurrent connections [cite:5]. |
| **Socket.io for Real-time Communication** | Socket.io provides automatic fallback from WebSockets to other transport methods, ensuring compatibility across all network environments including restrictive corporate firewalls [cite:6]. The library handles connection management, reconnection logic, and room-based messaging out of the box. |
| **Docker + Kubernetes Deployment** | Containerization with Docker provides consistent environments across development, testing, and production, eliminating "it works on my machine" issues [cite:7]. Kubernetes enables horizontal scaling with automatic load balancing and self-healing capabilities that have been proven to reduce infrastructure costs by 30-40% for SaaS applications [cite:8]. |
## 3. Browsed Sources
- [1] https://nodejs.org/en/docs/guides/blocking-vs-non-blocking/ - Official Node.js documentation explaining event-driven, non-blocking I/O architecture and its benefits for concurrent applications
- [2] https://www.typescriptlang.org/docs/handbook/intro.html - TypeScript documentation showing how static typing reduces runtime errors and improves development experience
- [3] https://martinfowler.com/articles/monoliths.html - Martin Fowler's analysis of modular monolith architecture, including successful case studies from Basecamp and GitHub
- [4] https://www.postgresql.org/about/ - PostgreSQL official documentation highlighting ACID compliance, reliability statistics, and financial industry adoption
- [5] https://redis.io/topics/introduction - Redis documentation showing performance benchmarks and scalability for real-time applications
- [6] https://socket.io/docs/ - Socket.io documentation demonstrating fallback mechanisms and compatibility features
- [7] https://www.docker.com/why-docker/ - Docker documentation showing containerization benefits and environment consistency
- [8] https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/ - Kubernetes documentation detailing scaling capabilities and cost reduction studies

View File

@@ -0,0 +1,25 @@
{
"name": "specification-architect",
"version": "1.0.0",
"description": "A rigorous, evidence-based system that generates six interconnected architectural documents (research.md, blueprint.md, requirements.md, design.md, tasks.md, validation.md) with complete traceability. Implements a 6-phase process that eliminates AI-generated misinformation by mandating verification of all claims through web browsing and citations.",
"author": {
"name": "George A Puiu",
"email": "puiu.adrian@gmail.com"
},
"license": "MIT",
"keywords": [
"architecture",
"specification",
"documentation",
"traceability",
"requirements",
"design",
"evidence-based",
"validation"
],
"category": "productivity",
"commands": "./SKILL.md",
"strict": false,
"repository": "https://github.com/adrianpuiu/specification-document-generator",
"homepage": "https://github.com/adrianpuiu/specification-document-generator"
}

View File

@@ -0,0 +1,448 @@
# Document Templates
This document provides detailed templates and examples for each of the six specification architect documents.
## Phase 0: Verifiable Research Template
### Strict Protocol - NO "RESEARCH SLOP"
**CRITICAL**: This phase prevents AI-generated misinformation that could lead to serious professional consequences. Every claim MUST be verified with browsed sources.
### Evidence-Based Research Template
```markdown
# Verifiable Research and Technology Proposal
## 1. Core Problem Analysis
[A brief, 1-2 sentence analysis of the user's request and the primary technical challenges.]
## 2. Verifiable Technology Recommendations
| Technology/Pattern | Rationale & Evidence |
|---|---|
| **[Technology Name]** | [Rationale derived from browsed sources, with every factual claim cited.] |
| **[Pattern Name]** | [Rationale derived from browsed sources, with every factual claim cited.] |
| **[Framework]** | [Specific capabilities and limitations supported by sources.] |
| **[Database]** | [Performance characteristics and use case suitability with citations.] |
## 3. Browsed Sources
- [1] [Complete URL of browsed source 1] - [Brief description of content]
- [2] [Complete URL of browsed source 2] - [Brief description of content]
- [3] [Complete URL of browsed source 3] - [Brief description of content]
- [...]
```
### Citation Requirements - MANDATORY
**Every factual claim MUST end with a `[cite:INDEX]` citation.**
**Examples of Proper Citations:**
- ❌ "Node.js is great for real-time applications" (NO CITATION - RESEARCH SLOP)
- ✅ "Node.js excels at real-time applications due to its event-driven, non-blocking I/O model [cite:1]."
- ❌ "TypeScript reduces errors" (NO CITATION - RESEARCH SLOP)
- ✅ "TypeScript adds static typing that reduces runtime errors by approximately 15% in large codebases [cite:2]."
- ❌ "PostgreSQL is reliable" (NO CITATION - RESEARCH SLOP)
- ✅ "PostgreSQL provides ACID compliance and has been proven reliable for financial applications with 99.99% uptime [cite:3]."
### Mandatory Research Process
1. **Search THEN Browse**: Use WebSearch to find sources, then WebFetch to read the actual content
2. **NO Search Snippets**: You MUST read the full content of sources, not just search results
3. **Verify Claims**: Every technology claim must be supported by actual browsed content
4. **Cite Everything**: All factual statements must have corresponding citations
5. **Source Listing**: All browsed URLs must be listed with index numbers
### Quality Assurance Checklist
**Before proceeding to Phase 1, verify:**
- [ ] Every technology recommendation has citations
- [ ] All citations correspond to browsed sources
- [ ] No claims made without source evidence
- [ ] Source URLs are complete and accessible
- [ ] Rationales are derived from actual source content
- [ ] No "research slop" or AI-generated assumptions
**Professional Standards Compliance:**
This research process prevents the types of errors that have led to legal sanctions, financial penalties, and professional ruin for practitioners who relied on unverified AI-generated content.
**Quality Gate**: Do not proceed to Phase 1 until ALL claims are cited and verified with browsed sources.
## Blueprint Template
### Complete Template
````markdown
# Architectural Blueprint
## 1. Core Objective
[Single paragraph defining the primary goal and purpose of the system.]
## 2. System Scope and Boundaries
### In Scope
- [Feature 1 that will be implemented]
- [Feature 2 that will be implemented]
- [Component 1 that will be developed]
### Out of Scope
- [Feature 1 that will NOT be implemented]
- [External system 1 that will NOT be integrated]
- [Technology 1 that will NOT be used]
## 3. Core System Components
| Component Name | Responsibility |
|---|---|
| **[ComponentName1]** | [Concise function description explaining what this component does.] |
| **[ComponentName2]** | [Concise function description explaining what this component does.] |
| **[ComponentName3]** | [Concise function description explaining what this component does.] |
## 4. High-Level Data Flow
```mermaid
graph TD
A[External Input] --> B[ComponentName1]
B --> C[ComponentName2]
C --> D[ComponentName3]
D --> E[External Output]
style B fill:#e1f5fe
style C fill:#f3e5f5
style D fill:#e8f5e8
```
## 5. Key Integration Points
- **API Gateway**: RESTful APIs between [ComponentName1] and [ComponentName2]
- **Database Connection**: [ComponentName2] connects to PostgreSQL database
- **External Service**: [ComponentName3] integrates with [External Service Name] via [Protocol]
- **Authentication**: JWT-based authentication between all components
````
### Blueprint Guidelines
**CRITICAL: Upfront Planning Quality Determines Everything**
- **Component Names**: Use clear, descriptive names (e.g., "UserAuthenticationService", "DataProcessingEngine")
- Names should clearly indicate what the component DOES
- Use consistent naming conventions (Service, Engine, Manager, Repository, etc.)
- Avoid ambiguous names like "Helper" or "Utilities"
- **Single Responsibility Principle**: Each component should have ONE clear purpose
- If you find yourself saying "and also", split into multiple components
- Clear responsibilities prevent confusion and enable independent testing
- **Data Flow Visualization**: The Mermaid diagram is CRITICAL for understanding
- Show the complete journey from external input to external output
- Include all decision points and data transformations
- Use colors/styles to make component boundaries obvious
- **Integration Points**: Be explicit about how components communicate
- Specify protocols (REST, gRPC, message queues, events)
- Define data formats (JSON, protobuf, XML)
- Include authentication and error handling strategies
- **Boundary Setting**: The In Scope/Out of Scope section prevents scope creep
- Be specific about what you WILL NOT build
- This protects against "while you're at it" additions
- Clear boundaries enable accurate estimation and planning
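Because the validation scripts key on the exact bolded names in the component table, it can help to preview what a validator will actually extract. The snippet below is a rough sketch that uses the same style of pattern as the bundled scripts; the helper name and default file path are assumptions for the example.

```python
import re
from pathlib import Path

# Matches the bolded first column of the component table,
# e.g. "| **UserAuthenticationService** | ... |"
COMPONENT_ROW = re.compile(r"\|\s*\*\*([A-Za-z0-9_]+)\*\*\s*\|")

def blueprint_components(blueprint_path: str = "blueprint.md") -> set[str]:
    """Return the component names declared in the blueprint table."""
    text = Path(blueprint_path).read_text(encoding="utf-8")
    return set(COMPONENT_ROW.findall(text))
```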
## Requirements Template
### Complete Template
```markdown
# Requirements Document
## Introduction
This document defines the functional and non-functional requirements for the [System Name] system.
## Glossary
- **Term1**: Definition of term1
- **Term2**: Definition of term2
- **[ComponentName1]**: The component responsible for [function]
## Requirements
### Requirement 1: [Feature Name]
**Description**: [Brief description of what this requirement accomplishes.]
#### Acceptance Criteria
1. WHEN [trigger condition], THE **[ComponentName1]** SHALL [specific, testable behavior].
2. WHEN [another trigger condition], THE **[ComponentName2]** SHALL [specific, testable behavior].
3. GIVEN [precondition], WHEN [action], THEN **[ComponentName3]** SHALL [expected outcome].
### Requirement 2: [Another Feature Name]
**Description**: [Brief description of what this requirement accomplishes.]
#### Acceptance Criteria
1. WHEN [trigger condition], THE **[ComponentName1]** SHALL [specific, testable behavior].
2. WHEN [trigger condition], THE **[ComponentName2]** SHALL [specific, testable behavior].
## Non-Functional Requirements
### Requirement 3: Performance
**Description**: System must meet performance requirements.
#### Acceptance Criteria
1. WHEN [load condition], THE **[ComponentName1]** SHALL [response time requirement].
2. WHEN [concurrent users condition], THE **[ComponentName2]** SHALL [throughput requirement].
```
### Requirements Guidelines
- **Component References**: MUST use exact component names from blueprint (copy-paste to avoid errors)
- **Testable Criteria**: Each acceptance criterion must be measurable and testable
- **WHEN-THEN Format**: Use the format "WHEN [condition], THE **[Component]** SHALL [behavior]"
- **Requirement Numbering**: Use sequential numbering (1, 2, 3...)
- **Criteria Numbering**: Use decimal numbering (1.1, 1.2, 2.1, 2.2...)
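The WHEN-THEN wording above is what the bundled validators look for, so it is worth spot-checking criteria lines before review. The following is a minimal sketch for the primary "WHEN ..., THE **Component** SHALL ..." form; the GIVEN/WHEN/THEN variant would need a second pattern, and the helper name is an assumption for the example.

```python
import re

# Primary form: "N. WHEN <condition>, THE **Component** SHALL <behaviour>."
CRITERION = re.compile(
    r"^\d+\.\s+WHEN\s+.+?,\s+THE\s+\*\*[A-Za-z0-9_]+\*\*\s+SHALL\s+.+",
    re.IGNORECASE,
)

def malformed_criteria(lines: list[str]) -> list[str]:
    """Return numbered criteria lines that do not follow the WHEN-THEN wording."""
    bad = []
    for line in lines:
        stripped = line.strip()
        if re.match(r"^\d+\.\s", stripped) and not CRITERION.match(stripped):
            bad.append(stripped)
    return bad

print(malformed_criteria([
    "1. WHEN a user submits valid credentials, THE **UserAuthenticationService** SHALL issue a session token.",
    "2. The system should be fast.",  # flagged: no component, no WHEN/SHALL
]))
```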
## Design Template
### Complete Template
````markdown
# Design Document
## Overview
This document provides detailed design specifications for the [System Name] system components.
## Design Principles
- **Single Responsibility**: Each component has a single, well-defined responsibility
- **Loose Coupling**: Components interact through well-defined interfaces
- **High Cohesion**: Related functionality is grouped together
- **Scalability**: Design supports future growth and expansion
## Component Specifications
### Component: [ComponentName1]
**Purpose**: [Responsibility from blueprint - copy exactly]
**Location**: `src/components/[ComponentName1].py`
**Interface**:
```python
class [ComponentName1]:
"""
[Brief description of component purpose]
Implements: Req 1.1, Req 1.2
"""
def __init__(self, dependency: [DependencyType]):
"""Initialize component with required dependencies"""
pass
def process_data(self, input: InputType) -> OutputType:
"""
Process input data and return results
Implements: Req 1.1
"""
pass
def validate_input(self, data: Any) -> bool:
"""
Validate input data format and constraints
Implements: Req 1.2
"""
pass
```
**Dependencies**:
- [Dependency1]: [Description of dependency]
- [Dependency2]: [Description of dependency]
**Data Model**:
```python
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class [DataModelName]:
"""Data structure for [ComponentName1]"""
field1: str
field2: int
field3: Optional[List[str]] = None
```
### Component: [ComponentName2]
**Purpose**: [Responsibility from blueprint - copy exactly]
**Location**: `src/services/[ComponentName2].py`
**Interface**:
```python
class [ComponentName2]:
"""
[Brief description of component purpose]
Implements: Req 2.1, Req 2.2, Req 3.1
"""
def __init__(self, config: ConfigType):
"""Initialize component with configuration"""
pass
def execute_service(self, request: RequestType) -> ResponseType:
"""
Execute main service functionality
Implements: Req 2.1
"""
pass
def handle_error(self, error: Exception) -> ErrorResponseType:
"""
Handle and log service errors
Implements: Req 2.2
"""
pass
```
## Integration Design
### API Contracts
```python
# REST API between [ComponentName1] and [ComponentName2]
POST /api/process
Content-Type: application/json
Request:
{
"data": "input_data",
"options": {
"validate": true,
"format": "json"
}
}
Response:
{
"status": "success",
"result": "processed_data",
"metadata": {
"processing_time": 150,
"timestamp": "2024-01-01T12:00:00Z"
}
}
```
### Database Schema
```sql
-- Table for [ComponentName2] data storage
CREATE TABLE processed_data (
id SERIAL PRIMARY KEY,
input_data TEXT NOT NULL,
output_data TEXT NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
processed_by VARCHAR(100) NOT NULL
);
```
````
### Design Guidelines
- **Purpose**: Copy the responsibility exactly from the blueprint
- **Location**: Specify the exact file path where the component will be implemented
- **Requirement References**: Include "Implements: Req X.Y" comments for each method
- **Dependencies**: List all external dependencies and services
- **Data Models**: Define data structures used by the component
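The bundled scripts trace requirements through tasks.md rather than source code, but the "Implements: Req X.Y" annotations shown above make a complementary check possible once code exists. The following is a rough sketch that assumes components live under `src/`; the helper name and directory layout are assumptions for the example.

```python
import re
from pathlib import Path

# Docstring annotations such as "Implements: Req 1.1, Req 1.2"
IMPLEMENTS = re.compile(r"Implements:\s*((?:Req\s*\d+\.\d+(?:,\s*)?)+)")

def implemented_criteria(source_dir: str = "src") -> set[str]:
    """Collect acceptance-criterion references annotated in source docstrings."""
    found: set[str] = set()
    for path in Path(source_dir).rglob("*.py"):
        for match in IMPLEMENTS.finditer(path.read_text(encoding="utf-8")):
            found.update(re.findall(r"\d+\.\d+", match.group(1)))
    return found
```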
## Tasks Template
### Complete Template
```markdown
# Implementation Plan
## Phase 1: Core Infrastructure
- [ ] 1. Implement the [ComponentName1]
- [ ] 1.1 Create project structure and setup
- [ ] 1.2 Implement core [ComponentName1] class in `src/components/[ComponentName1].py`
- [ ] 1.3 Add input validation methods
- [ ] 1.4 Write unit tests for [ComponentName1]
- [ ] 1.5 Create integration tests
- _Requirements: 1.1, 1.2_
- [ ] 2. Implement the [ComponentName2]
- [ ] 2.1 Create [ComponentName2] service class
- [ ] 2.2 Implement main service methods
- [ ] 2.3 Add error handling and logging
- [ ] 2.4 Configure service dependencies
- [ ] 2.5 Write service tests
- _Requirements: 2.1, 2.2, 3.1_
## Phase 2: Data Layer
- [ ] 3. Implement the [ComponentName3]
- [ ] 3.1 Setup database connection and schema
- [ ] 3.2 Implement data access methods
- [ ] 3.3 Add data validation and transformation
- [ ] 3.4 Create database migration scripts
- [ ] 3.5 Write database tests
- _Requirements: 3.1, 3.2_
## Phase 3: Integration
- [ ] 4. API Integration
- [ ] 4.1 Implement REST API endpoints
- [ ] 4.2 Add request/response validation
- [ ] 4.3 Configure API security and authentication
- [ ] 4.4 Add API documentation
- [ ] 4.5 Write API integration tests
- _Requirements: 2.1, 4.1_
## Phase 4: Testing and Deployment
- [ ] 5. Testing
- [ ] 5.1 Complete end-to-end test suite
- [ ] 5.2 Performance testing and optimization
- [ ] 5.3 Security testing and vulnerability scanning
- [ ] 5.4 User acceptance testing
- _Requirements: 3.1, 5.1_
- [ ] 6. Deployment
- [ ] 6.1 Setup production environment
- [ ] 6.2 Configure monitoring and logging
- [ ] 6.3 Create deployment scripts
- [ ] 6.4 Implement rollback procedures
- _Requirements: 5.1, 5.2_
```
### Tasks Guidelines
- **Component Names**: Use exact component names from blueprint
- **Task Numbering**: Use sequential numbering (1, 2, 3...)
- **Sub-tasks**: Use decimal numbering (1.1, 1.2, 1.3...)
- **Requirement Tags**: Each main task must include `_Requirements: X.Y, Z.W_` tags
- **Actionable Tasks**: Each sub-task should be a specific, actionable item
- **Complete Coverage**: Every acceptance criterion from requirements must be referenced
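The `_Requirements: X.Y, Z.W_` tags are machine-readable, which is what makes the coverage check automatic. Below is a minimal sketch of how they can be collected, mirroring the pattern used by the bundled validation script; the helper name is an assumption for the example.

```python
import re
from pathlib import Path

TAG = re.compile(r"_Requirements:\s*([\d., ]+)_")

def referenced_criteria(tasks_path: str = "tasks.md") -> set[str]:
    """Return every acceptance-criterion reference tagged in the task list."""
    text = Path(tasks_path).read_text(encoding="utf-8")
    refs: set[str] = set()
    for group in TAG.findall(text):
        refs.update(ref.strip() for ref in group.split(",") if ref.strip())
    return refs

# Any criterion defined in requirements.md but absent from this set is an
# uncovered requirement and will fail validation.
```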
## Validation Template
### Complete Template
```markdown
# Validation Report
## 1. Requirements to Tasks Traceability Matrix
| Requirement | Acceptance Criterion | Implementing Task(s) | Status |
|---|---|---|---|
| 1. User Authentication | 1.1 | Task 1, Task 2 | Covered |
| | 1.2 | Task 1, Task 3 | Covered |
| 2. Data Processing | 2.1 | Task 4, Task 5 | Covered |
| | 2.2 | Task 4 | Covered |
| | 2.3 | Task 6 | Covered |
| 3. Performance | 3.1 | Task 7, Task 8 | Covered |
| ... | ... | ... | ... |
| X. [Final Requirement] | X.Y | Task Z | Covered |
## 2. Coverage Analysis
### Summary
- **Total Acceptance Criteria**: [M]
- **Criteria Covered by Tasks**: [M]
- **Coverage Percentage**: 100%
### Detailed Status
- **Covered Criteria**: 1.1, 1.2, 2.1, 2.2, 2.3, 3.1, ..., X.Y
- **Missing Criteria**: None
- **Invalid References**: None
## 3. Final Validation
All [M] acceptance criteria are fully traced to implementation tasks. The plan is validated and ready for execution.
```
### Validation Guidelines
- **Matrix Generation**: Create a row for each acceptance criterion
- **Task Mapping**: List all tasks that implement each criterion
- **Status Indicators**: Use "Covered" for implemented criteria, "Missing" for gaps
- **Coverage Analysis**: Provide summary statistics and detailed status
- **Final Statement**: Confirm 100% coverage or identify gaps
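Once criteria have been mapped to implementing tasks, rendering the matrix is mechanical. The sketch below emits one row per criterion and leaves the requirement-name column blank, as the continuation rows in the template do; the function name and input shape are assumptions for the example.

```python
def matrix_rows(criteria_to_tasks: dict[str, list[str]]) -> list[str]:
    """Render one markdown table row per acceptance criterion."""
    rows = []
    for criterion in sorted(criteria_to_tasks, key=lambda c: tuple(map(int, c.split(".")))):
        tasks = criteria_to_tasks[criterion]
        status = "Covered" if tasks else "Missing"
        rows.append(f"| | {criterion} | {', '.join(tasks) or 'None'} | {status} |")
    return rows

print("\n".join(matrix_rows({"1.1": ["Task 1", "Task 2"], "1.2": []})))
```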
## Template Usage Tips
1. **Copy-Paste Accuracy**: Copy component names exactly between documents to avoid traceability errors
2. **Sequential Numbering**: Maintain consistent numbering schemes across all documents
3. **Requirement References**: Double-check that all task requirement references exist in the requirements document
4. **Template Strictness**: Follow the templates exactly - the validation script depends on the specific format
5. **Validation First**: Run the traceability validator before considering the specification complete

View File

@@ -0,0 +1,340 @@
#!/usr/bin/env python3
"""
Traceability validator for specification architect skill.
Validates that all requirements are covered by implementation tasks.
"""
import re
import sys
from pathlib import Path
from typing import Dict, List, Tuple, Set
class TraceabilityValidator:
def __init__(self, base_path: str):
self.base_path = Path(base_path)
self.requirements = {}
self.tasks = []
self.research_citations = {}
def parse_requirements(self, requirements_file: str) -> Dict:
"""Parse requirements.md to extract requirements and acceptance criteria."""
req_file = self.base_path / requirements_file
if not req_file.exists():
raise FileNotFoundError(f"Requirements file not found: {requirements_file}")
content = req_file.read_text(encoding='utf-8')
# Split content by requirement headers and capture requirement numbers
pattern = r'\n### Requirement (\d+): ([^\n]+)'
matches = list(re.finditer(pattern, content))
requirements = {}
for match in matches:
req_num = match.group(1).strip()
req_title = match.group(2).strip()
# Find the start and end of this requirement section
start_pos = match.start()
end_pos = content.find('\n### Requirement', start_pos + 1)
if end_pos == -1:
end_pos = len(content)
# Extract this requirement's content
section_content = content[start_pos:end_pos]
# Find acceptance criteria within this section
ac_match = re.search(r'#### Acceptance Criteria\n(.*?)(?=\n###|\n##|\Z)', section_content, re.DOTALL)
if not ac_match:
continue
ac_text = ac_match.group(1).strip()
# Parse acceptance criteria
requirements[req_num] = {
"title": req_title,
"acceptance_criteria": {}
}
ac_pattern = r"(\d+)\.\s+(.+)"
ac_matches = re.findall(ac_pattern, ac_text)
for ac_num, ac_text in ac_matches:
requirements[req_num]["acceptance_criteria"][f"{req_num}.{ac_num}"] = ac_text.strip()
self.requirements = requirements
return requirements
def parse_tasks(self, tasks_file: str) -> List[Dict]:
"""Parse tasks.md to extract tasks and their requirement references."""
task_file = self.base_path / tasks_file
if not task_file.exists():
raise FileNotFoundError(f"Tasks file not found: {tasks_file}")
content = task_file.read_text(encoding='utf-8')
# Parse tasks and requirement references
task_pattern = r"- \[ \] (\d+).+?_Requirements: (.+?)_"
matches = re.findall(task_pattern, content, re.MULTILINE | re.DOTALL)
tasks = []
for task_num, req_refs in matches:
# Parse requirement references
req_refs = [ref.strip() for ref in req_refs.split(",")]
tasks.append({
"task_id": task_num,
"requirement_references": req_refs
})
self.tasks = tasks
return tasks
def validate_traceability(self) -> Tuple[Dict, List[str], List[str]]:
"""Validate that all requirements are covered by tasks."""
all_criteria = set()
for req_num, req_data in self.requirements.items():
for ac_ref in req_data["acceptance_criteria"]:
all_criteria.add(ac_ref)
covered_criteria = set()
invalid_references = set()
for task in self.tasks:
for req_ref in task["requirement_references"]:
if req_ref in all_criteria:
covered_criteria.add(req_ref)
else:
invalid_references.add(req_ref)
missing_criteria = all_criteria - covered_criteria
return {
"total_criteria": len(all_criteria),
"covered_criteria": len(covered_criteria),
"coverage_percentage": (len(covered_criteria) / len(all_criteria) * 100) if all_criteria else 100
}, list(missing_criteria), list(invalid_references)
def validate_research_evidence(self, research_file: str = "example_research.md") -> Dict:
"""Validate research document for proper citations and evidence."""
research_path = self.base_path / research_file
if not research_path.exists():
return {"valid": False, "error": f"Research file not found: {research_file}"}
content = research_path.read_text(encoding='utf-8')
validation_results = {
"valid": True,
"citation_errors": [],
"missing_sources": [],
"uncited_claims": [],
"total_sources": 0,
"total_citations": 0
}
# Extract source list (## 3. Browsed Sources section)
source_pattern = r'## 3\. Browsed Sources\n(.*?)(?=\n##|\Z)'
source_match = re.search(source_pattern, content, re.DOTALL)
if not source_match:
validation_results["valid"] = False
validation_results["citation_errors"].append("Missing 'Browsed Sources' section")
return validation_results
sources_text = source_match.group(1)
source_lines = [line.strip() for line in sources_text.split('\n') if line.strip()]
# Extract source URLs and indices
sources = {}
for line in source_lines:
source_match = re.match(r'- \[(\d+)\] (https?://\S+)', line)
if source_match:
index = source_match.group(1)
url = source_match.group(2)
sources[index] = url
validation_results["total_sources"] = len(sources)
# Check for citations in rationale section
rationale_pattern = r'\| \*\*(.+?)\*\* \| (.+?) \|'
rationale_matches = re.findall(rationale_pattern, content, re.DOTALL)
total_citations = 0
for technology, rationale in rationale_matches:
# Find all citations in rationale
citations = re.findall(r'\[cite:(\d+)\]', rationale)
total_citations += len(citations)
# Check each citation has corresponding source
for citation in citations:
if citation not in sources:
validation_results["citation_errors"].append(f"Citation [cite:{citation}] references non-existent source")
validation_results["valid"] = False
validation_results["total_citations"] = total_citations
# Check for factual claims without citations (simplified detection)
# Look for sentences with specific numbers, percentages, or strong claims
factual_claims = re.findall(r'[^.!?]*\d+(?:\.\d+)?%?[^.!?]*\.|[^.!?]*?(?:excellent|proven|ideal|best|optimal)[^.!?]*\.', content)
for claim in factual_claims:
if not re.search(r'\[cite:\d+\]', claim):
validation_results["uncited_claims"].append(claim.strip())
# Validate that we have both sources and citations
if len(sources) == 0:
validation_results["valid"] = False
validation_results["citation_errors"].append("No sources found in research document")
if total_citations == 0:
validation_results["valid"] = False
validation_results["citation_errors"].append("No citations found in technology rationales")
# Check citation to source ratio (should have reasonable coverage)
if total_citations < len(sources):
validation_results["citation_errors"].append(f"Too few citations ({total_citations}) for number of sources ({len(sources)})")
return validation_results
def generate_validation_report(self, requirements_file: str = "requirements.md",
tasks_file: str = "tasks.md",
research_file: str = "example_research.md") -> str:
"""Generate a complete validation report."""
self.parse_requirements(requirements_file)
self.parse_tasks(tasks_file)
validation_result, missing, invalid = self.validate_traceability()
research_validation = self.validate_research_evidence(research_file)
report = f"""# Validation Report
## 1. Requirements to Tasks Traceability Matrix
| Requirement | Acceptance Criterion | Implementing Task(s) | Status |
|---|---|---|---|"""
# Generate traceability matrix
for req_num, req_data in self.requirements.items():
for ac_ref, ac_text in req_data["acceptance_criteria"].items():
# Find tasks implementing this criterion
implementing_tasks = []
for task in self.tasks:
if ac_ref in task["requirement_references"]:
implementing_tasks.append(f"Task {task['task_id']}")
status = "Covered" if implementing_tasks else "Missing"
tasks_str = ", ".join(implementing_tasks) if implementing_tasks else "None"
report += f"\n| {req_num} | {ac_ref} | {tasks_str} | {status} |"
report += f"""
## 2. Coverage Analysis
### Summary
- **Total Acceptance Criteria**: {validation_result['total_criteria']}
- **Criteria Covered by Tasks**: {validation_result['covered_criteria']}
- **Coverage Percentage**: {validation_result['coverage_percentage']:.1f}%
### Detailed Status
- **Covered Criteria**: {[ref for ref in self._get_all_criteria() if ref in self._get_covered_criteria()]}
- **Missing Criteria**: {missing if missing else 'None'}
- **Invalid References**: {invalid if invalid else 'None'}
## 3. Research Evidence Validation
### Summary
- **Total Sources**: {research_validation['total_sources']}
- **Total Citations**: {research_validation['total_citations']}
- **Research Validation**: {'PASSED' if research_validation['valid'] else 'FAILED'}
### Evidence Quality
- **Citation Errors**: {len(research_validation['citation_errors'])}
- **Uncited Claims**: {len(research_validation['uncited_claims'])}
"""
if research_validation['citation_errors']:
report += "\n#### Citation Issues:\n"
for error in research_validation['citation_errors']:
report += f"- {error}\n"
if research_validation['uncited_claims']:
report += "\n#### Uncited Factual Claims:\n"
for claim in research_validation['uncited_claims'][:5]: # Limit to first 5
report += f"- {claim}\n"
if len(research_validation['uncited_claims']) > 5:
report += f"- ... and {len(research_validation['uncited_claims']) - 5} more\n"
report += """
## 4. Final Validation
"""
requirements_valid = validation_result['coverage_percentage'] == 100 and not invalid
research_valid = research_validation['valid']
if requirements_valid and research_valid:
report += f"[PASS] **VALIDATION PASSED**\n\nAll {validation_result['total_criteria']} acceptance criteria are fully traced to implementation tasks AND all research claims are properly cited with verifiable sources. The plan is validated and ready for execution."
elif not requirements_valid and research_valid:
report += f"[FAIL] **VALIDATION FAILED** - Requirements Issues\n\n{len(missing)} criteria not covered, {len(invalid)} invalid references. Research evidence is properly cited, but requirements traceability needs attention."
elif requirements_valid and not research_valid:
report += f"[FAIL] **VALIDATION FAILED** - Research Evidence Issues\n\nRequirements traceability is complete, but research evidence has {len(research_validation['citation_errors'])} citation errors and {len(research_validation['uncited_claims'])} uncited claims. This violates the evidence-based protocol and prevents professional use."
else:
report += f"[FAIL] **VALIDATION FAILED** - Multiple Issues\n\nRequirements: {len(missing)} criteria not covered, {len(invalid)} invalid references. Research: {len(research_validation['citation_errors'])} citation errors, {len(research_validation['uncited_claims'])} uncited claims."
return report
def _get_all_criteria(self) -> Set[str]:
"""Get all acceptance criteria references."""
all_criteria = set()
for req_num, req_data in self.requirements.items():
for ac_ref in req_data["acceptance_criteria"]:
all_criteria.add(ac_ref)
return all_criteria
def _get_covered_criteria(self) -> Set[str]:
"""Get all covered acceptance criteria references."""
covered_criteria = set()
all_criteria = self._get_all_criteria()
for task in self.tasks:
for req_ref in task["requirement_references"]:
if req_ref in all_criteria:
covered_criteria.add(req_ref)
return covered_criteria
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Validate specification architect traceability")
parser.add_argument("--path", default=".", help="Base path containing specification files")
parser.add_argument("--requirements", default="requirements.md", help="Requirements file name")
parser.add_argument("--tasks", default="tasks.md", help="Tasks file name")
parser.add_argument("--research", default="example_research.md", help="Research file name")
args = parser.parse_args()
try:
validator = TraceabilityValidator(args.path)
report = validator.generate_validation_report(args.requirements, args.tasks, args.research)
print(report)
# Exit with error code if validation fails
validation_result, missing, invalid = validator.validate_traceability()
research_validation = validator.validate_research_evidence(args.research)
requirements_valid = validation_result['coverage_percentage'] == 100 and not invalid
research_valid = research_validation['valid']
if not requirements_valid or not research_valid:
sys.exit(1)
else:
sys.exit(0)
except FileNotFoundError as e:
print(f"Error: {e}")
sys.exit(1)
except Exception as e:
print(f"Unexpected error: {e}")
sys.exit(1)

View File

@@ -0,0 +1,24 @@
@echo off
REM Specification Architect Validation Helper
setlocal enabledelayedexpansion
set SCRIPT_DIR=%~dp0
set SPEC_DIR=.
set VERBOSE=
set GENERATE=
:parse_args
if "%1"=="" goto run
if "%1"=="-p" (set SPEC_DIR=%2 & shift & shift & goto parse_args)
if "%1"=="--path" (set SPEC_DIR=%2 & shift & shift & goto parse_args)
if "%1"=="-v" (set VERBOSE=--verbose & shift & goto parse_args)
if "%1"=="--verbose" (set VERBOSE=--verbose & shift & goto parse_args)
if "%1"=="-g" (set GENERATE=--generate-validation & shift & goto parse_args)
if "%1"=="--generate" (set GENERATE=--generate-validation & shift & goto parse_args)
shift
goto parse_args
:run
echo Running specification validation...
python "%SCRIPT_DIR%validate_specifications.py" --path "%SPEC_DIR%" %VERBOSE% %GENERATE%
exit /b %ERRORLEVEL%

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Specification Architect Validation Helper
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SPEC_DIR="."
VERBOSE=""
GENERATE=""
while [[ $# -gt 0 ]]; do
case $1 in
-p|--path)
SPEC_DIR="$2"
shift 2
;;
-v|--verbose)
VERBOSE="--verbose"
shift
;;
-g|--generate)
GENERATE="--generate-validation"
shift
;;
-h|--help)
echo "Usage: ./validate.sh [options]"
echo "Options:"
echo " -p, --path DIR Path to spec directory"
echo " -v, --verbose Verbose output"
echo " -g, --generate Generate validation.md"
exit 0
;;
*)
shift
;;
esac
done
python3 "$SCRIPT_DIR/validate_specifications.py" --path "$SPEC_DIR" $VERBOSE $GENERATE
exit $?

View File

@@ -0,0 +1,164 @@
#!/usr/bin/env python3
"""Specification Architect Validation Script"""
import re, sys, json, argparse
from pathlib import Path
from dataclasses import dataclass, field
from typing import Dict, Set, List
@dataclass
class Result:
total: int = 0
covered: Set[str] = field(default_factory=set)
missing: Set[str] = field(default_factory=set)
coverage: float = 0.0
valid: bool = False
errors: List[str] = field(default_factory=list)
class Validator:
def __init__(self, spec_dir: str, verbose=False):
self.dir = Path(spec_dir)
self.verbose = verbose
self.result = Result()
self.components = set()
self.requirements = {}
self.task_reqs = set()
def log(self, msg, level="INFO"):
if self.verbose or level=="ERROR":
print(f"[{level}] {msg}")
def validate(self) -> Result:
self.log("Starting validation...")
if not self._files_exist():
return self.result
if not self._extract_components():
return self.result
if not self._extract_requirements():
return self.result
if not self._extract_tasks():
return self.result
self._calculate()
self._report()
return self.result
def _files_exist(self) -> bool:
for name in ["blueprint.md", "requirements.md", "tasks.md"]:
if not (self.dir / name).exists():
self.result.errors.append(f"Missing: {name}")
return len(self.result.errors) == 0
def _extract_components(self) -> bool:
try:
content = (self.dir / "blueprint.md").read_text()
self.components = set(re.findall(r'\|\s*\*\*([A-Za-z0-9_]+)\*\*\s*\|', content))
if not self.components:
self.log("No components found", "WARNING")
return False
self.log(f"Found {len(self.components)} components")
return True
except Exception as e:
self.log(f"Error: {e}", "ERROR")
return False
def _extract_requirements(self) -> bool:
try:
content = (self.dir / "requirements.md").read_text()
for match in re.finditer(r'### Requirement (\d+):', content):
req_num = match.group(1)
start = match.end()
end = len(content)
text = content[start:end]
criteria = [f"{req_num}.{c}" for c, _ in re.findall(
r'(\d+)\.\s+WHEN.*?THE\s+\*\*([A-Za-z0-9_]+)\*\*\s+SHALL',
text, re.DOTALL)]
if criteria:
self.requirements[req_num] = criteria
self.result.total = sum(len(v) for v in self.requirements.values())
self.log(f"Found {self.result.total} criteria")
return self.result.total > 0
except Exception as e:
self.log(f"Error: {e}", "ERROR")
return False
def _extract_tasks(self) -> bool:
try:
content = (self.dir / "tasks.md").read_text()
for match in re.findall(r'_Requirements:\s*([\d., ]+)_', content):
for c in match.split(','):
self.task_reqs.add(c.strip())
if not self.task_reqs:
self.log("No requirement tags found", "WARNING")
return False
self.log(f"Found {len(self.task_reqs)} covered criteria")
return True
except Exception as e:
self.log(f"Error: {e}", "ERROR")
return False
def _calculate(self):
all_crit = set()
for crit_list in self.requirements.values():
all_crit.update(crit_list)
self.result.covered = self.task_reqs & all_crit
self.result.missing = all_crit - self.task_reqs
if all_crit:
self.result.coverage = (len(self.result.covered) / len(all_crit)) * 100
self.result.valid = self.result.coverage == 100.0
def _report(self):
print("\n" + "="*80)
print("SPECIFICATION VALIDATION REPORT")
print("="*80 + "\n")
print("SUMMARY")
print("-"*80)
print(f"Total Criteria: {self.result.total}")
print(f"Covered by Tasks: {len(self.result.covered)}")
print(f"Coverage: {self.result.coverage:.1f}%\n")
if self.result.missing:
print("MISSING CRITERIA")
print("-"*80)
for c in sorted(self.result.missing, key=lambda x: tuple(map(int, x.split('.')))):
print(f" - {c}")
print()
print("VALIDATION STATUS")
print("-"*80)
if self.result.valid:
print("✅ PASSED - All criteria covered\n")
else:
print(f"❌ FAILED - {len(self.result.missing)} uncovered\n")
print("="*80 + "\n")
def main():
parser = argparse.ArgumentParser(description="Validate specifications")
parser.add_argument("--path", default=".", help="Spec directory")
parser.add_argument("--verbose", action="store_true", help="Verbose")
parser.add_argument("--json", action="store_true", help="JSON output")
args = parser.parse_args()
v = Validator(args.path, args.verbose)
result = v.validate()
if args.json:
print(json.dumps({
"total": result.total,
"covered": len(result.covered),
"missing": list(result.missing),
"coverage": result.coverage,
"valid": result.valid,
}, indent=2))
sys.exit(0 if result.valid else 1)
if __name__ == "__main__":
main()