Initial commit

Author: Zhongwei Li
Date: 2025-11-30 08:48:35 +08:00
Commit: 6f1ef3ef54
45 changed files with 15173 additions and 0 deletions

skills/git-workflow.md (new file, 136 lines)

@@ -0,0 +1,136 @@
# Git Workflow Skill
Reusable Git workflow patterns for branching, committing, and PR creation.
## Branch Creation
```bash
# Always branch from dev, not main
git checkout dev && git pull origin dev
# For issue-based work
if [ -n "$ISSUE_NUMBER" ]; then
BRANCH_NAME="feature/${ISSUE_NUMBER}-$(echo "$DESCRIPTION" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-30)"
git checkout -b "$BRANCH_NAME"
else
# For quick fixes
BRANCH_NAME="fix/$(echo "$DESCRIPTION" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-50)"
git checkout -b "$BRANCH_NAME"
fi
echo "✓ Created branch: $BRANCH_NAME"
```
## Commit Creation
```bash
# Stage all changes
git add -A
# For issue-based commits
if [ -n "$ISSUE_NUMBER" ]; then
git commit -m "$(cat <<EOF
feat: implement solution for #${ISSUE_NUMBER}

${COMMIT_DETAILS}

Closes #${ISSUE_NUMBER}
EOF
)"
else
# For quick fixes
git commit -m "$(cat <<EOF
fix: ${DESCRIPTION}

${COMMIT_DETAILS}
EOF
)"
fi
echo "✓ Changes committed"
```
## PR Creation
```bash
# Push the current branch and set upstream tracking
# (HEAD resolves to the feature/* or fix/* branch created earlier)
git push -u origin HEAD
# Create pull request
if [ -n "$ISSUE_NUMBER" ]; then
gh pr create \
--base dev \
--title "feat: #${ISSUE_NUMBER} - ${PR_TITLE}" \
--body "$(cat <<EOF
## Description

Implements solution for #${ISSUE_NUMBER}

## Changes

${CHANGES_LIST}

## Testing

- [ ] Unit tests pass
- [ ] Integration tests pass
- [ ] Manual testing completed

## Checklist

- [ ] Code follows project conventions
- [ ] No TypeScript errors
- [ ] Tests added/updated
- [ ] Documentation updated if needed

Closes #${ISSUE_NUMBER}
EOF
)" \
--assignee "@me"
else
gh pr create \
--base dev \
--title "fix: ${PR_TITLE}" \
--body "$(cat <<EOF
## Description

Quick fix: ${DESCRIPTION}

## Changes

${CHANGES_LIST}

## Testing

- [ ] Tests pass
- [ ] Manually verified fix

## Type of Change

- [x] Bug fix (non-breaking change)
- [ ] New feature
- [ ] Breaking change
EOF
)" \
--assignee "@me"
fi
# Get the PR number for the branch that was just pushed
PR_NUMBER=$(gh pr view --json number --jq '.number')
echo "✓ PR #${PR_NUMBER} created"
```
## Usage
Invoke this skill from commands by setting the required variables, then including (or sourcing) the sections you need:
```bash
# Set required variables
ISSUE_NUMBER="347" # or empty for quick fixes
DESCRIPTION="add user authentication"
COMMIT_DETAILS="- Added JWT authentication
- Implemented login/logout endpoints
- Added user session management"
CHANGES_LIST="- \`src/auth/jwt.ts\` - JWT token generation
- \`src/routes/auth.ts\` - Authentication endpoints
- \`src/middleware/auth.ts\` - Auth middleware"
PR_TITLE="Add JWT authentication system"
# Then include the skill sections as needed
```
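One possible mechanical way to "include the skill sections" from a shell context is sketched below. The `extract_section` helper and the eval-based approach are hypothetical illustrations, not part of this skill.
```bash
# Hypothetical helper (illustration only): extract the bash block under a given
# "## Heading" of a skill file so a command can run it with the variables above.
extract_section() {
  local file="$1" heading="$2"
  awk -v h="## $heading" '
    $0 == h                         { in_section = 1; next }
    in_section && /^```bash/        { in_code = 1; next }
    in_section && in_code && /^```/ { exit }
    in_code                         { print }
  ' "$file"
}

# Example: run the Branch Creation block after setting DESCRIPTION / ISSUE_NUMBER
eval "$(extract_section skills/git-workflow.md "Branch Creation")"
```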

skills/parallel-dispatch.md (new file, 180 lines)

@@ -0,0 +1,180 @@
# Parallel Dispatch Skill
Coordinate multiple agents in parallel for maximum efficiency (Every's aggressive parallelism pattern).
## Determine Agents to Invoke
```bash
# Based on issue/PR context, determine which agents to invoke in parallel
# Start with test-specialist, which is always invoked for test strategy
AGENTS_TO_INVOKE="test-specialist"
# Check for security-sensitive changes
SECURITY_KEYWORDS="auth|login|password|token|session|permission|role|encrypt|decrypt|payment|billing"
if echo "$ISSUE_BODY $CHANGED_FILES" | grep -iEq "$SECURITY_KEYWORDS"; then
AGENTS_TO_INVOKE="$AGENTS_TO_INVOKE security-analyst-specialist"
echo " Security-sensitive changes detected - adding security-analyst"
fi
# Detect domain by file patterns
if echo "$CHANGED_FILES" | grep -Eq "\.(tsx|jsx|vue|svelte)"; then
AGENTS_TO_INVOKE="$AGENTS_TO_INVOKE frontend-specialist"
echo " Frontend files detected - adding frontend-specialist"
fi
if echo "$CHANGED_FILES" | grep -Eq "api|routes|controllers|services|\.go|\.rs"; then
AGENTS_TO_INVOKE="$AGENTS_TO_INVOKE backend-specialist"
echo " Backend files detected - adding backend-specialist"
fi
if echo "$CHANGED_FILES" | grep -Eq "schema|migration|\.sql|database"; then
AGENTS_TO_INVOKE="$AGENTS_TO_INVOKE database-specialist"
echo " Database changes detected - adding database-specialist"
fi
if echo "$ISSUE_BODY" | grep -iEq "ai|llm|gpt|claude|openai|anthropic"; then
AGENTS_TO_INVOKE="$AGENTS_TO_INVOKE llm-specialist"
echo " AI/LLM features detected - adding llm-specialist"
fi
echo "=== Agents to invoke in parallel: $AGENTS_TO_INVOKE ==="
```
## Parallel Agent Invocation Pattern
This is a TEMPLATE for how commands should invoke agents in parallel.
Commands cannot directly execute Task tool invocations - they should describe the pattern.
```markdown
### Phase: Parallel Agent Analysis
Based on detected context, invoke agents IN PARALLEL using multiple Task tool calls:
**Agent 1: test-specialist**
- subagent_type: "psd-claude-coding-system:test-specialist"
- description: "Test strategy for issue #$ISSUE_NUMBER"
- prompt: "Design comprehensive test strategy for: $ISSUE_DESCRIPTION
Provide:
1. Unit test approach
2. Integration test scenarios
3. Edge cases to cover
4. Mock/stub requirements"
**Agent 2: security-analyst-specialist** (if security-sensitive)
- subagent_type: "psd-claude-coding-system:security-analyst-specialist"
- description: "Security guidance for issue #$ISSUE_NUMBER"
- prompt: "Provide PRE-IMPLEMENTATION security guidance for: $ISSUE_DESCRIPTION
Focus on:
1. Security requirements to follow
2. Common pitfalls to avoid
3. Recommended secure patterns
4. Testing security aspects"
**Agent 3: [domain]-specialist** (if detected)
- subagent_type: "psd-claude-coding-system:[backend/frontend/database/llm]-specialist"
- description: "[Domain] implementation for issue #$ISSUE_NUMBER"
- prompt: "Provide implementation guidance for: $ISSUE_DESCRIPTION
Include:
1. Architecture patterns
2. Best practices for this domain
3. Common mistakes to avoid
4. Integration points"
**CRITICAL: Invoke ALL agents simultaneously in a SINGLE response with multiple Task tool uses.**
Wait for all agents to return, then synthesize their recommendations.
```
## Synthesize Agent Recommendations
After all agents return:
```bash
# Collect insights from all agents
echo "=== Synthesizing Agent Recommendations ==="
# Agent responses will be in variables like:
# $TEST_SPECIALIST_RESPONSE
# $SECURITY_ANALYST_RESPONSE
# $DOMAIN_SPECIALIST_RESPONSE
# Create consolidated implementation plan
echo "## Consolidated Implementation Plan"
echo ""
echo "### Testing Strategy (from test-specialist)"
echo "$TEST_SPECIALIST_RESPONSE" | grep -A 20 "test"
echo ""
echo "### Security Requirements (from security-analyst)"
echo "$SECURITY_ANALYST_RESPONSE" | grep -A 20 "security"
echo ""
echo "### Domain Implementation (from $DOMAIN-specialist)"
echo "$DOMAIN_SPECIALIST_RESPONSE" | grep -A 20 "implementation"
echo ""
echo "✓ Agent recommendations synthesized - proceeding with implementation"
```
## Track Parallel Execution
```bash
# Mark this execution as using parallel agents for telemetry
if [ -n "$SESSION_ID" ]; then
SESSION_FILE="plugins/psd-claude-coding-system/meta/.session_state_${SESSION_ID}"
# Write parallel execution metadata
echo "PARALLEL=true" >> "$SESSION_FILE"
echo "PARALLEL_AGENTS=$AGENTS_TO_INVOKE" >> "$SESSION_FILE"
echo "PARALLEL_START=$(date +%s%3N)" >> "$SESSION_FILE"
fi
# After agents complete
if [ -n "$SESSION_ID" ]; then
PARALLEL_END=$(date +%s%3N)
PARALLEL_START=$(grep "^PARALLEL_START=" "$SESSION_FILE" | cut -d= -f2)
PARALLEL_DURATION=$((PARALLEL_END - PARALLEL_START))
echo "PARALLEL_DURATION_MS=$PARALLEL_DURATION" >> "$SESSION_FILE"
echo "✓ Parallel execution completed in ${PARALLEL_DURATION}ms"
fi
```
## Usage
### In /work Command
````markdown
### Phase 2.5: Parallel Agent Analysis (NEW)
Always dispatch 2-3 agents in parallel for maximum insight (Every's philosophy: speed > cost).
**Step 1: Detect which agents are needed**
```bash
# Include "Determine Agents to Invoke" section from @skills/parallel-dispatch.md
```
**Step 2: Invoke agents in parallel**
```markdown
# Include "Parallel Agent Invocation Pattern" section from @skills/parallel-dispatch.md
# This describes HOW to use Task tool with multiple simultaneous invocations
```
**Step 3: Synthesize recommendations**
```bash
# Include "Synthesize Agent Recommendations" section from @skills/parallel-dispatch.md
```
**Step 4: Track for telemetry**
```bash
# Include "Track Parallel Execution" section from @skills/parallel-dispatch.md
```
````
### In /review_pr Command
Similar pattern - detect feedback types, dispatch categorization agents in parallel, synthesize responses.
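A minimal sketch of that detection step, following the same grep-based pattern as the /work section above; `$PR_FEEDBACK` is an assumed variable holding the collected review comments (e.g. from `gh pr view --comments`).
```bash
# Hypothetical /review_pr detection step; $PR_FEEDBACK is an assumption
REVIEW_AGENTS="test-specialist"
if echo "$PR_FEEDBACK" | grep -iEq "security|vulnerab|auth|token|secret"; then
  REVIEW_AGENTS="$REVIEW_AGENTS security-analyst-specialist"
fi
if echo "$PR_FEEDBACK" | grep -iEq "schema|migration|query|index"; then
  REVIEW_AGENTS="$REVIEW_AGENTS database-specialist"
fi
echo "=== Agents to dispatch in parallel for review feedback: $REVIEW_AGENTS ==="
```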

skills/security-scan.md (new file, 152 lines)

@@ -0,0 +1,152 @@
# Security Scan Skill
Automated security scanning and vulnerability analysis for pull requests.
## Invoke Security Analyst Agent
```bash
# This skill invokes the security-analyst-specialist agent to perform comprehensive analysis
# Get the current PR number (if in PR context)
if [ -n "$PR_NUMBER" ]; then
SCAN_CONTEXT="PR #$PR_NUMBER"
else
SCAN_CONTEXT="current branch changes"
fi
echo "=== Running Security Analysis on $SCAN_CONTEXT ==="
# The command should use the Task tool to invoke security-analyst-specialist
# This is a template for commands to follow:
# Example invocation pattern:
# Task tool with:
# subagent_type: "psd-claude-coding-system:security-analyst-specialist"
# description: "Security audit for $SCAN_CONTEXT"
# prompt: "Perform comprehensive security audit on $SCAN_CONTEXT. Analyze all changed files for:
#
# 1. Security vulnerabilities (SQL injection, XSS, auth issues, secrets)
# 2. Architecture violations (business logic in UI, improper layer separation)
# 3. Best practices compliance (TypeScript quality, error handling, testing)
#
# Return structured findings in the specified format so they can be posted as a single consolidated PR comment."
```
## Post Security Findings to PR
```bash
# After agent returns findings, post as consolidated comment
if [ -n "$PR_NUMBER" ]; then
# Format findings from agent into PR comment
gh pr comment $PR_NUMBER --body "## 🔍 Automated Security & Best Practices Review
$AGENT_FINDINGS
### Summary
- 🔴 Critical Issues: $CRITICAL_COUNT
- 🟡 High Priority: $HIGH_COUNT
- 🟢 Suggestions: $SUGGESTION_COUNT
### Critical Issues (🔴 Must Fix Before Merge)
$CRITICAL_FINDINGS
### High Priority (🟡 Should Fix Before Merge)
$HIGH_FINDINGS
### Suggestions (🟢 Consider for Improvement)
$SUGGESTIONS
### Positive Practices Observed
$POSITIVE_FINDINGS
### Required Actions
1. Address all 🔴 critical issues before merge
2. Consider 🟡 high priority fixes
3. Run tests after fixes: \`npm run test\`, \`npm run lint\`, \`npm run typecheck\`
---
*Automated security review by security-analyst-specialist agent*"
echo "✅ Security review posted to PR #$PR_NUMBER"
else
echo "=== Security Findings ==="
echo "$AGENT_FINDINGS"
fi
```
## Pre-Implementation Security Check
For sensitive changes (auth, data, payments), run security check BEFORE implementation:
```bash
# Detect sensitive file changes
SENSITIVE_PATTERNS="auth|login|password|token|payment|billing|credit|card|ssn|encrypt|decrypt|session"
if echo "$CHANGED_FILES" | grep -iE "$SENSITIVE_PATTERNS"; then
echo "⚠️ Sensitive files detected - running pre-implementation security check"
# Invoke security-analyst for guidance
# Agent should provide:
# - Security requirements to follow
# - Common pitfalls to avoid
# - Recommended patterns
# - Testing strategies
echo "✓ Review security guidance before proceeding with implementation"
fi
```
## Security Checklist
Common security checks to validate:
```bash
# Check for secrets in code
echo "=== Checking for exposed secrets ==="
if git diff --cached | grep -iE "api[_-]?key|secret|password|token" | grep -v "example"; then
echo "⚠️ Possible secrets detected in staged changes"
echo "Review carefully before committing"
fi
# Check for SQL injection vulnerabilities
echo "=== Checking for SQL injection risks ==="
if git diff --cached | grep -E "execute\(|query\(" | grep -v "prepared"; then
echo "⚠️ Direct SQL execution detected - ensure using prepared statements"
fi
# Check for XSS vulnerabilities
echo "=== Checking for XSS risks ==="
if git diff --cached | grep -iE "innerHTML|dangerouslySetInnerHTML" | grep -v "sanitize"; then
echo "⚠️ innerHTML usage detected - ensure proper sanitization"
fi
# Check for authentication bypass
echo "=== Checking authentication patterns ==="
if git diff --cached | grep -iE "req\.user|auth|permission" | grep -v "check"; then
echo " Authentication-related changes detected - verify authorization checks"
fi
echo "✓ Basic security checks complete"
```
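If a dedicated scanner happens to be available, it can back up the grep heuristics above. The snippet below is an optional addition; gitleaks is shown as one example and is not assumed by this skill.
```bash
# Optional: run a dedicated secret scanner on staged changes when installed
if command -v gitleaks >/dev/null 2>&1; then
  gitleaks protect --staged || echo "⚠️ gitleaks reported possible secrets"
fi
```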
## Usage
### Pre-Implementation (in /work command)
```bash
# Before starting implementation, check if security review needed
CHANGED_FILES=$(gh issue view $ISSUE_NUMBER --json body --jq '.body' | grep -oE '\w+\.(ts|js|py|go|rs)' || echo "")
# Include Pre-Implementation Security Check section
```
### Post-Implementation (traditional)
```bash
# After PR created
PR_NUMBER=$(gh pr list --author "@me" --limit 1 --json number --jq '.[0].number')
# Include Invoke Security Analyst Agent section
# Then include Post Security Findings to PR section
```

skills/telemetry-report.md (new file, 130 lines)

@@ -0,0 +1,130 @@
# Telemetry Report Skill
Track agent activity and report to telemetry system for meta-learning.
## Report Agent Invocation
Note: The actual telemetry collection happens automatically via hooks (SubagentStop hook).
This skill provides utility functions for commands to access telemetry data.
```bash
# Read current session's agent invocations
# The SubagentStop hook automatically records agent names to session state
if [ -n "$SESSION_ID" ]; then
SESSION_FILE="plugins/psd-claude-coding-system/meta/.session_state_${SESSION_ID}"
if [ -f "$SESSION_FILE" ]; then
AGENTS_INVOKED=$(grep "^AGENTS=" "$SESSION_FILE" | cut -d= -f2)
echo "Agents invoked this session: $AGENTS_INVOKED"
fi
fi
```
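The same KEY=value format can be used to read back the other keys this skill writes (PARALLEL, PARALLEL_GROUP). A small sketch, assuming $SESSION_FILE is set as above:
```bash
# Read back the parallel-execution keys written later in this skill (sketch)
if [ -f "$SESSION_FILE" ]; then
  PARALLEL_USED=$(grep "^PARALLEL=" "$SESSION_FILE" | cut -d= -f2)
  PARALLEL_GROUP=$(grep "^PARALLEL_GROUP=" "$SESSION_FILE" | cut -d= -f2-)
  echo "Parallel execution: ${PARALLEL_USED:-false} (${PARALLEL_GROUP:-none})"
fi
```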
## Query Telemetry for Patterns
```bash
# Check which agents work well together
# Useful for meta-learning and optimization
TELEMETRY_FILE="plugins/psd-claude-coding-system/meta/telemetry.json"
if [ -f "$TELEMETRY_FILE" ] && command -v jq &> /dev/null; then
# Find most common agent combinations for /work command
echo "=== Most Common Agent Combinations for /work ==="
jq -r '.executions[] | select(.command == "work") | .agents_invoked | join(",")' "$TELEMETRY_FILE" \
| sort | uniq -c | sort -rn | head -5
# Find average duration by command
echo -e "\n=== Average Duration by Command ==="
jq -r '.executions | group_by(.command) | map({command: .[0].command, avg_duration: (map(.duration_ms) | add / length)}) | .[]' "$TELEMETRY_FILE"
# Find commands with highest success rate
echo -e "\n=== Success Rates by Command ==="
jq -r '.executions | group_by(.command) | map({command: .[0].command, success_rate: ((map(select(.success == true)) | length) / length * 100)}) | .[]' "$TELEMETRY_FILE"
fi
```
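For reference, the shape of telemetry.json implied by the queries above looks roughly like the following. The field names come from those queries; the full schema is owned by the Stop hook and may include more fields.
```bash
# Illustrative telemetry.json entry (piped through jq to confirm it parses)
cat <<'EOF' | jq .
{
  "executions": [
    {
      "command": "work",
      "agents_invoked": ["test-specialist", "backend-specialist"],
      "duration_ms": 184233,
      "success": true
    }
  ]
}
EOF
```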
## Track Parallel Execution
```bash
# When invoking multiple agents in parallel, track the pattern
if [ -n "$SESSION_ID" ]; then
SESSION_FILE="plugins/psd-claude-coding-system/meta/.session_state_${SESSION_ID}"
# Mark that this session used parallel execution
echo "PARALLEL=true" >> "$SESSION_FILE"
# Track which agents ran in parallel
echo "PARALLEL_GROUP=$AGENT_LIST" >> "$SESSION_FILE"
# The Stop hook will read these and add to telemetry.json
fi
```
## Get Recommendations from History
```bash
# Based on current issue/context, get recommendations for which agents to invoke
if [ -f "$TELEMETRY_FILE" ] && command -v jq &> /dev/null; then
# For similar issues (by keyword), what agents were successful?
KEYWORDS=$(echo "$ISSUE_TITLE" | tr '[:upper:]' '[:lower:]')
echo "=== Recommended Agents Based on Similar Issues ==="
# This is a placeholder - real implementation would use more sophisticated matching
jq -r ".executions[] | select(.success == true) | select(.command == \"work\") | .agents_invoked[]" "$TELEMETRY_FILE" \
| sort | uniq -c | sort -rn | head -3
fi
```
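One way the placeholder matching could be tightened is sketched below, assuming each execution record also stores an issue title; the `issue_title` field is an assumption and nothing in this skill guarantees it exists.
```bash
# Hypothetical keyword-aware variant of the query above; .issue_title is assumed
for kw in $KEYWORDS; do
  jq -r --arg kw "$kw" \
    '.executions[] | select(.success == true and ((.issue_title // "") | ascii_downcase | contains($kw))) | .agents_invoked[]' \
    "$TELEMETRY_FILE"
done | sort | uniq -c | sort -rn | head -3
```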
## Report Command Metrics
```bash
# At end of command execution, report key metrics for telemetry
echo "=== Command Execution Metrics ==="
echo "Command: $COMMAND_NAME"
echo "Duration: ${DURATION_MS}ms"
echo "Agents Invoked: $AGENTS_INVOKED"
echo "Files Modified: $FILES_MODIFIED"
echo "Tests Run: $TESTS_RUN"
echo "Success: $SUCCESS"
# These metrics are automatically captured by the Stop hook
# which reads from session state and writes to telemetry.json
```
## Usage
### In Commands
```bash
# At start of command
SESSION_ID="${RANDOM}_${RANDOM}"  # placeholder value; in practice Claude Code generates the session ID
COMMAND_NAME="work"
START_TIME=$(date +%s%3N)
# During execution, agents are invoked
# SubagentStop hook automatically tracks them
# At end of command (Stop hook does this automatically)
END_TIME=$(date +%s%3N)
DURATION_MS=$((END_TIME - START_TIME))
# Stop hook reads session state and updates telemetry.json with:
# - command name
# - duration
# - agents invoked
# - success/failure
# - parallel execution (if applicable)
```
### For Meta-Learning
```bash
# Meta-learning commands can query telemetry for insights
# Include Query Telemetry for Patterns section
# Include Get Recommendations from History section
```

skills/test-runner.md (new file, 193 lines)

@@ -0,0 +1,193 @@
# Test Runner Skill
Universal test execution patterns for various testing frameworks.
## Auto-Detect Test Framework
```bash
# Detect which test framework is being used
if [ -f "package.json" ]; then
if grep -q "\"jest\"" package.json; then
TEST_FRAMEWORK="jest"
elif grep -q "\"vitest\"" package.json; then
TEST_FRAMEWORK="vitest"
elif grep -q "\"mocha\"" package.json; then
TEST_FRAMEWORK="mocha"
else
TEST_FRAMEWORK="npm"
fi
elif [ -f "Cargo.toml" ]; then
TEST_FRAMEWORK="cargo"
elif [ -f "go.mod" ]; then
TEST_FRAMEWORK="go"
elif [ -f "pytest.ini" ] || [ -f "pyproject.toml" ]; then
TEST_FRAMEWORK="pytest"
else
TEST_FRAMEWORK="unknown"
fi
echo "Detected test framework: $TEST_FRAMEWORK"
```
## Run All Tests
```bash
case "$TEST_FRAMEWORK" in
jest)
npm test || yarn test
;;
vitest)
npm run test || yarn test
;;
mocha)
npm test || yarn test
;;
npm)
npm test
;;
cargo)
cargo test
;;
go)
go test ./...
;;
pytest)
pytest
;;
*)
echo "Unknown test framework, attempting npm test..."
npm test
;;
esac
```
## Run Specific Test Suite
```bash
# Run unit tests
case "$TEST_FRAMEWORK" in
jest|vitest)
npm run test:unit || npx jest --testPathPattern=unit
;;
cargo)
cargo test --lib
;;
go)
go test ./... -run Unit
;;
pytest)
pytest tests/unit/
;;
esac
# Run integration tests
case "$TEST_FRAMEWORK" in
jest|vitest)
npm run test:integration || npx jest --testPathPattern=integration
;;
cargo)
cargo test --test integration
;;
go)
go test ./... -run Integration
;;
pytest)
pytest tests/integration/
;;
esac
# Run e2e tests
case "$TEST_FRAMEWORK" in
jest|vitest)
npm run test:e2e || npx jest --testPathPattern=e2e
;;
cargo)
cargo test --test e2e
;;
go)
go test ./... -run E2E
;;
pytest)
pytest tests/e2e/
;;
esac
```
## Test Coverage
```bash
case "$TEST_FRAMEWORK" in
jest)
npm run test:coverage || npx jest --coverage
;;
vitest)
npm run test:coverage || npx vitest --coverage
;;
cargo)
cargo tarpaulin --out Html
;;
go)
go test -cover ./...
;;
pytest)
pytest --cov=. --cov-report=html
;;
esac
echo "✓ Coverage report generated"
```
## Quality Checks
```bash
# Type checking
if [ -f "tsconfig.json" ]; then
npm run typecheck || npx tsc --noEmit
echo "✓ Type checking passed"
fi
# Linting
if [ -f ".eslintrc" ] || [ -f ".eslintrc.json" ] || grep -q "eslint" package.json; then
npm run lint || npx eslint .
echo "✓ Linting passed"
elif [ -f "Cargo.toml" ]; then
cargo clippy
echo "✓ Clippy passed"
elif [ -f "go.mod" ]; then
go vet ./...
command -v golint >/dev/null && golint ./...  # golint is deprecated; staticcheck is a common replacement
echo "✓ Go vet passed"
fi
# Formatting check
if [ -f ".prettierrc" ] || grep -q "prettier" package.json; then
npm run format:check || npx prettier --check .
echo "✓ Format check passed"
elif [ -f "Cargo.toml" ]; then
cargo fmt --check
echo "✓ Format check passed"
elif [ -f "go.mod" ]; then
if [ -n "$(gofmt -l .)" ]; then
echo "⚠️ gofmt found unformatted Go files"
else
echo "✓ Go format check passed"
fi
fi
```
## Usage
```bash
# From commands, set TEST_SCOPE then source appropriate sections:
TEST_SCOPE="unit" # or "integration", "e2e", "all"
# Auto-detect framework
# ... (include Auto-Detect section)
# Run tests
if [ "$TEST_SCOPE" = "all" ]; then
# ... (include Run All Tests section)
else
# ... (include Run Specific Test Suite section)
fi
# Run quality checks
# ... (include Quality Checks section)
```