Initial commit

This commit is contained in:
Zhongwei Li
2025-11-30 09:03:11 +08:00
commit 4aff69d9a9
61 changed files with 7343 additions and 0 deletions

387
hooks/after_deploy.sh Executable file

@@ -0,0 +1,387 @@
#!/usr/bin/env bash
# hooks/after_deploy.sh
set -euo pipefail
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
if [ "$LANG" = "ja" ]; then
echo "[after_deploy] スモークテスト & ロールバック準備..."
else
echo "[after_deploy] Smoke tests & rollback readiness..."
fi
DEPLOY_ENV="${DEPLOY_ENV:-production}"
DEPLOY_URL="${DEPLOY_URL:-https://app.example.com}"
ROLLOUT_STATUS_FILE="rollout-status.md"
if [ "$LANG" = "ja" ]; then
echo "→ デプロイ環境:$DEPLOY_ENV"
echo "→ デプロイURL$DEPLOY_URL"
else
echo "→ Deployment environment: $DEPLOY_ENV"
echo "→ Deployment URL: $DEPLOY_URL"
fi
# Wait for deployment to be ready
if [ "$LANG" = "ja" ]; then
echo "→ デプロイの安定化を待機中..."
else
echo "→ Waiting for deployment to stabilize..."
fi
sleep 10
# Smoke tests - Critical path validation
if [ "$LANG" = "ja" ]; then
echo "→ スモークテスト実行中..."
else
echo "→ Running smoke tests..."
fi
smoke_test_failed=false
# Test 1: Health endpoint
if [ "$LANG" = "ja" ]; then
echo " • ヘルスエンドポイントをテスト中..."
else
echo " • Testing health endpoint..."
fi
if curl -f -s --max-time 10 "$DEPLOY_URL/health" > /dev/null 2>&1; then
if [ "$LANG" = "ja" ]; then
echo " ✅ ヘルスエンドポイントが応答"
else
echo " ✅ Health endpoint responsive"
fi
else
if [ "$LANG" = "ja" ]; then
echo " ❌ ヘルスエンドポイントが失敗"
else
echo " ❌ Health endpoint failed"
fi
smoke_test_failed=true
fi
# Test 2: API endpoints
if [ "$LANG" = "ja" ]; then
echo " • APIエンドポイントをテスト中..."
else
echo " • Testing API endpoints..."
fi
if curl -f -s --max-time 10 "$DEPLOY_URL/api/status" > /dev/null 2>&1; then
if [ "$LANG" = "ja" ]; then
echo " ✅ APIエンドポイントが応答"
else
echo " ✅ API endpoints responsive"
fi
else
if [ "$LANG" = "ja" ]; then
echo " ❌ APIエンドポイントが失敗"
else
echo " ❌ API endpoints failed"
fi
smoke_test_failed=true
fi
# Test 3: Database connectivity
if [ "$LANG" = "ja" ]; then
echo " • データベース接続性をテスト中..."
else
echo " • Testing database connectivity..."
fi
if curl -f -s --max-time 10 "$DEPLOY_URL/api/db-check" > /dev/null 2>&1; then
if [ "$LANG" = "ja" ]; then
echo " ✅ データベース接続性を確認"
else
echo " ✅ Database connectivity verified"
fi
else
if [ "$LANG" = "ja" ]; then
echo " ❌ データベース接続性が失敗"
else
echo " ❌ Database connectivity failed"
fi
smoke_test_failed=true
fi
# Test 4: Authentication flow (if applicable)
if [ -n "${AUTH_TEST_TOKEN:-}" ]; then
if [ "$LANG" = "ja" ]; then
echo " • 認証をテスト中..."
else
echo " • Testing authentication..."
fi
if curl -f -s --max-time 10 -H "Authorization: Bearer $AUTH_TEST_TOKEN" "$DEPLOY_URL/api/me" > /dev/null 2>&1; then
if [ "$LANG" = "ja" ]; then
echo " ✅ 認証が機能"
else
echo " ✅ Authentication working"
fi
else
if [ "$LANG" = "ja" ]; then
echo " ❌ 認証が失敗"
else
echo " ❌ Authentication failed"
fi
smoke_test_failed=true
fi
fi
# Generate rollout status report
if [ "$LANG" = "ja" ]; then
echo "→ ロールアウトステータスレポート生成中..."
else
echo "→ Generating rollout status report..."
fi
if [ "$LANG" = "ja" ]; then
cat > "$ROLLOUT_STATUS_FILE" <<EOF
# デプロイロールアウトステータス
**環境:** $DEPLOY_ENV
**デプロイURL:** $DEPLOY_URL
**タイムスタンプ:** $(date -u +"%Y-%m-%d %H:%M:%S UTC")
**デプロイ実行者:** ${USER:-unknown}
**Gitコミット:** $(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
**Gitブランチ:** $(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
## スモークテスト結果
| テスト | ステータス |
|------|--------|
| ヘルスエンドポイント | $(curl -f -s --max-time 5 "$DEPLOY_URL/health" > /dev/null 2>&1 && echo "✅ 合格" || echo "❌ 不合格") |
| APIエンドポイント | $(curl -f -s --max-time 5 "$DEPLOY_URL/api/status" > /dev/null 2>&1 && echo "✅ 合格" || echo "❌ 不合格") |
| データベース接続性 | $(curl -f -s --max-time 5 "$DEPLOY_URL/api/db-check" > /dev/null 2>&1 && echo "✅ 合格" || echo "❌ 不合格") |
## ロールバック手順
問題が検出された場合、以下でロールバック:
\`\`\`bash
# Vercel
vercel rollback <deployment-url>
# Kubernetes
kubectl rollout undo deployment/<deployment-name> -n <namespace>
# Docker / Docker Compose
docker-compose down && git checkout <previous-commit> && docker-compose up -d
# Heroku
heroku releases:rollback -a <app-name>
\`\`\`
## 監視
- **ログ:** \`kubectl logs -f deployment/<name>\` またはロギングサービスを確認
- **メトリクス:** Datadog/Grafana/CloudWatchダッシュボードを確認
- **エラー:** Sentry/エラートラッキングサービスを監視
- **パフォーマンス:** レスポンスタイムとエラー率を確認
## 次のステップ
- [ ] 今後30分間エラー率を監視
- [ ] ユーザー向け機能を手動確認
- [ ] アナリティクス/トラッキングが機能していることを確認
- [ ] チームチャンネルでデプロイをアナウンス
- [ ] リリースノートを更新
---
**ステータス:** $(if [ "$smoke_test_failed" = false ]; then echo "✅ デプロイ成功"; else echo "❌ デプロイ失敗 - ロールバック検討"; fi)
EOF
else
cat > "$ROLLOUT_STATUS_FILE" <<EOF
# Deployment Rollout Status
**Environment:** $DEPLOY_ENV
**Deployment URL:** $DEPLOY_URL
**Timestamp:** $(date -u +"%Y-%m-%d %H:%M:%S UTC")
**Deployed By:** ${USER:-unknown}
**Git Commit:** $(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
**Git Branch:** $(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
## Smoke Test Results
| Test | Status |
|------|--------|
| Health Endpoint | $(curl -f -s --max-time 5 "$DEPLOY_URL/health" > /dev/null 2>&1 && echo "✅ Pass" || echo "❌ Fail") |
| API Endpoints | $(curl -f -s --max-time 5 "$DEPLOY_URL/api/status" > /dev/null 2>&1 && echo "✅ Pass" || echo "❌ Fail") |
| Database Connectivity | $(curl -f -s --max-time 5 "$DEPLOY_URL/api/db-check" > /dev/null 2>&1 && echo "✅ Pass" || echo "❌ Fail") |
## Rollback Procedure
If issues are detected, rollback using:
\`\`\`bash
# Vercel
vercel rollback <deployment-url>
# Kubernetes
kubectl rollout undo deployment/<deployment-name> -n <namespace>
# Docker / Docker Compose
docker-compose down && git checkout <previous-commit> && docker-compose up -d
# Heroku
heroku releases:rollback -a <app-name>
\`\`\`
## Monitoring
- **Logs:** \`kubectl logs -f deployment/<name>\` or check your logging service
- **Metrics:** Check Datadog/Grafana/CloudWatch dashboards
- **Errors:** Monitor Sentry/error tracking service
- **Performance:** Check response times and error rates
## Next Steps
- [ ] Monitor error rates for next 30 minutes
- [ ] Check user-facing features manually
- [ ] Verify analytics/tracking is working
- [ ] Announce deployment in team channel
- [ ] Update release notes
---
**Status:** $(if [ "$smoke_test_failed" = false ]; then echo "✅ Deployment Successful"; else echo "❌ Deployment Failed - Consider Rollback"; fi)
EOF
fi
if [ "$LANG" = "ja" ]; then
echo "✅ ロールアウトステータスレポートを生成:$ROLLOUT_STATUS_FILE"
else
echo "✅ Rollout status report generated: $ROLLOUT_STATUS_FILE"
fi
cat "$ROLLOUT_STATUS_FILE"
# Send notification (if Slack webhook is configured)
if [ -n "${SLACK_WEBHOOK_URL:-}" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ Slack通知を送信中..."
else
echo "→ Sending Slack notification..."
fi
STATUS_EMOJI=$(if [ "$smoke_test_failed" = false ]; then echo ":white_check_mark:"; else echo ":x:"; fi)
if [ "$LANG" = "ja" ]; then
STATUS_TEXT=$(if [ "$smoke_test_failed" = false ]; then echo "成功"; else echo "失敗"; fi)
else
STATUS_TEXT=$(if [ "$smoke_test_failed" = false ]; then echo "Successful"; else echo "Failed"; fi)
fi
if [ "$LANG" = "ja" ]; then
curl -X POST "$SLACK_WEBHOOK_URL" \
-H 'Content-Type: application/json' \
-d "{\"text\":\"$STATUS_EMOJI デプロイ $STATUS_TEXT\",\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"*デプロイ $STATUS_TEXT*\n*環境:* $DEPLOY_ENV\n*URL:* $DEPLOY_URL\n*コミット:* $(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')\"}}]}" \
> /dev/null 2>&1 || echo "⚠️ Slack通知の送信に失敗しました"
else
curl -X POST "$SLACK_WEBHOOK_URL" \
-H 'Content-Type: application/json' \
-d "{\"text\":\"$STATUS_EMOJI Deployment $STATUS_TEXT\",\"blocks\":[{\"type\":\"section\",\"text\":{\"type\":\"mrkdwn\",\"text\":\"*Deployment $STATUS_TEXT*\n*Environment:* $DEPLOY_ENV\n*URL:* $DEPLOY_URL\n*Commit:* $(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')\"}}]}" \
> /dev/null 2>&1 || echo "⚠️ Failed to send Slack notification"
fi
fi
# Voice notification (Theo announces deployment status)
VOICE_SCRIPT="$(dirname "$0")/../mcp-servers/play-voice.sh"
if [ -f "$VOICE_SCRIPT" ]; then
if [ "$smoke_test_failed" = false ]; then
"$VOICE_SCRIPT" "theo" "deployment" 2>/dev/null || true
fi
fi
# Auto-commit deployment verification results (Theo)
AUTO_COMMIT_SCRIPT="$(dirname "$0")/../mcp-servers/auto-commit.sh"
if [ -f "$AUTO_COMMIT_SCRIPT" ] && [ -x "$AUTO_COMMIT_SCRIPT" ] && [ "$smoke_test_failed" = false ]; then
"$AUTO_COMMIT_SCRIPT" \
"chore" \
"to track deployment state" \
"Complete post-deployment verification (smoke tests, rollout status)" \
"Theo" 2>/dev/null || true
fi
# Record deployment milestone to Memory Bank
if [ "$smoke_test_failed" = false ]; then
RECORD_MILESTONE_SCRIPT="$(dirname "$0")/../.orchestra/scripts/record-milestone.sh"
if [ -f "$RECORD_MILESTONE_SCRIPT" ] && [ -x "$RECORD_MILESTONE_SCRIPT" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ デプロイマイルストーンを記録中..."
else
echo "→ Recording deployment milestone..."
fi
COMMIT_HASH=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
DEPLOY_MILESTONE="Deploy to $DEPLOY_ENV"
DEPLOY_DESCRIPTION="Deployed commit $COMMIT_HASH to $DEPLOY_ENV environment at $DEPLOY_URL"
"$RECORD_MILESTONE_SCRIPT" \
"$DEPLOY_MILESTONE" \
"$DEPLOY_DESCRIPTION" \
"chore" \
"${USER:-unknown}" 2>/dev/null || true
# Update deployment history in Memory Bank
MEMORY_BANK_PATH="$HOME/.memory-bank/orchestra/progress.md"
if [ -f "$MEMORY_BANK_PATH" ]; then
# Check if Deployment History section exists
if ! grep -q "## Deployment History" "$MEMORY_BANK_PATH"; then
cat >> "$MEMORY_BANK_PATH" <<EOF
## Deployment History
| Date | Environment | Commit | Status |
|------|-------------|--------|--------|
EOF
fi
# Add deployment entry
DEPLOY_DATE=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
DEPLOY_ENTRY="| $DEPLOY_DATE | $DEPLOY_ENV | $COMMIT_HASH | ✅ Success |"
# Insert after table header
awk -v entry="$DEPLOY_ENTRY" '
/## Deployment History/ {
print
getline
print
getline
print
getline
print
print entry
next
}
{ print }
' "$MEMORY_BANK_PATH" > "${MEMORY_BANK_PATH}.tmp"
mv "${MEMORY_BANK_PATH}.tmp" "$MEMORY_BANK_PATH"
if [ "$LANG" = "ja" ]; then
echo "✅ デプロイ履歴を記録しました"
else
echo "✅ Deployment history recorded"
fi
fi
fi
fi
# Exit with error if smoke tests failed
if [ "$smoke_test_failed" = true ]; then
if [ "$LANG" = "ja" ]; then
echo "❌ スモークテストが失敗しました!デプロイのロールバックを検討してください。"
else
echo "❌ Smoke tests failed! Consider rolling back the deployment."
fi
exit 1
fi
if [ "$LANG" = "ja" ]; then
echo "✅ 全てのデプロイ後チェックが通過しました!"
else
echo "✅ All post-deployment checks passed!"
fi
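
As a usage sketch (not part of the committed file), the hook is driven entirely by environment variables, so it can be exercised locally against a staging target; the values below are placeholders.

```bash
# Hypothetical local run of the post-deploy hook (placeholder values).
DEPLOY_ENV=staging \
DEPLOY_URL="https://staging.example.com" \
ORCHESTRA_LANGUAGE=en \
bash hooks/after_deploy.sh
# Writes rollout-status.md; exits 1 if any smoke test fails.
```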

214
hooks/after_pr_merge.sh Executable file

@@ -0,0 +1,214 @@
#!/usr/bin/env bash
# hooks/after_pr_merge.sh
# Automatic PR merge recording hook
set -euo pipefail
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
RECORD_MILESTONE_SCRIPT="${SCRIPT_DIR}/../.orchestra/scripts/record-milestone.sh"
LOG_DIR="${SCRIPT_DIR}/../.orchestra/logs"
LOG_FILE="${LOG_DIR}/after-pr-merge.log"
MEMORY_BANK_PROJECT="orchestra"
DECISIONS_FILE="decisions.md"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Logging function (non-blocking)
log() {
local timestamp
timestamp=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
echo "[$timestamp] $*" >> "$LOG_FILE" 2>/dev/null || true
}
# Non-blocking execution wrapper
safe_execute() {
"$@" 2>> "$LOG_FILE" || log "WARNING: Command failed but continuing: $*"
}
log "=== PR Merge Hook Triggered ==="
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
# Try to extract PR information from environment or git
PR_NUMBER="${PR_NUMBER:-}"
PR_TITLE="${PR_TITLE:-}"
PR_DESCRIPTION="${PR_DESCRIPTION:-}"
PR_MERGER="${PR_MERGER:-$(git config user.name 2>/dev/null || echo "Unknown")}"
PR_BRANCH="${PR_BRANCH:-$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")}"
# If GitHub CLI is available, try to get PR info
if command -v gh &> /dev/null && [ -z "$PR_NUMBER" ]; then
log "Attempting to fetch PR info using GitHub CLI..."
# Get PR number from current branch
PR_INFO=$(gh pr view --json number,title,body,mergedBy 2>/dev/null || echo "")
if [ -n "$PR_INFO" ]; then
PR_NUMBER=$(echo "$PR_INFO" | grep -o '"number":[0-9]*' | grep -o '[0-9]*' || echo "")
PR_TITLE=$(echo "$PR_INFO" | grep -o '"title":"[^"]*"' | sed 's/"title":"//;s/"$//' || echo "")
PR_DESCRIPTION=$(echo "$PR_INFO" | grep -o '"body":"[^"]*"' | sed 's/"body":"//;s/"$//' || echo "")
PR_MERGER=$(echo "$PR_INFO" | grep -o '"login":"[^"]*"' | sed 's/"login":"//;s/"$//' || echo "$PR_MERGER")
log "PR info extracted from GitHub: #$PR_NUMBER - $PR_TITLE"
fi
fi
# Fallback: Use git log to infer merge information
if [ -z "$PR_TITLE" ]; then
log "Falling back to git log for PR information..."
# Try to get merge commit message
MERGE_COMMIT=$(git log -1 --merges --pretty=format:"%s" 2>/dev/null || echo "")
if [ -n "$MERGE_COMMIT" ]; then
# Extract PR number from merge commit (format: "Merge pull request #123 from branch")
PR_NUMBER=$(echo "$MERGE_COMMIT" | grep -oE "#[0-9]+" | grep -oE "[0-9]+" | head -1 || echo "")
PR_TITLE="$MERGE_COMMIT"
PR_DESCRIPTION="Merged from branch: $PR_BRANCH"
log "Merge commit found: $MERGE_COMMIT"
else
# No merge commit found, check if this is a squash merge
RECENT_COMMIT=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "")
if [ -n "$RECENT_COMMIT" ]; then
PR_TITLE="$RECENT_COMMIT"
PR_DESCRIPTION="Commit from branch: $PR_BRANCH"
log "Using recent commit as PR info: $RECENT_COMMIT"
else
log "No PR information available, skipping automatic recording"
exit 0
fi
fi
fi
# Determine tag from PR title or description
PR_TAG="feature"
if echo "$PR_TITLE" | grep -qiE "(fix|bug)"; then
PR_TAG="bugfix"
elif echo "$PR_TITLE" | grep -qiE "refactor"; then
PR_TAG="refactor"
elif echo "$PR_TITLE" | grep -qiE "(doc|docs)"; then
PR_TAG="docs"
elif echo "$PR_TITLE" | grep -qiE "test"; then
PR_TAG="test"
elif echo "$PR_TITLE" | grep -qiE "perf"; then
PR_TAG="perf"
elif echo "$PR_TITLE" | grep -qiE "chore"; then
PR_TAG="chore"
fi
log "PR Title: $PR_TITLE"
log "PR Description: $PR_DESCRIPTION"
log "PR Merger: $PR_MERGER"
log "PR Branch: $PR_BRANCH"
log "PR Tag: $PR_TAG"
# Record PR merge as milestone
if [ -f "$RECORD_MILESTONE_SCRIPT" ] && [ -x "$RECORD_MILESTONE_SCRIPT" ]; then
log "Recording PR merge as milestone..."
MILESTONE_NAME="PR"
if [ -n "$PR_NUMBER" ]; then
MILESTONE_NAME="PR #$PR_NUMBER"
fi
MILESTONE_NAME="$MILESTONE_NAME: $PR_TITLE"
safe_execute "$RECORD_MILESTONE_SCRIPT" \
"$MILESTONE_NAME" \
"$PR_DESCRIPTION" \
"$PR_TAG" \
"$PR_MERGER"
log "✅ PR merge recorded as milestone"
else
log "ERROR: record-milestone.sh not found or not executable at $RECORD_MILESTONE_SCRIPT"
exit 0 # Non-blocking
fi
# Update decisions.md if this PR contains important decisions
log "Checking for decision updates..."
MEMORY_BANK_PATH="$HOME/.memory-bank/$MEMORY_BANK_PROJECT/$DECISIONS_FILE"
CURRENT_DATE=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
COMMIT_HASH=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
# Check if PR description contains keywords indicating a decision
DECISION_KEYWORDS="(decision|decided|choose|chose|selected|approach|strategy|architecture)"
if echo "$PR_DESCRIPTION" | grep -qiE "$DECISION_KEYWORDS"; then
log "PR contains decision-related content, updating decisions.md..."
# Ensure decisions.md exists
if [ ! -f "$MEMORY_BANK_PATH" ]; then
mkdir -p "$(dirname "$MEMORY_BANK_PATH")"
cat > "$MEMORY_BANK_PATH" <<EOF
# Architecture Decision Records (ADR)
This file tracks important architectural and technical decisions made during the project.
## Decision Log
| Date | Decision | Context | Status | Related PR |
|------|----------|---------|--------|------------|
EOF
log "Created new decisions.md file"
fi
# Check if Decision Log section exists
if ! grep -q "## Decision Log" "$MEMORY_BANK_PATH"; then
cat >> "$MEMORY_BANK_PATH" <<EOF
## Decision Log
| Date | Decision | Context | Status | Related PR |
|------|----------|---------|--------|------------|
EOF
log "Added Decision Log section to decisions.md"
fi
# Prepare decision entry
DECISION_TITLE="$PR_TITLE"
DECISION_CONTEXT="$PR_DESCRIPTION"
DECISION_STATUS="Implemented"
PR_REFERENCE="PR #$PR_NUMBER (commit: $COMMIT_HASH)"
# Truncate long descriptions
if [ ${#DECISION_CONTEXT} -gt 100 ]; then
DECISION_CONTEXT="${DECISION_CONTEXT:0:100}..."
fi
DECISION_ENTRY="| $CURRENT_DATE | $DECISION_TITLE | $DECISION_CONTEXT | $DECISION_STATUS | $PR_REFERENCE |"
# Insert decision entry after table header
awk -v entry="$DECISION_ENTRY" '
/## Decision Log/ {
print
getline
print
getline
print
getline
print
print entry
next
}
{ print }
' "$MEMORY_BANK_PATH" > "${MEMORY_BANK_PATH}.tmp"
mv "${MEMORY_BANK_PATH}.tmp" "$MEMORY_BANK_PATH"
log "✅ Decision log updated in decisions.md"
fi
# Display completion message
if [ "$LANG" = "ja" ]; then
echo "[after_pr_merge] PRマージを記録しました: $PR_TITLE" >&2 || true
else
echo "[after_pr_merge] PR merge recorded: $PR_TITLE" >&2 || true
fi
log "=== PR Merge Hook Completed ==="
# Always exit successfully (non-blocking)
exit 0
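
Illustrative invocation only: when the GitHub CLI is unavailable, the hook falls back to the PR_* environment variables read near the top of the script. The values below are placeholders, not a real PR.

```bash
# Hypothetical manual run supplying PR metadata directly (placeholders).
PR_NUMBER=123 \
PR_TITLE="fix: handle empty cart on checkout" \
PR_DESCRIPTION="Decided to validate cart state server-side" \
PR_MERGER="Jane Doe" \
PR_BRANCH="fix/empty-cart" \
bash hooks/after_pr_merge.sh
# "fix" in the title maps to the bugfix tag; "Decided" in the description
# also triggers the decisions.md update path.
```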

125
hooks/after_task_complete.sh Executable file

@@ -0,0 +1,125 @@
#!/usr/bin/env bash
# hooks/after_task_complete.sh
# Automatic task completion recording hook
set -euo pipefail
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
RECORD_MILESTONE_SCRIPT="${SCRIPT_DIR}/../.orchestra/scripts/record-milestone.sh"
LOG_DIR="${SCRIPT_DIR}/../.orchestra/logs"
LOG_FILE="${LOG_DIR}/after-task-complete.log"
MEMORY_BANK_PROJECT="orchestra"
PROGRESS_FILE="progress.md"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Logging function (non-blocking)
log() {
local timestamp
timestamp=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
echo "[$timestamp] $*" >> "$LOG_FILE" 2>/dev/null || true
}
# Non-blocking execution wrapper
safe_execute() {
"$@" 2>> "$LOG_FILE" || log "WARNING: Command failed but continuing: $*"
}
log "=== Task Completion Hook Triggered ==="
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
# Try to detect completed task information from environment or recent activity
# Environment variables that might be set by TodoWrite or task systems
TASK_NAME="${COMPLETED_TASK_NAME:-}"
TASK_DESCRIPTION="${COMPLETED_TASK_DESCRIPTION:-}"
TASK_TAG="${COMPLETED_TASK_TAG:-chore}"
# If no task information is available, try to infer from git recent activity
if [ -z "$TASK_NAME" ]; then
# Check for recent git commits
RECENT_COMMIT=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "")
if [ -n "$RECENT_COMMIT" ]; then
TASK_NAME="Task: $RECENT_COMMIT"
TASK_DESCRIPTION="Completed via git commit"
# Infer tag from commit message prefix
if echo "$RECENT_COMMIT" | grep -qE "^feat:"; then
TASK_TAG="feature"
elif echo "$RECENT_COMMIT" | grep -qE "^fix:"; then
TASK_TAG="bugfix"
elif echo "$RECENT_COMMIT" | grep -qE "^refactor:"; then
TASK_TAG="refactor"
elif echo "$RECENT_COMMIT" | grep -qE "^docs:"; then
TASK_TAG="docs"
elif echo "$RECENT_COMMIT" | grep -qE "^test:"; then
TASK_TAG="test"
elif echo "$RECENT_COMMIT" | grep -qE "^perf:"; then
TASK_TAG="perf"
else
TASK_TAG="chore"
fi
else
log "No task information available, skipping automatic recording"
exit 0
fi
fi
log "Task Name: $TASK_NAME"
log "Task Description: $TASK_DESCRIPTION"
log "Task Tag: $TASK_TAG"
# Record milestone using the record-milestone.sh script
if [ -f "$RECORD_MILESTONE_SCRIPT" ] && [ -x "$RECORD_MILESTONE_SCRIPT" ]; then
log "Recording task completion as milestone..."
safe_execute "$RECORD_MILESTONE_SCRIPT" \
"$TASK_NAME" \
"$TASK_DESCRIPTION" \
"$TASK_TAG"
log "✅ Task completion recorded"
else
log "ERROR: record-milestone.sh not found or not executable at $RECORD_MILESTONE_SCRIPT"
exit 0 # Non-blocking - don't fail the hook
fi
# Update progress metrics in Memory Bank
log "Updating progress metrics..."
# Direct file access to Memory Bank
MEMORY_BANK_PATH="$HOME/.memory-bank/$MEMORY_BANK_PROJECT/$PROGRESS_FILE"
if [ -f "$MEMORY_BANK_PATH" ]; then
# Read current metrics
CURRENT_COMPLETED=$(grep "Total Tasks Completed" "$MEMORY_BANK_PATH" | grep -oE "[0-9]+" || echo "0")
NEW_COMPLETED=$((CURRENT_COMPLETED + 1))
# Update the count
safe_execute sed -i.bak "s/\*\*Total Tasks Completed\*\*: [0-9]*/\*\*Total Tasks Completed\*\*: $NEW_COMPLETED/" "$MEMORY_BANK_PATH"
# Update last updated timestamp
CURRENT_DATE=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
safe_execute sed -i.bak "s/\*\*Last Updated\*\*: .*/\*\*Last Updated\*\*: $CURRENT_DATE/" "$MEMORY_BANK_PATH"
# Clean up backup file
rm -f "${MEMORY_BANK_PATH}.bak" 2>/dev/null || true
log "✅ Progress metrics updated: $NEW_COMPLETED tasks completed"
else
log "WARNING: progress.md not found in Memory Bank, skipping metrics update"
fi
# Display completion message
if [ "$LANG" = "ja" ]; then
echo "[after_task_complete] タスク完了を記録しました: $TASK_NAME" >&2 || true
else
echo "[after_task_complete] Task completion recorded: $TASK_NAME" >&2 || true
fi
log "=== Task Completion Hook Completed ==="
# Always exit successfully (non-blocking)
exit 0
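
A minimal sketch of driving this hook from a task system via the COMPLETED_TASK_* variables it reads; without them it falls back to the latest git commit message. Values are illustrative.

```bash
# Hypothetical explicit invocation (placeholders); otherwise the hook infers
# the task from `git log -1` and its conventional-commit prefix.
COMPLETED_TASK_NAME="Add rate limiting to API gateway" \
COMPLETED_TASK_DESCRIPTION="Token bucket limiter with per-key quotas" \
COMPLETED_TASK_TAG="feature" \
bash hooks/after_task_complete.sh
```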

459
hooks/agent-routing-reminder.sh Executable file

@@ -0,0 +1,459 @@
#!/usr/bin/env bash
# Agent Auto-Routing Reminder Hook
# Analyzes user prompts and injects routing reminders for specialized agents
#
# This hook enables automatic agent invocation by detecting keywords
# and triggering appropriate specialist agents
set -euo pipefail
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
# Read JSON input from stdin
INPUT_JSON=$(cat)
# Extract user prompt from JSON
USER_PROMPT=$(echo "$INPUT_JSON" | jq -r '.prompt // empty' 2>/dev/null || echo "")
# If no prompt provided, exit silently
if [ -z "$USER_PROMPT" ]; then
exit 0
fi
# Convert to lowercase for case-insensitive matching
PROMPT_LOWER=$(echo "$USER_PROMPT" | tr '[:upper:]' '[:lower:]')
# Track if any agent was matched
AGENT_MATCHED=false
MATCHED_AGENTS=()
# --- Priority 1: Ambiguous Requirements → Riley ---
if echo "$PROMPT_LOWER" | grep -qE "(fast|faster|slow|slower|easy to use|intuitive|clean|simple|improve performance|optimize|better)"; then
MATCHED_AGENTS+=("Riley")
AGENT_MATCHED=true
fi
# --- Priority 2: Major Feature Addition → Alex ---
if echo "$PROMPT_LOWER" | grep -qE "(add new|build new|implement new|create new|新しい.*追加|新規.*作成|作りたい|作る|build|make|開発したい)"; then
if echo "$PROMPT_LOWER" | grep -qE "(system|feature|authentication|auth|認証|payment|決済|api|site|サイト|app|アプリ|website|ウェブサイト|service|サービス)"; then
MATCHED_AGENTS+=("Alex")
AGENT_MATCHED=true
fi
fi
# Authentication specifically triggers Alex + Iris
if echo "$PROMPT_LOWER" | grep -qE "(authentication|auth|login|認証|ログイン|oauth|jwt|session)"; then
if ! [[ " ${MATCHED_AGENTS[@]+"${MATCHED_AGENTS[@]}"} " =~ " Alex " ]]; then
MATCHED_AGENTS+=("Alex")
AGENT_MATCHED=true
fi
fi
# --- Priority 3: UI/UX → Nova ---
if echo "$PROMPT_LOWER" | grep -qE "(ui|dashboard|ダッシュボード|component|コンポーネント|form|フォーム|design|デザイン|layout|responsive|accessibility|a11y|lighthouse|portfolio|ポートフォリオ|landing.*page|ランディング.*ページ|website|ウェブサイト|site.*design|サイト.*デザイン)"; then
MATCHED_AGENTS+=("Nova")
AGENT_MATCHED=true
fi
# --- Priority 4: Database → Leo ---
if echo "$PROMPT_LOWER" | grep -qE "(database|データベース|table|テーブル|schema|スキーマ|migration|マイグレーション|column|カラム|index|インデックス|rls)"; then
MATCHED_AGENTS+=("Leo")
AGENT_MATCHED=true
fi
# --- Priority 5: External Integration → Mina ---
if echo "$PROMPT_LOWER" | grep -qE "(stripe|paypal|shopify|aws|gcp|azure|oauth|webhook|api integration|統合)"; then
MATCHED_AGENTS+=("Mina")
AGENT_MATCHED=true
fi
# --- Priority 6: Architecture → Kai ---
if echo "$PROMPT_LOWER" | grep -qE "(architecture|アーキテクチャ|refactor|リファクタ|design pattern|adr|technical decision)"; then
MATCHED_AGENTS+=("Kai")
AGENT_MATCHED=true
fi
# --- Priority 7: Security → Iris ---
if echo "$PROMPT_LOWER" | grep -qE "(security|セキュリティ|secret|シークレット|vulnerability|脆弱性|encryption|暗号化|auth|oauth|jwt|token|password|secure)"; then
MATCHED_AGENTS+=("Iris")
AGENT_MATCHED=true
fi
# --- Priority 8: Testing & QA → Finn ---
if echo "$PROMPT_LOWER" | grep -qE "(test|テスト|unit test|統合テスト|e2e|e2e test|coverage|カバレッジ|flaky|failing|jest|playwright|cypress|quality|qa|validate|benchmark)"; then
MATCHED_AGENTS+=("Finn")
AGENT_MATCHED=true
fi
# --- Priority 9: Documentation → Eden ---
if echo "$PROMPT_LOWER" | grep -qE "(documentation|ドキュメント|readme|guide|ガイド|handbook|runbook|adr|onboarding|knowledge|wiki|技術仕様書)"; then
MATCHED_AGENTS+=("Eden")
AGENT_MATCHED=true
fi
# --- Priority 10: Deployment & Release → Blake ---
if echo "$PROMPT_LOWER" | grep -qE "(deploy|デプロイ|release|リリース|version|バージョン|hotfix|hotfix|rollback|ロールバック|production|本番|staging|merge|pull request)"; then
MATCHED_AGENTS+=("Blake")
AGENT_MATCHED=true
fi
# --- Priority 11: Operations & Monitoring → Theo ---
if echo "$PROMPT_LOWER" | grep -qE "(monitoring|モニタリング|logs|ログ|metrics|メトリクス|alert|アラート|incident|インシデント|performance|パフォーマンス|latency|error|reliability|uptime)"; then
MATCHED_AGENTS+=("Theo")
AGENT_MATCHED=true
fi
# --- Priority 12: Code Implementation → Skye (when clear specs) ---
if echo "$PROMPT_LOWER" | grep -qE "(implement|実装|write|書く|code|コード|fix bug|バグ修正|refactor|リファクタ|optimize|最適化)"; then
# Check if requirements seem clear (no ambiguity words)
if ! echo "$PROMPT_LOWER" | grep -qE "(how should|どのように|what's the best|最善|vague|曖昧)"; then
MATCHED_AGENTS+=("Skye")
AGENT_MATCHED=true
fi
fi
# --- Default: If no specific agent matched, route to Riley (Requirements Clarifier) ---
if [ "$AGENT_MATCHED" = false ]; then
MATCHED_AGENTS+=("Riley")
AGENT_MATCHED=true
fi
# If any agents matched, output routing reminder as context for Claude
if [ "$AGENT_MATCHED" = true ]; then
# Build context message based on language
if [ "$LANG" = "ja" ]; then
CONTEXT=$(cat <<EOF
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🎭 エージェント自動ルーティングリマインダー
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⚠️ 重要:専門領域を検出しました。
📋 マッチしたエージェント:${MATCHED_AGENTS[*]+"${MATCHED_AGENTS[*]}"}
🚨 必須アクション:
EOF
)
# Add agent-specific instructions in Japanese
for agent in "${MATCHED_AGENTS[@]}"; do
case $agent in
"Riley")
CONTEXT+=$(cat <<EOF
• Riley(要件明確化担当):曖昧・主観的な表現を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:🧐 Riley" を呼び出す
→ 理由:「速い」「遅い」「より良い」などの用語は具体的な基準が必要
EOF
)
;;
"Alex")
CONTEXT+=$(cat <<EOF
• Alex(プロジェクト指揮者):大規模機能追加を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:🙂 Alex" を呼び出す
→ 理由:新システムにはスコープ定義と調整が必要
EOF
)
;;
"Nova")
CONTEXT+=$(cat <<EOF
• Nova(UI/UX スペシャリスト):ユーザーインターフェース作業を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😄 Nova" を呼び出す
→ 理由:UI/UXにはアクセシビリティ、パフォーマンス、デザインの専門知識が必要
EOF
)
;;
"Leo")
CONTEXT+=$(cat <<EOF
• Leo(データベースアーキテクト):データベーススキーマ作業を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😌 Leo" を呼び出す
→ 理由:スキーマ変更には適切な設計、マイグレーション、RLSポリシーが必要
EOF
)
;;
"Mina")
CONTEXT+=$(cat <<EOF
• Mina(統合スペシャリスト):外部サービス統合を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😊 Mina" を呼び出す
→ 理由:統合には安全な設定とエラーハンドリングが必要
EOF
)
;;
"Kai")
CONTEXT+=$(cat <<EOF
• Kai(システムアーキテクト):アーキテクチャ判断を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:🤔 Kai" を呼び出す
→ 理由:アーキテクチャ変更には設計レビューとADRドキュメントが必要
EOF
)
;;
"Iris")
CONTEXT+=$(cat <<EOF
• Iris(セキュリティ監査官):セキュリティ重要作業を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:🤨 Iris" を呼び出す
→ 理由:セキュリティには脆弱性とシークレット処理の監査が必要
EOF
)
;;
"Finn")
CONTEXT+=$(cat <<EOF
• Finn(QAテストスペシャリスト):テスト関連作業を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😤 Finn" を呼び出す
→ 理由:テストにはカバレッジ、フレーク対策、パフォーマンス検証が必要
EOF
)
;;
"Eden")
CONTEXT+=$(cat <<EOF
• Eden(ドキュメント担当):ドキュメント作成を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:🤓 Eden" を呼び出す
→ 理由:技術ドキュメントにはREADME、ADR、ガイドの作成が必要
EOF
)
;;
"Blake")
CONTEXT+=$(cat <<EOF
• Blake(リリースマネージャー):デプロイ・リリース作業を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😎 Blake" を呼び出す
→ 理由:デプロイには管理された本番環境へのリリースが必要
EOF
)
;;
"Theo")
CONTEXT+=$(cat <<EOF
• Theo(オペレーションスペシャリスト):運用監視作業を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😬 Theo" を呼び出す
→ 理由:監視にはログ、メトリクス、アラート、インシデント対応が必要
EOF
)
;;
"Skye")
CONTEXT+=$(cat <<EOF
• Skye(コード実装者):実装タスク(仕様が明確)を検出
→ 直ちに実行:Taskツールで subagent_type="orchestra:😐 Skye" を呼び出す
→ 理由:仕様が明確な実装にはプロダクションレベルのコードが必要
EOF
)
;;
esac
done
CONTEXT+=$(cat <<EOF
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
❌ 禁止事項:
- ルーティングルールを確認せずにAskUserQuestionを使用
- 自分でコードベースを探索開始
- TodoWriteを作成して自分で処理
✅ 代わりにこれを実行:
1. Taskツールを使用して適切なエージェントを呼び出す
2. 専門エージェントに作業を任せる
3. 彼らの出力をレビューし、次のステップを調整
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EOF
)
else
CONTEXT=$(cat <<EOF
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🎭 AGENT AUTO-ROUTING REMINDER
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⚠️ CRITICAL: Detected specialized domain in user request.
📋 Matched Agents: ${MATCHED_AGENTS[*]+"${MATCHED_AGENTS[*]}"}
🚨 MANDATORY ACTION REQUIRED:
EOF
)
# Add agent-specific instructions in English
for agent in "${MATCHED_AGENTS[@]}"; do
case $agent in
"Riley")
CONTEXT+=$(cat <<EOF
• Riley (Clarifier): User request contains ambiguous/subjective language
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:🧐 Riley"
→ Reason: Terms like 'fast', 'slow', 'better' require specific criteria
EOF
)
;;
"Alex")
CONTEXT+=$(cat <<EOF
• Alex (Project Conductor): Major feature addition detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:🙂 Alex"
→ Reason: New systems need scope definition and coordination
EOF
)
;;
"Nova")
CONTEXT+=$(cat <<EOF
• Nova (UI/UX Specialist): User interface work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😄 Nova"
→ Reason: UI/UX requires accessibility, performance, and design expertise
EOF
)
;;
"Leo")
CONTEXT+=$(cat <<EOF
• Leo (Database Architect): Database schema work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😌 Leo"
→ Reason: Schema changes need proper design, migrations, and RLS policies
EOF
)
;;
"Mina")
CONTEXT+=$(cat <<EOF
• Mina (Integration Specialist): External service integration detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😊 Mina"
→ Reason: Integrations require secure config and error handling
EOF
)
;;
"Kai")
CONTEXT+=$(cat <<EOF
• Kai (System Architect): Architectural decision detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:🤔 Kai"
→ Reason: Architecture changes need design review and ADR documentation
EOF
)
;;
"Iris")
CONTEXT+=$(cat <<EOF
• Iris (Security Auditor): Security-critical work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:🤨 Iris"
→ Reason: Security requires audit for vulnerabilities and secret handling
EOF
)
;;
"Finn")
CONTEXT+=$(cat <<EOF
• Finn (QA & Testing Specialist): Test-related work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😤 Finn"
→ Reason: Testing requires coverage, flake prevention, and performance validation
EOF
)
;;
"Eden")
CONTEXT+=$(cat <<EOF
• Eden (Documentation Lead): Documentation work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:🤓 Eden"
→ Reason: Technical docs require README, ADR, guides, and knowledge sharing
EOF
)
;;
"Blake")
CONTEXT+=$(cat <<EOF
• Blake (Release Manager): Deployment & release work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😎 Blake"
→ Reason: Deployments require managed production releases and versioning
EOF
)
;;
"Theo")
CONTEXT+=$(cat <<EOF
• Theo (Ops & Monitoring Specialist): Operations work detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😬 Theo"
→ Reason: Monitoring requires logs, metrics, alerts, and incident response
EOF
)
;;
"Skye")
CONTEXT+=$(cat <<EOF
• Skye (Code Implementer): Implementation work with clear specs detected
→ IMMEDIATELY invoke: Task tool with subagent_type="orchestra:😐 Skye"
→ Reason: Well-defined implementations need production-ready code
EOF
)
;;
esac
done
CONTEXT+=$(cat <<EOF
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
❌ DO NOT:
- Use AskUserQuestion without checking routing rules
- Start exploring codebase yourself
- Create TodoWrite and handle it yourself
✅ DO THIS INSTEAD:
1. Use the Task tool to invoke the appropriate agent(s)
2. Let the specialist agent handle the work
3. Review their output and coordinate next steps
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EOF
)
fi
# Output JSON format for Claude's context
cat <<EOF
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit",
"additionalContext": $(echo "$CONTEXT" | jq -Rs .)
}
}
EOF
fi
# Always approve (exit 0) - we're just adding reminders, not blocking
exit 0
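
The hook consumes the UserPromptSubmit JSON on stdin and emits an additionalContext object; a quick local check might look like this (prompt text is illustrative).

```bash
# Feed a sample UserPromptSubmit payload and inspect the routing context.
echo '{"prompt": "Please add a new authentication system with OAuth login"}' \
  | bash hooks/agent-routing-reminder.sh \
  | jq -r '.hookSpecificOutput.additionalContext'
# Expected to match Alex (new system), Mina (oauth), and Iris (auth).
```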

38
hooks/before_code_write.sh Executable file

@@ -0,0 +1,38 @@
#!/bin/bash
# Before Code Write Hook
# Enforces Test-First Development
# Checks if tests exist before allowing code changes
ORCHESTRA_CONFIG=".orchestra/config.json"
# Check if Orchestra workflow is enabled
if [ ! -f "$ORCHESTRA_CONFIG" ]; then
exit 0
fi
# Get enforce test first setting
ENFORCE_TEST_FIRST=$(jq -r '.workflow.enforceTestFirst // false' "$ORCHESTRA_CONFIG" 2>/dev/null || echo "false")
if [ "$ENFORCE_TEST_FIRST" = "true" ]; then
CHANGED_FILE="$1"
# Only check source files, not tests or config
if [[ "$CHANGED_FILE" =~ ^src/ ]] && [[ ! "$CHANGED_FILE" =~ \.test\. ]]; then
# Derive expected test file
TEST_FILE=$(echo "$CHANGED_FILE" | sed 's/^src/tests/' | sed 's/\.ts$/.test.ts/' | sed 's/\.js$/.test.js/')
if [ ! -f "$TEST_FILE" ]; then
echo "⚠️ Test-First Development Reminder"
echo " No test file found for: $CHANGED_FILE"
echo " Expected test file: $TEST_FILE"
echo ""
echo " Recommendation: Create the test file first before implementing the feature."
echo ""
# In lenient mode, just warn; don't block
exit 0
fi
fi
fi
exit 0
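
For illustration, with test-first enforcement enabled the hook maps a source path to its expected test path and warns when the latter is missing; the config snippet and file name below are hypothetical.

```bash
# Assumes .orchestra/config.json contains: {"workflow": {"enforceTestFirst": true}}
bash hooks/before_code_write.sh src/utils/parser.ts
# Warns if tests/utils/parser.test.ts is missing; always exits 0 (lenient mode).
```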

241
hooks/before_deploy.sh Executable file

@@ -0,0 +1,241 @@
#!/usr/bin/env bash
# hooks/before_deploy.sh
set -euo pipefail
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
if [ "$LANG" = "ja" ]; then
echo "[before_deploy] 環境変数チェック、マイグレーションドライラン、ヘルスチェック実行中..."
else
echo "[before_deploy] Checking env vars, migrations dry-run, health..."
fi
DEPLOY_ENV="${DEPLOY_ENV:-production}"
if [ "$LANG" = "ja" ]; then
echo "→ デプロイ対象環境:$DEPLOY_ENV"
else
echo "→ Deployment target: $DEPLOY_ENV"
fi
# Environment variable validation
if [ "$LANG" = "ja" ]; then
echo "→ 必須環境変数の検証中..."
else
echo "→ Validating required environment variables..."
fi
REQUIRED_VARS=(
"DATABASE_URL"
"API_KEY"
# Add your required env vars here
)
missing_vars=()
for var in "${REQUIRED_VARS[@]}"; do
if [ -z "${!var:-}" ]; then
missing_vars+=("$var")
fi
done
if [ ${#missing_vars[@]} -gt 0 ]; then
if [ "$LANG" = "ja" ]; then
echo "❌ 必須環境変数が不足しています:"
else
echo "❌ Missing required environment variables:"
fi
printf ' - %s\n' "${missing_vars[@]}"
exit 1
fi
if [ "$LANG" = "ja" ]; then
echo "✅ 全ての必須環境変数が設定されています"
else
echo "✅ All required environment variables are set"
fi
# Database migration dry-run
if [ -f "package.json" ] && grep -q "prisma" package.json; then
if [ "$LANG" = "ja" ]; then
echo "→ Prismaマイグレーションドライラン実行中..."
else
echo "→ Running Prisma migration dry-run..."
fi
npx prisma migrate deploy --dry-run || {
if [ "$LANG" = "ja" ]; then
echo "❌ データベースマイグレーションドライランが失敗しました。デプロイ前にマイグレーションを確認してください。"
else
echo "❌ Database migration dry-run failed. Please review migrations before deploying."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "✅ Prismaマイグレーションが検証されました"
else
echo "✅ Prisma migrations validated"
fi
elif [ -f "manage.py" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ Djangoマイグレーションチェック実行中..."
else
echo "→ Running Django migration check..."
fi
python manage.py migrate --check || {
if [ "$LANG" = "ja" ]; then
echo "❌ Djangoマイグレーションが適用されていません。デプロイ前にマイグレーションを確認してください。"
else
echo "❌ Django migrations are not applied. Please review migrations before deploying."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "✅ Djangoマイグレーションが検証されました"
else
echo "✅ Django migrations validated"
fi
elif command -v alembic &> /dev/null && [ -f "alembic.ini" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ Alembicマイグレーションチェック実行中..."
else
echo "→ Running Alembic migration check..."
fi
alembic check || {
if [ "$LANG" = "ja" ]; then
echo "❌ Alembicマイグレーションが最新ではありません。デプロイ前にマイグレーションを確認してください。"
else
echo "❌ Alembic migrations are not up to date. Please review migrations before deploying."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "✅ Alembicマイグレーションが検証されました"
else
echo "✅ Alembic migrations validated"
fi
else
if [ "$LANG" = "ja" ]; then
echo " データベースマイグレーションシステムが検出されませんでした。マイグレーションチェックをスキップします。"
else
echo " No database migration system detected. Skipping migration check."
fi
fi
# Health check for staging/production services
if [ "$DEPLOY_ENV" != "development" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ デプロイ前ヘルスチェック実行中..."
else
echo "→ Performing pre-deployment health check..."
fi
# Check if staging/production API is accessible
HEALTH_URL="${HEALTH_CHECK_URL:-https://api.example.com/health}"
if command -v curl &> /dev/null; then
if curl -f -s --max-time 10 "$HEALTH_URL" > /dev/null; then
if [ "$LANG" = "ja" ]; then
echo "✅ 現在のデプロイは正常です:$HEALTH_URL"
else
echo "✅ Current deployment is healthy: $HEALTH_URL"
fi
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ 警告:現在のデプロイのヘルスチェックが失敗しました"
echo " URL$HEALTH_URL"
echo " 続行しますか? (y/N)"
else
echo "⚠️ Warning: Health check failed for current deployment"
echo " URL: $HEALTH_URL"
echo " Continue? (y/N)"
fi
read -r response
if [[ ! "$response" =~ ^[Yy]$ ]]; then
exit 1
fi
fi
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ curlが利用できません。ヘルスチェックをスキップします。"
else
echo "⚠️ curl not available. Skipping health check."
fi
fi
fi
# Build validation
if [ -f "package.json" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ 本番ビルドの検証中..."
else
echo "→ Validating production build..."
fi
npm run build || {
if [ "$LANG" = "ja" ]; then
echo "❌ 本番ビルドが失敗しました。"
else
echo "❌ Production build failed."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "✅ 本番ビルドが成功しました"
else
echo "✅ Production build successful"
fi
fi
# Container image security scan (if using Docker)
if [ -f "Dockerfile" ] && command -v trivy &> /dev/null; then
if [ "$LANG" = "ja" ]; then
echo "→ Dockerイメージの脆弱性スキャン中..."
else
echo "→ Scanning Docker image for vulnerabilities..."
fi
docker build -t pre-deploy-check:latest . > /dev/null
trivy image --severity HIGH,CRITICAL --exit-code 1 pre-deploy-check:latest || {
if [ "$LANG" = "ja" ]; then
echo "❌ Dockerイメージに重大な脆弱性が見つかりました。"
else
echo "❌ Critical vulnerabilities found in Docker image."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "✅ Dockerイメージセキュリティスキャンが通過しました"
else
echo "✅ Docker image security scan passed"
fi
fi
# Voice notification (Iris announces security checks completion)
VOICE_SCRIPT="$(dirname "$0")/../mcp-servers/play-voice.sh"
if [ -f "$VOICE_SCRIPT" ]; then
"$VOICE_SCRIPT" "iris" "deployment validation" 2>/dev/null || true
fi
if [ "$LANG" = "ja" ]; then
echo "✅ 全てのデプロイ前チェックが通過しました!$DEPLOY_ENV へのデプロイ準備完了"
else
echo "✅ All pre-deployment checks passed! Ready to deploy to $DEPLOY_ENV"
fi
# Auto-commit deployment validation results (Iris)
AUTO_COMMIT_SCRIPT="$(dirname "$0")/../mcp-servers/auto-commit.sh"
if [ -f "$AUTO_COMMIT_SCRIPT" ] && [ -x "$AUTO_COMMIT_SCRIPT" ]; then
"$AUTO_COMMIT_SCRIPT" \
"chore" \
"to validate deployment security" \
"Pass pre-deployment checks (env, migrations, health, build, security scan)" \
"Iris" 2>/dev/null || true
fi
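
An illustrative pre-deploy run; DATABASE_URL and API_KEY are the entries in REQUIRED_VARS above, and every value here is a placeholder.

```bash
# Hypothetical invocation against a staging target (placeholder values).
DEPLOY_ENV=staging \
DATABASE_URL="postgres://user:pass@localhost:5432/app" \
API_KEY="dummy-key" \
HEALTH_CHECK_URL="https://staging.example.com/health" \
bash hooks/before_deploy.sh
# Prompts for confirmation if the health check against HEALTH_CHECK_URL fails.
```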

136
hooks/before_merge.sh Executable file

@@ -0,0 +1,136 @@
#!/usr/bin/env bash
# hooks/before_merge.sh
set -euo pipefail
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
if [ "$LANG" = "ja" ]; then
echo "[before_merge] 統合/E2E/Lighthouse実行中..."
else
echo "[before_merge] Running integration/E2E/Lighthouse..."
fi
# E2E tests with Playwright
if [ -f "playwright.config.ts" ] || [ -f "playwright.config.js" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ Playwright E2Eテスト実行中..."
else
echo "→ Running Playwright E2E tests..."
fi
npx playwright test --reporter=list || {
if [ "$LANG" = "ja" ]; then
echo "❌ Playwrightテストが失敗しました。マージ前に失敗したE2Eテストを修正してください。"
else
echo "❌ Playwright tests failed. Please fix failing E2E tests before merging."
fi
exit 1
}
# Generate HTML report for review
if [ "$LANG" = "ja" ]; then
echo "→ Playwrightテストレポート生成中..."
else
echo "→ Generating Playwright test report..."
fi
npx playwright show-report --host 127.0.0.1 &
if [ "$LANG" = "ja" ]; then
echo " レポート閲覧URLhttp://127.0.0.1:9323"
else
echo " Report available at: http://127.0.0.1:9323"
fi
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ Playwrightが設定されていません。E2Eテストをスキップします。"
echo " セットアップnpm init playwright@latest"
else
echo "⚠️ Playwright not configured. Skipping E2E tests."
echo " Setup: npm init playwright@latest"
fi
fi
# Lighthouse CI for performance/accessibility/SEO checks
if [ -f "lighthouserc.json" ] || [ -f ".lighthouserc.json" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ Lighthouse CI実行中..."
else
echo "→ Running Lighthouse CI..."
fi
# Start dev server in background if needed
if command -v lhci &> /dev/null; then
lhci autorun || {
if [ "$LANG" = "ja" ]; then
echo "❌ Lighthouse CIが失敗しました。パフォーマンス/アクセシビリティ/SEOチェックが基準を満たしていません。"
else
echo "❌ Lighthouse CI failed. Performance/accessibility/SEO checks did not meet thresholds."
fi
exit 1
}
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ Lighthouse CIがインストールされていません。パフォーマンスチェックをスキップします。"
echo " インストールnpm install -g @lhci/cli"
else
echo "⚠️ Lighthouse CI not installed. Skipping performance checks."
echo " Install: npm install -g @lhci/cli"
fi
fi
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ Lighthouse CIが設定されていません。パフォーマンス/アクセシビリティ/SEOチェックをスキップします。"
echo " セットアップlighthouserc.jsonを作成してください"
else
echo "⚠️ Lighthouse CI not configured. Skipping performance/accessibility/SEO checks."
echo " Setup: Create lighthouserc.json with your configuration"
fi
fi
# Optional: Visual regression testing with Percy or similar
if [ -n "${PERCY_TOKEN:-}" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ ビジュアルリグレッションテスト実行中..."
else
echo "→ Running visual regression tests..."
fi
npx percy exec -- npx playwright test || {
if [ "$LANG" = "ja" ]; then
echo "❌ ビジュアルリグレッションテストが失敗しました。"
else
echo "❌ Visual regression tests failed."
fi
exit 1
}
else
if [ "$LANG" = "ja" ]; then
echo " Percyが設定されていません。ビジュアルリグレッションテストをスキップします。"
else
echo " Percy not configured. Skipping visual regression tests."
fi
fi
# Voice notification (Eden announces integration tests completion)
VOICE_SCRIPT="$(dirname "$0")/../mcp-servers/play-voice.sh"
if [ -f "$VOICE_SCRIPT" ]; then
"$VOICE_SCRIPT" "eden" "integration tests" 2>/dev/null || true
fi
if [ "$LANG" = "ja" ]; then
echo "✅ 全てのマージ前チェックが通過しました!"
else
echo "✅ All pre-merge checks passed!"
fi
# Auto-commit integration test results (Eden)
AUTO_COMMIT_SCRIPT="$(dirname "$0")/../mcp-servers/auto-commit.sh"
if [ -f "$AUTO_COMMIT_SCRIPT" ] && [ -x "$AUTO_COMMIT_SCRIPT" ]; then
"$AUTO_COMMIT_SCRIPT" \
"test" \
"to validate integration quality" \
"Pass integration tests (E2E, Lighthouse CI, visual regression)" \
"Eden" 2>/dev/null || true
fi
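
The Lighthouse CI step only runs when a lighthouserc.json is present; a minimal config along these lines (an assumption, not part of this commit) would enable it for a static build output.

```bash
# Sketch of a minimal Lighthouse CI config; adjust staticDistDir to your build dir.
cat > lighthouserc.json <<'EOF'
{
  "ci": {
    "collect": { "staticDistDir": "./dist" },
    "assert": { "preset": "lighthouse:recommended" }
  }
}
EOF
```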

184
hooks/before_pr.sh Executable file

@@ -0,0 +1,184 @@
#!/usr/bin/env bash
# hooks/before_pr.sh
set -euo pipefail
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
if [ "$LANG" = "ja" ]; then
echo "[before_pr] リント/型チェック/テスト/シークレット/SBOM実行中..."
else
echo "[before_pr] Running lint/type/tests/secret/sbom..."
fi
# Sync documentation to Memory Bank before PR
SYNC_SCRIPT="$(dirname "$0")/../.orchestra/scripts/sync-to-memory-bank.sh"
if [ -f "$SYNC_SCRIPT" ] && [ -x "$SYNC_SCRIPT" ]; then
if [ "$LANG" = "ja" ]; then
echo ""
echo "[before_pr] Memory Bankへドキュメントを同期中..."
else
echo ""
echo "[before_pr] Syncing documentation to Memory Bank..."
fi
if "$SYNC_SCRIPT"; then
if [ "$LANG" = "ja" ]; then
echo "✅ Memory Bank同期完了"
else
echo "✅ Memory Bank sync completed"
fi
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ Memory Bank同期が失敗しましたが、PR作成は続行します"
else
echo "⚠️ Memory Bank sync failed, but continuing with PR creation"
fi
fi
echo ""
fi
# Detect project type and run appropriate checks
if [ -f "package.json" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ ESLint実行中..."
else
echo "→ Running ESLint..."
fi
npx eslint . --ext .js,.jsx,.ts,.tsx --max-warnings 0 || {
if [ "$LANG" = "ja" ]; then
echo "❌ ESLintが失敗しました。PR作成前にリントエラーを修正してください。"
else
echo "❌ ESLint failed. Please fix linting errors before creating PR."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "→ TypeScriptコンパイラ実行中..."
else
echo "→ Running TypeScript compiler..."
fi
npx tsc --noEmit || {
if [ "$LANG" = "ja" ]; then
echo "❌ TypeScriptコンパイルが失敗しました。PR作成前に型エラーを修正してください。"
else
echo "❌ TypeScript compilation failed. Please fix type errors before creating PR."
fi
exit 1
}
if [ "$LANG" = "ja" ]; then
echo "→ テスト実行中..."
else
echo "→ Running tests..."
fi
npm test -- --passWithNoTests || {
if [ "$LANG" = "ja" ]; then
echo "❌ テストが失敗しました。PR作成前に全てのテストが通ることを確認してください。"
else
echo "❌ Tests failed. Please ensure all tests pass before creating PR."
fi
exit 1
}
fi
if [ -f "pytest.ini" ] || [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then
if [ "$LANG" = "ja" ]; then
echo "→ pytest実行中..."
else
echo "→ Running pytest..."
fi
pytest --maxfail=1 --disable-warnings -q || {
if [ "$LANG" = "ja" ]; then
echo "❌ Pytestが失敗しました。PR作成前に失敗したテストを修正してください。"
else
echo "❌ Pytest failed. Please fix failing tests before creating PR."
fi
exit 1
}
fi
# Secret scanning with TruffleHog
if command -v trufflehog &> /dev/null; then
if [ "$LANG" = "ja" ]; then
echo "→ TruffleHogシークレットスキャン実行中..."
else
echo "→ Running TruffleHog secret scan..."
fi
trufflehog git file://. --since-commit HEAD~1 --only-verified --fail || {
if [ "$LANG" = "ja" ]; then
echo "❌ シークレットが検出されましたPR作成前にシークレットを削除してください。"
else
echo "❌ Secret detected! Please remove secrets before creating PR."
fi
exit 1
}
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ TruffleHogがインストールされていません。シークレットスキャンをスキップします。インストールbrew install trufflehog"
else
echo "⚠️ TruffleHog not installed. Skipping secret scan. Install: brew install trufflehog"
fi
fi
# SBOM generation and vulnerability scanning with Syft + Grype
if command -v syft &> /dev/null && command -v grype &> /dev/null; then
if [ "$LANG" = "ja" ]; then
echo "→ SyftでSBOM生成中..."
else
echo "→ Generating SBOM with Syft..."
fi
syft dir:. -o cyclonedx-json > sbom.json
if [ "$LANG" = "ja" ]; then
echo "→ Grypeで脆弱性スキャン中..."
else
echo "→ Scanning vulnerabilities with Grype..."
fi
grype sbom:sbom.json --fail-on medium || {
if [ "$LANG" = "ja" ]; then
echo "❌ 脆弱性が検出されましたPR作成前にセキュリティ問題に対処してください。"
else
echo "❌ Vulnerabilities detected! Please address security issues before creating PR."
fi
exit 1
}
else
if [ "$LANG" = "ja" ]; then
echo "⚠️ Syft/Grypeがインストールされていません。SBOM & 脆弱性スキャンをスキップします。"
echo " インストールbrew install syft grype"
else
echo "⚠️ Syft/Grype not installed. Skipping SBOM & vulnerability scan."
echo " Install: brew install syft grype"
fi
fi
# Voice notification (Eden announces QA completion)
VOICE_SCRIPT="$(dirname "$0")/../mcp-servers/play-voice.sh"
if [ -f "$VOICE_SCRIPT" ]; then
"$VOICE_SCRIPT" "eden" "pre-PR checks" 2>/dev/null || true
fi
if [ "$LANG" = "ja" ]; then
echo "✅ 全てのPR前チェックが通過しました"
else
echo "✅ All pre-PR checks passed!"
fi
# Auto-commit QA validation results (Eden)
AUTO_COMMIT_SCRIPT="$(dirname "$0")/../mcp-servers/auto-commit.sh"
if [ -f "$AUTO_COMMIT_SCRIPT" ] && [ -x "$AUTO_COMMIT_SCRIPT" ]; then
"$AUTO_COMMIT_SCRIPT" \
"test" \
"to ensure code quality" \
"Pass pre-PR quality checks (lint, type, test, secrets, vulnerabilities)" \
"Eden" 2>/dev/null || true
fi

89
hooks/before_task.sh Executable file

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# hooks/before_task.sh
# Non-interactive task clarity reminder
set -euo pipefail
# Allow disabling via environment variable for troubleshooting
if [ "${ORCHESTRA_DISABLE_PROMPT_HOOKS:-0}" = "1" ] || [ "${ORCHESTRA_DISABLE_TASK_HOOK:-0}" = "1" ]; then
exit 0
fi
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
# Read JSON input from stdin
INPUT_JSON=$(cat)
# Extract prompt from JSON
USER_PROMPT=$(echo "$INPUT_JSON" | jq -r '.prompt // empty' 2>/dev/null || echo "")
# Skip if no prompt (shouldn't happen in UserPromptSubmit)
if [ -z "$USER_PROMPT" ]; then
cat <<EOF
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit"
}
}
EOF
exit 0
fi
# Only show reminder for substantial requests (skip questions or very short asks)
PROMPT_LOWER=$(echo "$USER_PROMPT" | tr '[:upper:]' '[:lower:]')
if echo "$PROMPT_LOWER" | grep -qE "(what|how|why|show|explain|tell).*\?"; then
cat <<EOF
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit"
}
}
EOF
exit 0
fi
if [ "$(echo "$PROMPT_LOWER" | wc -w | tr -d ' ')" -lt 6 ]; then
cat <<EOF
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit"
}
}
EOF
exit 0
fi
# Build concise reminder text
TASK_FILE=".claude/current-task.md"
case "$LANG" in
"ja")
CONTEXT=$'💡 タスク開始前チェック\n- 完了基準\n- スコープ\n- テスト方法\n'
if echo "$PROMPT_LOWER" | grep -qE "(fast|faster|slow|slower|easy|simple|clean|better|improve|optimize)"; then
CONTEXT+=$'⚠️ 曖昧な用語あり:必要なら Riley に相談。\n'
fi
if [ -f "$TASK_FILE" ]; then
CONTEXT+=$"📋 参照: $TASK_FILE\n"
fi
;;
*)
CONTEXT=$'💡 Task readiness check\n- Acceptance criteria\n- Scope & boundaries\n- Test plan\n'
if echo "$PROMPT_LOWER" | grep -qE "(fast|faster|slow|slower|easy|simple|clean|better|improve|optimize)"; then
CONTEXT+=$'⚠️ Subjective wording spotted—consider looping in Riley.\n'
fi
if [ -f "$TASK_FILE" ]; then
CONTEXT+=$"📋 Reference: $TASK_FILE\n"
fi
;;
esac
# Output JSON format for Claude's context
cat <<EOF
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit",
"additionalContext": $(echo "$CONTEXT" | jq -Rs .)
}
}
EOF
# Always approve - this is just informational
exit 0
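
Like the routing reminder, this hook reads the UserPromptSubmit JSON from stdin; an intentionally vague prompt (illustrative) surfaces the subjective-wording note.

```bash
echo '{"prompt": "Please make the dashboard loading feel faster and simpler for users"}' \
  | bash hooks/before_task.sh \
  | jq -r '.hookSpecificOutput.additionalContext'
# "faster" / "simpler" trip the subjective-wording check in the output.
```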

82
hooks/hooks.json Normal file

@@ -0,0 +1,82 @@
{
"hooks": {
"UserPromptSubmit": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/before_task.sh",
"description": "Task Clarity Reminder: Suggests best practices for well-defined tasks"
}
]
}
],
"PreToolUse": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/user-prompt-submit.sh",
"description": "Safety Guard: Blocks dangerous operations (rm -rf, system files, etc.)"
},
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/pre-tool-use-compliance-checker.sh",
"description": "Routing Compliance: Verifies Task tool is called first when agent routing is required"
}
]
},
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/workflow-dispatcher.sh",
"description": "Workflow Quality Gates: Routes PR/merge/deploy commands to appropriate validation hooks"
}
]
}
],
"PostToolUse": [
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/workflow-post-dispatcher.sh",
"description": "Post-Workflow Validation: Runs smoke tests and validation after deployments"
}
]
},
{
"matcher": "TodoWrite",
"hooks": [
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/post_code_write.sh",
"description": "Progress Tracker Integration: Updates progress tracking and displays progress in chat"
},
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/after_task_complete.sh",
"description": "Task Completion Recording: Records task completion to Memory Bank and updates progress metrics"
}
]
}
],
"SessionStart": [
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "bash /Users/tstomtimes/Documents/GitHub/orchestra/hooks/session-start.sh",
"description": "Welcome message for Orchestra Plugin"
}
]
}
]
}
}
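
A quick structural sanity check of this config (illustrative):

```bash
jq -c '.hooks | keys' hooks/hooks.json
# ["PostToolUse","PreToolUse","SessionStart","UserPromptSubmit"]
```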

107
hooks/post_code_write.sh Executable file

@@ -0,0 +1,107 @@
#!/bin/bash
# Post Code Write Hook
# Runs after TodoWrite tool usage
# Performs: Progress tracking update, display, auto-linting, code formatting
set -e
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="${PROJECT_ROOT:-$( cd "$SCRIPT_DIR/.." && pwd )}"
# Function to update progress data
update_progress_data() {
# Extract TodoWrite parameters (if available)
# Claude Code may pass tool parameters via CLAUDE_TOOL_PARAMS env var or stdin
local tool_params="${CLAUDE_TOOL_PARAMS:-}"
# If CLAUDE_TOOL_PARAMS is not set, try reading from stdin (non-blocking)
if [ -z "$tool_params" ] && [ ! -t 0 ]; then
# Read from stdin if available
tool_params=$(timeout 0.1 cat 2>/dev/null || echo "")
fi
# Update progress data if we have parameters
if [ -n "$tool_params" ] && [ "$tool_params" != "{}" ]; then
if [ -f "$PROJECT_ROOT/hooks/progress-tracker-update.sh" ]; then
echo "$tool_params" | bash "$PROJECT_ROOT/hooks/progress-tracker-update.sh" || true
fi
fi
}
# Function to display progress tracker output
display_progress_tracking() {
# Run the progress display hook
if [ -f "$PROJECT_ROOT/hooks/progress-tracker-display.sh" ]; then
bash "$PROJECT_ROOT/hooks/progress-tracker-display.sh"
fi
}
# Function to run linting/formatting
run_code_quality_checks() {
ORCHESTRA_CONFIG=".orchestra/config.json"
if [ ! -f "$ORCHESTRA_CONFIG" ]; then
return 0
fi
CHANGED_FILE="$1"
AUTO_LINT=$(jq -r '.workflow.autoLint // false' "$ORCHESTRA_CONFIG" 2>/dev/null || echo "false")
AUTO_FIX_LINT=$(jq -r '.workflow.autoFixLint // false' "$ORCHESTRA_CONFIG" 2>/dev/null || echo "false")
if [ "$AUTO_LINT" = "true" ] || [ "$AUTO_FIX_LINT" = "true" ]; then
echo "🔧 Running linting and formatting..."
# TypeScript/JavaScript files
if [[ "$CHANGED_FILE" =~ \.(ts|tsx|js|jsx)$ ]]; then
if command -v eslint &> /dev/null; then
if [ "$AUTO_FIX_LINT" = "true" ]; then
echo " Applying ESLint fixes..."
eslint --fix "$CHANGED_FILE" 2>/dev/null || true
else
echo " Checking with ESLint..."
eslint "$CHANGED_FILE" 2>/dev/null || true
fi
fi
if command -v prettier &> /dev/null; then
echo " Applying Prettier formatting..."
prettier --write "$CHANGED_FILE" 2>/dev/null || true
fi
fi
# Python files
if [[ "$CHANGED_FILE" =~ \.py$ ]]; then
if command -v black &> /dev/null; then
echo " Applying Black formatting..."
black "$CHANGED_FILE" 2>/dev/null || true
fi
if command -v isort &> /dev/null; then
echo " Sorting imports with isort..."
isort "$CHANGED_FILE" 2>/dev/null || true
fi
fi
echo "✅ Linting and formatting complete"
fi
}
# Main execution
# 1. Update progress data first (before display)
update_progress_data
# 2. Display updated progress
display_progress_tracking
# 3. Export progress for external monitoring
if [ -f "$PROJECT_ROOT/hooks/progress-tracker-export.sh" ]; then
bash "$PROJECT_ROOT/hooks/progress-tracker-export.sh" &> /dev/null || true
fi
# 4. Run code quality checks if file path provided
if [ -n "$1" ]; then
run_code_quality_checks "$1"
fi
exit 0
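
As a sketch, the linting/formatting branch is gated on the Orchestra config keys read above and on the file path passed as the first argument; both the config snippet and the path below are hypothetical.

```bash
# Enable auto-fix linting for the hook (illustrative config).
mkdir -p .orchestra
cat > .orchestra/config.json <<'EOF'
{ "workflow": { "autoLint": true, "autoFixLint": true } }
EOF
bash hooks/post_code_write.sh src/components/Button.tsx
# Runs eslint --fix and prettier --write on the file if those tools are installed.
```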


@@ -0,0 +1,80 @@
#!/usr/bin/env bash
# Agent Routing Compliance Checker
# Enforces agent routing rules by checking if Task tool was called first
#
# This hook ensures Claude follows the mandatory routing workflow
set -euo pipefail
# Allow disabling via environment variable for troubleshooting
if [ "${ORCHESTRA_DISABLE_PROMPT_HOOKS:-0}" = "1" ] || [ "${ORCHESTRA_DISABLE_ROUTING_HOOK:-0}" = "1" ]; then
exit 0
fi
# jq is required to inspect tool payload; if unavailable, skip
if ! command -v jq >/dev/null 2>&1; then
exit 0
fi
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
# Read JSON input from stdin
INPUT_JSON=$(cat)
# Extract tool details from JSON
TOOL_NAME=$(echo "$INPUT_JSON" | jq -r '.tool_name // empty' 2>/dev/null || echo "")
# Get the routing flag for this process
TEMP_DIR="${TMPDIR:-/tmp}"
ROUTING_FLAG="$TEMP_DIR/orchestra_routing_required"
NOTICE_FILE="$TEMP_DIR/orchestra_routing_notified"
# Check if routing reminder is active
if [ -f "$ROUTING_FLAG" ]; then
REQUIRED_AGENT=$(cat "$ROUTING_FLAG")
# If routing reminder is active and tool is NOT Task, warn Claude
if [ "$TOOL_NAME" != "Task" ]; then
if [ ! -f "$NOTICE_FILE" ]; then
if [ "$LANG" = "ja" ]; then
cat <<EOF
💡 まず Task ツールで subagent_type="orchestra:$REQUIRED_AGENT" を呼び出すとスムーズです。
エージェントからの対応を受け取った後に他のツールを使ってください。
EOF
else
cat <<EOF
💡 Start with Task tool using subagent_type="orchestra:$REQUIRED_AGENT" for smoother coordination.
Follow-up tools are fine after that agent's response.
EOF
fi
echo "$REQUIRED_AGENT" > "$NOTICE_FILE"
fi
exit 0
else
# Task tool was used - check if it's the correct agent
SUBAGENT_TYPE=$(echo "$INPUT_JSON" | jq -r '.tool_input.subagent_type // empty' 2>/dev/null || echo "")
if echo "$SUBAGENT_TYPE" | grep -q "$REQUIRED_AGENT"; then
# Correct agent called - clear the flag
rm -f "$ROUTING_FLAG" "$NOTICE_FILE"
if [ "$LANG" = "ja" ]; then
echo "✅ コンプライアンスチェック通過:正しいエージェントが呼び出されました"
else
echo "✅ Compliance check passed: Correct agent invoked"
fi
else
# Wrong agent - warn
if [ "$LANG" = "ja" ]; then
echo "⚠️ subagent_type に \"$REQUIRED_AGENT\" を含めて呼び出してください。"
else
echo "⚠️ Please include \"$REQUIRED_AGENT\" in subagent_type for the Task call."
fi
fi
fi
fi
# Always approve (we're just adding warnings, not blocking)
exit 0
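
The PreToolUse payload shape this checker expects follows from its jq filters; a simulated compliant call (flag content and values are placeholders) would be:

```bash
# Simulate a pending routing requirement, then a compliant Task call.
echo "🧐 Riley" > "${TMPDIR:-/tmp}/orchestra_routing_required"
echo '{"tool_name": "Task", "tool_input": {"subagent_type": "orchestra:🧐 Riley"}}' \
  | bash hooks/pre-tool-use-compliance-checker.sh
# Prints the compliance-passed message and clears the routing flag file.
```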


@@ -0,0 +1,65 @@
#!/bin/bash
# Pre-Commit Sync Validator Hook
# Validates documentation-code-test synchronization before commits
# Checks Sync Score against configured threshold
ORCHESTRA_CONFIG=".orchestra/config.json"
SYNC_STATE=".orchestra/sync-state.json"
if [ ! -f "$ORCHESTRA_CONFIG" ]; then
exit 0
fi
VALIDATE_ON_COMMIT=$(jq -r '.workflow.validateOnCommit // false' "$ORCHESTRA_CONFIG" 2>/dev/null || echo "false")
if [ "$VALIDATE_ON_COMMIT" != "true" ]; then
exit 0
fi
SYNC_THRESHOLD=$(jq -r '.workflow.syncThreshold // 70' "$ORCHESTRA_CONFIG" 2>/dev/null || echo "70")
BLOCK_ON_FAILURE=$(jq -r '.quality.blockCommitOnFailure // false' "$ORCHESTRA_CONFIG" 2>/dev/null || echo "false")
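# Illustrative .orchestra/config.json fields read above (values are examples only;
# the real defaults are the jq fallbacks):
#   { "workflow": { "validateOnCommit": true, "syncThreshold": 80 },
#     "quality":  { "blockCommitOnFailure": false } }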
echo "🔍 Running Sync Validation..."
# Run sync validator if it exists
if [ -f ".orchestra/scripts/sync-validator.ts" ]; then
if command -v ts-node &> /dev/null; then
ts-node ".orchestra/scripts/sync-validator.ts" > /dev/null 2>&1
elif command -v npx &> /dev/null; then
npx ts-node ".orchestra/scripts/sync-validator.ts" > /dev/null 2>&1
fi
fi
# Check sync state
if [ -f "$SYNC_STATE" ]; then
SYNC_SCORE=$(jq -r '.syncScore // 0 | tonumber? // 0 | floor' "$SYNC_STATE" 2>/dev/null || echo "0")
echo "📊 Sync Score: $SYNC_SCORE / 100 (Threshold: $SYNC_THRESHOLD)"
if [ "$SYNC_SCORE" -lt "$SYNC_THRESHOLD" ]; then
echo ""
echo "⚠️ Sync Score is below threshold!"
echo ""
# Check for issues in requirements
ISSUES=$(jq -r '.requirements[] | select(.warnings != null) | "\(.id): " + (.warnings | join(", "))' "$SYNC_STATE" 2>/dev/null)
if [ ! -z "$ISSUES" ]; then
echo "Issues detected:"
echo "$ISSUES" | sed 's/^/ - /'
echo ""
fi
if [ "$BLOCK_ON_FAILURE" = "true" ]; then
echo "❌ Commit blocked due to low Sync Score"
exit 1
else
echo "⚠️ Proceeding (Sync validation not blocking in lenient mode)"
fi
else
echo "✅ Sync validation passed"
fi
fi
exit 0

241
hooks/progress-tracker-display.sh Executable file

@@ -0,0 +1,241 @@
#!/bin/bash
# Orchestra Progress Tracker - Enhanced Display
# Version: 2.0.0
# Shows rich formatted progress in chat after TodoWrite updates
set +e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/.." && pwd)}"
PROGRESS_FILE="$PROJECT_ROOT/.orchestra/cache/progress.json"
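# Assumed progress.json layout, inferred from the jq paths used by the
# progress-tracker scripts (illustrative, not a full schema):
#   { "metadata": { "totalTasks": 3, "completedTasks": 1, "inProgressTasks": 1,
#                   "pendingTasks": 1, "completionRate": 33,
#                   "sessionStartTime": 0, "lastUpdateTime": 0,
#                   "currentAgent": "Skye", "activeAgents": ["Skye"] },
#     "todos": [ { "id": "1", "content": "...", "activeForm": "...",
#                  "status": "in_progress", "agent": "Skye", "startTime": 0,
#                  "currentStep": null, "totalSteps": null, "tags": [] } ] }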
# Source utility library
if [ -f "$SCRIPT_DIR/lib/progress-utils.sh" ]; then
source "$SCRIPT_DIR/lib/progress-utils.sh"
else
echo "Warning: progress-utils.sh not found, skipping progress display" >&2
exit 0
fi
# Check if progress file exists
if [ ! -f "$PROGRESS_FILE" ]; then
# No progress to display
exit 0
fi
# Check if jq is available
if ! command -v jq &> /dev/null; then
echo "Warning: jq not found, cannot display progress" >&2
exit 0
fi
# Get metadata
get_metadata() {
local field="$1"
jq -r ".metadata.$field // 0" "$PROGRESS_FILE" 2>/dev/null || echo "0"
}
# Display compact progress view
display_compact() {
local total=$(get_metadata "totalTasks")
local completed=$(get_metadata "completedTasks")
local in_progress=$(get_metadata "inProgressTasks")
local pending=$(get_metadata "pendingTasks")
local completion_rate=$(get_metadata "completionRate")
local active_agents_count=$(jq -r '.metadata.activeAgents | length' "$PROGRESS_FILE" 2>/dev/null || echo "0")
# Skip display if no tasks
if [ "$total" -eq 0 ]; then
return 0
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo -e "🎯 ${COLOR_BOLD}PROGRESS${COLOR_RESET} | ${active_agents_count} agent(s) | ${completion_rate}% complete"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Display active (in-progress) tasks with agent info
if [ "$in_progress" -gt 0 ]; then
jq -r '.todos[] | select(.status == "in_progress") | @json' "$PROGRESS_FILE" 2>/dev/null | while IFS= read -r task_json; do
local agent=$(echo "$task_json" | jq -r '.agent // "Unknown"')
local content=$(echo "$task_json" | jq -r '.content')
local active_form=$(echo "$task_json" | jq -r '.activeForm')
local start_time=$(echo "$task_json" | jq -r '.startTime // 0')
local current_step=$(echo "$task_json" | jq -r '.currentStep // null')
local total_steps=$(echo "$task_json" | jq -r '.totalSteps // null')
# Calculate elapsed time
local current_time=$(get_timestamp_ms)
local elapsed=$((current_time - start_time))
local duration=$(format_duration "$elapsed")
# Get agent emoji
local emoji=$(get_agent_emoji "$agent")
# Calculate progress percentage
local progress_pct=0
if [ "$current_step" != "null" ] && [ "$total_steps" != "null" ] && [ "$total_steps" -gt 0 ]; then
progress_pct=$((current_step * 100 / total_steps))
else
# Default to 50% if no step info
progress_pct=50
fi
# Format progress bar
local progress_bar=$(format_progress_bar "$progress_pct")
# Truncate content if too long
local display_content="$content"
if [ ${#display_content} -gt 50 ]; then
display_content="${display_content:0:47}..."
fi
# Display task line
echo -e "${COLOR_YELLOW}${emoji} ${agent}${COLOR_RESET} ${progress_bar} ${progress_pct}% ${display_content} (${duration})"
done
echo ""
fi
# Display summary line
echo -e "${COLOR_GREEN}$SYMBOL_COMPLETED ${completed}${COLOR_RESET} ${COLOR_YELLOW}$SYMBOL_IN_PROGRESS ${in_progress}${COLOR_RESET} ${COLOR_GRAY}$SYMBOL_PENDING ${pending}${COLOR_RESET}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
}
# Display detailed progress view (verbose mode)
display_detailed() {
local total=$(get_metadata "totalTasks")
local completed=$(get_metadata "completedTasks")
local in_progress=$(get_metadata "inProgressTasks")
local pending=$(get_metadata "pendingTasks")
local completion_rate=$(get_metadata "completionRate")
local session_start=$(get_metadata "sessionStartTime")
# Skip display if no tasks
if [ "$total" -eq 0 ]; then
echo "No tasks tracked yet."
return 0
fi
# Calculate session duration
local current_time=$(get_timestamp_ms)
local session_duration=$((current_time - session_start))
local session_duration_str=$(format_duration "$session_duration")
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo -e "${COLOR_BOLD}🎯 ORCHESTRA PROGRESS TRACKER${COLOR_RESET}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Session: $session_duration_str | Overall: $(format_progress_bar "$completion_rate" 20) $completion_rate% ($completed/$total tasks)"
echo ""
# Active agents section
local active_agents=$(jq -r '.metadata.activeAgents[]' "$PROGRESS_FILE" 2>/dev/null)
if [ -n "$active_agents" ]; then
local agent_count=$(echo "$active_agents" | wc -l | tr -d ' ')
echo -e "${COLOR_BOLD}👥 Active Agents ($agent_count)${COLOR_RESET}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Display each active agent's tasks
echo "$active_agents" | while IFS= read -r agent; do
local emoji=$(get_agent_emoji "$agent")
# Get in-progress tasks for this agent
jq -r --arg agent "$agent" '.todos[] | select(.status == "in_progress" and .agent == $agent) | @json' "$PROGRESS_FILE" 2>/dev/null | while IFS= read -r task_json; do
local content=$(echo "$task_json" | jq -r '.content')
local start_time=$(echo "$task_json" | jq -r '.startTime // 0')
local current_step=$(echo "$task_json" | jq -r '.currentStep // null')
local total_steps=$(echo "$task_json" | jq -r '.totalSteps // null')
local tags=$(echo "$task_json" | jq -r '.tags // [] | join(", ")')
# Calculate elapsed time
local current_time=$(get_timestamp_ms)
local elapsed=$((current_time - start_time))
local duration=$(format_duration "$elapsed")
# Calculate progress
local progress_pct=0
local step_info=""
if [ "$current_step" != "null" ] && [ "$total_steps" != "null" ] && [ "$total_steps" -gt 0 ]; then
progress_pct=$((current_step * 100 / total_steps))
step_info=" (Step $current_step/$total_steps)"
fi
local progress_bar=$(format_progress_bar "$progress_pct")
echo -e "${COLOR_YELLOW}${emoji} ${agent}${COLOR_RESET}"
echo " Task: $content"
echo " Progress: ${progress_bar} ${progress_pct}%${step_info}"
echo " Duration: ${duration}"
if [ -n "$tags" ]; then
echo " Tags: $tags"
fi
echo ""
done
done
fi
# Task summary section
echo -e "${COLOR_BOLD}📋 Task Summary${COLOR_RESET}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Completed tasks
if [ "$completed" -gt 0 ]; then
echo -e "${COLOR_GREEN}$SYMBOL_COMPLETED Completed ($completed tasks)${COLOR_RESET}"
jq -r '.todos[] | select(.status == "completed") | " - " + .content' "$PROGRESS_FILE" 2>/dev/null | head -5
if [ "$completed" -gt 5 ]; then
echo " ... and $((completed - 5)) more"
fi
echo ""
fi
# In-progress tasks
if [ "$in_progress" -gt 0 ]; then
echo -e "${COLOR_YELLOW}$SYMBOL_IN_PROGRESS In Progress ($in_progress tasks)${COLOR_RESET}"
jq -r '.todos[] | select(.status == "in_progress") | " - " + .content' "$PROGRESS_FILE" 2>/dev/null
echo ""
fi
# Pending tasks
if [ "$pending" -gt 0 ]; then
echo -e "${COLOR_GRAY}$SYMBOL_PENDING Pending ($pending tasks)${COLOR_RESET}"
jq -r '.todos[] | select(.status == "pending") | " - " + .content' "$PROGRESS_FILE" 2>/dev/null | head -5
if [ "$pending" -gt 5 ]; then
echo " ... and $((pending - 5)) more"
fi
echo ""
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
}
# Main execution
main() {
# Check for verbose flag
local verbose=false
if [ "$1" = "--verbose" ] || [ "$1" = "-v" ]; then
verbose=true
fi
# Check ORCHESTRA_PROGRESS_VERBOSE environment variable
if [ "${ORCHESTRA_PROGRESS_VERBOSE:-0}" = "1" ]; then
verbose=true
fi
# Display appropriate view
if [ "$verbose" = true ]; then
display_detailed
else
display_compact
fi
}
# Run main function
main "$@"
exit 0

172
hooks/progress-tracker-export.sh Executable file

@@ -0,0 +1,172 @@
#!/bin/bash
# Orchestra Progress Tracker - Export Script
# Version: 2.0.0
# Exports formatted progress to .orchestra/cache/progress-status.txt for external monitoring
set +e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/.." && pwd)}"
PROGRESS_FILE="$PROJECT_ROOT/.orchestra/cache/progress.json"
STATUS_FILE="$PROJECT_ROOT/.orchestra/cache/progress-status.txt"
# Source utility library
if [ -f "$SCRIPT_DIR/lib/progress-utils.sh" ]; then
source "$SCRIPT_DIR/lib/progress-utils.sh"
fi
# Check if progress file exists
if [ ! -f "$PROGRESS_FILE" ]; then
# Create empty status file
echo "No progress data available" > "$STATUS_FILE"
exit 0
fi
# Check if jq is available
if ! command -v jq &> /dev/null; then
echo "jq not available - cannot export progress" > "$STATUS_FILE"
exit 0
fi
# Get metadata
get_metadata() {
local field="$1"
jq -r ".metadata.$field // 0" "$PROGRESS_FILE" 2>/dev/null || echo "0"
}
# Export formatted progress
export_progress() {
local total=$(get_metadata "totalTasks")
local completed=$(get_metadata "completedTasks")
local in_progress=$(get_metadata "inProgressTasks")
local pending=$(get_metadata "pendingTasks")
local completion_rate=$(get_metadata "completionRate")
local session_start=$(get_metadata "sessionStartTime")
local last_update=$(get_metadata "lastUpdateTime")
# Skip if no tasks
if [ "$total" -eq 0 ]; then
cat > "$STATUS_FILE" << 'EOF'
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🎯 ORCHESTRA PROGRESS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
No tasks tracked yet.
Start working to see progress here!
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EOF
return 0
fi
# Calculate session duration
local current_time=$(get_timestamp_ms)
local session_duration=$((current_time - session_start))
local session_duration_str=$(format_duration "$session_duration")
# Time since last update
local update_elapsed=$((current_time - last_update))
local update_elapsed_str=$(format_duration "$update_elapsed")
# Start building output
{
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🎯 ORCHESTRA PROGRESS TRACKER"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Session: $session_duration_str | Last update: ${update_elapsed_str} ago"
echo "Overall: $(format_progress_bar "$completion_rate" 20) $completion_rate% ($completed/$total tasks)"
echo ""
# Active agents section
local active_agents=$(jq -r '.metadata.activeAgents[]' "$PROGRESS_FILE" 2>/dev/null)
if [ -n "$active_agents" ]; then
local agent_count=$(echo "$active_agents" | wc -l | tr -d ' ')
echo "👥 Active Agents ($agent_count)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Display each active agent's tasks
echo "$active_agents" | while IFS= read -r agent; do
local emoji=$(get_agent_emoji "$agent")
# Get in-progress tasks for this agent
jq -r --arg agent "$agent" '.todos[] | select(.status == "in_progress" and .agent == $agent) | @json' "$PROGRESS_FILE" 2>/dev/null | while IFS= read -r task_json; do
local content=$(echo "$task_json" | jq -r '.content')
local start_time=$(echo "$task_json" | jq -r '.startTime // 0')
local current_step=$(echo "$task_json" | jq -r '.currentStep // null')
local total_steps=$(echo "$task_json" | jq -r '.totalSteps // null')
# Calculate elapsed time
local current_time=$(get_timestamp_ms)
local elapsed=$((current_time - start_time))
local duration=$(format_duration "$elapsed")
# Calculate progress
local progress_pct=0
local step_info=""
if [ "$current_step" != "null" ] && [ "$total_steps" != "null" ] && [ "$total_steps" -gt 0 ]; then
progress_pct=$((current_step * 100 / total_steps))
step_info=" (Step $current_step/$total_steps)"
else
progress_pct=50
fi
local progress_bar=$(format_progress_bar "$progress_pct")
echo "${emoji} ${agent} ${progress_bar} ${progress_pct}%${step_info}"
echo " ${content}"
echo " Duration: ${duration}"
echo ""
done
done
fi
# Task summary
echo "📋 Task Summary"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "✅ Completed: $completed"
echo "⚡ In Progress: $in_progress"
echo "⏳ Pending: $pending"
echo ""
# Show recent completed tasks (last 3)
if [ "$completed" -gt 0 ]; then
echo "Recent completions:"
jq -r '.todos[] | select(.status == "completed") | " - " + .content' "$PROGRESS_FILE" 2>/dev/null | tail -3
echo ""
fi
# Show all in-progress tasks
if [ "$in_progress" -gt 0 ]; then
echo "Currently working on:"
jq -r '.todos[] | select(.status == "in_progress") | " - " + .content' "$PROGRESS_FILE" 2>/dev/null
echo ""
fi
# Show next pending tasks (up to 3)
if [ "$pending" -gt 0 ]; then
echo "Coming up next:"
jq -r '.todos[] | select(.status == "pending") | " - " + .content' "$PROGRESS_FILE" 2>/dev/null | head -3
if [ "$pending" -gt 3 ]; then
echo " ... and $((pending - 3)) more"
fi
echo ""
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Last updated: $(date '+%Y-%m-%d %H:%M:%S')"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "💡 Monitor this file with: watch -n 1 cat .orchestra/cache/progress-status.txt"
echo ""
} > "$STATUS_FILE"
}
# Main execution
export_progress
exit 0

218
hooks/progress-tracker-update.sh Executable file

@@ -0,0 +1,218 @@
#!/bin/bash
# Orchestra Progress Tracker - Update Script
# Version: 2.0.0
# Handles atomic updates to progress.json with file locking
# Called from post_code_write.sh after TodoWrite tool execution
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/.." && pwd)}"
PROGRESS_FILE="$PROJECT_ROOT/.orchestra/cache/progress.json"
LOCK_FILE="/tmp/orchestra-progress-lock-${USER}"
# Source utility library
if [ -f "$SCRIPT_DIR/lib/progress-utils.sh" ]; then
source "$SCRIPT_DIR/lib/progress-utils.sh"
else
echo "ERROR: progress-utils.sh not found" >&2
exit 1
fi
# Parse TodoWrite parameters from stdin or argument
parse_tool_params() {
local params="$1"
# If params is empty, try to read from stdin
if [ -z "$params" ]; then
if [ ! -t 0 ]; then
params=$(cat)
fi
fi
echo "$params"
}
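# Illustrative TodoWrite payload expected on stdin/argument (only the fields read
# by process_todo below are shown):
#   {"todos":[{"id":"1","content":"Add login endpoint",
#              "activeForm":"Adding login endpoint","status":"in_progress"}]}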
# Process a single todo item
process_todo() {
local todo_json="$1"
local timestamp="$2"
# Extract fields from todo
local task_id=$(echo "$todo_json" | jq -r '.id // empty')
local content=$(echo "$todo_json" | jq -r '.content // empty')
local active_form=$(echo "$todo_json" | jq -r '.activeForm // empty')
local status=$(echo "$todo_json" | jq -r '.status // "pending"')
local parent_id=$(echo "$todo_json" | jq -r '.parentId // null')
# Validate required fields
if [ -z "$task_id" ] || [ -z "$content" ]; then
log_event "WARN" "Skipping todo with missing required fields"
return 0
fi
# Detect agent
local agent=$(detect_agent_from_todo "$active_form" "$content" "$PROGRESS_FILE")
log_event "DEBUG" "Processing task $task_id: agent=$agent, status=$status"
# Check if task already exists
local existing=$(jq --arg id "$task_id" '.todos[] | select(.id == $id)' "$PROGRESS_FILE" 2>/dev/null || echo "")
local temp_file="${PROGRESS_FILE}.task.tmp"
if [ -z "$existing" ]; then
# New task: add with full metadata
log_event "INFO" "Creating new task: $task_id (Agent: $agent)"
jq --arg id "$task_id" \
--arg content "$content" \
--arg activeForm "$active_form" \
--arg status "$status" \
--arg agent "$agent" \
--argjson startTime "$timestamp" \
--argjson lastUpdateTime "$timestamp" \
--arg parentId "$parent_id" \
'.todos += [{
id: $id,
content: $content,
activeForm: $activeForm,
status: $status,
parentId: (if $parentId == "null" then null else $parentId end),
agent: $agent,
startTime: $startTime,
lastUpdateTime: $lastUpdateTime,
estimatedDuration: null,
currentStep: null,
totalSteps: null,
tags: []
}]' "$PROGRESS_FILE" > "$temp_file"
mv "$temp_file" "$PROGRESS_FILE"
# Add history entry for new task
add_history_entry "$PROGRESS_FILE" "$timestamp" "task_started" "$task_id" "$agent" "Task created"
else
# Existing task: update status and lastUpdateTime
local old_status=$(echo "$existing" | jq -r '.status')
log_event "DEBUG" "Updating task $task_id: $old_status -> $status"
jq --arg id "$task_id" \
--arg status "$status" \
--arg agent "$agent" \
--arg activeForm "$active_form" \
--argjson lastUpdateTime "$timestamp" \
'(.todos[] | select(.id == $id)) |= (. + {
status: $status,
agent: $agent,
activeForm: $activeForm,
lastUpdateTime: $lastUpdateTime
})' "$PROGRESS_FILE" > "$temp_file"
mv "$temp_file" "$PROGRESS_FILE"
# Log status change in history
if [ "$old_status" != "$status" ]; then
local event_type="task_updated"
if [ "$status" = "completed" ]; then
event_type="task_completed"
fi
log_event "INFO" "Task $task_id: $old_status$status (Agent: $agent)"
add_history_entry "$PROGRESS_FILE" "$timestamp" "$event_type" "$task_id" "$agent" "$old_status -> $status"
fi
fi
# Update currentAgent in metadata
jq --arg agent "$agent" \
'.metadata.currentAgent = $agent' "$PROGRESS_FILE" > "$temp_file"
mv "$temp_file" "$PROGRESS_FILE"
}
# Main update logic with file locking
main() {
local tool_params=$(parse_tool_params "$1")
# If no params provided, exit silently
if [ -z "$tool_params" ] || [ "$tool_params" = "{}" ] || [ "$tool_params" = "null" ]; then
log_event "DEBUG" "No TodoWrite parameters provided, skipping update"
exit 0
fi
log_event "DEBUG" "Starting progress update with params: ${tool_params:0:100}..."
# Ensure progress file exists
if [ ! -f "$PROGRESS_FILE" ]; then
log_event "INFO" "Initializing progress.json"
init_progress_file_if_missing "$PROGRESS_FILE"
fi
# Acquire exclusive lock with timeout (cross-platform approach)
# Try to create lock file, wait if it exists
local lock_attempts=0
local max_attempts=50 # 5 seconds (50 * 0.1s)
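# mkdir is used as the mutex because it is atomic: it either creates the lock
# directory (lock acquired) or fails if it already exists, so there is no
# check-then-create race window. A stale lock left by a crashed run has to be
# removed manually (rmdir "$LOCK_FILE").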
while [ $lock_attempts -lt $max_attempts ]; do
if mkdir "$LOCK_FILE" 2>/dev/null; then
# Lock acquired
break
fi
# Lock exists, wait a bit
sleep 0.1
lock_attempts=$((lock_attempts + 1))
done
if [ $lock_attempts -ge $max_attempts ]; then
log_event "ERROR" "Failed to acquire lock for progress update (timeout)"
exit 1
fi
# Ensure lock is released on exit
trap "rmdir '$LOCK_FILE' 2>/dev/null || true" EXIT
log_event "DEBUG" "Lock acquired successfully"
# Get current timestamp
local timestamp=$(get_timestamp_ms)
# Parse todos array from parameters
local todos=$(echo "$tool_params" | jq -c '.todos // []' 2>/dev/null || echo "[]")
if [ "$todos" = "[]" ] || [ -z "$todos" ]; then
log_event "DEBUG" "No todos in parameters"
rmdir "$LOCK_FILE" 2>/dev/null || true
exit 0
fi
# Process each todo
echo "$todos" | jq -c '.[]' 2>/dev/null | while IFS= read -r todo; do
if [ -n "$todo" ]; then
process_todo "$todo" "$timestamp"
fi
done
# Update metadata (task counts, completion rate, active agents)
log_event "DEBUG" "Updating metadata"
update_metadata "$PROGRESS_FILE" "$timestamp"
log_event "INFO" "Progress update completed successfully"
# Release lock explicitly; the EXIT trap above also removes it as a safety net.
# Failures earlier in the update abort via set -e and are logged by the ERR trap.
rmdir "$LOCK_FILE" 2>/dev/null || true
exit 0
}
# Error handling
trap 'log_event "ERROR" "Update script terminated unexpectedly: $?"' ERR
# Run main function
main "$@"

110
hooks/session-start.sh Executable file

@@ -0,0 +1,110 @@
#!/usr/bin/env bash
# Session Start Hook
# Provides context about Orchestra Plugin to Claude
set -euo pipefail
# Sync .claude.json settings to settings.local.json (silent mode)
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
if [ -f "$PROJECT_ROOT/hooks/sync-claude-settings.sh" ]; then
bash "$PROJECT_ROOT/hooks/sync-claude-settings.sh" true 2>/dev/null || true
fi
# Initialize/migrate progress tracking system
PROGRESS_FILE="$PROJECT_ROOT/.orchestra/cache/progress.json"
MIGRATE_SCRIPT="$PROJECT_ROOT/hooks/lib/progress-migrate.sh"
if [ -f "$MIGRATE_SCRIPT" ]; then
# Run migration silently (it handles initialization if file doesn't exist)
bash "$MIGRATE_SCRIPT" > /dev/null 2>&1 || true
# Update session start time if progress.json exists
if [ -f "$PROGRESS_FILE" ] && command -v jq &> /dev/null; then
# Get timestamp in milliseconds (macOS compatible)
if command -v python3 &> /dev/null; then
TIMESTAMP=$(python3 -c 'import time; print(int(time.time() * 1000))')
elif command -v python &> /dev/null; then
TIMESTAMP=$(python -c 'import time; print(int(time.time() * 1000))')
else
TIMESTAMP=$(($(date +%s) * 1000))
fi
TEMP_FILE="${PROGRESS_FILE}.session.tmp"
jq --argjson timestamp "$TIMESTAMP" \
'.metadata.sessionStartTime = $timestamp' \
"$PROGRESS_FILE" > "$TEMP_FILE" 2>/dev/null && mv "$TEMP_FILE" "$PROGRESS_FILE" || true
fi
fi
# Get language setting from environment
LANG="${ORCHESTRA_LANGUAGE:-en}"
# Create welcome message as context for Claude
if [ "$LANG" = "ja" ]; then
CONTEXT=$(cat <<'EOF'
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🎭 ORCHESTRA プラグイン読み込み完了
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✨ 専門エージェントが待機中です:
😎 Blake - リリース管理者(デプロイ、リリース)
🤓 Eden - ドキュメントリード(技術ライティング)
😤 Finn - QA & テスト(テストカバレッジ、検証)
🤨 Iris - セキュリティ監査官(認証、シークレット、脆弱性)
🤔 Kai - システムアーキテクト設計判断、ADR
😌 Leo - データベースアーキテクト(スキーマ、マイグレーション)
😊 Mina - 統合スペシャリスト外部API
😄 Nova - UI/UX スペシャリスト(インターフェース、アクセシビリティ)
🧐 Riley - 要件明確化担当(曖昧なリクエスト)
😐 Skye - コード実装者(明確な仕様)
😬 Theo - 運用 & 監視(信頼性、インシデント)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
重要ユーザーに挨拶し、Orchestraプラグインが読み込まれたことを伝えてください。
利用可能な専門エージェントをリストし、タスクのサポートを提案してください。
EOF
)
else
CONTEXT=$(cat <<'EOF'
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🎭 ORCHESTRA PLUGIN LOADED
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✨ Specialized agents are ready for coordination:
😎 Blake - Release Manager (deployments, releases)
🤓 Eden - Documentation Lead (technical writing)
😤 Finn - QA & Testing (test coverage, validation)
🤨 Iris - Security Auditor (auth, secrets, vulnerabilities)
🤔 Kai - System Architect (design decisions, ADRs)
😌 Leo - Database Architect (schema, migrations)
😊 Mina - Integration Specialist (external APIs)
😄 Nova - UI/UX Specialist (interfaces, accessibility)
🧐 Riley - Requirements Clarifier (vague requests)
😐 Skye - Code Implementer (well-defined specs)
😬 Theo - Ops & Monitoring (reliability, incidents)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
IMPORTANT: You should greet the user and inform them that Orchestra Plugin has been loaded.
List the available specialist agents and encourage them to ask for help with their tasks.
EOF
)
fi
# Output JSON format for Claude's context
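# jq -Rs reads the whole banner as one raw string and emits it JSON-escaped,
# so the multi-line text can be embedded safely in additionalContext below.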
cat <<EOF
{
"hookSpecificOutput": {
"hookEventName": "SessionStart",
"additionalContext": $(echo "$CONTEXT" | jq -Rs .)
}
}
EOF
exit 0

115
hooks/sync-claude-settings.sh Executable file

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# Sync .claude.json settings to .claude/settings.local.json
# This ensures user-defined settings in .claude.json take priority over auto-generated settings
set -euo pipefail
# Detect project root
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
cd "$PROJECT_ROOT"
# Configuration
CLAUDE_JSON="$PROJECT_ROOT/.claude.json"
SETTINGS_LOCAL="$PROJECT_ROOT/.claude/settings.local.json"
# Determine verbosity (silent mode for hooks, verbose for setup)
SILENT="${1:-false}"
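# Illustrative .claude.json content this script syncs from (example value):
#   { "dangerouslySkipPermissions": ["*"] }
# The value is copied as-is into .claude/settings.local.json below.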
log() {
if [ "$SILENT" != "true" ]; then
echo "$@"
fi
}
log_error() {
echo "$@" >&2
}
# Check if .claude.json exists
if [ ! -f "$CLAUDE_JSON" ]; then
log_error "Warning: .claude.json not found at $CLAUDE_JSON"
exit 0 # Non-fatal, exit gracefully
fi
# Check if jq is available
if ! command -v jq &> /dev/null; then
log_error "Warning: jq is not installed. Cannot sync settings."
log_error "Install with: brew install jq (macOS) or apt install jq (Linux)"
exit 0 # Non-fatal, exit gracefully
fi
# Read dangerouslySkipPermissions from .claude.json
SKIP_PERMISSIONS=$(jq -r '.dangerouslySkipPermissions // empty' "$CLAUDE_JSON" 2>/dev/null)
if [ -z "$SKIP_PERMISSIONS" ] || [ "$SKIP_PERMISSIONS" = "null" ]; then
log "No dangerouslySkipPermissions found in .claude.json, skipping sync."
exit 0
fi
# Ensure .claude directory exists
mkdir -p "$PROJECT_ROOT/.claude"
# Check if settings.local.json exists
if [ ! -f "$SETTINGS_LOCAL" ]; then
log "Creating new settings.local.json with dangerouslySkipPermissions..."
# Create new settings.local.json with dangerouslySkipPermissions
jq -n \
--argjson skipPerms "$(echo "$SKIP_PERMISSIONS")" \
'{dangerouslySkipPermissions: $skipPerms}' \
> "$SETTINGS_LOCAL"
log "✓ Created $SETTINGS_LOCAL with dangerouslySkipPermissions"
exit 0
fi
# Read current dangerouslySkipPermissions from settings.local.json
CURRENT_SKIP_PERMISSIONS=$(jq -r '.dangerouslySkipPermissions // empty' "$SETTINGS_LOCAL" 2>/dev/null || echo "")
# Check if settings.local.json has a permissions section
HAS_PERMISSIONS=$(jq 'has("permissions")' "$SETTINGS_LOCAL" 2>/dev/null || echo "false")
# Determine if we need to update
NEEDS_UPDATE=false
if [ "$SKIP_PERMISSIONS" != "$CURRENT_SKIP_PERMISSIONS" ]; then
NEEDS_UPDATE=true
fi
# If dangerouslySkipPermissions contains "*", permissions section should be removed
if echo "$SKIP_PERMISSIONS" | jq -e 'contains(["*"])' > /dev/null 2>&1; then
if [ "$HAS_PERMISSIONS" = "true" ]; then
NEEDS_UPDATE=true
fi
fi
if [ "$NEEDS_UPDATE" = "true" ]; then
log "Syncing dangerouslySkipPermissions from .claude.json to settings.local.json..."
# Backup existing settings.local.json
cp "$SETTINGS_LOCAL" "$SETTINGS_LOCAL.backup" 2>/dev/null || true
# If dangerouslySkipPermissions contains "*", remove permissions section
# as it's redundant when all permissions are skipped
if echo "$SKIP_PERMISSIONS" | jq -e 'contains(["*"])' > /dev/null 2>&1; then
log "Detected wildcard permission skip, removing redundant permissions section..."
echo '{}' | jq \
--argjson skipPerms "$(echo "$SKIP_PERMISSIONS")" \
'{dangerouslySkipPermissions: $skipPerms}' \
> "$SETTINGS_LOCAL.tmp"
else
# Merge dangerouslySkipPermissions into settings.local.json
jq \
--argjson skipPerms "$(echo "$SKIP_PERMISSIONS")" \
'. + {dangerouslySkipPermissions: $skipPerms}' \
"$SETTINGS_LOCAL" > "$SETTINGS_LOCAL.tmp"
fi
mv "$SETTINGS_LOCAL.tmp" "$SETTINGS_LOCAL"
log "✓ Synced dangerouslySkipPermissions to $SETTINGS_LOCAL"
else
log "Settings already in sync, no changes needed."
fi
exit 0

119
hooks/user-prompt-submit.sh Executable file

@@ -0,0 +1,119 @@
#!/usr/bin/env bash
# Auto-approve Hook with Safety Guards
# Automatically approves all tool uses EXCEPT dangerous operations
#
# This hook enables autonomous operation while preventing destructive actions
set -euo pipefail
# Read JSON input from stdin
INPUT_JSON=$(cat)
# Extract tool details from JSON
TOOL_NAME=$(echo "$INPUT_JSON" | jq -r '.tool_name // empty' 2>/dev/null || echo "")
TOOL_PARAMS=$(echo "$INPUT_JSON" | jq -c '.tool_input // {}' 2>/dev/null || echo "{}")
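# Illustrative stdin payload for a Bash tool call (shape inferred from the jq
# paths above; real payloads may carry additional fields):
#   {"tool_name":"Bash","tool_input":{"command":"rm -rf /tmp/build"}}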
# List of dangerous operations to block
DANGEROUS_PATTERNS=(
# File deletion
"rm -rf /"
"rm -rf ~"
"rm -rf \*"
"rm -rf ."
"sudo rm -rf"
# Disk operations
"dd if="
"mkfs"
"fdisk"
# System modifications
"sudo shutdown"
"sudo reboot"
"sudo halt"
"sudo poweroff"
# Package manager dangerous operations
"sudo apt-get remove"
"sudo apt remove"
"sudo yum remove"
"brew uninstall"
# Git destructive operations
"git push --force"
"git push -f"
"git reset --hard HEAD~"
# Database drops
"DROP DATABASE"
"DROP TABLE"
# Permission changes
"chmod 777"
"chmod -R 777"
)
# Check if this is a Bash tool use
if [ "$TOOL_NAME" = "Bash" ]; then
# Extract the command from TOOL_PARAMS
COMMAND=$(echo "$TOOL_PARAMS" | jq -r '.command // empty' 2>/dev/null || echo "")
if [ -n "$COMMAND" ]; then
# Check against dangerous patterns
for pattern in "${DANGEROUS_PATTERNS[@]}"; do
if echo "$COMMAND" | grep -qF "$pattern"; then
# Block dangerous command
echo "🛑 BLOCKED: Dangerous command detected: $pattern"
echo "Command: $COMMAND"
echo ""
echo "This command has been blocked for safety."
echo "If you need to run this, please do it manually."
exit 1
fi
done
# Block rm -r/-f aimed at the filesystem root ("/" or "/*")
if echo "$COMMAND" | grep -qE "rm\s+.*-[a-zA-Z]*[rf][a-zA-Z]*\s+/+\*?(\s|$)"; then
echo "🛑 BLOCKED: Potentially dangerous rm command with root path"
echo "Command: $COMMAND"
exit 1
fi
# Block rm -r/-f of top-level system directories (absolute paths only)
if echo "$COMMAND" | grep -qE "rm\s+.*-[a-zA-Z]*[rf][a-zA-Z]*\s+/(bin|usr|etc|var|lib|boot|sys|proc|dev|home)(/|\s|$)"; then
echo "🛑 BLOCKED: Attempting to delete system directory"
echo "Command: $COMMAND"
exit 1
fi
fi
fi
# Check for Edit/Write operations on critical system files
if [ "$TOOL_NAME" = "Edit" ] || [ "$TOOL_NAME" = "Write" ]; then
FILE_PATH=$(echo "$TOOL_PARAMS" | jq -r '.file_path // empty' 2>/dev/null || echo "")
if [ -n "$FILE_PATH" ]; then
# Block modifications to critical system files
CRITICAL_PATHS=(
"/etc/passwd"
"/etc/shadow"
"/etc/sudoers"
"/etc/hosts"
"/boot/"
"/sys/"
"/proc/"
)
for path in "${CRITICAL_PATHS[@]}"; do
if echo "$FILE_PATH" | grep -qF "$path"; then
echo "🛑 BLOCKED: Attempting to modify critical system file"
echo "File: $FILE_PATH"
exit 1
fi
done
fi
fi
# Auto-approve all other operations
# No output means approval (hook succeeds)
exit 0

57
hooks/workflow-dispatcher.sh Executable file

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Workflow Dispatcher Hook
# Routes tool executions to appropriate workflow hooks based on command patterns
set -euo pipefail
# Read JSON input from stdin
INPUT_JSON=$(cat)
# Extract tool details from JSON
TOOL_NAME=$(echo "$INPUT_JSON" | jq -r '.tool_name // empty' 2>/dev/null || echo "")
TOOL_PARAMS=$(echo "$INPUT_JSON" | jq -c '.tool_input // {}' 2>/dev/null || echo "{}")
# Only process Bash tool executions
if [ "$TOOL_NAME" != "Bash" ]; then
exit 0
fi
# Extract the command from tool parameters
COMMAND=$(echo "$TOOL_PARAMS" | jq -r '.command // empty' 2>/dev/null || echo "")
if [ -z "$COMMAND" ]; then
exit 0
fi
# Get the directory of this script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Convert INPUT_JSON to environment variables for legacy hooks
export TOOL_NAME
export TOOL_PARAMS
export COMMAND
# Route to appropriate workflow hook based on command pattern
if echo "$COMMAND" | grep -qE "(gh pr create|hub pull-request)"; then
echo "📋 Pre-PR Quality Checks"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash "$SCRIPT_DIR/before_pr.sh" || exit 1
echo "✅ Pre-PR checks passed"
echo ""
elif echo "$COMMAND" | grep -qE "git merge"; then
echo "🔀 Pre-Merge Quality Checks"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash "$SCRIPT_DIR/before_merge.sh" || exit 1
echo "✅ Pre-merge checks passed"
echo ""
elif echo "$COMMAND" | grep -qE "(deploy|vercel|netlify|git push.*production|git push.*main)"; then
echo "🚀 Pre-Deploy Validation"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash "$SCRIPT_DIR/before_deploy.sh" || exit 1
echo "✅ Pre-deploy checks passed"
echo ""
fi
exit 0


@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Workflow Post-Execution Dispatcher Hook
# Routes completed tool executions to appropriate post-workflow hooks
set -euo pipefail
# Read JSON input from stdin
INPUT_JSON=$(cat)
# Extract tool details from JSON
TOOL_NAME=$(echo "$INPUT_JSON" | jq -r '.tool_name // empty' 2>/dev/null || echo "")
TOOL_PARAMS=$(echo "$INPUT_JSON" | jq -c '.tool_input // {}' 2>/dev/null || echo "{}")
TOOL_OUTPUT=$(echo "$INPUT_JSON" | jq -r '.tool_output // empty' 2>/dev/null || echo "")
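# Illustrative stdin payload after a deploy command finishes (shape inferred
# from the jq paths above; real payloads may carry additional fields):
#   {"tool_name":"Bash","tool_input":{"command":"vercel deploy --prod"},
#    "tool_output":"Deployment complete"}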
# Only process Bash tool executions
if [ "$TOOL_NAME" != "Bash" ]; then
exit 0
fi
# Extract the command from tool parameters
COMMAND=$(echo "$TOOL_PARAMS" | jq -r '.command // empty' 2>/dev/null || echo "")
if [ -z "$COMMAND" ]; then
exit 0
fi
# Get the directory of this script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Convert INPUT_JSON to environment variables for legacy hooks
export TOOL_NAME
export TOOL_PARAMS
export COMMAND
export TOOL_OUTPUT
# Route to appropriate post-workflow hook based on command pattern
if echo "$COMMAND" | grep -qE "(deploy|vercel|netlify|git push.*production|git push.*main)"; then
echo ""
echo "🎯 Post-Deploy Validation"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash "$SCRIPT_DIR/after_deploy.sh" || {
echo "⚠️ Post-deploy checks failed. Consider rollback."
# Don't exit 1 here - deployment already happened
exit 0
}
echo "✅ Post-deploy validation passed"
echo ""
fi
exit 0