Initial commit
This commit is contained in:
134
hooks/scripts/pre_compact.py
Executable file
134
hooks/scripts/pre_compact.py
Executable file
@@ -0,0 +1,134 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# requires-python = ">=3.11"
|
||||
# dependencies = [
|
||||
# "python-dotenv",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def log_pre_compact(input_data):
    """Append the raw pre-compact hook payload to logs/pre_compact.json.

    The log file holds a JSON array of payloads; a missing or corrupt file
    is treated as an empty history rather than an error.
    """
    log_file = Path("logs") / "pre_compact.json"
    log_file.parent.mkdir(parents=True, exist_ok=True)

    # Load the existing history, falling back to [] on absence or corruption.
    entries = []
    if log_file.exists():
        with open(log_file) as fh:
            try:
                entries = json.load(fh)
            except (json.JSONDecodeError, ValueError):
                entries = []

    entries.append(input_data)

    # Rewrite the whole file with pretty-printed JSON.
    with open(log_file, "w") as fh:
        json.dump(entries, fh, indent=2)
|
||||
|
||||
|
||||
def backup_transcript(transcript_path, trigger):
    """Copy the transcript aside before compaction rewrites it.

    The backup lands in logs/transcript_backups/ with the trigger type
    ("manual"/"auto") and a timestamp embedded in the filename.

    Returns the backup path as a string, or None when the source file is
    missing or any step fails (backups are strictly best-effort).
    """
    try:
        if not os.path.exists(transcript_path):
            return

        dest_dir = Path("logs") / "transcript_backups"
        dest_dir.mkdir(parents=True, exist_ok=True)

        # <session>_pre_compact_<trigger>_<YYYYmmdd_HHMMSS>.jsonl
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        stem = Path(transcript_path).stem
        dest = dest_dir / f"{stem}_pre_compact_{trigger}_{stamp}.jsonl"

        import shutil  # deferred import: only needed on the backup path

        shutil.copy2(transcript_path, dest)
        return str(dest)
    except Exception:
        return None  # a failed backup must never block compaction
|
||||
|
||||
|
||||
def main():
    """Pre-compact hook entry point.

    Reads the hook payload from stdin, logs it, optionally backs up the
    transcript (--backup), and optionally prints a human-readable summary
    (--verbose). Always exits 0 so compaction is never blocked by the hook.
    """

    def describe(trigger, session_id, custom_instructions, backup_path):
        # Build the summary text printed under --verbose.
        if trigger == "manual":
            text = f"Preparing for manual compaction (session: {session_id[:8]}...)"
            if custom_instructions:
                text += f"\nCustom instructions: {custom_instructions[:100]}..."
        else:  # auto
            text = f"Auto-compaction triggered due to full context window (session: {session_id[:8]}...)"
        if backup_path:
            text += f"\nTranscript backed up to: {backup_path}"
        return text

    try:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--backup",
            action="store_true",
            help="Create backup of transcript before compaction",
        )
        parser.add_argument(
            "--verbose", action="store_true", help="Print verbose output"
        )
        args = parser.parse_args()

        # The hook payload arrives as a single JSON document on stdin.
        input_data = json.loads(sys.stdin.read())

        session_id = input_data.get("session_id", "unknown")
        transcript_path = input_data.get("transcript_path", "")
        trigger = input_data.get("trigger", "unknown")  # "manual" or "auto"
        custom_instructions = input_data.get("custom_instructions", "")

        # Record the event before anything else can fail.
        log_pre_compact(input_data)

        backup_path = None
        if args.backup and transcript_path:
            backup_path = backup_transcript(transcript_path, trigger)

        if args.verbose:
            print(describe(trigger, session_id, custom_instructions, backup_path))

        sys.exit(0)  # success — compaction proceeds

    except json.JSONDecodeError:
        sys.exit(0)  # malformed payload: never block compaction
    except Exception:
        sys.exit(0)  # any other failure: never block compaction
|
||||
|
||||
|
||||
# Script entry point: run the hook only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
||||
153
hooks/scripts/subagent_stop.py
Executable file
153
hooks/scripts/subagent_stop.py
Executable file
@@ -0,0 +1,153 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# requires-python = ">=3.11"
|
||||
# dependencies = [
|
||||
# "python-dotenv",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def get_tts_script_path():
    """
    Pick the TTS helper script to run, based on which API keys are set.

    Priority: ElevenLabs > OpenAI > pyttsx3 (offline fallback, no key).
    Returns the script path as a string, or None when no candidate script
    exists on disk next to this file under utils/tts/.
    """
    tts_dir = Path(__file__).parent / "utils" / "tts"

    # (required env var, script file) in priority order; None = no key needed.
    candidates = (
        ("ELEVENLABS_API_KEY", "elevenlabs_tts.py"),
        ("OPENAI_API_KEY", "openai_tts.py"),
        (None, "pyttsx3_tts.py"),
    )
    for env_key, script_name in candidates:
        if env_key is not None and not os.getenv(env_key):
            continue  # key required but absent — skip this engine
        script = tts_dir / script_name
        if script.exists():
            return str(script)

    return None
|
||||
|
||||
|
||||
def announce_subagent_completion():
    """Speak a fixed "Subagent Complete" message via the best available TTS.

    Every failure mode (no TTS script found, timeout, missing `uv`,
    subprocess errors) is swallowed so this hook can never stall or crash
    the agent.
    """
    try:
        tts_script = get_tts_script_path()
        if not tts_script:
            return  # no TTS engine available — nothing to do

        # Delegate to the chosen helper; output is suppressed and the call
        # is capped at 10 seconds so a hung engine cannot block the hook.
        subprocess.run(
            ["uv", "run", tts_script, "Subagent Complete"],
            capture_output=True,
            timeout=10,
        )
    except Exception:
        pass  # best-effort announcement — fail silently
|
||||
|
||||
|
||||
def _export_chat(transcript_path, log_dir):
    """Convert a .jsonl transcript into a JSON array at logs/chat.json.

    Lines that fail to parse are skipped; any I/O error aborts silently
    (the export is best-effort and must not affect the hook's outcome).
    """
    try:
        chat_data = []
        with open(transcript_path) as f:
            for line in f:
                line = line.strip()
                if line:
                    try:
                        chat_data.append(json.loads(line))
                    except json.JSONDecodeError:
                        pass  # skip invalid lines

        with open(os.path.join(log_dir, "chat.json"), "w") as f:
            json.dump(chat_data, f, indent=2)
    except Exception:
        pass  # fail silently


def main():
    """Subagent-stop hook entry point.

    Appends the stdin payload to logs/subagent_stop.json, optionally
    exports the transcript as logs/chat.json (--chat), and announces
    completion via TTS. Always exits 0 so the agent is never blocked.

    Fix vs. original: the unused locals `session_id` and
    `stop_hook_active` were extracted from the payload but never read —
    they are removed; the chat export is factored into _export_chat.
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--chat", action="store_true", help="Copy transcript to chat.json"
        )
        args = parser.parse_args()

        # Read the hook payload (one JSON document) from stdin.
        input_data = json.load(sys.stdin)

        # Ensure the log directory and append the payload to the log file.
        log_dir = os.path.join(os.getcwd(), "logs")
        os.makedirs(log_dir, exist_ok=True)
        log_path = os.path.join(log_dir, "subagent_stop.json")

        if os.path.exists(log_path):
            with open(log_path) as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []  # corrupt log — restart the history
        else:
            log_data = []

        log_data.append(input_data)

        with open(log_path, "w") as f:
            json.dump(log_data, f, indent=2)

        # Handle --chat: mirror the .jsonl transcript as a JSON array.
        if args.chat and "transcript_path" in input_data:
            transcript_path = input_data["transcript_path"]
            if os.path.exists(transcript_path):
                _export_chat(transcript_path, log_dir)

        # Announce subagent completion via TTS (best-effort).
        announce_subagent_completion()

        sys.exit(0)

    except json.JSONDecodeError:
        sys.exit(0)  # malformed stdin payload — never block the agent
    except Exception:
        sys.exit(0)  # any other failure is non-fatal by design
|
||||
|
||||
|
||||
# Script entry point: run the hook only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
||||
449
hooks/scripts/task-completion-enforcer.py
Executable file
449
hooks/scripts/task-completion-enforcer.py
Executable file
@@ -0,0 +1,449 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
|
||||
# /// script
|
||||
# requires-python = ">=3.10"
|
||||
# dependencies = []
|
||||
# ///
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
async def enforce_task_completion(hook_input: dict[str, Any]):
    """Gate task-completion attempts behind mandatory compliance checks.

    Emits a JSON verdict ({"approve": bool, "message": str}) on stdout;
    progress lines go to stderr so they do not corrupt the verdict.

    Phases: PreToolUse is approved immediately (the checks run in
    PostToolUse/Stop to avoid double execution). When the tool input looks
    like a completion attempt, all compliance checks must pass or the
    completion is blocked with a detailed message.
    """
    tool_input = hook_input.get("tool_input")
    phase = hook_input.get("phase", os.environ.get("CLAUDE_HOOK_PHASE", "unknown"))

    # Skip PreToolUse so enforcement only runs once per tool call.
    if phase == "PreToolUse":
        verdict = {
            "approve": True,
            "message": "Task completion enforcement skipped in PreToolUse (avoiding redundancy)",
        }
        print(json.dumps(verdict))
        return

    if is_task_completion_attempt(tool_input):
        print(
            "🔍 TASK COMPLETION DETECTED - Running mandatory compliance checks...",
            file=sys.stderr,
        )

        compliance_results = await run_compliance_checks(tool_input)

        if not compliance_results["allPassed"]:
            verdict = {
                "approve": False,
                "message": generate_blocking_message(compliance_results),
            }
            print(json.dumps(verdict))
            return

        print(
            "✅ All compliance checks passed - Task completion approved",
            file=sys.stderr,
        )

    # Default verdict: nothing blocked the completion.
    print(json.dumps({"approve": True, "message": "Task completion enforcement passed"}))
|
||||
|
||||
|
||||
def is_task_completion_attempt(tool_input: Any) -> bool:
    """Return True when *tool_input* looks like a task-completion attempt.

    Two signals are checked: (1) a TodoWrite payload containing at least
    one todo with status "completed"/"done", and (2) completion-sounding
    phrases anywhere in the serialized input (case-insensitive regex).
    """
    if isinstance(tool_input, dict):
        content = json.dumps(tool_input)
        # TodoWrite fast path: any todo marked finished counts immediately.
        todos = tool_input.get("todos")
        if todos and any(
            todo.get("status") in ("completed", "done") for todo in todos
        ):
            return True
    else:
        content = str(tool_input)

    # Phrase-based detection for all other tool payloads.
    completion_indicators = (
        r"✅.*complete",
        r"✅.*done",
        r"✅.*fixed",
        r"✅.*finished",
        r"task.*complete",
        r"workflow.*complete",
        r"all.*fixed",
        r"ready.*review",
        r"implementation.*complete",
        r"changes.*made",
        r"should.*work.*now",
        r"⏺.*fixed",
        r"⏺.*complete",
        r'"status":\s*"completed"',
        r'"status":\s*"done"',
    )
    return any(
        re.search(pattern, content, re.IGNORECASE)
        for pattern in completion_indicators
    )
|
||||
|
||||
|
||||
async def run_compliance_checks(tool_input: Any) -> dict[str, Any]:
    """Run all compliance checks and collect their outcomes.

    Returns a dict with:
      - "allPassed": False as soon as any blocking check fails
      - "checks": human-readable status lines (✅ / ⚠️ / ℹ️)
      - "failures": dicts with "check", "error", "fix" for blocked items

    Checks run in order: (1) the external TypeScript validator script,
    (2) `pnpm test` when package.json declares a test script, (3) git
    working-tree cleanliness (informational only), (4) presence of a
    CLAUDE.md file (assumed compliant — not actually verified).
    Progress is printed to stderr so stdout stays reserved for verdicts.
    """
    results = {"allPassed": True, "checks": [], "failures": []}

    # Determine validation scope based on task completion type
    validation_scope = determine_validation_scope(tool_input)
    print(
        f"📋 VALIDATION SCOPE: {validation_scope['type']} ({validation_scope['reason']})",
        file=sys.stderr,
    )

    # 1. TypeScript validation (includes Biome, type checking, coding standards) - Centralized
    # The validator is a sibling script; a missing file is informational,
    # but an exception while invoking it is treated as a blocking failure.
    try:
        print("Running centralized TypeScript validation...", file=sys.stderr)
        ts_validator_path = Path(__file__).parent / "typescript-validator.py"

        if ts_validator_path.exists():
            ts_result = await run_typescript_validator(ts_validator_path, tool_input)

            if ts_result.get("approve", False):
                results["checks"].append(
                    f"✅ TypeScript validation passed ({validation_scope['type']})"
                )
            else:
                results["allPassed"] = False
                results["failures"].append(
                    {
                        "check": "TypeScript",
                        "error": ts_result.get(
                            "message", "TypeScript validation failed"
                        ),
                        "fix": "Fix all TypeScript validation issues listed above",
                    }
                )
        else:
            results["checks"].append("ℹ️ TypeScript validator not found")
    except Exception as error:
        results["allPassed"] = False
        results["failures"].append(
            {
                "check": "TypeScript",
                "error": str(error),
                "fix": "Fix TypeScript validation system error",
            }
        )

    # 2. Test check (if tests exist)
    # Only runs when package.json is in the CWD and declares scripts.test.
    # NOTE(review): assumes the pnpm binary is on PATH — a missing pnpm
    # raises FileNotFoundError, which lands in the outer "Could not check
    # tests" branch rather than a blocking failure; confirm that's intended.
    if Path("package.json").exists():
        try:
            with open("package.json") as f:
                package_json = json.load(f)

            if package_json.get("scripts", {}).get("test"):
                try:
                    print("Running tests...", file=sys.stderr)
                    subprocess.run(
                        ["pnpm", "test"], check=True, capture_output=True, text=True
                    )
                    results["checks"].append("✅ Tests passed")
                except subprocess.CalledProcessError as error:
                    results["allPassed"] = False
                    results["failures"].append(
                        {
                            "check": "Tests",
                            "error": error.stdout or str(error),
                            "fix": "Fix all failing tests before completing task",
                        }
                    )
        except Exception as error:
            results["checks"].append(f"ℹ️ Could not check tests: {error}")

    # 3. Git status check (warn about uncommitted changes)
    # Informational only — never flips allPassed.
    try:
        result = subprocess.run(
            ["git", "status", "--porcelain"], capture_output=True, text=True, check=True
        )
        if result.stdout.strip():
            results["checks"].append("⚠️ Uncommitted changes detected")
        else:
            results["checks"].append("✅ Git status clean")
    except subprocess.CalledProcessError:
        # Git not available or not a git repo - not critical
        results["checks"].append("ℹ️ Git status not available")

    # 4. Claude.md compliance check
    # Presence of the file is merely noted; no content validation happens.
    if Path(".claude/CLAUDE.md").exists() or Path("CLAUDE.md").exists():
        results["checks"].append(
            "✅ CLAUDE.md compliance assumed (manual verification)"
        )

    return results
|
||||
|
||||
|
||||
async def run_typescript_validator(
    validator_path: Path, tool_input: Any
) -> dict[str, Any]:
    """Invoke the external TypeScript validator and return its verdict.

    The validator is run via `uv run --script` with a JSON request on
    stdin; its stdout is expected to be a JSON verdict dict. Any failure
    (spawn error, non-zero exit, unparseable output, unserializable
    tool_input) yields a disapproving verdict instead of raising.
    """
    try:
        request = json.dumps(
            {"tool_name": "TaskCompletion", "tool_input": tool_input, "phase": "Stop"}
        )

        process = await asyncio.create_subprocess_exec(
            "uv",
            "run",
            "--script",
            str(validator_path),
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await process.communicate(request.encode())

        if process.returncode != 0:
            return {
                "approve": False,
                "message": f"TypeScript validator failed: {stderr.decode()}",
            }
        # Success path: the validator's stdout IS the verdict.
        return json.loads(stdout.decode())
    except Exception as error:
        return {
            "approve": False,
            "message": f"TypeScript validator output parsing failed: {error}",
        }
|
||||
|
||||
|
||||
def determine_validation_scope(tool_input: Any) -> dict[str, str]:
    """Classify a completion attempt as needing "full" or "incremental" validation.

    Decision order: bulk TodoWrite completions (3+ todos done, or more
    than half the list) force a full pass; then major-completion phrases;
    then minor-update phrases; otherwise default to incremental.
    Returns {"type": ..., "reason": ...}.
    """
    content = (
        json.dumps(tool_input) if isinstance(tool_input, dict) else str(tool_input)
    )

    def matches_any(patterns):
        # Case-insensitive regex scan over the serialized tool input.
        return any(re.search(p, content, re.IGNORECASE) for p in patterns)

    major_completion_indicators = (
        r"feature.*complete",
        r"implementation.*complete",
        r"ready.*review",
        r"ready.*production",
        r"workflow.*complete",
        r"task.*finished",
        r"all.*done",
        r"fully.*implemented",
        r"complete.*testing",
        r"deployment.*ready",
        r"final.*implementation",
        r"story.*complete",
        r"epic.*complete",
    )

    minor_update_indicators = (
        r"progress.*update",
        r"status.*update",
        r"partial.*complete",
        r"checkpoint",
        r"intermediate.*step",
        r"milestone.*reached",
        r"draft.*complete",
        r"initial.*implementation",
        r"work.*in.*progress",
        r"temporary.*fix",
    )

    # Bulk TodoWrite completions imply a major milestone → full validation.
    if isinstance(tool_input, dict) and tool_input.get("todos"):
        todos = tool_input["todos"]
        done = [t for t in todos if t.get("status") in ("completed", "done")]
        if len(done) >= 3 or (len(done) / len(todos)) > 0.5:
            return {"type": "full", "reason": "Multiple todos completed"}

    if matches_any(major_completion_indicators):
        return {"type": "full", "reason": "Major task completion detected"}

    if matches_any(minor_update_indicators):
        return {"type": "incremental", "reason": "Minor progress update detected"}

    return {
        "type": "incremental",
        "reason": "Single task completion - using incremental validation",
    }
|
||||
|
||||
|
||||
def get_changed_files() -> list[str]:
    """Return the de-duplicated list of files changed in the working tree.

    Combines unstaged (`git diff --name-only`) and staged
    (`git diff --cached --name-only`) changes.

    Returns [] when git fails or is unavailable. Fix vs. original: only
    CalledProcessError was caught, so a missing git binary raised an
    uncaught FileNotFoundError; it is now handled as "no changes known".
    """
    try:
        unstaged = subprocess.run(
            ["git", "diff", "--name-only"], capture_output=True, text=True, check=True
        )
        staged = subprocess.run(
            ["git", "diff", "--cached", "--name-only"],
            capture_output=True,
            text=True,
            check=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: not a repo / git error; FileNotFoundError:
        # git binary not installed.
        return []

    changed: set[str] = set()
    for output in (unstaged.stdout, staged.stdout):
        if output.strip():
            changed.update(output.strip().split("\n"))
    return list(changed)  # set → list removes duplicates
|
||||
|
||||
|
||||
def generate_blocking_message(results: dict[str, Any]) -> str:
    """Build the blocking verdict text shown when compliance checks fail.

    Args:
        results: Output of run_compliance_checks; only the "failures"
            list (dicts with "check", "error" and "fix" keys) is used.

    Returns:
        A multi-line message listing every failure plus the mandatory
        remediation steps.

    Fix vs. original: the loop used enumerate() but never read the index.
    """
    message = f"""🛑 TASK COMPLETION BLOCKED 🛑

{len(results['failures'])} CRITICAL ISSUE(S) MUST BE FIXED:

"""

    # One section per failed check: what broke and how to fix it.
    for failure in results["failures"]:
        message += f"""❌ {failure['check']} FAILED:
{failure['error']}

🔧 FIX: {failure['fix']}

"""

    message += """════════════════════════════════════════════
⚠️ CLAUDE.md COMPLIANCE VIOLATION DETECTED ⚠️
════════════════════════════════════════════

According to CLAUDE.md requirements:
• "ALL hook issues are BLOCKING"
• "STOP IMMEDIATELY - Do not continue with other tasks"
• "FIX ALL ISSUES - Address every ❌ issue until everything is ✅ GREEN"
• "There are NO warnings, only requirements"

📋 MANDATORY NEXT STEPS:
1. Fix ALL issues listed above
2. Verify fixes by running the failed commands manually
3. Only THEN mark the task as complete
4. NEVER ignore blocking issues

🚫 TASK COMPLETION IS FORBIDDEN UNTIL ALL ISSUES ARE RESOLVED 🚫"""

    return message
|
||||
|
||||
|
||||
async def main():
    """Hook entry point: run enforcement and persist an audit trail.

    Reads the hook payload from stdin, timestamps it, runs
    enforce_task_completion (which prints the verdict), then appends the
    payload to logs/task_completion_enforcer.json. On any exception an
    error entry is logged instead (best-effort) and a disapproving verdict
    is printed to stderr before exiting 1.

    NOTE(review): the payload is appended to the log only AFTER enforcement
    succeeds — an enforcement failure is recorded solely via the error
    entry in the except branch; confirm that ordering is intended.
    """
    try:
        input_data = json.load(sys.stdin)

        # Ensure log directory exists
        log_dir = Path.cwd() / "logs"
        log_dir.mkdir(parents=True, exist_ok=True)
        log_path = log_dir / "task_completion_enforcer.json"

        # Read existing log data or initialize empty list
        # (a corrupt log file restarts the history rather than crashing)
        if log_path.exists():
            with open(log_path) as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []

        # Add timestamp to the log entry (e.g. "jan 05, 03:21pm")
        timestamp = datetime.now().strftime("%b %d, %I:%M%p").lower()
        input_data["timestamp"] = timestamp

        # Process the enforcement logic (prints the JSON verdict itself)
        await enforce_task_completion(input_data)

        # Add completion status to log entry
        input_data["enforcement_completed"] = True

        # Append new data to log
        log_data.append(input_data)

        # Write back to file with formatting
        with open(log_path, "w") as f:
            json.dump(log_data, f, indent=2)

    except Exception as error:
        # Log the error as well (best-effort — must not mask the original
        # failure, hence the nested try)
        try:
            log_dir = Path.cwd() / "logs"
            log_dir.mkdir(parents=True, exist_ok=True)
            log_path = log_dir / "task_completion_enforcer.json"

            if log_path.exists():
                with open(log_path) as f:
                    try:
                        log_data = json.load(f)
                    except (json.JSONDecodeError, ValueError):
                        log_data = []
            else:
                log_data = []

            timestamp = datetime.now().strftime("%b %d, %I:%M%p").lower()
            error_entry = {
                "timestamp": timestamp,
                "error": str(error),
                "enforcement_completed": False,
                "critical_failure": True,
            }

            log_data.append(error_entry)

            with open(log_path, "w") as f:
                json.dump(log_data, f, indent=2)
        except Exception:
            # If logging fails, continue with original error handling
            pass

        # Emit a disapproving verdict on stderr and signal failure to the
        # caller via a non-zero exit code.
        print(
            json.dumps(
                {
                    "approve": False,
                    "message": f"🛑 CRITICAL: Task completion enforcement failed: {error}",
                }
            ),
            file=sys.stderr,
        )
        sys.exit(1)
|
||||
|
||||
|
||||
# Script entry point: drive the async main() via the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
||||
201
hooks/scripts/user_prompt_sumbit.py
Executable file
201
hooks/scripts/user_prompt_sumbit.py
Executable file
@@ -0,0 +1,201 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# requires-python = ">=3.11"
|
||||
# dependencies = [
|
||||
# "python-dotenv",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def log_user_prompt(session_id, input_data):
    """Append the raw prompt payload to logs/user_prompt_submit.json.

    `session_id` is accepted for interface symmetry but is not read here —
    the full payload (which already contains it) is what gets logged.
    A missing or corrupt log file is treated as an empty history.
    """
    log_file = Path("logs") / "user_prompt_submit.json"
    log_file.parent.mkdir(parents=True, exist_ok=True)

    # Load prior entries, tolerating absence and corruption.
    history = []
    if log_file.exists():
        with open(log_file) as fh:
            try:
                history = json.load(fh)
            except (json.JSONDecodeError, ValueError):
                history = []

    history.append(input_data)

    # Rewrite the full history, pretty-printed.
    with open(log_file, "w") as fh:
        json.dump(history, fh, indent=2)
|
||||
|
||||
|
||||
# Legacy function removed - now handled by manage_session_data
|
||||
|
||||
|
||||
def manage_session_data(session_id, prompt, name_agent=False):
    """Record *prompt* under .claude/data/sessions/<session_id>.json.

    When *name_agent* is set and the session has no agent name yet, a
    one-word name is requested from a local Ollama helper first, falling
    back to an Anthropic helper. Every failure is non-fatal: session
    bookkeeping must never block the prompt.
    """
    import subprocess

    sessions_dir = Path(".claude/data/sessions")
    sessions_dir.mkdir(parents=True, exist_ok=True)
    session_file = sessions_dir / f"{session_id}.json"

    # Load prior session state; any read/parse problem starts a fresh record.
    session_data = {"session_id": session_id, "prompts": []}
    if session_file.exists():
        try:
            with open(session_file) as fh:
                session_data = json.load(fh)
        except (json.JSONDecodeError, ValueError):
            session_data = {"session_id": session_id, "prompts": []}

    session_data["prompts"].append(prompt)

    if name_agent and "agent_name" not in session_data:
        # Preferred: local Ollama with a short timeout. Note the control
        # flow: ANY exception (timeout, missing `uv`, or the deliberate
        # raise on an invalid multi-word name) triggers the Anthropic
        # fallback, but a clean run that simply yields no usable output
        # does NOT.
        try:
            proc = subprocess.run(
                ["uv", "run", ".claude/hooks/utils/llm/ollama.py", "--agent-name"],
                capture_output=True,
                text=True,
                timeout=5,  # shorter timeout for local Ollama
            )
            candidate = proc.stdout.strip()
            if proc.returncode == 0 and candidate:
                # Accept only a single alphanumeric word (rejects error text).
                if len(candidate.split()) == 1 and candidate.isalnum():
                    session_data["agent_name"] = candidate
                else:
                    raise Exception("Invalid name from Ollama")
        except Exception:
            try:
                proc = subprocess.run(
                    ["uv", "run", ".claude/hooks/utils/llm/anth.py", "--agent-name"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                candidate = proc.stdout.strip()
                if proc.returncode == 0 and candidate:
                    if len(candidate.split()) == 1 and candidate.isalnum():
                        session_data["agent_name"] = candidate
            except Exception:
                pass  # both helpers failed — proceed without a name

    # Persist; a failed write is swallowed so the prompt still goes through.
    try:
        with open(session_file, "w") as fh:
            json.dump(session_data, fh, indent=2)
    except Exception:
        pass
|
||||
|
||||
|
||||
def validate_prompt(prompt):
    """
    Check *prompt* against the substring block-list.

    Returns (True, None) when the prompt is allowed, otherwise
    (False, reason). The block-list ships empty — add (pattern, reason)
    tuples below to enable blocking; matching is case-insensitive.
    """
    blocked_patterns = [
        # e.g. ("rm -rf /", "Dangerous command detected"),
    ]

    lowered = prompt.lower()
    for pattern, reason in blocked_patterns:
        if pattern.lower() in lowered:
            return False, reason

    return True, None
|
||||
|
||||
|
||||
def main():
    """UserPromptSubmit hook entry point.

    Logs every prompt, optionally records per-session data and an agent
    name, and optionally validates the prompt (--validate). Exit code 2
    blocks the prompt with the message printed to stderr; any internal
    failure exits 0 so prompts are never blocked by accident.
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--validate", action="store_true", help="Enable prompt validation"
        )
        parser.add_argument(
            "--log-only",
            action="store_true",
            help="Only log prompts, no validation or blocking",
        )
        parser.add_argument(
            "--store-last-prompt",
            action="store_true",
            help="Store the last prompt for status line display",
        )
        parser.add_argument(
            "--name-agent",
            action="store_true",
            help="Generate an agent name for the session",
        )
        args = parser.parse_args()

        # The hook payload is a single JSON document on stdin.
        payload = json.loads(sys.stdin.read())
        session_id = payload.get("session_id", "unknown")
        prompt = payload.get("prompt", "")

        # Always log, regardless of flags.
        log_user_prompt(session_id, payload)

        # Session bookkeeping is opt-in via either flag.
        if args.store_last_prompt or args.name_agent:
            manage_session_data(session_id, prompt, name_agent=args.name_agent)

        # --log-only suppresses validation even when --validate is given.
        if args.validate and not args.log_only:
            is_valid, reason = validate_prompt(prompt)
            if not is_valid:
                print(f"Prompt blocked: {reason}", file=sys.stderr)
                sys.exit(2)  # exit code 2 blocks the prompt

        # Optional extra context could be print()ed here — anything on
        # stdout is appended to the prompt by the hook runner.

        sys.exit(0)  # success — prompt will be processed

    except json.JSONDecodeError:
        sys.exit(0)  # malformed stdin — let the prompt through
    except Exception:
        sys.exit(0)  # any other failure — let the prompt through
|
||||
|
||||
|
||||
# Script entry point: run the hook only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user