Initial commit
This commit is contained in:
12
.claude-plugin/plugin.json
Normal file
12
.claude-plugin/plugin.json
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"name": "customize",
|
||||
"description": "Personnalise ton expérience Claude Code avec hooks, output styles et status lines sur mesure",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Aurélien Tournayre",
|
||||
"email": "aurelien.tournayre@gmail.com"
|
||||
},
|
||||
"hooks": [
|
||||
"./hooks"
|
||||
]
|
||||
}
|
||||
3
README.md
Normal file
3
README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# customize
|
||||
|
||||
Personnalise ton expérience Claude Code avec hooks, output styles et status lines sur mesure
|
||||
114
hooks/notification.py
Executable file
114
hooks/notification.py
Executable file
@@ -0,0 +1,114 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import random
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def get_tts_script_path():
    """Locate the TTS helper script (pyttsx3 only).

    Returns:
        str | None: absolute path to utils/tts/pyttsx3_tts.py as a string,
        or None when the script is not present on disk.
    """
    # pyttsx3 needs no API key, so it is the only engine we look for.
    candidate = Path(__file__).parent / "utils" / "tts" / "pyttsx3_tts.py"
    return str(candidate) if candidate.exists() else None
|
||||
|
||||
|
||||
def announce_notification():
    """Speak a short "needs your input" message via the local TTS helper.

    All failures (missing script, subprocess errors, timeouts) are silent:
    a broken TTS setup must never break the hook.
    """
    try:
        tts_script = get_tts_script_path()
        if not tts_script:
            return  # No TTS scripts available

        # Personalize roughly 30% of announcements when ENGINEER_NAME is set.
        engineer_name = os.getenv('ENGINEER_NAME', '').strip()
        if engineer_name and random.random() < 0.3:
            message = f"{engineer_name}, your agent needs your input"
        else:
            message = "Your agent needs your input"

        subprocess.run(
            ["python3", tts_script, message],
            capture_output=True,  # suppress output
            timeout=10,           # 10-second timeout
        )
    except Exception:
        # Silent by design (covers subprocess timeouts/errors and anything else).
        pass
|
||||
|
||||
|
||||
def main():
    """Notification hook entry point.

    Reads the hook payload (JSON) from stdin, appends it to
    .claude/logs/notification.json, and optionally announces the
    notification via TTS when --notify is passed. Always exits 0 so a
    hook failure never blocks Claude Code.
    """
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument('--notify', action='store_true', help='Enable TTS notifications')
        args = parser.parse_args()

        # Read JSON input from stdin
        input_data = json.loads(sys.stdin.read())

        # Ensure log directory exists. (os is imported at module level;
        # the former redundant local `import os` has been removed.)
        log_dir = os.path.join(os.getcwd(), '.claude', 'logs')
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, 'notification.json')

        # Read existing log data or initialize empty list
        if os.path.exists(log_file):
            with open(log_file, 'r') as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []

        # Append new data and write back with formatting
        log_data.append(input_data)
        with open(log_file, 'w') as f:
            json.dump(log_data, f, indent=2)

        # Announce via TTS only if --notify is set, and skip the generic
        # "Claude is waiting for your input" message
        if args.notify and input_data.get('message') != 'Claude is waiting for your input':
            announce_notification()

        sys.exit(0)

    except json.JSONDecodeError:
        # Malformed stdin payload: exit cleanly, never block the session
        sys.exit(0)
    except Exception:
        # Any other failure is also non-fatal by design
        sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
106
hooks/post_tool_use.py
Executable file
106
hooks/post_tool_use.py
Executable file
@@ -0,0 +1,106 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import os
|
||||
import stat
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
def fix_permissions(path: Path):
    """Normalize permissions on a freshly created filesystem entry.

    Directories get 755 (rwxr-xr-x: read/execute for everyone), regular
    files get 644 (rw-r--r--: read for everyone, write for owner).
    Permission errors are swallowed: this is a best-effort cleanup hook.
    """
    try:
        if path.is_dir():
            target_mode = 0o755
        elif path.is_file():
            target_mode = 0o644
        else:
            return  # neither a regular file nor a directory (or it vanished)
        os.chmod(path, target_mode)
    except (OSError, PermissionError):
        # Silently ignore permission errors
        pass
|
||||
|
||||
def process_write_tool(tool_input: dict):
    """Fix permissions on the file a Write tool produced, plus its parent dir."""
    file_path = tool_input.get('file_path')
    if not file_path:
        return

    target = Path(file_path)
    if not target.exists():
        return

    fix_permissions(target)
    # The parent directory may also have just been created by this write.
    if target.parent.exists():
        fix_permissions(target.parent)
|
||||
|
||||
def process_bash_tool(tool_input: dict):
    """Handle Bash commands: detect `mkdir` and fix permissions on its targets.

    Fix over the previous version: when the argument right after `mkdir`
    was a flag (e.g. `mkdir -p dir`), the old code skipped the flag and
    then stopped, so the created directory was never processed. We now
    walk every argument after `mkdir`, skipping flags, stopping at shell
    operators so arguments of a following command are not touched.

    NOTE: this is a best-effort textual parse; quoted paths with spaces
    and shell expansions are not handled.
    """
    command = tool_input.get('command', '')

    if 'mkdir' not in command:
        return

    parts = command.split()
    for i, part in enumerate(parts):
        if part != 'mkdir':
            continue
        for arg in parts[i + 1:]:
            # Stop at shell operators: what follows belongs to another command.
            if arg in ('&&', '||', ';', '|'):
                break
            # Skip flags (-p, --parents, -m, ...).
            if arg.startswith('-'):
                continue
            path = Path(arg)
            if path.exists():
                fix_permissions(path)
|
||||
|
||||
def main():
    """PostToolUse hook entry point.

    Reads the hook payload from stdin, fixes permissions for Write/Bash
    tool results, and appends the payload to .claude/logs/post_tool_use.json.
    Always exits 0 so a hook failure never blocks Claude Code.
    """
    try:
        # Read JSON input from stdin
        input_data = json.load(sys.stdin)

        tool_name = input_data.get('tool_name')
        tool_input = input_data.get('tool_input', {})

        # Create the log directory BEFORE the debug write below. Previously
        # the debug open() ran first and raised FileNotFoundError when
        # .claude/logs did not exist yet, which made the broad `except`
        # silently skip all permission processing and logging.
        log_dir = Path.cwd() / '.claude' / 'logs'
        log_dir.mkdir(parents=True, exist_ok=True)

        # Debug log
        with open(log_dir / 'permissions_debug.log', 'a') as f:
            f.write(f"Tool: {tool_name}, Input: {tool_input}\n")

        # Dispatch by tool type
        if tool_name == 'Write':
            process_write_tool(tool_input)
        elif tool_name == 'Bash':
            process_bash_tool(tool_input)

        log_path = log_dir / 'post_tool_use.json'

        # Read existing log data or initialize empty list
        if log_path.exists():
            with open(log_path, 'r') as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []

        # Append new data and write back with formatting
        log_data.append(input_data)
        with open(log_path, 'w') as f:
            json.dump(log_data, f, indent=2)

        sys.exit(0)

    except json.JSONDecodeError:
        # Malformed stdin payload: exit cleanly
        sys.exit(0)
    except Exception:
        # Never let a permissions/logging failure block the session
        sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
120
hooks/pre_compact.py
Executable file
120
hooks/pre_compact.py
Executable file
@@ -0,0 +1,120 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def log_pre_compact(input_data):
    """Append the pre-compact payload to .claude/logs/pre_compact.json.

    The log file holds a JSON array; an unreadable or corrupt file is
    reset to an empty list rather than raising.
    """
    log_dir = Path(".claude/logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / 'pre_compact.json'

    entries = []
    if log_file.exists():
        try:
            with open(log_file, 'r') as f:
                entries = json.load(f)
        except (json.JSONDecodeError, ValueError):
            entries = []

    entries.append(input_data)
    with open(log_file, 'w') as f:
        json.dump(entries, f, indent=2)
|
||||
|
||||
|
||||
def backup_transcript(transcript_path, trigger):
    """Copy the transcript aside before compaction.

    Backups land in .claude/logs/transcript_backups/ named
    <stem>_pre_compact_<trigger>_<timestamp>.jsonl.

    Returns:
        str | None: path of the backup, or None when the source is
        missing or anything fails.
    """
    try:
        import shutil

        if not os.path.exists(transcript_path):
            return

        backup_dir = Path(".claude/logs") / "transcript_backups"
        backup_dir.mkdir(parents=True, exist_ok=True)

        # Encode trigger ("manual"/"auto") and a timestamp in the name so
        # successive compactions never overwrite each other.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        stem = Path(transcript_path).stem
        destination = backup_dir / f"{stem}_pre_compact_{trigger}_{stamp}.jsonl"

        shutil.copy2(transcript_path, destination)
        return str(destination)
    except Exception:
        return None
|
||||
|
||||
|
||||
def main():
    """PreCompact hook entry point.

    Logs the payload to .claude/logs/pre_compact.json, optionally backs
    up the transcript (--backup), optionally prints a human-readable
    summary (--verbose), and always exits 0 so compaction proceeds even
    if the hook fails.
    """
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument('--backup', action='store_true',
                            help='Create backup of transcript before compaction')
        parser.add_argument('--verbose', action='store_true',
                            help='Print verbose output')
        args = parser.parse_args()

        # Read JSON input from stdin
        input_data = json.loads(sys.stdin.read())

        # Extract fields (all optional in the payload, hence the defaults)
        session_id = input_data.get('session_id', 'unknown')
        transcript_path = input_data.get('transcript_path', '')
        trigger = input_data.get('trigger', 'unknown')  # "manual" or "auto"
        custom_instructions = input_data.get('custom_instructions', '')

        # Log the pre-compact event
        log_pre_compact(input_data)

        # Create backup if requested; backup_transcript returns None on failure
        backup_path = None
        if args.backup and transcript_path:
            backup_path = backup_transcript(transcript_path, trigger)

        # Provide feedback based on trigger type
        if args.verbose:
            if trigger == "manual":
                message = f"Preparing for manual compaction (session: {session_id[:8]}...)"
                if custom_instructions:
                    # Only the first 100 chars of custom instructions are shown
                    message += f"\nCustom instructions: {custom_instructions[:100]}..."
            else:  # auto
                message = f"Auto-compaction triggered due to full context window (session: {session_id[:8]}...)"

            if backup_path:
                message += f"\nTranscript backed up to: {backup_path}"

            print(message)

        # Success - compaction will proceed
        sys.exit(0)

    except json.JSONDecodeError:
        # Handle JSON decode errors gracefully
        sys.exit(0)
    except Exception:
        # Handle any other errors gracefully
        sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
136
hooks/pre_tool_use.py
Executable file
136
hooks/pre_tool_use.py
Executable file
@@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import sys
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def is_dangerous_rm_command(command):
    """Heuristically detect destructive `rm` invocations.

    Returns True when the command looks like a recursive+force rm, or a
    recursive rm aimed at a risky target (/, ~, $HOME, .., wildcards, .).
    Matching runs on a lowercased, whitespace-collapsed copy of the
    command, so flag order and casing do not matter.
    """
    # Collapse whitespace and lowercase so "RM   -Rf" matches too.
    normalized = ' '.join(command.lower().split())

    # Any rm combining recursive and force flags (either order, short or
    # long form) is treated as dangerous outright.
    force_recursive_patterns = (
        r'\brm\s+.*-[a-z]*r[a-z]*f',      # rm -rf, rm -Rf, etc.
        r'\brm\s+.*-[a-z]*f[a-z]*r',      # rm -fr variations
        r'\brm\s+--recursive\s+--force',  # rm --recursive --force
        r'\brm\s+--force\s+--recursive',  # rm --force --recursive
        r'\brm\s+-r\s+.*-f',              # split flags: rm -r ... -f
        r'\brm\s+-f\s+.*-r',              # split flags: rm -f ... -r
    )
    if any(re.search(p, normalized) for p in force_recursive_patterns):
        return True

    # A recursive rm (even without -f) pointed at a sensitive location is
    # also flagged. NOTE: these regexes match anywhere in the string, so
    # any path containing '/' or '.' trips them — deliberately conservative.
    risky_targets = (
        r'/',        # root directory
        r'/\*',      # root with wildcard
        r'~',        # home directory
        r'~/',       # home directory path
        r'\$HOME',   # home via environment variable
        r'\.\.',     # parent directory references
        r'\*',       # wildcards in general rm -rf context
        r'\.',       # current directory
        r'\.\s*$',   # current directory at end of command
    )
    if re.search(r'\brm\s+.*-[a-z]*r', normalized):  # rm has a recursive flag
        return any(re.search(p, normalized) for p in risky_targets)

    return False
|
||||
|
||||
def is_env_file_access(tool_name, tool_input):
    """Return True when a tool call would touch a .env file.

    `.env.sample` is explicitly allowed everywhere: it is the template
    form that carries no secrets.
    """
    # File-based tools: inspect the target path directly.
    if tool_name in ('Read', 'Edit', 'MultiEdit', 'Write'):
        file_path = tool_input.get('file_path', '')
        return '.env' in file_path and not file_path.endswith('.env.sample')

    # Bash: scan the command string for common .env access idioms.
    if tool_name == 'Bash':
        command = tool_input.get('command', '')
        access_patterns = (
            r'\b\.env\b(?!\.sample)',              # .env but not .env.sample
            r'cat\s+.*\.env\b(?!\.sample)',        # cat .env
            r'echo\s+.*>\s*\.env\b(?!\.sample)',   # echo > .env
            r'touch\s+.*\.env\b(?!\.sample)',      # touch .env
            r'cp\s+.*\.env\b(?!\.sample)',         # cp .env
            r'mv\s+.*\.env\b(?!\.sample)',         # mv .env
        )
        return any(re.search(p, command) for p in access_patterns)

    return False
|
||||
|
||||
def main():
    """PreToolUse hook entry point.

    Blocks .env file access and dangerous rm commands, logs every
    payload to .claude/logs/pre_tool_use.json, and otherwise exits 0.
    Exit code 2 tells Claude Code to cancel the tool call and surface
    the stderr text to the model.
    """
    try:
        # Read JSON input from stdin
        input_data = json.load(sys.stdin)

        tool_name = input_data.get('tool_name', '')
        tool_input = input_data.get('tool_input', {})

        # Check for .env file access (blocks access to sensitive environment files)
        if is_env_file_access(tool_name, tool_input):
            print("BLOCKED: Access to .env files containing sensitive data is prohibited", file=sys.stderr)
            print("Use .env.sample for template files instead", file=sys.stderr)
            sys.exit(2)  # Exit code 2 blocks tool call and shows error to Claude

        # Check for dangerous rm -rf commands
        if tool_name == 'Bash':
            command = tool_input.get('command', '')

            # Block rm -rf commands with comprehensive pattern matching
            if is_dangerous_rm_command(command):
                print("BLOCKED: Dangerous rm command detected and prevented", file=sys.stderr)
                sys.exit(2)  # Exit code 2 blocks tool call and shows error to Claude

        # Ensure log directory exists
        log_dir = Path.cwd() / '.claude' / 'logs'
        log_dir.mkdir(parents=True, exist_ok=True)
        log_path = log_dir / 'pre_tool_use.json'

        # Read existing log data or initialize empty list
        if log_path.exists():
            with open(log_path, 'r') as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []

        # Append new data
        log_data.append(input_data)

        # Write back to file with formatting
        with open(log_path, 'w') as f:
            json.dump(log_data, f, indent=2)

        sys.exit(0)

    except json.JSONDecodeError:
        # Gracefully handle JSON decode errors
        sys.exit(0)
    except Exception:
        # Handle any other errors gracefully.
        # SystemExit derives from BaseException, not Exception, so the
        # sys.exit(2) blocking paths above are NOT swallowed here.
        sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
207
hooks/session_start.py
Executable file
207
hooks/session_start.py
Executable file
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def log_session_start(input_data):
    """Append the session-start payload to .claude/logs/session_start.json.

    The log file holds a JSON array; an unreadable or corrupt file is
    reset to an empty list rather than raising.
    """
    log_dir = Path(".claude/logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / 'session_start.json'

    history = []
    if log_file.exists():
        try:
            with open(log_file, 'r') as f:
                history = json.load(f)
        except (json.JSONDecodeError, ValueError):
            history = []

    history.append(input_data)
    with open(log_file, 'w') as f:
        json.dump(history, f, indent=2)
|
||||
|
||||
|
||||
def get_git_status():
    """Return (current_branch, uncommitted_file_count) for the cwd repo.

    Returns:
        tuple: (branch, count). Branch falls back to "unknown" when
        rev-parse fails; (None, None) when git is unavailable or any
        other error occurs.
    """
    try:
        branch_proc = subprocess.run(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
            capture_output=True,
            text=True,
            timeout=5,
        )
        branch = branch_proc.stdout.strip() if branch_proc.returncode == 0 else "unknown"

        # `git status --porcelain` prints one line per changed file.
        status_proc = subprocess.run(
            ['git', 'status', '--porcelain'],
            capture_output=True,
            text=True,
            timeout=5,
        )
        dirty_count = 0
        if status_proc.returncode == 0:
            porcelain = status_proc.stdout.strip()
            if porcelain:
                dirty_count = len(porcelain.split('\n'))

        return branch, dirty_count
    except Exception:
        return None, None
|
||||
|
||||
|
||||
def get_recent_issues():
    """Return up to 5 open GitHub issues via the `gh` CLI, or None.

    None is returned when gh is not installed, the listing fails, or
    there are no open issues.
    """
    try:
        # Bail out early when the gh CLI is not on PATH.
        if subprocess.run(['which', 'gh'], capture_output=True).returncode != 0:
            return None

        listing = subprocess.run(
            ['gh', 'issue', 'list', '--limit', '5', '--state', 'open'],
            capture_output=True,
            text=True,
            timeout=10,
        )
        output = listing.stdout.strip()
        if listing.returncode == 0 and output:
            return output
    except Exception:
        pass
    return None
|
||||
|
||||
|
||||
def load_development_context(source):
    """Build a context blurb for the start of a session.

    Combines a timestamp, the session source, git branch/dirty-file
    counts, the first 1000 characters of a few well-known project files,
    and recent GitHub issues into one newline-joined string.

    Args:
        source: origin of the session ("startup", "resume", or "clear"
            per the caller's payload).

    Returns:
        str: the assembled context text (never None; may be just the
        timestamp/source lines when nothing else is available).
    """
    context_parts = []

    # Add timestamp
    context_parts.append(f"Session started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    context_parts.append(f"Session source: {source}")

    # Add git information; (None, None) means git was unavailable
    branch, changes = get_git_status()
    if branch:
        context_parts.append(f"Git branch: {branch}")
        if changes > 0:
            context_parts.append(f"Uncommitted changes: {changes} files")

    # Load project-specific context files if they exist
    context_files = [
        ".claude/CONTEXT.md",
        ".claude/TODO.md",
        "TODO.md",
        ".github/ISSUE_TEMPLATE.md"
    ]

    for file_path in context_files:
        if Path(file_path).exists():
            try:
                with open(file_path, 'r') as f:
                    content = f.read().strip()
                    if content:
                        context_parts.append(f"\n--- Content from {file_path} ---")
                        context_parts.append(content[:1000])  # Limit to first 1000 chars
            except Exception:
                # Unreadable context files are simply skipped
                pass

    # Add recent issues if available
    issues = get_recent_issues()
    if issues:
        context_parts.append("\n--- Recent GitHub Issues ---")
        context_parts.append(issues)

    return "\n".join(context_parts)
|
||||
|
||||
|
||||
def main():
    """SessionStart hook entry point.

    Logs the payload to .claude/logs/session_start.json, optionally
    emits additional context for Claude (--load-context) as
    hookSpecificOutput JSON on stdout, and optionally announces the
    session start via TTS (--announce). Always exits 0.
    """
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument('--load-context', action='store_true',
                            help='Load development context at session start')
        parser.add_argument('--announce', action='store_true',
                            help='Announce session start via TTS')
        args = parser.parse_args()

        # Read JSON input from stdin
        input_data = json.loads(sys.stdin.read())

        # Extract fields
        # NOTE(review): session_id is extracted but currently unused.
        session_id = input_data.get('session_id', 'unknown')
        source = input_data.get('source', 'unknown')  # "startup", "resume", or "clear"

        # Log the session start event
        log_session_start(input_data)

        # Load development context if requested
        if args.load_context:
            context = load_development_context(source)
            if context:
                # Using JSON output to add context
                output = {
                    "hookSpecificOutput": {
                        "hookEventName": "SessionStart",
                        "additionalContext": context
                    }
                }
                print(json.dumps(output))
                # NOTE(review): exiting here means --announce is skipped
                # whenever context was emitted — confirm this is intended.
                sys.exit(0)

        # Announce session start if requested
        if args.announce:
            try:
                # Try to use TTS to announce session start
                script_dir = Path(__file__).parent
                tts_script = script_dir / "utils" / "tts" / "pyttsx3_tts.py"

                if tts_script.exists():
                    messages = {
                        "startup": "Claude Code session started",
                        "resume": "Resuming previous session",
                        "clear": "Starting fresh session"
                    }
                    message = messages.get(source, "Session started")

                    subprocess.run(
                        ["python3", str(tts_script), message],
                        capture_output=True,
                        timeout=5
                    )
            except Exception:
                # TTS problems are non-fatal
                pass

        # Success
        sys.exit(0)

    except json.JSONDecodeError:
        # Handle JSON decode errors gracefully
        sys.exit(0)
    except Exception:
        # Handle any other errors gracefully
        sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
179
hooks/stop.py
Executable file
179
hooks/stop.py
Executable file
@@ -0,0 +1,179 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import random
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def get_completion_messages():
    """Return the pool of friendly completion phrases used as a TTS fallback."""
    messages = [
        "Work complete!",
        "All done!",
        "Task finished!",
        "Job complete!",
        "Ready for next task!",
    ]
    return messages
|
||||
|
||||
|
||||
def get_tts_script_path():
    """Locate the TTS helper script (pyttsx3 only).

    Returns:
        str | None: path to utils/tts/pyttsx3_tts.py as a string, or
        None when the script is not present on disk.
    """
    # pyttsx3 needs no API key, so it is the only engine we look for.
    candidate = Path(__file__).parent / "utils" / "tts" / "pyttsx3_tts.py"
    return str(candidate) if candidate.exists() else None
|
||||
|
||||
|
||||
def get_llm_completion_message():
    """Produce a completion message for the TTS announcement.

    Tries the Anthropic helper (utils/llm/anth.py) when the
    __ANTHROPIC_API_KEY environment variable is set, and falls back to
    a random canned phrase otherwise.

    Returns:
        str: Generated or fallback completion message
    """
    llm_dir = Path(__file__).parent / "utils" / "llm"

    # Anthropic path: only attempted when the key is configured.
    # NOTE(review): the variable is named __ANTHROPIC_API_KEY (leading
    # double underscore), not ANTHROPIC_API_KEY — confirm this is deliberate.
    if os.getenv('__ANTHROPIC_API_KEY'):
        anth_script = llm_dir / "anth.py"
        if anth_script.exists():
            try:
                proc = subprocess.run(
                    ["python3", str(anth_script), "--completion"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                generated = proc.stdout.strip()
                if proc.returncode == 0 and generated:
                    return generated
            except (subprocess.TimeoutExpired, subprocess.SubprocessError):
                pass

    # Fallback to a random predefined message
    return random.choice(get_completion_messages())
|
||||
|
||||
def announce_completion():
    """Speak a completion message via the local TTS helper; never raises."""
    try:
        tts_script = get_tts_script_path()
        if not tts_script:
            return  # No TTS scripts available

        # LLM-generated message when possible, canned phrase otherwise.
        message = get_llm_completion_message()

        subprocess.run(
            ["python3", tts_script, message],
            capture_output=True,  # suppress output
            timeout=10,           # 10-second timeout
        )
    except Exception:
        # Silent by design: TTS problems must never break the hook.
        pass
|
||||
|
||||
|
||||
def main():
    """Stop hook entry point.

    Logs the payload to .claude/logs/stop.json, optionally converts the
    .jsonl transcript into .claude/logs/chat.json (--chat), and
    optionally announces completion via TTS (--notify). Always exits 0.
    """
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument('--chat', action='store_true', help='Copy transcript to chat.json')
        parser.add_argument('--notify', action='store_true', help='Enable TTS completion announcement')
        args = parser.parse_args()

        # Read JSON input from stdin
        input_data = json.load(sys.stdin)

        # Extract required fields
        # NOTE(review): session_id and stop_hook_active are currently unused.
        session_id = input_data.get("session_id", "")
        stop_hook_active = input_data.get("stop_hook_active", False)

        # Ensure log directory exists
        log_dir = os.path.join(os.getcwd(), ".claude", "logs")
        os.makedirs(log_dir, exist_ok=True)
        log_path = os.path.join(log_dir, "stop.json")

        # Read existing log data or initialize empty list
        if os.path.exists(log_path):
            with open(log_path, 'r') as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []

        # Append new data
        log_data.append(input_data)

        # Write back to file with formatting
        with open(log_path, 'w') as f:
            json.dump(log_data, f, indent=2)

        # Handle --chat switch
        if args.chat and 'transcript_path' in input_data:
            transcript_path = input_data['transcript_path']
            if os.path.exists(transcript_path):
                # Read .jsonl file and convert to JSON array
                chat_data = []
                try:
                    with open(transcript_path, 'r') as f:
                        for line in f:
                            line = line.strip()
                            if line:
                                try:
                                    chat_data.append(json.loads(line))
                                except json.JSONDecodeError:
                                    pass  # Skip invalid lines

                    # Write to .claude/logs/chat.json
                    chat_file = os.path.join(log_dir, 'chat.json')
                    with open(chat_file, 'w') as f:
                        json.dump(chat_data, f, indent=2)
                except Exception:
                    pass  # Fail silently

        # Announce completion via TTS (only if --notify flag is set)
        if args.notify:
            announce_completion()

        sys.exit(0)

    except json.JSONDecodeError:
        # Handle JSON decode errors gracefully
        sys.exit(0)
    except Exception:
        # Handle any other errors gracefully
        sys.exit(0)


if __name__ == "__main__":
    main()
|
||||
135
hooks/subagent_stop.py
Executable file
135
hooks/subagent_stop.py
Executable file
@@ -0,0 +1,135 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def get_tts_script_path():
    """Locate the TTS helper script (pyttsx3 only).

    Returns:
        str | None: path to utils/tts/pyttsx3_tts.py as a string, or
        None when the script is not present on disk.
    """
    # pyttsx3 needs no API key, so it is the only engine we look for.
    candidate = Path(__file__).parent / "utils" / "tts" / "pyttsx3_tts.py"
    return str(candidate) if candidate.exists() else None
|
||||
|
||||
|
||||
def announce_subagent_completion():
    """Speak a fixed "Subagent Complete" message via TTS; never raises."""
    try:
        tts_script = get_tts_script_path()
        if not tts_script:
            return  # No TTS scripts available

        # Subagents always use the same fixed message.
        subprocess.run(
            ["python3", tts_script, "Subagent Complete"],
            capture_output=True,  # suppress output
            timeout=10,           # 10-second timeout
        )
    except Exception:
        # Silent by design: TTS problems must never break the hook.
        pass
|
||||
|
||||
|
||||
def main():
    """SubagentStop hook entry point.

    Logs the payload to .claude/logs/subagent_stop.json, optionally
    converts the .jsonl transcript into .claude/logs/chat.json (--chat),
    and optionally announces completion via TTS (--notify). Always
    exits 0. Mirrors stop.py.
    """
    try:
        # Parse command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument('--chat', action='store_true', help='Copy transcript to chat.json')
        parser.add_argument('--notify', action='store_true', help='Enable TTS completion announcement')
        args = parser.parse_args()

        # Read JSON input from stdin
        input_data = json.load(sys.stdin)

        # Extract required fields
        # NOTE(review): session_id and stop_hook_active are currently unused.
        session_id = input_data.get("session_id", "")
        stop_hook_active = input_data.get("stop_hook_active", False)

        # Ensure log directory exists
        log_dir = os.path.join(os.getcwd(), ".claude", "logs")
        os.makedirs(log_dir, exist_ok=True)
        log_path = os.path.join(log_dir, "subagent_stop.json")

        # Read existing log data or initialize empty list
        if os.path.exists(log_path):
            with open(log_path, 'r') as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []

        # Append new data
        log_data.append(input_data)

        # Write back to file with formatting
        with open(log_path, 'w') as f:
            json.dump(log_data, f, indent=2)

        # Handle --chat switch (same as stop.py)
        if args.chat and 'transcript_path' in input_data:
            transcript_path = input_data['transcript_path']
            if os.path.exists(transcript_path):
                # Read .jsonl file and convert to JSON array
                chat_data = []
                try:
                    with open(transcript_path, 'r') as f:
                        for line in f:
                            line = line.strip()
                            if line:
                                try:
                                    chat_data.append(json.loads(line))
                                except json.JSONDecodeError:
                                    pass  # Skip invalid lines

                    # Write to .claude/logs/chat.json
                    chat_file = os.path.join(log_dir, 'chat.json')
                    with open(chat_file, 'w') as f:
                        json.dump(chat_data, f, indent=2)
                except Exception:
                    pass  # Fail silently

        # Announce subagent completion via TTS (only if --notify flag is set)
        if args.notify:
            announce_subagent_completion()

        sys.exit(0)

    except json.JSONDecodeError:
        # Handle JSON decode errors gracefully
        sys.exit(0)
    except Exception:
        # Handle any other errors gracefully
        sys.exit(0)


if __name__ == "__main__":
    main()
|
||||
186
hooks/user_prompt_submit.py
Normal file
186
hooks/user_prompt_submit.py
Normal file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
try:
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
except ImportError:
|
||||
pass # dotenv is optional
|
||||
|
||||
|
||||
def log_user_prompt(session_id, input_data):
    """Append the full hook payload to .claude/logs/user_prompt_submit.json.

    The log file holds a single JSON array; a missing or corrupt file is
    treated as an empty history. ``session_id`` is accepted for interface
    parity with the other hooks but is not used here (the payload already
    carries it).
    """
    # Make sure the log directory exists before touching the file.
    log_dir = Path(".claude/logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / 'user_prompt_submit.json'

    # Load prior entries, falling back to an empty list on any parse issue.
    entries = []
    if log_file.exists():
        with open(log_file, 'r') as fh:
            try:
                entries = json.load(fh)
            except (json.JSONDecodeError, ValueError):
                entries = []

    entries.append(input_data)

    # Rewrite the whole array, pretty-printed for manual inspection.
    with open(log_file, 'w') as fh:
        json.dump(entries, fh, indent=2)
|
||||
|
||||
|
||||
# Legacy function removed - now handled by manage_session_data
|
||||
|
||||
|
||||
def manage_session_data(session_id, prompt, name_agent=False):
    """Record *prompt* in the per-session JSON file and optionally name the agent.

    Session files live under .claude/data/sessions/<session_id>.json and hold
    ``{"session_id": ..., "prompts": [...]}`` plus an optional ``agent_name``.
    When *name_agent* is True and no name exists yet, a one-word name is
    requested from a local Ollama helper first, falling back to the Anthropic
    helper. All failures are swallowed so the hook never blocks the prompt.

    Args:
        session_id: Claude Code session identifier (used as the file name).
        prompt: The user prompt text to append to the session history.
        name_agent: Whether to generate an agent name if one is missing.
    """
    import subprocess

    # Ensure sessions directory exists
    sessions_dir = Path(".claude/data/sessions")
    sessions_dir.mkdir(parents=True, exist_ok=True)

    # Load or create session file
    session_file = sessions_dir / f"{session_id}.json"

    if session_file.exists():
        try:
            with open(session_file, 'r') as f:
                session_data = json.load(f)
        except (json.JSONDecodeError, ValueError):
            # Corrupt file: start a fresh record rather than failing.
            session_data = {"session_id": session_id, "prompts": []}
    else:
        session_data = {"session_id": session_id, "prompts": []}

    # Add the new prompt
    session_data["prompts"].append(prompt)

    # Generate agent name if requested and not already present
    if name_agent and "agent_name" not in session_data:
        # Try Ollama first (preferred: local, fast)
        try:
            result = subprocess.run(
                ["python3", ".claude/hooks/utils/llm/ollama.py", "--agent-name"],
                capture_output=True,
                text=True,
                timeout=5  # Shorter timeout for local Ollama
            )

            if result.returncode == 0 and result.stdout.strip():
                agent_name = result.stdout.strip()
                # Check if it's a valid name (not an error message)
                if len(agent_name.split()) == 1 and agent_name.isalnum():
                    session_data["agent_name"] = agent_name
                else:
                    raise Exception("Invalid name from Ollama")
            else:
                # Bug fix: previously a nonzero exit / empty output fell
                # through silently and the Anthropic fallback never ran.
                raise Exception("Ollama returned no usable name")
        except Exception:
            # Fall back to Anthropic if Ollama fails
            try:
                result = subprocess.run(
                    ["python3", ".claude/hooks/utils/llm/anth.py", "--agent-name"],
                    capture_output=True,
                    text=True,
                    timeout=10
                )

                if result.returncode == 0 and result.stdout.strip():
                    agent_name = result.stdout.strip()
                    # Validate the name (single alphanumeric word)
                    if len(agent_name.split()) == 1 and agent_name.isalnum():
                        session_data["agent_name"] = agent_name
            except Exception:
                # If both fail, don't block the prompt
                pass

    # Save the updated session data
    try:
        with open(session_file, 'w') as f:
            json.dump(session_data, f, indent=2)
    except Exception:
        # Silently fail if we can't write the file
        pass
|
||||
|
||||
|
||||
def validate_prompt(prompt):
    """Check *prompt* against the configured block-list.

    Returns:
        tuple: ``(True, None)`` when the prompt is acceptable, otherwise
        ``(False, reason)`` with a human-readable explanation.
    """
    # Substring patterns to reject, as (pattern, reason) pairs. Empty by
    # default; customize as needed, e.g.:
    #   ('rm -rf /', 'Dangerous command detected'),
    blocked_patterns = []

    lowered = prompt.lower()
    for pattern, reason in blocked_patterns:
        if pattern.lower() in lowered:
            return False, reason

    return True, None
|
||||
|
||||
|
||||
def main():
    """Entry point for the UserPromptSubmit hook.

    Reads the hook payload from stdin, logs it, optionally stores session
    data, and validates the prompt. Exit code 2 blocks the prompt; any
    internal failure exits 0 so hook errors never block the user.
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('--validate', action='store_true',
                            help='Enable prompt validation')
        parser.add_argument('--log-only', action='store_true',
                            help='Only log prompts, no validation or blocking')
        parser.add_argument('--store-last-prompt', action='store_true',
                            help='Store the last prompt for status line display')
        parser.add_argument('--name-agent', action='store_true',
                            help='Generate an agent name for the session')
        args = parser.parse_args()

        # The hook payload arrives as one JSON document on stdin.
        payload = json.loads(sys.stdin.read())
        session_id = payload.get('session_id', 'unknown')
        prompt = payload.get('prompt', '')

        # Always log the raw payload first.
        log_user_prompt(session_id, payload)

        # Persist session data only when a flag asks for it.
        if args.store_last_prompt or args.name_agent:
            manage_session_data(session_id, prompt, name_agent=args.name_agent)

        # Validation is skipped entirely in log-only mode.
        if args.validate and not args.log_only:
            ok, why = validate_prompt(prompt)
            if not ok:
                # Exit code 2 tells Claude Code to block this prompt.
                print(f"Prompt blocked: {why}", file=sys.stderr)
                sys.exit(2)

        # Optional extra context could be printed here; anything printed is
        # appended to the prompt, e.g. print(f"Current time: {datetime.now()}")

        sys.exit(0)

    except json.JSONDecodeError:
        # Malformed payload: fail open rather than blocking the user.
        sys.exit(0)
    except Exception:
        # Any other hook error also fails open.
        sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
186
hooks/utils/llm/anth.py
Executable file
186
hooks/utils/llm/anth.py
Executable file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import sys
|
||||
from dotenv import load_dotenv
|
||||
|
||||
|
||||
def prompt_llm(prompt_text):
    """Send *prompt_text* to Anthropic's fastest model and return the reply.

    Args:
        prompt_text (str): The prompt to send to the model.

    Returns:
        str | None: The model's response text, or None when the API key is
        missing or the request fails for any reason.
    """
    load_dotenv()

    # NOTE(review): the key is read as "__ANTHROPIC_API_KEY" (double
    # underscore) consistently across this module — presumably deliberate
    # (e.g. to keep the helper disabled); confirm before "fixing".
    api_key = os.getenv("__ANTHROPIC_API_KEY")
    if not api_key:
        return None

    try:
        # Imported lazily so the module loads even without the package.
        import anthropic

        client = anthropic.Anthropic(api_key=api_key)
        reply = client.messages.create(
            model="claude-3-5-haiku-20241022",  # Fastest Anthropic model
            max_tokens=100,
            temperature=0.7,
            messages=[{"role": "user", "content": prompt_text}],
        )
        return reply.content[0].text.strip()

    except Exception:
        # Network/API failures degrade to "no message" rather than crashing.
        return None
|
||||
|
||||
|
||||
def generate_completion_message():
    """Produce a short task-completion message via the Anthropic LLM.

    Returns:
        str | None: A cleaned one-line completion message, or None when the
        underlying API call fails.
    """
    engineer_name = os.getenv("ENGINEER_NAME", "").strip()

    # Personalize the request when an engineer name is configured.
    if engineer_name:
        name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
        examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
    else:
        name_instruction = ""
        examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """

    request = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.

Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}

{examples}

Generate ONE completion message:"""

    raw = prompt_llm(request)
    if not raw:
        # None (API failure) or empty string pass through unchanged.
        return raw

    # Strip wrapping quotes/whitespace and keep only the first line.
    cleaned = raw.strip().strip('"').strip("'").strip()
    return cleaned.split("\n")[0].strip()
|
||||
|
||||
|
||||
def generate_agent_name():
    """Generate a one-word agent name via Anthropic, with an offline fallback.

    Returns:
        str: A single capitalized alphanumeric word (3-20 chars); on any
        failure, a random pick from a built-in list of example names.
    """
    import random

    # Curated fallback pool, also used to steer the model's style.
    example_names = [
        "Phoenix", "Sage", "Nova", "Echo", "Atlas", "Cipher", "Nexus",
        "Oracle", "Quantum", "Zenith", "Aurora", "Vortex", "Nebula",
        "Catalyst", "Prism", "Axiom", "Helix", "Flux", "Synth", "Vertex"
    ]

    # Without an API key, skip the network call entirely.
    if not os.getenv("__ANTHROPIC_API_KEY"):
        return random.choice(example_names)

    examples_str = ", ".join(example_names[:10])  # first 10 guide the model

    prompt_text = f"""Generate exactly ONE unique agent/assistant name.

Requirements:
- Single word only (no spaces, hyphens, or punctuation)
- Abstract and memorable
- Professional sounding
- Easy to pronounce
- Similar style to these examples: {examples_str}

Generate a NEW name (not from the examples). Respond with ONLY the name, nothing else.

Name:"""

    try:
        # Re-check the key after loading .env (may not have been loaded yet).
        load_dotenv()
        api_key = os.getenv("__ANTHROPIC_API_KEY")
        if not api_key:
            raise Exception("No API key")

        import anthropic
        client = anthropic.Anthropic(api_key=api_key)

        reply = client.messages.create(
            model="claude-3-5-haiku-20241022",  # Fast model
            max_tokens=20,
            temperature=0.7,
            messages=[{"role": "user", "content": prompt_text}],
        )

        # Normalize: first word only, alphanumeric only, capitalized.
        candidate = reply.content[0].text.strip()
        candidate = candidate.split()[0] if candidate else "Agent"
        candidate = ''.join(ch for ch in candidate if ch.isalnum())
        candidate = candidate.capitalize() if candidate else "Agent"

        # Reject empty or implausibly short/long names.
        if candidate and 3 <= len(candidate) <= 20:
            return candidate
        raise Exception("Invalid name generated")

    except Exception:
        # Any failure falls back to a random example name.
        return random.choice(example_names)
|
||||
|
||||
|
||||
def main():
    """Command-line interface for manual testing of this helper module."""
    import json  # kept from the original interface; not used below

    # No arguments: show usage and bail out.
    if len(sys.argv) <= 1:
        print("Usage: ./anth.py 'your prompt here' or ./anth.py --completion or ./anth.py --agent-name")
        return

    mode = sys.argv[1]
    if mode == "--completion":
        message = generate_completion_message()
        print(message if message else "Error generating completion message")
    elif mode == "--agent-name":
        # Name generation needs no input.
        print(generate_agent_name())
    else:
        # Treat all arguments as one free-form prompt.
        reply = prompt_llm(" ".join(sys.argv[1:]))
        print(reply if reply else "Error calling Anthropic API")


if __name__ == "__main__":
    main()
|
||||
98
hooks/utils/tts/pyttsx3_tts.py
Executable file
98
hooks/utils/tts/pyttsx3_tts.py
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import sys
|
||||
import random
|
||||
|
||||
def main():
    """
    pyttsx3 TTS Script

    Uses pyttsx3 for offline text-to-speech synthesis.
    Accepts optional text prompt as command-line argument.

    Usage:
    - ./pyttsx3_tts.py                      # Uses default text
    - ./pyttsx3_tts.py "Your custom text"   # Uses provided text

    Features:
    - Offline TTS (no API key required)
    - Cross-platform compatibility
    - Configurable voice settings
    - Immediate audio playback

    Exits with status 1 if pyttsx3 is missing or synthesis fails.
    """

    try:
        # Imported here so the failure mode is a clean message, not a crash.
        import pyttsx3

        # Initialize TTS engine
        engine = pyttsx3.init()

        # Configure French voice if available
        voices = engine.getProperty('voices')
        french_voice = None

        # Priority order for French voices.
        # NOTE(review): these ids look like espeak voice ids — on other
        # backends (e.g. SAPI5, NSSpeechSynthesizer) none will match and the
        # default voice is used; confirm the target platform.
        french_voice_preferences = [
            'roa/fr',     # French (France) - preferred
            'roa/fr-be',  # French (Belgium)
            'roa/fr-ch',  # French (Switzerland)
        ]

        # Find the best French voice (exact id match, first preference wins)
        for pref in french_voice_preferences:
            for voice in voices:
                if voice.id == pref:  # Exact match instead of substring
                    french_voice = voice
                    break
            if french_voice:
                break

        # Set French voice if found
        if french_voice:
            engine.setProperty('voice', french_voice.id)
            print(f"🎙️ Voix française sélectionnée: {french_voice.name}")
        else:
            print("⚠️ Aucune voix française trouvée, utilisation de la voix par défaut")

        # Configure engine settings
        engine.setProperty('rate', 160)  # Slightly slower for better French pronunciation
        engine.setProperty('volume', 0.8)  # Volume (0.0 to 1.0)

        print("🎙️ pyttsx3 TTS")
        print("=" * 15)

        # Get text from command line argument or use default
        if len(sys.argv) > 1:
            text = " ".join(sys.argv[1:])  # Join all arguments as text
        else:
            # Default completion messages in French (one picked at random)
            completion_messages = [
                "Travail terminé !",
                "Tout est fini !",
                "Tâche accomplie !",
                "Mission accomplie !",
                "Prêt pour la prochaine tâche !",
                "C'est dans la boîte !",
                "Opération réussie !"
            ]
            text = random.choice(completion_messages)

        print(f"🎯 Text: {text}")
        print("🔊 Speaking...")

        # Speak the text (runAndWait blocks until playback finishes)
        engine.say(text)
        engine.runAndWait()

        print("✅ Playback complete!")

    except ImportError:
        print("❌ Error: pyttsx3 package not installed")
        print("This script uses UV to auto-install dependencies.")
        sys.exit(1)
    except Exception as e:
        # Any engine/audio failure is reported and mapped to exit code 1.
        print(f"❌ Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
81
plugin.lock.json
Normal file
81
plugin.lock.json
Normal file
@@ -0,0 +1,81 @@
|
||||
{
|
||||
"$schema": "internal://schemas/plugin.lock.v1.json",
|
||||
"pluginId": "gh:atournayre/claude-marketplace:customize",
|
||||
"normalized": {
|
||||
"repo": null,
|
||||
"ref": "refs/tags/v20251128.0",
|
||||
"commit": "a9e4f5f1d182a6442959f167392dc9da3c61b2b6",
|
||||
"treeHash": "1e29ffcc5fe8b60cad4b171fd23d988a4114f15c2b6e1f47c89f2a972a79b03c",
|
||||
"generatedAt": "2025-11-28T10:13:59.326284Z",
|
||||
"toolVersion": "publish_plugins.py@0.2.0"
|
||||
},
|
||||
"origin": {
|
||||
"remote": "git@github.com:zhongweili/42plugin-data.git",
|
||||
"branch": "master",
|
||||
"commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
|
||||
"repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
|
||||
},
|
||||
"manifest": {
|
||||
"name": "customize",
|
||||
"description": "Personnalise ton expérience Claude Code avec hooks, output styles et status lines sur mesure",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"content": {
|
||||
"files": [
|
||||
{
|
||||
"path": "README.md",
|
||||
"sha256": "3537187c5183419b92ed65456dc77ac956782039d4a471c26a0556a52973df36"
|
||||
},
|
||||
{
|
||||
"path": "hooks/post_tool_use.py",
|
||||
"sha256": "f5a678ecbfbeaa8e025099b04eeb682b037b91f0abf2758f2ed095ba2ebfef17"
|
||||
},
|
||||
{
|
||||
"path": "hooks/notification.py",
|
||||
"sha256": "7b78b1aed5f3afe3718e3ab2d86bea0fd18e8b7f1abedf0348d9d14e705cd5fd"
|
||||
},
|
||||
{
|
||||
"path": "hooks/stop.py",
|
||||
"sha256": "0f708fc9f10e4bb3787a9209b00154cda6fb2b99b3566545e02541002fa9e6fc"
|
||||
},
|
||||
{
|
||||
"path": "hooks/pre_tool_use.py",
|
||||
"sha256": "e39b435de16dc34673cde467a16c16bc9543ff681442fa4fbfde7cd84161402e"
|
||||
},
|
||||
{
|
||||
"path": "hooks/session_start.py",
|
||||
"sha256": "08befda7910d6fe6826f605b2e6681e9a920d276069ecc6990038edb7b6b6dd4"
|
||||
},
|
||||
{
|
||||
"path": "hooks/subagent_stop.py",
|
||||
"sha256": "4fbe456867e32820bd13a0255b68e39dd063ee259fa226ccba18d3a9f6ecc6cb"
|
||||
},
|
||||
{
|
||||
"path": "hooks/pre_compact.py",
|
||||
"sha256": "b295248244a9e828053facf5b543f1739aa04fcdb34ec609b1f15d5c30f5993e"
|
||||
},
|
||||
{
|
||||
"path": "hooks/user_prompt_submit.py",
|
||||
"sha256": "f91f90a07c5bfabfe2eb76322aec7d6a47314606427eceed3ee119ecce048e12"
|
||||
},
|
||||
{
|
||||
"path": "hooks/utils/llm/anth.py",
|
||||
"sha256": "2be2ab8cedebb695cab1e1eb4da1992701100f149fda8a2a442d2acd6564301d"
|
||||
},
|
||||
{
|
||||
"path": "hooks/utils/tts/pyttsx3_tts.py",
|
||||
"sha256": "d99a37c0d6993b630fa917c29c5fbd0fa57a3662ac97389f368416441b2c776d"
|
||||
},
|
||||
{
|
||||
"path": ".claude-plugin/plugin.json",
|
||||
"sha256": "6f1f694db954a3a70825ae66d7190b1b43c67dc4440b41b64550336460e453d0"
|
||||
}
|
||||
],
|
||||
"dirSha256": "1e29ffcc5fe8b60cad4b171fd23d988a4114f15c2b6e1f47c89f2a972a79b03c"
|
||||
},
|
||||
"security": {
|
||||
"scannedAt": null,
|
||||
"scannerVersion": null,
|
||||
"flags": []
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user