Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 17:57:28 +08:00
commit e063391898
27 changed files with 3055 additions and 0 deletions

26
hooks/utils/README.md Normal file
View File

@@ -0,0 +1,26 @@
# Utils - Shared Utilities
This directory contains shared utilities and helper functions used by various hooks.
## Structure:
- **llm/**: Language model utilities
- anth.py: Anthropic API utilities
- oai.py: OpenAI API utilities
- **tts/**: Text-to-speech utilities
- elevenlabs_tts.py: ElevenLabs TTS integration
- openai_tts.py: OpenAI TTS integration
- pyttsx3_tts.py: Local TTS using pyttsx3
## Usage:
These utilities are imported and used by various hooks. They provide common functionality like:
- API integrations
- Text-to-speech capabilities
- Shared helper functions
- Common validation logic
## Note:
Do not run these files directly. They are meant to be imported by hooks.

115
hooks/utils/llm/anth.py Executable file
View File

@@ -0,0 +1,115 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "anthropic",
# "python-dotenv",
# ]
# ///
import os
import sys
from dotenv import load_dotenv
def prompt_llm(prompt_text):
    """Send *prompt_text* to Anthropic's fastest model and return its reply.

    Args:
        prompt_text (str): The prompt to send to the model

    Returns:
        str: The model's response text, or None if error
    """
    load_dotenv()

    key = os.getenv("ANTHROPIC_API_KEY")
    if not key:
        return None

    try:
        import anthropic

        reply = anthropic.Anthropic(api_key=key).messages.create(
            model="claude-3-5-haiku-20241022",  # Fastest Anthropic model
            max_tokens=100,
            temperature=0.7,
            messages=[{"role": "user", "content": prompt_text}],
        )
        return reply.content[0].text.strip()
    except Exception:
        # Best-effort: any import or API failure is reported as None.
        return None
def generate_completion_message():
    """
    Generate a completion message using Anthropic LLM.

    Returns:
        str: A natural language completion message, or None if error
    """
    # Load .env before reading ENGINEER_NAME. Previously load_dotenv() only
    # ran inside prompt_llm() -- *after* this lookup -- so an ENGINEER_NAME
    # set solely in the .env file was silently ignored.
    load_dotenv()
    engineer_name = os.getenv("ENGINEER_NAME", "").strip()
    if engineer_name:
        name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
        examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
    else:
        name_instruction = ""
        examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """
    prompt = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.
Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}
{examples}
Generate ONE completion message:"""
    response = prompt_llm(prompt)
    # Clean up response - remove quotes and extra formatting
    if response:
        response = response.strip().strip('"').strip("'").strip()
        # Take first line if multiple lines
        response = response.split("\n")[0].strip()
    return response
def main():
    """Command line interface for testing.

    Usage:
        ./anth.py 'your prompt here'   # send an arbitrary prompt
        ./anth.py --completion         # generate a completion message

    Errors go to stderr and the process exits with status 1 on failure,
    so shell scripts can distinguish success from failure (previously
    errors were printed to stdout and the exit status was always 0).
    """
    if len(sys.argv) > 1:
        if sys.argv[1] == "--completion":
            message = generate_completion_message()
            if message:
                print(message)
            else:
                print("Error generating completion message", file=sys.stderr)
                sys.exit(1)
        else:
            prompt_text = " ".join(sys.argv[1:])
            response = prompt_llm(prompt_text)
            if response:
                print(response)
            else:
                print("Error calling Anthropic API", file=sys.stderr)
                sys.exit(1)
    else:
        print("Usage: ./anth.py 'your prompt here' or ./anth.py --completion")


if __name__ == "__main__":
    main()

115
hooks/utils/llm/oai.py Executable file
View File

@@ -0,0 +1,115 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "openai",
# "python-dotenv",
# ]
# ///
import os
import sys
from dotenv import load_dotenv
def prompt_llm(prompt_text):
    """Send *prompt_text* to OpenAI's fastest chat model and return its reply.

    Args:
        prompt_text (str): The prompt to send to the model

    Returns:
        str: The model's response text, or None if error
    """
    load_dotenv()

    key = os.getenv("OPENAI_API_KEY")
    if not key:
        return None

    try:
        from openai import OpenAI

        completion = OpenAI(api_key=key).chat.completions.create(
            model="gpt-4.1-nano",  # Fastest OpenAI model
            messages=[{"role": "user", "content": prompt_text}],
            max_tokens=100,
            temperature=0.7,
        )
        return completion.choices[0].message.content.strip()
    except Exception:
        # Best-effort: any import or API failure is reported as None.
        return None
def generate_completion_message():
    """
    Generate a completion message using OpenAI LLM.

    Returns:
        str: A natural language completion message, or None if error
    """
    # Load .env before reading ENGINEER_NAME. Previously load_dotenv() only
    # ran inside prompt_llm() -- *after* this lookup -- so an ENGINEER_NAME
    # set solely in the .env file was silently ignored.
    load_dotenv()
    engineer_name = os.getenv("ENGINEER_NAME", "").strip()
    if engineer_name:
        name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
        examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
    else:
        name_instruction = ""
        examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """
    prompt = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.
Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}
{examples}
Generate ONE completion message:"""
    response = prompt_llm(prompt)
    # Clean up response - remove quotes and extra formatting
    if response:
        response = response.strip().strip('"').strip("'").strip()
        # Take first line if multiple lines
        response = response.split("\n")[0].strip()
    return response
def main():
    """Command line interface for testing.

    Usage:
        ./oai.py 'your prompt here'   # send an arbitrary prompt
        ./oai.py --completion         # generate a completion message

    Errors go to stderr and the process exits with status 1 on failure,
    so shell scripts can distinguish success from failure (previously
    errors were printed to stdout and the exit status was always 0).
    """
    if len(sys.argv) > 1:
        if sys.argv[1] == "--completion":
            message = generate_completion_message()
            if message:
                print(message)
            else:
                print("Error generating completion message", file=sys.stderr)
                sys.exit(1)
        else:
            prompt_text = " ".join(sys.argv[1:])
            response = prompt_llm(prompt_text)
            if response:
                print(response)
            else:
                print("Error calling OpenAI API", file=sys.stderr)
                sys.exit(1)
    else:
        print("Usage: ./oai.py 'your prompt here' or ./oai.py --completion")


if __name__ == "__main__":
    main()

90
hooks/utils/tts/elevenlabs_tts.py Executable file
View File

@@ -0,0 +1,90 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "elevenlabs",
# "python-dotenv",
# ]
# ///
import os
import sys
from dotenv import load_dotenv
def main():
    """Speak text aloud with ElevenLabs' Turbo v2.5 model.

    The text to speak comes from the command line (all arguments joined),
    falling back to a built-in default sentence.

    Usage:
    - ./eleven_turbo_tts.py                      # default text
    - ./eleven_turbo_tts.py "Your custom text"   # provided text

    Exits with status 1 when the API key is missing, the elevenlabs
    package cannot be imported, or an unexpected error occurs; a failure
    of the synthesis call itself is reported but does not change the
    exit status.
    """
    load_dotenv()

    # Bail out early when no credentials are configured.
    api_key = os.getenv("ELEVENLABS_API_KEY")
    if not api_key:
        print("❌ Error: ELEVENLABS_API_KEY not found in environment variables")
        print("Please add your ElevenLabs API key to .env file:")
        print("ELEVENLABS_API_KEY=your_api_key_here")
        sys.exit(1)

    try:
        from elevenlabs import play
        from elevenlabs.client import ElevenLabs

        client = ElevenLabs(api_key=api_key)

        print("🎙️ ElevenLabs Turbo v2.5 TTS")
        print("=" * 40)

        # All CLI arguments joined become the text; otherwise use the default.
        text = (
            " ".join(sys.argv[1:])
            if len(sys.argv) > 1
            else "The first move is what sets everything in motion."
        )

        print(f"🎯 Text: {text}")
        print("🔊 Generating and playing...")

        try:
            # Synthesize with the Turbo v2.5 model and play immediately.
            play(
                client.text_to_speech.convert(
                    text=text,
                    voice_id="9BWtsMINqrJLrRacOk9x",  # Aria voice
                    model_id="eleven_turbo_v2_5",
                    output_format="mp3_44100_128",
                )
            )
            print("✅ Playback complete!")
        except Exception as e:
            print(f"❌ Error: {e}")
    except ImportError:
        print("❌ Error: elevenlabs package not installed")
        print("This script uses UV to auto-install dependencies.")
        print("Make sure UV is installed: https://docs.astral.sh/uv/")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()

107
hooks/utils/tts/openai_tts.py Executable file
View File

@@ -0,0 +1,107 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "openai",
# "openai[voice_helpers]",
# "python-dotenv",
# ]
# ///
import asyncio
import os
import subprocess
import sys
import tempfile
from dotenv import load_dotenv
async def main():
    """
    OpenAI TTS Script

    Uses OpenAI's latest TTS model for high-quality text-to-speech.
    Accepts optional text prompt as command-line argument.

    Usage:
    - ./openai_tts.py                    # Uses default text
    - ./openai_tts.py "Your custom text" # Uses provided text

    Features:
    - OpenAI gpt-4o-mini-tts model (latest)
    - Nova voice (engaging and warm)
    - Streaming audio with instructions support
    - Live audio playback via afplay (macOS)

    Exits with status 1 when the API key is missing, the openai package
    cannot be imported, or an unexpected error occurs.
    """
    # Load environment variables from a .env file, if present.
    load_dotenv()

    # Get API key from environment; without it there is nothing to do.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("❌ Error: OPENAI_API_KEY not found in environment variables")
        print("Please add your OpenAI API key to .env file:")
        print("OPENAI_API_KEY=your_api_key_here")
        sys.exit(1)

    try:
        # Imported lazily so a missing package is reported as a friendly
        # message (ImportError branch below) instead of a traceback.
        from openai import AsyncOpenAI

        # Initialize async OpenAI client
        openai = AsyncOpenAI(api_key=api_key)

        print("🎙️  OpenAI TTS")
        print("=" * 20)

        # Get text from command line argument or use default
        if len(sys.argv) > 1:
            text = " ".join(sys.argv[1:])  # Join all arguments as text
        else:
            text = "Today is a wonderful day to build something people love!"

        print(f"🎯 Text: {text}")
        print("🔊 Generating and streaming...")

        try:
            # Generate and stream audio using OpenAI TTS
            async with openai.audio.speech.with_streaming_response.create(
                model="gpt-4o-mini-tts",
                voice="nova",
                input=text,
                instructions="Speak in a cheerful, positive yet professional tone.",
                response_format="mp3",
            ) as response:
                # Create a temporary file to store the audio.
                # delete=False so the file survives the `with` block and
                # can be handed to the external player below.
                with tempfile.NamedTemporaryFile(
                    delete=False, suffix=".mp3"
                ) as temp_file:
                    # Write the audio stream to the temporary file chunk
                    # by chunk as it arrives.
                    async for chunk in response.iter_bytes():
                        temp_file.write(chunk)
                    temp_file_path = temp_file.name

                try:
                    # Play the audio using afplay.
                    # NOTE(review): afplay is macOS-only — this branch will
                    # fail on Linux/Windows; confirm the target platform.
                    subprocess.run(["afplay", temp_file_path], check=True)
                    print("✅ Playback complete!")
                finally:
                    # Clean up the temporary file even if playback failed.
                    os.unlink(temp_file_path)
        except Exception as e:
            # Synthesis/playback failures are reported but do not change
            # the exit status.
            print(f"❌ Error: {e}")
    except ImportError:
        print("❌ Error: Required package not installed")
        print("This script uses UV to auto-install dependencies.")
        print("Make sure UV is installed: https://docs.astral.sh/uv/")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())

77
hooks/utils/tts/pyttsx3_tts.py Executable file
View File

@@ -0,0 +1,77 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.8"
# dependencies = [
# "pyttsx3",
# ]
# ///
import random
import sys
def main():
    """Speak text offline via pyttsx3 (no API key required).

    The text to speak comes from the command line (all arguments joined);
    without arguments a randomly chosen canned completion message is used.

    Usage:
    - ./pyttsx3_tts.py                      # random default message
    - ./pyttsx3_tts.py "Your custom text"   # provided text

    Exits with status 1 when pyttsx3 is missing or playback fails.
    """
    try:
        import pyttsx3

        # Offline engine; tune speaking rate and volume before use.
        engine = pyttsx3.init()
        engine.setProperty("rate", 180)  # Speech rate (words per minute)
        engine.setProperty("volume", 0.8)  # Volume (0.0 to 1.0)

        print("🎙️ pyttsx3 TTS")
        print("=" * 15)

        if len(sys.argv) > 1:
            text = " ".join(sys.argv[1:])  # Join all arguments as text
        else:
            # No arguments: pick one of the canned completion phrases.
            text = random.choice(
                [
                    "Work complete!",
                    "All done!",
                    "Task finished!",
                    "Job complete!",
                    "Ready for next task!",
                ]
            )

        print(f"🎯 Text: {text}")
        print("🔊 Speaking...")

        engine.say(text)
        engine.runAndWait()  # Blocks until speech output finishes.

        print("✅ Playback complete!")
    except ImportError:
        print("❌ Error: pyttsx3 package not installed")
        print("This script uses UV to auto-install dependencies.")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()