Initial commit

This commit is contained in:
Zhongwei Li
2025-11-30 08:24:01 +08:00
commit 7ca465850c
24 changed files with 5512 additions and 0 deletions

115
templates/basic-chat.ts Normal file
View File

@@ -0,0 +1,115 @@
import Anthropic from '@anthropic-ai/sdk';

// Initialize the client.
// Falls back to '' so module load never throws; requests will instead
// fail with an authentication error if ANTHROPIC_API_KEY is unset.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});
/**
 * Sends a single user message to Claude and prints the text reply plus
 * token usage. API failures are logged (with status for APIError) and
 * swallowed rather than rethrown.
 */
async function basicChat() {
  try {
    const message = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages: [
        {
          role: 'user',
          content: 'Hello, Claude! Tell me a fun fact about TypeScript.',
        },
      ],
    });

    // Print the first text block of the reply, if any.
    for (const block of message.content) {
      if (block.type === 'text') {
        console.log('Claude:', block.text);
        break;
      }
    }

    // Report token consumption for this call.
    console.log('\nUsage:');
    console.log('- Input tokens:', message.usage.input_tokens);
    console.log('- Output tokens:', message.usage.output_tokens);
  } catch (error) {
    if (error instanceof Anthropic.APIError) {
      console.error(`API Error [${error.status}]:`, error.message);
    } else {
      console.error('Unexpected error:', error);
    }
  }
}
// Multi-turn conversation example
/**
 * Two-turn conversation: the assistant's first answer is appended to the
 * history so the follow-up question ("its population") has context.
 */
async function multiTurnChat() {
  const messages: Array<{ role: 'user' | 'assistant'; content: string }> = [];

  // Turn 1: ask the question.
  messages.push({
    role: 'user',
    content: 'What is the capital of France?',
  });
  const firstReply = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages,
  });
  for (const block of firstReply.content) {
    if (block.type === 'text') {
      // Record the assistant turn so turn 2 can refer back to it.
      messages.push({ role: 'assistant', content: block.text });
      console.log('Claude:', block.text);
      break;
    }
  }

  // Turn 2: follow-up that relies on the history built above.
  messages.push({
    role: 'user',
    content: 'What is its population?',
  });
  const secondReply = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages,
  });
  for (const block of secondReply.content) {
    if (block.type === 'text') {
      console.log('Claude:', block.text);
      break;
    }
  }
}
// System prompt example
/**
 * Shows how a system prompt steers behavior: Claude is instructed to act
 * as a Python assistant that always emits type hints and docstrings.
 */
async function chatWithSystemPrompt() {
  const reply = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    system: 'You are a helpful Python coding assistant. Always provide type hints and docstrings.',
    messages: [
      {
        role: 'user',
        content: 'Write a function to calculate the factorial of a number.',
      },
    ],
  });

  // Print the first text block, if present.
  for (const block of reply.content) {
    if (block.type === 'text') {
      console.log(block.text);
      break;
    }
  }
}
// Run examples
// Run examples
// Only executes when this file is run directly (node basic-chat.js),
// not when imported as a module.
if (require.main === module) {
  console.log('=== Basic Chat ===\n');
  basicChat()
    .then(() => {
      console.log('\n=== Multi-turn Chat ===\n');
      return multiTurnChat();
    })
    .then(() => {
      console.log('\n=== Chat with System Prompt ===\n');
      return chatWithSystemPrompt();
    })
    .catch(console.error);
}

export { basicChat, multiTurnChat, chatWithSystemPrompt };

View File

@@ -0,0 +1,268 @@
// Cloudflare Worker with Claude API
// Uses fetch API (no SDK needed)
// Bindings provided to the Worker at runtime (configured in wrangler.toml).
export interface Env {
  // Secret: Anthropic API key.
  ANTHROPIC_API_KEY: string;
}
// Basic chat endpoint
// Basic chat endpoint: proxies POST {messages} to the Anthropic Messages API.
export default {
  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
    // CORS headers applied to every response so browser clients can call us.
    const corsHeaders = {
      'Access-Control-Allow-Origin': '*',
      'Access-Control-Allow-Methods': 'POST, OPTIONS',
      'Access-Control-Allow-Headers': 'Content-Type',
    };

    // CORS preflight.
    if (request.method === 'OPTIONS') {
      return new Response(null, { headers: corsHeaders });
    }
    if (request.method !== 'POST') {
      return new Response('Method not allowed', { status: 405 });
    }

    try {
      const { messages } = await request.json<{ messages: any[] }>();

      // Call the Messages API directly with fetch — no SDK dependency.
      const response = await fetch('https://api.anthropic.com/v1/messages', {
        method: 'POST',
        headers: {
          'x-api-key': env.ANTHROPIC_API_KEY,
          'anthropic-version': '2023-06-01',
          'content-type': 'application/json',
        },
        body: JSON.stringify({
          model: 'claude-sonnet-4-5-20250929',
          max_tokens: 1024,
          messages,
        }),
      });

      const data = await response.json();
      return new Response(JSON.stringify(data), {
        headers: {
          'Content-Type': 'application/json',
          ...corsHeaders,
        },
      });
    } catch (error) {
      // Fix: `error` is `unknown` in a catch clause under strict TS, and
      // `.message` may not exist on non-Error throws — narrow first.
      const message = error instanceof Error ? error.message : String(error);
      return new Response(
        JSON.stringify({ error: message }),
        {
          status: 500,
          headers: {
            'Content-Type': 'application/json',
            ...corsHeaders,
          },
        }
      );
    }
  },
};
// Streaming endpoint example
// Streaming endpoint: forwards the raw SSE body from the Messages API.
export const streamingEndpoint = {
  async fetch(request: Request, env: Env): Promise<Response> {
    if (request.method !== 'POST') {
      return new Response('Method not allowed', { status: 405 });
    }
    try {
      const { messages } = await request.json<{ messages: any[] }>();
      const response = await fetch('https://api.anthropic.com/v1/messages', {
        method: 'POST',
        headers: {
          'x-api-key': env.ANTHROPIC_API_KEY,
          'anthropic-version': '2023-06-01',
          'content-type': 'application/json',
        },
        body: JSON.stringify({
          model: 'claude-sonnet-4-5-20250929',
          max_tokens: 1024,
          messages,
          stream: true,
        }),
      });
      // Return SSE stream directly — no transformation needed.
      return new Response(response.body, {
        headers: {
          'Content-Type': 'text/event-stream',
          'Cache-Control': 'no-cache',
          'Connection': 'keep-alive',
          'Access-Control-Allow-Origin': '*',
        },
      });
    } catch (error) {
      // Fix: `error` is `unknown` in strict TS — narrow before reading .message.
      const message = error instanceof Error ? error.message : String(error);
      return new Response(
        JSON.stringify({ error: message }),
        { status: 500 }
      );
    }
  },
};
// With rate limiting using Durable Objects
// Persisted counter state for one rate-limit window.
interface RateLimiterState {
  requests: number;
  resetTime: number;
}

/**
 * Durable Object implementing a fixed-window limiter: at most 10
 * requests per 60-second window per object instance. State survives
 * restarts via Durable Object storage under the 'limiter' key.
 */
export class RateLimiter implements DurableObject {
  state: DurableObjectState;
  storage: DurableObjectStorage;

  constructor(state: DurableObjectState, env: Env) {
    this.state = state;
    this.storage = state.storage;
  }

  async fetch(request: Request): Promise<Response> {
    const now = Date.now();

    // Load the current window, or start a fresh 1-minute one.
    const bucket =
      (await this.storage.get<RateLimiterState>('limiter')) || {
        requests: 0,
        resetTime: now + 60000, // 1 minute
      };

    // Reset the counter once the previous window has elapsed.
    if (now > bucket.resetTime) {
      bucket.requests = 0;
      bucket.resetTime = now + 60000;
    }

    // Reject once the per-window budget (10 requests) is spent.
    if (bucket.requests >= 10) {
      return new Response('Rate limit exceeded', { status: 429 });
    }

    bucket.requests++;
    await this.storage.put('limiter', bucket);
    return new Response('OK', { status: 200 });
  }
}
// Complete worker with error handling and rate limiting
// Production worker: routes /chat and /stream to their handlers.
export const productionWorker = {
  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
    const { pathname } = new URL(request.url);
    switch (pathname) {
      case '/chat':
        return handleChat(request, env);
      case '/stream':
        return handleStream(request, env);
      default:
        return new Response('Not Found', { status: 404 });
    }
  },
};
/**
 * POST /chat — validates {messages}, forwards them to the Messages API,
 * and returns the JSON reply. Upstream failures are reported with the
 * upstream status code; local failures map to 500.
 */
async function handleChat(request: Request, env: Env): Promise<Response> {
  if (request.method !== 'POST') {
    return new Response('Method not allowed', { status: 405 });
  }
  try {
    const { messages } = await request.json<{ messages: any[] }>();

    // Reject empty or malformed payloads before spending an API call.
    if (!Array.isArray(messages) || messages.length === 0) {
      return new Response(
        JSON.stringify({ error: 'Invalid messages array' }),
        { status: 400 }
      );
    }

    const upstream = await fetch('https://api.anthropic.com/v1/messages', {
      method: 'POST',
      headers: {
        'x-api-key': env.ANTHROPIC_API_KEY,
        'anthropic-version': '2023-06-01',
        'content-type': 'application/json',
      },
      body: JSON.stringify({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        messages,
      }),
    });

    // Surface upstream errors without leaking their bodies to the client.
    if (!upstream.ok) {
      const error = await upstream.text();
      console.error('Claude API error:', error);
      return new Response(
        JSON.stringify({ error: 'API request failed' }),
        { status: upstream.status }
      );
    }

    const data = await upstream.json();
    return new Response(JSON.stringify(data), {
      headers: {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*',
      },
    });
  } catch (error) {
    console.error('Worker error:', error);
    return new Response(
      JSON.stringify({ error: 'Internal server error' }),
      { status: 500 }
    );
  }
}
/**
 * POST /stream — proxies a streaming Messages API request, piping the
 * upstream SSE body straight through to the client.
 */
async function handleStream(request: Request, env: Env): Promise<Response> {
  if (request.method !== 'POST') {
    return new Response('Method not allowed', { status: 405 });
  }
  try {
    const { messages } = await request.json<{ messages: any[] }>();

    const upstream = await fetch('https://api.anthropic.com/v1/messages', {
      method: 'POST',
      headers: {
        'x-api-key': env.ANTHROPIC_API_KEY,
        'anthropic-version': '2023-06-01',
        'content-type': 'application/json',
      },
      body: JSON.stringify({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        messages,
        stream: true,
      }),
    });

    if (!upstream.ok) {
      return new Response(
        JSON.stringify({ error: 'Stream failed' }),
        { status: upstream.status }
      );
    }

    // Pass the SSE body through unmodified.
    return new Response(upstream.body, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Access-Control-Allow-Origin': '*',
      },
    });
  } catch (error) {
    console.error('Stream error:', error);
    return new Response(
      JSON.stringify({ error: 'Stream error' }),
      { status: 500 }
    );
  }
}

373
templates/error-handling.ts Normal file
View File

@@ -0,0 +1,373 @@
import Anthropic from '@anthropic-ai/sdk';

// Shared client for all examples below.
// NOTE(review): apiKey may be undefined if the env var is unset — the
// SDK will then throw at request time rather than here.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});
// Example 1: Basic error handling
/**
 * Example 1: basic error handling — logs rich diagnostics for
 * Anthropic.APIError (status, type, details, per-status guidance), a
 * generic message for everything else, then rethrows.
 */
async function basicErrorHandling(prompt: string) {
  try {
    return await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages: [{ role: 'user', content: prompt }],
    });
  } catch (error) {
    if (error instanceof Anthropic.APIError) {
      console.error(`API Error [${error.status}]:`, error.message);
      console.error('Error type:', error.type);
      console.error('Error details:', error.error);

      // Per-status guidance for the most common API failure modes.
      const guidance: Record<number, string> = {
        400: 'Invalid request. Check your parameters.',
        401: 'Authentication failed. Check your API key.',
        403: 'Permission denied. Check your account tier.',
        404: 'Resource not found. Check the endpoint.',
        429: 'Rate limit exceeded. Implement retry logic.',
        500: 'Server error. Retry with exponential backoff.',
        529: 'API overloaded. Retry later.',
      };
      console.error(guidance[error.status as number] ?? 'Unexpected error occurred.');
    } else {
      console.error('Non-API error:', error);
    }
    throw error;
  }
}
// Example 2: Rate limit handler with retry
/**
 * Example 2: retry wrapper for 429 rate-limit errors.
 *
 * Honors the server's `retry-after` header (seconds) when present and
 * parseable; otherwise falls back to exponential backoff
 * (baseDelay * 2^attempt). Any non-429 error — or exhaustion of
 * maxRetries — is rethrown to the caller.
 *
 * Fixes: generic return type instead of `Promise<any>`; parseInt given
 * an explicit radix; a malformed retry-after header no longer produces
 * a NaN delay.
 */
async function handleRateLimits<T>(
  requestFn: () => Promise<T>,
  maxRetries = 3,
  baseDelay = 1000
): Promise<T> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await requestFn();
    } catch (error) {
      if (error instanceof Anthropic.APIError && error.status === 429) {
        // NOTE(review): `response` is not a documented APIError field in
        // current SDK versions — verify against @anthropic-ai/sdk. The
        // backoff fallback below keeps retries working either way.
        const retryAfter = (error as any).response?.headers?.['retry-after'];
        const parsed = retryAfter ? parseInt(retryAfter, 10) : NaN;
        const delay = Number.isNaN(parsed)
          ? baseDelay * Math.pow(2, attempt) // exponential backoff
          : parsed * 1000; // server-specified wait, given in seconds
        if (attempt < maxRetries - 1) {
          console.warn(`Rate limited. Retrying in ${delay}ms... (Attempt ${attempt + 1}/${maxRetries})`);
          await new Promise(resolve => setTimeout(resolve, delay));
          continue;
        }
      }
      throw error;
    }
  }
  throw new Error('Max retries exceeded');
}
// Example 3: Comprehensive error handler
/**
 * Example 3: comprehensive error handler with a retry policy.
 *
 * Retries 429/500/529 using retry-after or exponential backoff, invokes
 * an optional onError callback on every failure, and wraps API errors
 * with extra context (status/type) before rethrowing.
 *
 * Fix: the onError callback is typed `(error: Error) => void` but was
 * previously invoked with the raw `unknown` catch value — it is now
 * normalized to an Error first.
 */
class APIErrorHandler {
  private maxRetries: number;
  private baseDelay: number;
  private onError?: (error: Error) => void;

  constructor(options: {
    maxRetries?: number;
    baseDelay?: number;
    onError?: (error: Error) => void;
  } = {}) {
    this.maxRetries = options.maxRetries || 3;
    this.baseDelay = options.baseDelay || 1000;
    this.onError = options.onError;
  }

  async execute<T>(requestFn: () => Promise<T>): Promise<T> {
    for (let attempt = 0; attempt < this.maxRetries; attempt++) {
      try {
        return await requestFn();
      } catch (error) {
        if (this.onError) {
          // Normalize the unknown catch value to satisfy the callback type.
          this.onError(error instanceof Error ? error : new Error(String(error)));
        }
        if (error instanceof Anthropic.APIError) {
          if (this.shouldRetry(error) && attempt < this.maxRetries - 1) {
            const delay = this.calculateDelay(error, attempt);
            console.warn(`Retrying after ${delay}ms... (${attempt + 1}/${this.maxRetries})`);
            await this.sleep(delay);
            continue;
          }
        }
        throw this.enhanceError(error);
      }
    }
    throw new Error('Max retries exceeded');
  }

  // Retry on rate limits (429), server errors (500), and overload (529).
  private shouldRetry(error: Anthropic.APIError): boolean {
    return error.status === 429 || error.status === 500 || error.status === 529;
  }

  private calculateDelay(error: Anthropic.APIError, attempt: number): number {
    // Prefer the server-provided retry-after (seconds) when parseable.
    // NOTE(review): `response` is not a documented APIError field — verify.
    const retryAfter = (error as any).response?.headers?.['retry-after'];
    if (retryAfter) {
      const parsed = parseInt(retryAfter, 10);
      if (!Number.isNaN(parsed)) {
        return parsed * 1000;
      }
    }
    // Exponential backoff otherwise.
    return this.baseDelay * Math.pow(2, attempt);
  }

  // Wrap APIError in a plain Error carrying status/type for upstream logging.
  private enhanceError(error: any): Error {
    if (error instanceof Anthropic.APIError) {
      const enhancedError = new Error(`Claude API Error: ${error.message}`);
      (enhancedError as any).originalError = error;
      (enhancedError as any).status = error.status;
      (enhancedError as any).type = error.type;
      return enhancedError;
    }
    return error;
  }

  private sleep(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
// Example 4: Streaming error handling
// Example 4: Streaming error handling.
// Wires 'error'/'abort' listeners onto the SDK stream and stops echoing
// text once an error has been observed; finalMessage() awaits completion.
async function streamWithErrorHandling(prompt: string) {
  try {
    const stream = anthropic.messages.stream({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages: [{ role: 'user', content: prompt }],
    });

    // Flag flips on the first stream error; later text chunks are dropped.
    let hasError = false;
    stream.on('error', (error) => {
      hasError = true;
      console.error('Stream error:', error);
      if (error instanceof Anthropic.APIError) {
        console.error(`Status: ${error.status}`);
        console.error(`Type: ${error.type}`);
      }
      // Implement fallback or retry logic here
    });
    stream.on('abort', (error) => {
      console.warn('Stream aborted:', error);
    });
    stream.on('text', (text) => {
      if (!hasError) {
        process.stdout.write(text);
      }
    });

    // Wait for the stream to finish before deciding success/failure.
    await stream.finalMessage();
    if (hasError) {
      throw new Error('Stream completed with errors');
    }
  } catch (error) {
    console.error('Failed to complete stream:', error);
    throw error;
  }
}
// Example 5: Validation errors
/**
 * Example 5: local request validation before hitting the API.
 *
 * @param params - candidate request: messages plus optional max_tokens/model.
 * @returns {valid, errors} — `valid` is true only when no issues were found.
 *
 * Fixes: `max_tokens: 0` previously slipped through the truthiness check
 * (`params.max_tokens && …`); and a non-array `messages` value no longer
 * throws on `.entries()`.
 */
function validateRequest(params: {
  messages: any[];
  max_tokens?: number;
  model?: string;
}): { valid: boolean; errors: string[] } {
  const errors: string[] = [];

  if (!Array.isArray(params.messages) || params.messages.length === 0) {
    errors.push('Messages must be a non-empty array');
  }

  // Explicit undefined check so the invalid value 0 is also rejected.
  if (params.max_tokens !== undefined && (params.max_tokens < 1 || params.max_tokens > 8192)) {
    errors.push('max_tokens must be between 1 and 8192');
  }

  if (params.model && !params.model.startsWith('claude-')) {
    errors.push('Invalid model name');
  }

  // Per-message checks; guarded so junk input is reported, not thrown.
  if (Array.isArray(params.messages)) {
    for (const [index, message] of params.messages.entries()) {
      if (!message.role || !['user', 'assistant'].includes(message.role)) {
        errors.push(`Message ${index}: Invalid role. Must be "user" or "assistant"`);
      }
      if (!message.content) {
        errors.push(`Message ${index}: Missing content`);
      }
    }
  }

  return {
    valid: errors.length === 0,
    errors,
  };
}
// Example 6: Circuit breaker pattern
/**
 * Example 6: circuit breaker — after `threshold` consecutive failures the
 * breaker opens and rejects calls immediately; once `timeout` ms have
 * passed it admits a single probe request (half-open) and closes again on
 * success.
 */
class CircuitBreaker {
  private failures: number = 0;
  private lastFailureTime: number = 0;
  private state: 'closed' | 'open' | 'half-open' = 'closed';
  private readonly threshold: number;
  private readonly timeout: number;

  constructor(options: { threshold?: number; timeout?: number } = {}) {
    this.threshold = options.threshold || 5;
    this.timeout = options.timeout || 60000; // 1 minute
  }

  async execute<T>(requestFn: () => Promise<T>): Promise<T> {
    if (this.state === 'open') {
      const cooledDown = Date.now() - this.lastFailureTime > this.timeout;
      if (!cooledDown) {
        throw new Error('Circuit breaker is open. Service unavailable.');
      }
      console.log('Circuit breaker: Transitioning to half-open');
      this.state = 'half-open';
    }

    try {
      const result = await requestFn();
      // Success: a half-open probe closes the circuit again.
      if (this.state === 'half-open') {
        console.log('Circuit breaker: Transitioning to closed');
        this.state = 'closed';
      }
      this.failures = 0;
      return result;
    } catch (error) {
      this.failures++;
      this.lastFailureTime = Date.now();
      if (this.failures >= this.threshold) {
        console.error(`Circuit breaker: Opening after ${this.failures} failures`);
        this.state = 'open';
      }
      throw error;
    }
  }

  // Expose current state for logging/monitoring.
  getState(): { state: string; failures: number } {
    return {
      state: this.state,
      failures: this.failures,
    };
  }
}
// Example 7: Usage with all patterns
async function robustAPICall(prompt: string) {
const errorHandler = new APIErrorHandler({
maxRetries: 3,
baseDelay: 1000,
onError: (error) => {
console.error('Error logged:', error);
// Could send to monitoring service here
},
});
const circuitBreaker = new CircuitBreaker({
threshold: 5,
timeout: 60000,
});
try {
const validation = validateRequest({
messages: [{ role: 'user', content: prompt }],
max_tokens: 1024,
model: 'claude-sonnet-4-5-20250929',
});
if (!validation.valid) {
throw new Error(`Validation failed: ${validation.errors.join(', ')}`);
}
const result = await circuitBreaker.execute(() =>
errorHandler.execute(() =>
anthropic.messages.create({
model: 'claude-sonnet-4-5-20250929',
max_tokens: 1024,
messages: [{ role: 'user', content: prompt }],
})
)
);
return result;
} catch (error) {
console.error('Robust API call failed:', error);
console.error('Circuit breaker state:', circuitBreaker.getState());
throw error;
}
}
// Run examples
// Only executes when invoked directly, not when imported as a module.
if (require.main === module) {
  console.log('=== Error Handling Examples ===\n');
  // Test basic error handling
  basicErrorHandling('Hello, Claude!')
    .then(() => {
      console.log('\n=== Testing Rate Limit Handler ===\n');
      return handleRateLimits(() =>
        anthropic.messages.create({
          model: 'claude-sonnet-4-5-20250929',
          max_tokens: 1024,
          messages: [{ role: 'user', content: 'Test message' }],
        })
      );
    })
    .then(() => {
      console.log('\n=== Testing Robust API Call ===\n');
      return robustAPICall('What is 2+2?');
    })
    .catch(console.error);
}

export {
  basicErrorHandling,
  handleRateLimits,
  APIErrorHandler,
  streamWithErrorHandling,
  validateRequest,
  CircuitBreaker,
  robustAPICall,
};

View File

@@ -0,0 +1,320 @@
import Anthropic from '@anthropic-ai/sdk';

// Shared client for all examples below.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});

/**
 * IMPORTANT: Extended thinking is ONLY available in:
 * - Claude 3.7 Sonnet (claude-3-7-sonnet-20250228)
 * - Claude 4 models (Opus 4, Sonnet 4)
 *
 * NOT available in Claude 3.5 Sonnet
 *
 * NOTE(review): the model IDs and availability claims above should be
 * verified against the current Anthropic model documentation — newer
 * models (e.g. Claude Sonnet 4.5) also support extended thinking, and
 * the Messages API only returns thinking blocks when the request
 * includes a `thinking` configuration.
 */
// Example 1: Basic extended thinking
/**
 * Example 1: basic extended thinking.
 *
 * Fixes: the Messages API only returns `thinking` content blocks when
 * the request includes a `thinking` configuration (none was sent
 * before), and a thinking block's content lives in `block.thinking`,
 * not `block.text`. `max_tokens` must exceed `budget_tokens`.
 */
async function basicExtendedThinking() {
  const message = await anthropic.messages.create({
    model: 'claude-3-7-sonnet-20250228', // Must use 3.7 or 4.x
    max_tokens: 4096, // Must exceed the thinking budget below
    thinking: { type: 'enabled', budget_tokens: 2048 },
    messages: [
      {
        role: 'user',
        content: `A ball is thrown upward with an initial velocity of 20 m/s.
How high does it go? (Use g = 9.8 m/s²)`,
      },
    ],
  });

  console.log('=== Response with Extended Thinking ===\n');

  // Display thinking blocks separately from the final answer.
  for (const block of message.content) {
    if (block.type === 'thinking') {
      console.log('🤔 Claude is thinking:');
      console.log(block.thinking);
      console.log('\n' + '='.repeat(50) + '\n');
    } else if (block.type === 'text') {
      console.log('💡 Final Answer:');
      console.log(block.text);
    }
  }

  console.log('\nStop reason:', message.stop_reason);
  console.log('Token usage:', message.usage);
}
// Example 2: Complex problem solving
// Example 2: Complex problem solving (debugging a buggy recursive fib).
// NOTE(review): no `thinking` request parameter is sent, so the API will
// not return 'thinking' blocks; thinking blocks also expose `.thinking`
// rather than `.text` — confirm against the extended-thinking docs.
async function complexProblemSolving() {
  const message = await anthropic.messages.create({
    model: 'claude-3-7-sonnet-20250228',
    max_tokens: 8192, // Even higher for complex reasoning
    messages: [
      {
        role: 'user',
        content: `Debug this Python code and explain what's wrong:
def fibonacci(n):
if n <= 1:
return n
return fibonacci(n-1) + fibonacci(n-1)
print(fibonacci(10))
Why is it slow and what's the correct implementation?`,
      },
    ],
  });
  // Print the reasoning and the answer blocks separately.
  for (const block of message.content) {
    if (block.type === 'thinking') {
      console.log('🔍 Debugging process:');
      console.log(block.text);
      console.log();
    } else if (block.type === 'text') {
      console.log('✅ Solution:');
      console.log(block.text);
    }
  }
}
// Example 3: Multi-step reasoning
// Example 3: Multi-step reasoning (water-jug puzzle).
// NOTE(review): as in the examples above, no `thinking` request parameter
// is sent, so 'thinking' blocks will not appear in the response; thinking
// blocks also expose `.thinking` rather than `.text` — verify.
async function multiStepReasoning() {
  const message = await anthropic.messages.create({
    model: 'claude-3-7-sonnet-20250228',
    max_tokens: 6144,
    messages: [
      {
        role: 'user',
        content: `I have a 10-liter jug and a 6-liter jug. How can I measure exactly 8 liters of water?
Think through this step by step.`,
      },
    ],
  });
  // Print reasoning and final answer separately.
  for (const block of message.content) {
    if (block.type === 'thinking') {
      console.log('🧠 Reasoning steps:');
      console.log(block.text);
      console.log();
    } else if (block.type === 'text') {
      console.log('📝 Final solution:');
      console.log(block.text);
    }
  }
}
// Example 4: Comparing with and without extended thinking
// Example 4: Comparing with and without extended thinking.
// NOTE(review): the "without" call actually uses claude-sonnet-4-5 even
// though the log labels say "Claude 3.5 Sonnet" — labels and model IDs
// should be reconciled. Neither call sends a `thinking` parameter, so no
// thinking blocks will be returned by either request — verify.
async function compareThinkingModes() {
  const problem = 'What is the sum of all prime numbers less than 100?';

  // Without extended thinking (Claude 3.5 Sonnet)
  console.log('=== Without Extended Thinking (Claude 3.5 Sonnet) ===\n');
  const response1 = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 2048,
    messages: [{ role: 'user', content: problem }],
  });
  const text1 = response1.content.find(b => b.type === 'text');
  if (text1 && text1.type === 'text') {
    console.log(text1.text);
  }
  console.log('\nTokens used:', response1.usage.input_tokens + response1.usage.output_tokens);

  // With extended thinking (Claude 3.7 Sonnet)
  console.log('\n\n=== With Extended Thinking (Claude 3.7 Sonnet) ===\n');
  const response2 = await anthropic.messages.create({
    model: 'claude-3-7-sonnet-20250228',
    max_tokens: 4096,
    messages: [{ role: 'user', content: problem }],
  });
  for (const block of response2.content) {
    if (block.type === 'thinking') {
      console.log('🤔 Thinking process:');
      console.log(block.text);
      console.log();
    } else if (block.type === 'text') {
      console.log('💡 Answer:');
      console.log(block.text);
    }
  }
  console.log('\nTokens used:', response2.usage.input_tokens + response2.usage.output_tokens);
}
// Example 5: Extended thinking with tools
// Example 5: Extended thinking combined with tool definitions.
// NOTE(review): no `thinking` request parameter is sent here either, so
// 'thinking' blocks will not be returned — confirm against the docs.
async function extendedThinkingWithTools() {
  // Tool definition in Messages-API JSON-schema form.
  const tools: Anthropic.Tool[] = [
    {
      name: 'calculate',
      description: 'Perform mathematical calculations',
      input_schema: {
        type: 'object',
        properties: {
          expression: {
            type: 'string',
            description: 'Mathematical expression to evaluate',
          },
        },
        required: ['expression'],
      },
    },
  ];
  const messages: Anthropic.MessageParam[] = [
    {
      role: 'user',
      content:
        'Calculate the compound interest on $1000 invested at 5% annual interest for 10 years, compounded monthly',
    },
  ];
  const response = await anthropic.messages.create({
    model: 'claude-3-7-sonnet-20250228',
    max_tokens: 4096,
    tools,
    messages,
  });
  console.log('=== Extended Thinking with Tools ===\n');
  // Print each content block by type: reasoning, tool calls, final text.
  for (const block of response.content) {
    if (block.type === 'thinking') {
      console.log('🤔 Planning:');
      console.log(block.text);
      console.log();
    } else if (block.type === 'tool_use') {
      console.log('🔧 Tool use:', block.name);
      console.log('Parameters:', block.input);
      console.log();
    } else if (block.type === 'text') {
      console.log('💡 Response:');
      console.log(block.text);
    }
  }
}
// Example 6: Error when using wrong model
// Example 6: Showing a response that contains no thinking blocks.
// NOTE(review): the "Wrong model!" premise looks dated — Claude Sonnet
// 4.5 supports extended thinking when the request enables it; the reason
// no thinking blocks appear here is that no `thinking` parameter is sent.
// Verify against the current model documentation.
async function demonstrateWrongModelError() {
  try {
    console.log('=== Attempting extended thinking on Claude 3.5 Sonnet ===\n');
    const message = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929', // Wrong model!
      max_tokens: 4096,
      messages: [
        {
          role: 'user',
          content: 'Solve this complex math problem step by step',
        },
      ],
    });
    // No thinking blocks will be present
    const hasThinking = message.content.some(block => block.type === 'thinking');
    if (!hasThinking) {
      console.log('⚠️ No thinking blocks found!');
      console.log('Extended thinking is only available in Claude 3.7 Sonnet or Claude 4 models.');
    }
    for (const block of message.content) {
      if (block.type === 'text') {
        console.log('Regular response:', block.text);
      }
    }
  } catch (error) {
    console.error('Error:', error);
  }
}
// Example 7: Check model capabilities
function getModelCapabilities(modelId: string): {
supportsExtendedThinking: boolean;
contextWindow: number;
} {
const models: Record<
string,
{ supportsExtendedThinking: boolean; contextWindow: number }
> = {
'claude-sonnet-4-5-20250929': {
supportsExtendedThinking: false,
contextWindow: 200_000,
},
'claude-3-7-sonnet-20250228': {
supportsExtendedThinking: true,
contextWindow: 2_000_000,
},
'claude-opus-4-20250514': {
supportsExtendedThinking: true,
contextWindow: 200_000,
},
'claude-3-5-haiku-20241022': {
supportsExtendedThinking: false,
contextWindow: 200_000,
},
};
return (
models[modelId] || {
supportsExtendedThinking: false,
contextWindow: 200_000,
}
);
}
// Helper: throw if the given model cannot do extended thinking;
// otherwise log its capabilities.
function validateModelForExtendedThinking(modelId: string): void {
  const { supportsExtendedThinking, contextWindow } = getModelCapabilities(modelId);
  if (!supportsExtendedThinking) {
    throw new Error(
      `Model ${modelId} does not support extended thinking. Use Claude 3.7 Sonnet or Claude 4 models.`
    );
  }
  console.log(`✅ Model ${modelId} supports extended thinking`);
  console.log(`Context window: ${contextWindow.toLocaleString()} tokens`);
}
// Run examples
// Only executes when invoked directly, not when imported as a module.
if (require.main === module) {
  console.log('=== Extended Thinking Examples ===\n');

  // Validate the model up front so we fail before spending API calls.
  try {
    validateModelForExtendedThinking('claude-3-7-sonnet-20250228');
  } catch (error) {
    // Fix: `error` is `unknown` in strict TS — narrow before reading .message.
    console.error(error instanceof Error ? error.message : error);
    process.exit(1);
  }

  basicExtendedThinking()
    .then(() => {
      console.log('\n\n=== Complex Problem ===\n');
      return complexProblemSolving();
    })
    .then(() => {
      console.log('\n\n=== Multi-step Reasoning ===\n');
      return multiStepReasoning();
    })
    .then(() => {
      console.log('\n\n=== Wrong Model Demo ===\n');
      return demonstrateWrongModelError();
    })
    .catch(console.error);
}

export {
  basicExtendedThinking,
  complexProblemSolving,
  multiStepReasoning,
  compareThinkingModes,
  extendedThinkingWithTools,
  demonstrateWrongModelError,
  getModelCapabilities,
  validateModelForExtendedThinking,
};

View File

@@ -0,0 +1,304 @@
// Next.js API Routes for Claude API
// ============================================
// App Router (app/api/chat/route.ts)
// ============================================
import Anthropic from '@anthropic-ai/sdk';
import { NextRequest, NextResponse } from 'next/server';

// Shared SDK client; reads ANTHROPIC_API_KEY from the server environment.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});
// POST /api/chat - Non-streaming
/**
 * POST /api/chat — non-streaming chat completion.
 * Body: { messages }. Returns the raw API message, or a JSON error with
 * an appropriate status code.
 */
export async function POST(request: NextRequest) {
  try {
    const { messages } = await request.json();

    // Reject empty or malformed payloads up front.
    if (!Array.isArray(messages) || messages.length === 0) {
      return NextResponse.json({ error: 'Invalid messages' }, { status: 400 });
    }

    const completion = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages,
    });
    return NextResponse.json(completion);
  } catch (error) {
    console.error('Chat error:', error);
    // API errors keep their upstream status; everything else is a 500.
    if (!(error instanceof Anthropic.APIError)) {
      return NextResponse.json({ error: 'Internal server error' }, { status: 500 });
    }
    return NextResponse.json(
      { error: error.message },
      { status: error.status || 500 }
    );
  }
}
// ============================================
// App Router with Streaming (app/api/stream/route.ts)
// ============================================
/**
 * POST /api/stream — streams Claude's reply to the client.
 *
 * Fixes: the TextEncoder is created once instead of once per chunk, and
 * the controller is closed after the event loop completes, so the
 * response stream always terminates even if no message_stop event is
 * observed before the iterator ends.
 */
export async function POST_STREAMING(request: NextRequest) {
  try {
    const { messages } = await request.json();
    const stream = anthropic.messages.stream({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages,
    });

    const encoder = new TextEncoder(); // hoisted: one encoder for all chunks

    // Bridge the SDK's async-iterable event stream into a ReadableStream.
    const readableStream = new ReadableStream({
      async start(controller) {
        try {
          for await (const event of stream) {
            if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
              controller.enqueue(encoder.encode(event.delta.text));
            }
          }
          // Iterator exhausted — the message is complete.
          controller.close();
        } catch (error) {
          console.error('Stream error:', error);
          controller.error(error);
        }
      },
    });

    return new Response(readableStream, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      },
    });
  } catch (error) {
    console.error('Stream setup error:', error);
    return NextResponse.json({ error: 'Stream failed' }, { status: 500 });
  }
}
// ============================================
// Pages Router (pages/api/chat.ts)
// ============================================
import type { NextApiRequest, NextApiResponse } from 'next';
/**
 * Pages Router equivalent of POST (pages/api/chat.ts): validates the
 * body, calls the Messages API, and relays errors with sensible codes.
 */
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }
  try {
    const { messages } = req.body;
    if (!Array.isArray(messages)) {
      return res.status(400).json({ error: 'Invalid request' });
    }
    const completion = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages,
    });
    res.status(200).json(completion);
  } catch (error) {
    console.error('API error:', error);
    // API errors keep their upstream status; everything else is a 500.
    if (!(error instanceof Anthropic.APIError)) {
      res.status(500).json({ error: 'Internal server error' });
      return;
    }
    res.status(error.status || 500).json({ error: error.message });
  }
}
// ============================================
// Pages Router with Streaming (pages/api/stream.ts)
// ============================================
/**
 * Pages Router SSE streaming endpoint (pages/api/stream.ts).
 *
 * Fix: the catch block previously called res.status(500) even after the
 * SSE headers had already been written, which throws
 * ERR_HTTP_HEADERS_SENT — now guarded on res.headersSent, ending the
 * stream instead when headers are already out.
 */
export async function streamHandler(req: NextApiRequest, res: NextApiResponse) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }
  try {
    const { messages } = req.body;

    // Set SSE headers before the first write.
    res.setHeader('Content-Type', 'text/event-stream');
    res.setHeader('Cache-Control', 'no-cache');
    res.setHeader('Connection', 'keep-alive');

    const stream = anthropic.messages.stream({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages,
    });

    // Forward each text delta as an SSE data event.
    stream.on('text', (text) => {
      res.write(`data: ${JSON.stringify({ text })}\n\n`);
    });
    stream.on('error', (error) => {
      console.error('Stream error:', error);
      res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
      res.end();
    });
    stream.on('end', () => {
      res.write('data: [DONE]\n\n');
      res.end();
    });

    await stream.finalMessage();
  } catch (error) {
    console.error('Stream setup error:', error);
    if (!res.headersSent) {
      res.status(500).json({ error: 'Stream failed' });
    } else if (!res.writableEnded) {
      // Headers already flushed — just terminate the stream.
      res.end();
    }
  }
}
// ============================================
// With Tool Use (App Router)
// ============================================
// Agentic tool-use loop: keeps calling the model, executing requested
// tools and feeding results back, until stop_reason !== 'tool_use'.
// NOTE(review): `conversationMessages` aliases and mutates the parsed
// request-body array, and the loop has no iteration cap — consider a
// defensive copy and a max-turns limit.
export async function POST_WITH_TOOLS(request: NextRequest) {
  try {
    const { messages } = await request.json();
    // Single demo tool; schema mirrors the Messages API tool format.
    const tools: Anthropic.Tool[] = [
      {
        name: 'get_weather',
        description: 'Get the current weather',
        input_schema: {
          type: 'object',
          properties: {
            location: { type: 'string' },
          },
          required: ['location'],
        },
      },
    ];
    let conversationMessages = messages;
    while (true) {
      const response = await anthropic.messages.create({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        tools,
        messages: conversationMessages,
      });
      // Record the assistant turn (it may contain tool_use blocks).
      conversationMessages.push({
        role: 'assistant',
        content: response.content,
      });
      if (response.stop_reason === 'tool_use') {
        const toolResults = [];
        for (const block of response.content) {
          if (block.type === 'tool_use') {
            // Execute tool
            const result = await executeToolFunction(block.name, block.input);
            toolResults.push({
              type: 'tool_result',
              tool_use_id: block.id,
              content: JSON.stringify(result),
            });
          }
        }
        // Tool results are sent back to the model as a user turn.
        conversationMessages.push({
          role: 'user',
          content: toolResults,
        });
      } else {
        // Final response
        return NextResponse.json(response);
      }
    }
  } catch (error) {
    console.error('Tool use error:', error);
    return NextResponse.json({ error: 'Tool execution failed' }, { status: 500 });
  }
}
/**
 * Dispatches a tool invocation by name. Only 'get_weather' is
 * implemented (mocked); unknown tools yield an error payload.
 */
async function executeToolFunction(name: string, input: any): Promise<any> {
  switch (name) {
    case 'get_weather':
      // Mock implementation
      return { temperature: 72, condition: 'Sunny' };
    default:
      return { error: 'Unknown tool' };
  }
}
// ============================================
// With Prompt Caching (App Router)
// ============================================
/**
 * Prompt-caching variant: marks the system prompt as ephemeral cache
 * content so repeated requests with the same prompt can hit the cache.
 */
export async function POST_WITH_CACHING(request: NextRequest) {
  try {
    const { messages, systemPrompt } = await request.json();

    const message = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      system: [
        {
          type: 'text',
          text: systemPrompt,
          cache_control: { type: 'ephemeral' },
        },
      ],
      messages,
    });

    // Usage is surfaced separately so callers can inspect cache activity.
    return NextResponse.json({ message, usage: message.usage });
  } catch (error) {
    console.error('Caching error:', error);
    return NextResponse.json({ error: 'Request failed' }, { status: 500 });
  }
}
// ============================================
// Middleware for Rate Limiting
// ============================================
import { Ratelimit } from '@upstash/ratelimit';
import { Redis } from '@upstash/redis';
const ratelimit = new Ratelimit({
redis: Redis.fromEnv(),
limiter: Ratelimit.slidingWindow(10, '1 m'),
});
// Rate-limited wrapper: rejects with 429 when the caller's bucket is
// exhausted, otherwise delegates to the normal POST handler.
export async function POST_WITH_RATE_LIMIT(request: NextRequest) {
  // Identify the caller by IP; fall back to one shared bucket when absent.
  // NOTE(review): request.ip is only populated on some deployment targets — confirm.
  const identifier = request.ip ?? 'anonymous';
  const { success } = await ratelimit.limit(identifier);
  return success
    ? POST(request)
    : NextResponse.json({ error: 'Rate limit exceeded' }, { status: 429 });
}

381
templates/nodejs-example.ts Normal file
View File

@@ -0,0 +1,381 @@
// Complete Node.js examples for Claude API
import Anthropic from '@anthropic-ai/sdk';
import * as dotenv from 'dotenv';
import * as readline from 'readline';
// Load ANTHROPIC_API_KEY (and any other settings) from a local .env file.
dotenv.config();
// Shared client instance used by every example below.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});
// Example 1: Simple CLI chatbot
//
// Reads user input from stdin in a loop, sends the running conversation to
// Claude, and prints each reply. Type "exit" to quit.
async function simpleCLIChatbot() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });
  // Full conversation history; resent on every turn so Claude keeps context.
  const messages: Anthropic.MessageParam[] = [];
  console.log('Claude CLI Chatbot (type "exit" to quit)\n');
  const chat = async () => {
    rl.question('You: ', async (userInput) => {
      if (userInput.toLowerCase() === 'exit') {
        console.log('Goodbye!');
        rl.close();
        return;
      }
      messages.push({ role: 'user', content: userInput });
      try {
        const response = await anthropic.messages.create({
          model: 'claude-sonnet-4-5-20250929',
          max_tokens: 1024,
          messages,
        });
        const textContent = response.content.find(b => b.type === 'text');
        if (textContent && textContent.type === 'text') {
          console.log(`\nClaude: ${textContent.text}\n`);
          messages.push({ role: 'assistant', content: textContent.text });
        }
      } catch (error) {
        // `error` is `unknown` under strict TS; narrow before reading .message.
        console.error('Error:', error instanceof Error ? error.message : error);
      }
      chat();
    });
  };
  chat();
}
// Example 2: Streaming CLI chatbot
//
// Same REPL as Example 1, but prints tokens as they arrive. Note: this
// variant is stateless — each turn sends only the latest user input.
async function streamingCLIChatbot() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });
  console.log('Streaming Claude CLI Chatbot (type "exit" to quit)\n');
  const chat = async () => {
    rl.question('You: ', async (userInput) => {
      if (userInput.toLowerCase() === 'exit') {
        console.log('Goodbye!');
        rl.close();
        return;
      }
      try {
        process.stdout.write('\nClaude: ');
        const stream = anthropic.messages.stream({
          model: 'claude-sonnet-4-5-20250929',
          max_tokens: 1024,
          messages: [{ role: 'user', content: userInput }],
        });
        // Print each text delta as soon as it arrives.
        // (The previous accumulator variable was unused and has been removed.)
        stream.on('text', (text) => {
          process.stdout.write(text);
        });
        await stream.finalMessage();
        console.log('\n');
      } catch (error) {
        // `error` is `unknown` under strict TS; narrow before reading .message.
        console.error('\nError:', error instanceof Error ? error.message : error);
      }
      chat();
    });
  };
  chat();
}
// Example 3: Batch processing from file
import * as fs from 'fs';
// Reads one prompt per line from `inputFile`, sends each to Claude
// sequentially (with a 1s pause between calls to stay under rate limits),
// and writes all results — including per-prompt errors — to `outputFile`
// as pretty-printed JSON.
async function batchProcessing(inputFile: string, outputFile: string) {
  const lines = fs.readFileSync(inputFile, 'utf-8').split('\n').filter(Boolean);
  const results = [];
  console.log(`Processing ${lines.length} prompts...`);
  for (let i = 0; i < lines.length; i++) {
    const prompt = lines[i];
    console.log(`\n[${i + 1}/${lines.length}] Processing: ${prompt.substring(0, 50)}...`);
    try {
      const message = await anthropic.messages.create({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        messages: [{ role: 'user', content: prompt }],
      });
      const textContent = message.content.find(b => b.type === 'text');
      if (textContent && textContent.type === 'text') {
        results.push({
          prompt,
          response: textContent.text,
          tokens: message.usage,
        });
      }
      // Rate limiting pause
      await new Promise(resolve => setTimeout(resolve, 1000));
    } catch (error) {
      // `error` is `unknown` under strict TS; narrow before reading .message.
      const errMessage = error instanceof Error ? error.message : String(error);
      console.error(`Error processing prompt ${i + 1}:`, errMessage);
      results.push({
        prompt,
        error: errMessage,
      });
    }
  }
  fs.writeFileSync(outputFile, JSON.stringify(results, null, 2));
  console.log(`\nResults written to ${outputFile}`);
}
// Example 4: Document summarization
// Reads a file from disk and asks Claude for a bullet-point summary; the
// document block is cache-marked so repeat summaries reuse cached tokens.
async function summarizeDocument(filePath: string) {
  const documentText = fs.readFileSync(filePath, 'utf-8');
  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 2048,
    system: 'You are an expert document summarizer. Provide concise, accurate summaries.',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Summarize the following document in 3-5 bullet points:' },
          { type: 'text', text: documentText, cache_control: { type: 'ephemeral' } },
        ],
      },
    ],
  });
  const summary = response.content.find(block => block.type === 'text');
  if (summary?.type === 'text') {
    console.log('Summary:');
    console.log(summary.text);
    console.log('\nToken usage:', response.usage);
  }
}
// Example 5: Code review assistant
// Asks Claude to review a code snippet; the review criteria are supplied
// via the system prompt and the code is fenced in a markdown block.
async function codeReview(codeContent: string, language: string) {
  const systemPrompt = `You are an expert ${language} code reviewer. Analyze code for:
- Bugs and potential issues
- Performance optimizations
- Security vulnerabilities
- Best practices
- Code style and readability`;
  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 2048,
    system: systemPrompt,
    messages: [
      {
        role: 'user',
        content: `Review this ${language} code:\n\n\`\`\`${language}\n${codeContent}\n\`\`\``,
      },
    ],
  });
  const review = response.content.find(block => block.type === 'text');
  if (review?.type === 'text') {
    console.log('Code Review:');
    console.log(review.text);
  }
}
// Example 6: Translation service
// Returns the translated text, or null when the response has no text block.
async function translateText(text: string, from: string, to: string) {
  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: `Translate the following text from ${from} to ${to}:\n\n${text}`,
      },
    ],
  });
  // Return the first text block, if any.
  for (const block of response.content) {
    if (block.type === 'text') {
      return block.text;
    }
  }
  return null;
}
// Example 7: Parallel requests
//
// Fires all prompts concurrently via Promise.all. Each entry resolves to
// either { index, prompt, response } or { index, prompt, error } so one
// failure does not reject the whole batch.
async function parallelRequests(prompts: string[]) {
  console.log(`Processing ${prompts.length} prompts in parallel...`);
  const promises = prompts.map(async (prompt, index) => {
    try {
      const message = await anthropic.messages.create({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 512,
        messages: [{ role: 'user', content: prompt }],
      });
      const textContent = message.content.find(b => b.type === 'text');
      return {
        index,
        prompt,
        response: textContent && textContent.type === 'text' ? textContent.text : null,
      };
    } catch (error) {
      // `error` is `unknown` under strict TS; narrow before reading .message.
      return {
        index,
        prompt,
        error: error instanceof Error ? error.message : String(error),
      };
    }
  });
  const results = await Promise.all(promises);
  return results;
}
// Example 8: Retry logic with exponential backoff
//
// Sends a single prompt, retrying only on HTTP 429 (rate limit) with delays
// of baseDelay * 2^attempt. Any other error — and a response containing no
// text block — is thrown immediately without retrying.
async function requestWithRetry(
  prompt: string,
  maxRetries = 3,
  baseDelay = 1000
): Promise<string> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const message = await anthropic.messages.create({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        messages: [{ role: 'user', content: prompt }],
      });
      const textContent = message.content.find(b => b.type === 'text');
      if (textContent && textContent.type === 'text') {
        return textContent.text;
      }
      throw new Error('No text content in response');
    } catch (error) {
      // Retry only rate-limit errors, and only while attempts remain.
      if (error instanceof Anthropic.APIError && error.status === 429) {
        if (attempt < maxRetries - 1) {
          const delay = baseDelay * Math.pow(2, attempt);
          console.warn(`Rate limited. Retrying in ${delay}ms...`);
          await new Promise(resolve => setTimeout(resolve, delay));
          continue;
        }
      }
      throw error;
    }
  }
  // Unreachable in practice (each iteration returns or throws), but keeps
  // the declared Promise<string> return type satisfied.
  throw new Error('Max retries exceeded');
}
// Example 9: Conversation logger
//
// Maintains a multi-turn conversation and persists the full message history
// to a JSON file after every assistant reply.
class ConversationLogger {
  // Accumulated user/assistant turns, in order.
  private messages: Anthropic.MessageParam[] = [];
  // Path of the JSON file the history is saved to / loaded from.
  private logFile: string;
  constructor(logFile: string) {
    this.logFile = logFile;
  }
  // Sends userMessage (plus prior history) to Claude, records both sides of
  // the exchange, saves to disk, and returns the assistant's reply text.
  // Throws if the response contains no text block.
  async chat(userMessage: string): Promise<string> {
    this.messages.push({ role: 'user', content: userMessage });
    const response = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages: this.messages,
    });
    const textContent = response.content.find(b => b.type === 'text');
    if (textContent && textContent.type === 'text') {
      this.messages.push({ role: 'assistant', content: textContent.text });
      this.save();
      return textContent.text;
    }
    throw new Error('No response');
  }
  // Overwrites the log file with the current history (pretty-printed JSON).
  private save() {
    fs.writeFileSync(this.logFile, JSON.stringify(this.messages, null, 2));
  }
  // Restores history from a previous run, if the log file exists.
  // NOTE(review): the JSON.parse result is used unvalidated — consider
  // validating the shape before trusting file contents.
  load() {
    if (fs.existsSync(this.logFile)) {
      this.messages = JSON.parse(fs.readFileSync(this.logFile, 'utf-8'));
    }
  }
}
// Run examples
// CLI dispatcher: `tsx nodejs-example.ts <example> [args...]`.
if (require.main === module) {
  const args = process.argv.slice(2);
  const example = args[0];
  switch (example) {
    case 'cli':
      simpleCLIChatbot();
      break;
    case 'stream':
      streamingCLIChatbot();
      break;
    case 'batch':
      batchProcessing(args[1], args[2] || 'output.json');
      break;
    case 'summarize':
      summarizeDocument(args[1]);
      break;
    case 'review': {
      // Braces scope `code` to this case (avoids no-case-declarations:
      // a lexical declaration in an unbraced case leaks into the whole switch).
      const code = fs.readFileSync(args[1], 'utf-8');
      codeReview(code, args[2] || 'typescript');
      break;
    }
    case 'translate':
      translateText(args[1], args[2], args[3]).then(console.log);
      break;
    default:
      console.log('Available examples:');
      console.log('- cli: Interactive chatbot');
      console.log('- stream: Streaming chatbot');
      console.log('- batch <input> [output]: Batch processing');
      console.log('- summarize <file>: Document summarization');
      console.log('- review <file> [language]: Code review');
      console.log('- translate <text> <from> <to>: Translation');
  }
}
export {
  simpleCLIChatbot,
  streamingCLIChatbot,
  batchProcessing,
  summarizeDocument,
  codeReview,
  translateText,
  parallelRequests,
  requestWithRetry,
  ConversationLogger,
};

48
templates/package.json Normal file
View File

@@ -0,0 +1,48 @@
{
"name": "claude-api-examples",
"version": "1.0.0",
"description": "Examples for using the Claude API with various platforms",
"main": "index.ts",
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"cli": "tsx src/nodejs-example.ts cli",
"stream": "tsx src/nodejs-example.ts stream",
"test": "vitest",
"lint": "eslint . --ext .ts",
"format": "prettier --write \"**/*.{ts,tsx,md,json}\"",
"typecheck": "tsc --noEmit"
},
"keywords": [
"claude",
"anthropic",
"ai",
"llm",
"chatbot"
],
"author": "",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.67.0"
},
"devDependencies": {
"@types/node": "^20.0.0",
"@typescript-eslint/eslint-plugin": "^6.0.0",
"@typescript-eslint/parser": "^6.0.0",
"dotenv": "^16.3.1",
"eslint": "^8.50.0",
"prettier": "^3.0.3",
"tsx": "^4.0.0",
"typescript": "^5.3.0",
"vitest": "^1.0.0",
"zod": "^3.23.0"
},
"optionalDependencies": {
"@upstash/ratelimit": "^1.0.0",
"@upstash/redis": "^1.25.0"
},
"engines": {
"node": ">=18.0.0"
}
}

272
templates/prompt-caching.ts Normal file
View File

@@ -0,0 +1,272 @@
import Anthropic from '@anthropic-ai/sdk';
import fs from 'fs';
// Shared client; reads the API key from the environment.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});
// Example 1: Basic prompt caching with system prompt
//
// Issues the same cache-marked system prompt twice: the first request pays
// the cache-write cost, the second (within the cache TTL) reads it back at a
// discount. Usage counters from both requests are printed for comparison.
async function cacheSystemPrompt() {
  // Simulate a large system prompt (must be >= 1024 tokens for caching)
  // NOTE(review): the "// Padding ..." text below is inside the template
  // literal, so it is sent to the model as part of the prompt (harmless here).
  const largeSystemPrompt = `
You are an expert software architect with deep knowledge of:
- Microservices architecture and design patterns
- Cloud-native applications (AWS, GCP, Azure)
- Containerization (Docker, Kubernetes)
- CI/CD pipelines and DevOps practices
- Database design (SQL and NoSQL)
- API design (REST, GraphQL, gRPC)
- Security best practices and compliance
- Performance optimization and scalability
- Monitoring and observability (Prometheus, Grafana)
- Event-driven architectures and message queues
${' '.repeat(10000)} // Padding to ensure > 1024 tokens
Always provide detailed, production-ready advice with code examples.
`.trim();
  // First request - creates cache
  const message1 = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    system: [
      {
        type: 'text',
        text: largeSystemPrompt,
        cache_control: { type: 'ephemeral' },
      },
    ],
    messages: [
      {
        role: 'user',
        content: 'How do I design a scalable authentication system?',
      },
    ],
  });
  console.log('=== First Request (Cache Creation) ===');
  console.log('Cache creation tokens:', message1.usage.cache_creation_input_tokens);
  console.log('Cache read tokens:', message1.usage.cache_read_input_tokens);
  console.log('Input tokens:', message1.usage.input_tokens);
  console.log('Output tokens:', message1.usage.output_tokens);
  // Second request - hits cache (within 5 minutes)
  const message2 = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    system: [
      {
        type: 'text',
        text: largeSystemPrompt, // MUST be identical to hit cache
        cache_control: { type: 'ephemeral' },
      },
    ],
    messages: [
      {
        role: 'user',
        content: 'What about rate limiting strategies?',
      },
    ],
  });
  console.log('\n=== Second Request (Cache Hit) ===');
  console.log('Cache creation tokens:', message2.usage.cache_creation_input_tokens);
  console.log('Cache read tokens:', message2.usage.cache_read_input_tokens);
  console.log('Input tokens:', message2.usage.input_tokens);
  console.log('Output tokens:', message2.usage.output_tokens);
  console.log('Savings: ~90% on cached content');
}
// Example 2: Caching large documents
// Sends a document with cache_control so follow-up questions about the same
// document can reuse the cached tokens.
async function cacheLargeDocument() {
  // Read a large document (e.g., documentation, codebase)
  const documentText = fs.readFileSync('./large-document.txt', 'utf-8');
  // Ensure document is >= 1024 tokens
  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Analyze the following documentation:' },
          // The large middle block is the cached span.
          { type: 'text', text: documentText, cache_control: { type: 'ephemeral' } },
          { type: 'text', text: 'What are the main API endpoints?' },
        ],
      },
    ],
  });
  console.log('=== Document Analysis with Caching ===');
  console.log('Cache creation:', response.usage.cache_creation_input_tokens);
  console.log('Cache read:', response.usage.cache_read_input_tokens);
}
// Example 3: Multi-turn conversation with caching (chatbot pattern)
//
// Reuses one cache-marked system prompt across turns: turn 1 writes the
// cache, turn 2 reads it. The growing message history is sent in full.
async function multiTurnCachingConversation() {
  // NOTE(review): as in Example 1, the "// Ensure ..." text is part of the
  // template literal and therefore part of the prompt itself.
  const systemInstructions = `
You are a customer support AI for TechCorp, specializing in:
- Product troubleshooting
- Account management
- Billing inquiries
- Technical specifications
${' '.repeat(10000)} // Ensure > 1024 tokens
Knowledge Base:
- Product A: Cloud storage service
- Product B: Analytics platform
- Product C: AI API service
Always be polite, helpful, and provide actionable solutions.
`.trim();
  // Conversation state
  const messages: Anthropic.MessageParam[] = [];
  // Turn 1
  messages.push({
    role: 'user',
    content: 'How do I reset my password?',
  });
  const response1 = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    system: [
      {
        type: 'text',
        text: systemInstructions,
        cache_control: { type: 'ephemeral' },
      },
    ],
    messages,
  });
  const text1 = response1.content.find(b => b.type === 'text');
  if (text1 && text1.type === 'text') {
    messages.push({ role: 'assistant', content: text1.text });
    console.log('Turn 1 - Cache creation:', response1.usage.cache_creation_input_tokens);
  }
  // Turn 2 - cache hit
  messages.push({
    role: 'user',
    content: 'What about two-factor authentication?',
  });
  const response2 = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    system: [
      {
        type: 'text',
        text: systemInstructions,
        cache_control: { type: 'ephemeral' },
      },
    ],
    messages,
  });
  console.log('Turn 2 - Cache read:', response2.usage.cache_read_input_tokens);
  console.log('Turn 2 - New input tokens:', response2.usage.input_tokens);
}
// Example 4: Caching with conversation history
//
// Marks the most recent assistant message with cache_control so the prior
// turns can be served from cache on the next request.
async function cacheConversationHistory() {
  const messages: Anthropic.MessageParam[] = [
    { role: 'user', content: 'Tell me about TypeScript' },
    { role: 'assistant', content: 'TypeScript is a superset of JavaScript...' },
    { role: 'user', content: 'What about interfaces?' },
    { role: 'assistant', content: 'Interfaces in TypeScript define contracts...' },
    { role: 'user', content: 'Can you give examples?' },
  ];
  // Cache the conversation history
  // slice(0, -1) drops the final user turn, so the slice's last index is
  // messages.length - 2, i.e. the most recent assistant message.
  const messagesWithCache: Anthropic.MessageParam[] = messages.slice(0, -1).map((msg, idx) => {
    if (idx === messages.length - 2) {
      // Cache the last assistant message
      // (string content is rewrapped as a text block to attach cache_control)
      return {
        ...msg,
        content: [
          {
            type: 'text',
            text: typeof msg.content === 'string' ? msg.content : '',
            cache_control: { type: 'ephemeral' },
          },
        ],
      };
    }
    return msg;
  });
  // Re-append the final (uncached) user turn.
  messagesWithCache.push(messages[messages.length - 1]);
  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: messagesWithCache,
  });
  console.log('=== Caching Conversation History ===');
  console.log('Cache usage:', response.usage);
}
// Example 5: Cost comparison calculator
//
// Estimates the cost of a multi-turn conversation with and without prompt
// caching (Claude Sonnet list pricing), logs a comparison, and now also
// returns the computed figures so callers can use them programmatically
// (backward-compatible: previous callers ignored the void return).
//
// @param inputTokens  input tokens sent (and cached) per turn
// @param outputTokens output tokens generated per turn
// @param turnCount    number of turns in the conversation
// @returns { costWithoutCache, costWithCache, savings, savingsPercent }
function calculateCachingSavings(inputTokens: number, outputTokens: number, turnCount: number) {
  const inputCostPerMTok = 3; // $3 per million tokens
  const outputCostPerMTok = 15; // $15 per million tokens
  const cacheCostPerMTok = 3.75; // $3.75 per million tokens (write)
  const cacheReadCostPerMTok = 0.3; // $0.30 per million tokens (read)
  // Without caching: full input price on every turn.
  const costWithoutCache =
    (inputTokens / 1_000_000) * inputCostPerMTok * turnCount +
    (outputTokens / 1_000_000) * outputCostPerMTok * turnCount;
  // With caching: one cache write, then discounted reads on later turns.
  const cacheWriteCost = (inputTokens / 1_000_000) * cacheCostPerMTok; // First request
  const cacheReadCost = (inputTokens / 1_000_000) * cacheReadCostPerMTok * (turnCount - 1); // Subsequent
  const outputCost = (outputTokens / 1_000_000) * outputCostPerMTok * turnCount;
  const costWithCache = cacheWriteCost + cacheReadCost + outputCost;
  const savings = costWithoutCache - costWithCache;
  const savingsPercent = (savings / costWithoutCache) * 100;
  console.log('\n=== Cost Comparison ===');
  console.log(`Input tokens: ${inputTokens}, Output tokens: ${outputTokens}, Turns: ${turnCount}`);
  console.log(`Without caching: $${costWithoutCache.toFixed(4)}`);
  console.log(`With caching: $${costWithCache.toFixed(4)}`);
  console.log(`Savings: $${savings.toFixed(4)} (${savingsPercent.toFixed(1)}%)`);
  return { costWithoutCache, costWithCache, savings, savingsPercent };
}
// Run examples
// Executes the cache demos sequentially, then prints a sample cost analysis.
// NOTE: cacheLargeDocument and cacheConversationHistory are exported but not
// run here (the former requires ./large-document.txt on disk).
if (require.main === module) {
  cacheSystemPrompt()
    .then(() => multiTurnCachingConversation())
    .then(() => {
      // Example cost calculation
      calculateCachingSavings(100000, 5000, 10); // 100k input, 5k output, 10 turns
    })
    .catch(console.error);
}
export {
  cacheSystemPrompt,
  cacheLargeDocument,
  multiTurnCachingConversation,
  cacheConversationHistory,
  calculateCachingSavings,
};

194
templates/streaming-chat.ts Normal file
View File

@@ -0,0 +1,194 @@
import Anthropic from '@anthropic-ai/sdk';
// Shared client; reads the API key from the environment.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});
// Method 1: Using SDK stream helper with event listeners
//
// Registers callbacks for incremental text, the final message, errors, abort,
// and stream end, then awaits completion via finalMessage().
async function streamWithEvents() {
  console.log('Claude:');
  const stream = anthropic.messages.stream({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: 'Write a short poem about coding.',
      },
    ],
  });
  stream
    .on('text', (text) => {
      // Incremental text deltas, printed as they arrive.
      process.stdout.write(text);
    })
    .on('message', (message) => {
      console.log('\n\nFinal message:', message);
      console.log('Stop reason:', message.stop_reason);
    })
    .on('error', (error) => {
      console.error('\nStream error:', error);
    })
    .on('abort', (error) => {
      console.warn('\nStream aborted:', error);
    })
    .on('end', () => {
      console.log('\n\nStream ended');
    });
  // Wait for stream to complete
  const finalMessage = await stream.finalMessage();
  return finalMessage;
}
// Method 2: Manual iteration over stream events
//
// Uses the raw streaming API (stream: true) and consumes events with
// for-await, accumulating text deltas into one string that is returned.
async function streamWithManualIteration() {
  console.log('Claude:');
  const stream = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: 'Explain quantum computing in simple terms.',
      },
    ],
    stream: true,
  });
  let fullText = '';
  try {
    for await (const event of stream) {
      // Text arrives as content_block_delta events carrying a text_delta.
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        const text = event.delta.text;
        fullText += text;
        process.stdout.write(text);
      }
      if (event.type === 'message_stop') {
        console.log('\n\nStream complete');
      }
    }
  } catch (error) {
    console.error('\nStream error:', error);
    throw error;
  }
  return fullText;
}
// Method 3: Streaming with abort control
//
// Cancels the stream client-side once roughly maxChars characters have been
// printed, then distinguishes the user-abort error from real failures.
async function streamWithAbort() {
  console.log('Claude (can be aborted):');
  const stream = anthropic.messages.stream({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 2048,
    messages: [
      {
        role: 'user',
        content: 'Write a long essay about the history of computers.',
      },
    ],
  });
  let charCount = 0;
  const maxChars = 200; // Abort after 200 characters
  stream.on('text', (text) => {
    process.stdout.write(text);
    charCount += text.length;
    // Abort stream after reaching limit
    if (charCount >= maxChars) {
      console.log('\n\n[Aborting stream after', charCount, 'characters]');
      stream.abort();
    }
  });
  stream.on('abort', () => {
    console.log('Stream was aborted successfully');
  });
  stream.on('error', (error) => {
    console.error('Stream error:', error);
  });
  try {
    await stream.done();
  } catch (error) {
    // `error` is `unknown` under strict TS; narrow before reading .name.
    if (error instanceof Error && error.name === 'APIUserAbortError') {
      console.log('Stream aborted by user');
    } else {
      throw error;
    }
  }
}
// Method 4: Streaming with retry logic
//
// Retries the whole stream with exponential backoff (1s, 2s, 4s, ...) when
// it fails, returning the accumulated text on success.
async function streamWithRetry(maxRetries = 3) {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const stream = anthropic.messages.stream({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        messages: [
          {
            role: 'user',
            content: 'Tell me a fun fact about space.',
          },
        ],
      });
      let fullText = '';
      stream.on('text', (text) => {
        fullText += text;
        process.stdout.write(text);
      });
      // Log only: throwing inside an event listener would NOT be caught by
      // this try/catch (it surfaces as an uncaught emitter exception).
      // finalMessage() below rejects on stream errors, which is what
      // actually drives the retry loop.
      stream.on('error', (error) => {
        console.error(`\nStream error on attempt ${attempt + 1}:`, error);
      });
      await stream.finalMessage();
      console.log('\n\nStream completed successfully');
      return fullText;
    } catch (error) {
      if (attempt < maxRetries - 1) {
        const delay = Math.pow(2, attempt) * 1000;
        console.log(`\nRetrying in ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
      } else {
        console.error('\nMax retries exceeded');
        throw error;
      }
    }
  }
}
// Run examples
// Executes the four streaming demos sequentially.
if (require.main === module) {
  console.log('=== Stream with Events ===\n');
  streamWithEvents()
    .then(() => {
      console.log('\n\n=== Stream with Manual Iteration ===\n');
      return streamWithManualIteration();
    })
    .then(() => {
      console.log('\n\n=== Stream with Abort ===\n');
      return streamWithAbort();
    })
    .then(() => {
      console.log('\n\n=== Stream with Retry ===\n');
      return streamWithRetry();
    })
    .catch(console.error);
}
export { streamWithEvents, streamWithManualIteration, streamWithAbort, streamWithRetry };

View File

@@ -0,0 +1,296 @@
import Anthropic from '@anthropic-ai/sdk';
import { betaZodTool } from '@anthropic-ai/sdk/helpers/zod';
import { z } from 'zod';
// Shared client; reads the API key from the environment.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});
// Example 1: Using Zod schemas with betaZodTool
// Weather lookup tool; the Zod schema doubles as the tool's input contract.
const weatherTool = betaZodTool({
  name: 'get_weather',
  description: 'Get the current weather in a given location',
  inputSchema: z.object({
    location: z.string().describe('The city and state, e.g. San Francisco, CA'),
    unit: z.enum(['celsius', 'fahrenheit']).optional().describe('Temperature unit'),
  }),
  run: async (input) => {
    // Mock implementation - replace with actual API call
    console.log(`Fetching weather for ${input.location}...`);
    let temp: number;
    if (input.unit === 'celsius') {
      temp = 22;
    } else {
      temp = 72;
    }
    return `The weather in ${input.location} is sunny and ${temp}°${input.unit || 'F'}`;
  },
});
// Web-search tool. The mock returns a fixed result list; max_results is
// constrained to 1-10 by the schema.
const searchTool = betaZodTool({
  name: 'search_web',
  inputSchema: z.object({
    query: z.string().describe('The search query'),
    max_results: z.number().int().min(1).max(10).optional().describe('Maximum number of results'),
  }),
  description: 'Search the web for information',
  run: async (input) => {
    console.log(`Searching for: ${input.query}...`);
    // Mock implementation
    return `Found ${input.max_results || 5} results for "${input.query}":
1. Example result 1
2. Example result 2
3. Example result 3`;
  },
});
// Calculator tool.
// SECURITY: run() evaluates model-supplied input with eval — demo only.
const calculatorTool = betaZodTool({
  name: 'calculate',
  inputSchema: z.object({
    expression: z.string().describe('Mathematical expression to evaluate'),
  }),
  description: 'Evaluate a mathematical expression',
  run: async (input) => {
    try {
      // WARNING: eval is dangerous - this is just for demonstration
      // In production, use a safe math parser like math.js
      const result = eval(input.expression);
      return `${input.expression} = ${result}`;
    } catch (error) {
      // Surface a tool-level error (the runner reports it back to the model).
      throw new Error(`Invalid expression: ${input.expression}`);
    }
  },
});
// Example 2: Using toolRunner for automatic execution
// toolRunner drives the request / tool-call / tool-result loop itself and
// resolves with the final message once Claude stops requesting tools.
async function automaticToolExecution() {
  const finalMessage = await anthropic.beta.messages.toolRunner({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1000,
    messages: [
      {
        role: 'user',
        content: 'What is the weather in Tokyo? Also, search for "best sushi restaurants in Tokyo"',
      },
    ],
    tools: [weatherTool, searchTool],
  });
  console.log('\nFinal response:');
  for (const block of finalMessage.content) {
    if (block.type !== 'text') continue;
    console.log(block.text);
  }
  return finalMessage;
}
// Example 3: Streaming with tools
//
// With stream: true the runner yields one message stream per assistant turn;
// each of those in turn yields events that can be printed incrementally.
async function streamingWithTools() {
  const runner = anthropic.beta.messages.toolRunner({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1000,
    messages: [
      {
        role: 'user',
        content: 'Calculate 123 * 456, then tell me about the result',
      },
    ],
    tools: [calculatorTool],
    stream: true,
  });
  console.log('Streaming response:');
  // Iterate through messages as they arrive
  for await (const messageStream of runner) {
    // Each message can have multiple events
    for await (const event of messageStream) {
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        process.stdout.write(event.delta.text);
      }
    }
    console.log('\n\nMessage completed');
  }
  // Get final result
  // NOTE(review): this awaits the runner after fully iterating it — confirm
  // the SDK supports this; otherwise capture the last message in the loop.
  const result = await runner;
  console.log('\nFinal result:', result);
}
// Example 4: Complex tool chain
// Database tool; the mock ignores the SQL text and returns two fixed
// products serialized as a JSON string.
const databaseTool = betaZodTool({
  name: 'query_database',
  inputSchema: z.object({
    query: z.string().describe('SQL query to execute'),
  }),
  description: 'Query the database',
  run: async (input) => {
    console.log(`Executing SQL: ${input.query}`);
    // Mock database response
    return JSON.stringify([
      { id: 1, name: 'Product A', price: 29.99 },
      { id: 2, name: 'Product B', price: 49.99 },
    ]);
  },
});
// Email tool; the Zod schema validates the recipient address format.
const emailTool = betaZodTool({
  name: 'send_email',
  inputSchema: z.object({
    to: z.string().email().describe('Recipient email address'),
    subject: z.string().describe('Email subject'),
    body: z.string().describe('Email body'),
  }),
  description: 'Send an email',
  run: async (input) => {
    console.log(`Sending email to ${input.to}...`);
    // Mock email sending
    return `Email sent successfully to ${input.to}`;
  },
});
// Chains three tools in one conversation; Claude decides the order
// (query -> calculate -> email) from the request itself.
async function complexToolChain() {
  const finalMessage = await anthropic.beta.messages.toolRunner({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 2000,
    messages: [
      {
        role: 'user',
        content:
          'Query the database for all products, calculate their average price, and send me an email with the results to test@example.com',
      },
    ],
    tools: [databaseTool, calculatorTool, emailTool],
  });
  console.log('\nTool chain completed');
  // Print only the text blocks of the final message.
  for (const block of finalMessage.content) {
    if (block.type === 'text') {
      console.log(block.text);
    }
  }
}
// Example 5: Tool with max iterations limit
// max_iterations caps request/tool-result round trips, guarding against
// runaway loops on open-ended prompts like this one.
async function toolsWithMaxIterations() {
  try {
    const finalMessage = await anthropic.beta.messages.toolRunner({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1000,
      max_iterations: 3, // Limit tool execution loops
      messages: [
        {
          role: 'user',
          content: 'Search for "quantum computing" and then search for each result',
        },
      ],
      tools: [searchTool],
    });
    console.log('Completed with max_iterations limit');
    for (const block of finalMessage.content) {
      if (block.type === 'text') {
        console.log(block.text);
      }
    }
  } catch (error) {
    console.error('Max iterations reached or error occurred:', error);
  }
}
// Example 6: Custom tool runner with manual control
//
// Instead of awaiting the runner directly, iterate message-by-message to
// inspect stop reasons, tool results, and request params between turns.
async function manualToolRunner() {
  const runner = anthropic.beta.messages.toolRunner({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1000,
    messages: [
      {
        role: 'user',
        content: 'What is 15 * 27?',
      },
    ],
    tools: [calculatorTool],
  });
  // Manually iterate through messages
  for await (const message of runner) {
    console.log('\nReceived message');
    console.log('Stop reason:', message.stop_reason);
    // Generate tool response if needed
    const toolResponse = await runner.generateToolResponse();
    if (toolResponse) {
      console.log('Tool results:', toolResponse.content);
    }
    // Can inspect and modify the conversation here
    console.log('Current params:', runner.params);
  }
  // Wait for completion
  const finalMessage = await runner.done();
  console.log('\nFinal message:', finalMessage);
}
// Example 7: Error recovery in tools
// Fails ~30% of the time so callers can exercise error-handling paths.
const unreliableTool = betaZodTool({
  name: 'unreliable_api',
  inputSchema: z.object({
    data: z.string(),
  }),
  description: 'An API that sometimes fails',
  run: async (input) => {
    // Randomly fail to demonstrate error handling
    if (Math.random() < 0.3) {
      throw new Error('API temporarily unavailable');
    }
    return `Processed: ${input.data}`;
  },
});
// Wraps the tool runner in try/catch so tool failures surface here instead
// of crashing the process, leaving room for retries or fallbacks.
async function toolWithErrorRecovery() {
  const prompt = 'Process this data with the unreliable API: "test data"';
  try {
    const result = await anthropic.beta.messages.toolRunner({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1000,
      messages: [{ role: 'user', content: prompt }],
      tools: [unreliableTool],
    });
    console.log('Success:', result.content);
  } catch (error) {
    console.error('Tool execution failed:', error);
    // Implement retry logic or fallback
  }
}
// Run examples
// Executes four of the demos in sequence. toolsWithMaxIterations and
// toolWithErrorRecovery are exported but not part of this chain.
if (require.main === module) {
  console.log('=== Automatic Tool Execution ===\n');
  automaticToolExecution()
    .then(() => {
      console.log('\n\n=== Streaming with Tools ===\n');
      return streamingWithTools();
    })
    .then(() => {
      console.log('\n\n=== Complex Tool Chain ===\n');
      return complexToolChain();
    })
    .then(() => {
      console.log('\n\n=== Manual Tool Runner ===\n');
      return manualToolRunner();
    })
    .catch(console.error);
}
export {
  automaticToolExecution,
  streamingWithTools,
  complexToolChain,
  toolsWithMaxIterations,
  manualToolRunner,
  toolWithErrorRecovery,
};

310
templates/tool-use-basic.ts Normal file
View File

@@ -0,0 +1,310 @@
import Anthropic from '@anthropic-ai/sdk';
// Shared client; reads the API key from the environment.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});
// Define tools
// JSON Schema tool definitions passed verbatim to the Messages API.
const tools: Anthropic.Tool[] = [
  {
    name: 'get_weather',
    description: 'Get the current weather in a given location',
    input_schema: {
      type: 'object',
      properties: {
        location: {
          type: 'string',
          description: 'The city and state, e.g. San Francisco, CA',
        },
        unit: {
          type: 'string',
          enum: ['celsius', 'fahrenheit'],
          description: 'The unit of temperature, either "celsius" or "fahrenheit"',
        },
      },
      // `unit` is optional; only location must be supplied.
      required: ['location'],
    },
  },
  {
    name: 'get_time',
    description: 'Get the current time in a given timezone',
    input_schema: {
      type: 'object',
      properties: {
        timezone: {
          type: 'string',
          description: 'The IANA timezone name, e.g. America/Los_Angeles',
        },
      },
      required: ['timezone'],
    },
  },
];
// Mock tool implementations
// Formats a canned weather report; Fahrenheit unless unit === 'celsius'.
function executeWeatherTool(location: string, unit?: string): string {
  // In production, call actual weather API
  const celsius = unit === 'celsius';
  const reading = celsius ? '22°C' : '72°F';
  return `The weather in ${location} is sunny and ${reading}`;
}
// Formats the current wall-clock time for the given IANA timezone.
function executeTimeTool(timezone: string): string {
  // In production, get actual time for timezone
  const now = new Date().toLocaleTimeString('en-US', { timeZone: timezone });
  return `The current time in ${timezone} is ${now}`;
}
// Example 1: Basic tool use detection
//
// Sends one request with tools attached and inspects the response: when
// stop_reason is 'tool_use', the content lists the tool calls Claude wants.
// No tools are executed here — see toolExecutionLoop for the full cycle.
async function basicToolUse() {
  const message = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    tools,
    messages: [
      {
        role: 'user',
        content: 'What is the weather like in San Francisco?',
      },
    ],
  });
  console.log('Stop reason:', message.stop_reason);
  if (message.stop_reason === 'tool_use') {
    console.log('\nClaude wants to use tools:');
    for (const block of message.content) {
      if (block.type === 'tool_use') {
        console.log(`- Tool: ${block.name}`);
        console.log(` ID: ${block.id}`);
        console.log(` Input:`, block.input);
      }
    }
  }
  return message;
}
// Example 2: Tool execution loop
//
// Full agentic cycle: send the conversation, and while Claude responds with
// stop_reason 'tool_use', execute each requested tool, append the results as
// a user turn, and re-send. Returns the final text reply, or undefined when
// the final response contains no text block.
async function toolExecutionLoop(userMessage: string) {
  const messages: Anthropic.MessageParam[] = [
    { role: 'user', content: userMessage },
  ];
  while (true) {
    const response = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      tools,
      messages,
    });
    console.log('\nStop reason:', response.stop_reason);
    // Add assistant response to messages
    messages.push({
      role: 'assistant',
      content: response.content,
    });
    // Check if Claude wants to use tools
    if (response.stop_reason === 'tool_use') {
      const toolResults: Anthropic.ToolResultBlockParam[] = [];
      // Execute each tool
      for (const block of response.content) {
        if (block.type === 'tool_use') {
          console.log(`\nExecuting tool: ${block.name}`);
          console.log('Input:', block.input);
          let result: string;
          // Execute the appropriate tool
          if (block.name === 'get_weather') {
            result = executeWeatherTool(
              block.input.location as string,
              block.input.unit as string | undefined
            );
          } else if (block.name === 'get_time') {
            result = executeTimeTool(block.input.timezone as string);
          } else {
            result = `Unknown tool: ${block.name}`;
          }
          console.log('Result:', result);
          toolResults.push({
            type: 'tool_result',
            tool_use_id: block.id,
            content: result,
          });
        }
      }
      // Add tool results to messages
      // (tool_result blocks must be sent back in a 'user' role message.)
      messages.push({
        role: 'user',
        content: toolResults,
      });
    } else {
      // Final response - no more tools needed
      const textBlock = response.content.find(block => block.type === 'text');
      if (textBlock && textBlock.type === 'text') {
        console.log('\nFinal response:', textBlock.text);
        return textBlock.text;
      }
      break;
    }
  }
}
// Example 3: Multiple tools in one turn
/**
 * Demonstrates Claude requesting several tools in a single assistant turn
 * (parallel tool use), executing all of them, and sending the combined
 * results back for a final answer.
 */
async function multipleToolsInOneTurn() {
  const messages: Anthropic.MessageParam[] = [
    {
      role: 'user',
      content: 'What is the weather in New York and what time is it in Tokyo?',
    },
  ];

  const response = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    tools,
    messages,
  });

  console.log('Claude requested', response.content.filter(b => b.type === 'tool_use').length, 'tools');

  if (response.stop_reason === 'tool_use') {
    const toolResults: Anthropic.ToolResultBlockParam[] = [];

    for (const block of response.content) {
      if (block.type === 'tool_use') {
        console.log(`\n- ${block.name}:`, block.input);

        let result: string;
        if (block.name === 'get_weather') {
          // Forward the optional `unit` too (it was previously dropped),
          // matching how toolExecutionLoop invokes the same tool.
          const { location, unit } = block.input as { location: string; unit?: string };
          result = executeWeatherTool(location, unit);
        } else if (block.name === 'get_time') {
          const { timezone } = block.input as { timezone: string };
          result = executeTimeTool(timezone);
        } else {
          result = 'Unknown tool';
        }

        toolResults.push({
          type: 'tool_result',
          tool_use_id: block.id,
          content: result,
        });
      }
    }

    // Continue conversation with tool results
    messages.push({ role: 'assistant', content: response.content });
    messages.push({ role: 'user', content: toolResults });

    const finalResponse = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      tools,
      messages,
    });

    const textBlock = finalResponse.content.find(b => b.type === 'text');
    if (textBlock && textBlock.type === 'text') {
      console.log('\nFinal answer:', textBlock.text);
    }
  }
}
// Example 4: Error handling in tool execution
/**
 * Same tool flow as above, but individual tool failures are reported back
 * to Claude as `is_error` tool results instead of aborting the request, so
 * the model can recover gracefully. API-level failures still propagate.
 *
 * @param userMessage - The initial user prompt.
 * @throws Re-throws any Anthropic API error after logging it.
 */
async function toolExecutionWithErrorHandling(userMessage: string) {
  const messages: Anthropic.MessageParam[] = [
    { role: 'user', content: userMessage },
  ];

  try {
    const response = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      tools,
      messages,
    });

    messages.push({ role: 'assistant', content: response.content });

    if (response.stop_reason === 'tool_use') {
      const toolResults: Anthropic.ToolResultBlockParam[] = [];

      for (const block of response.content) {
        if (block.type === 'tool_use') {
          try {
            let result: string;
            if (block.name === 'get_weather') {
              const { location, unit } = block.input as { location: string; unit?: string };
              result = executeWeatherTool(location, unit);
            } else if (block.name === 'get_time') {
              const { timezone } = block.input as { timezone: string };
              result = executeTimeTool(timezone);
            } else {
              throw new Error(`Unknown tool: ${block.name}`);
            }
            toolResults.push({
              type: 'tool_result',
              tool_use_id: block.id,
              content: result,
            });
          } catch (error) {
            // The catch binding is `unknown` under strict mode — narrow
            // before reading .message instead of assuming an Error.
            const detail = error instanceof Error ? error.message : String(error);
            // Return error to Claude as a flagged tool result
            toolResults.push({
              type: 'tool_result',
              tool_use_id: block.id,
              content: `Error executing tool: ${detail}`,
              is_error: true,
            });
          }
        }
      }

      messages.push({ role: 'user', content: toolResults });

      // Get final response
      const finalResponse = await anthropic.messages.create({
        model: 'claude-sonnet-4-5-20250929',
        max_tokens: 1024,
        tools,
        messages,
      });

      const textBlock = finalResponse.content.find(b => b.type === 'text');
      if (textBlock && textBlock.type === 'text') {
        console.log('Final response:', textBlock.text);
      }
    }
  } catch (error) {
    console.error('API error:', error);
    throw error;
  }
}
// Run the examples sequentially when this file is executed directly
// (CommonJS entry-point check; does nothing when imported as a module).
if (require.main === module) {
  console.log('=== Basic Tool Use ===\n');
  // Each .then returns the next demo's promise so they run one after
  // another; the single trailing .catch reports the first failure.
  basicToolUse()
    .then(() => {
      console.log('\n\n=== Tool Execution Loop ===\n');
      return toolExecutionLoop('What is the weather in London and what time is it there?');
    })
    .then(() => {
      console.log('\n\n=== Multiple Tools ===\n');
      return multipleToolsInOneTurn();
    })
    .then(() => {
      console.log('\n\n=== Error Handling ===\n');
      return toolExecutionWithErrorHandling('What is the weather in Mars?');
    })
    .catch(console.error);
}
// Public API of this template module
export { basicToolUse, toolExecutionLoop, multipleToolsInOneTurn, toolExecutionWithErrorHandling };

392
templates/vision-image.ts Normal file
View File

@@ -0,0 +1,392 @@
import Anthropic from '@anthropic-ai/sdk';
import fs from 'fs';
import path from 'path';
// Shared client for every example below. Reads the key from the
// environment; the '' fallback keeps construction from throwing, but
// requests will fail until ANTHROPIC_API_KEY is actually set.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY || '',
});
// Example 1: Single image analysis
/**
 * Reads a local image, base64-encodes it, and asks Claude to describe it.
 *
 * @param imagePath - Path to a .jpg/.jpeg/.png/.webp/.gif file.
 * @returns The full API response message.
 */
async function analyzeSingleImage(imagePath: string) {
  // Media types accepted by the Messages API base64 image source.
  type ImageMediaType = 'image/jpeg' | 'image/png' | 'image/webp' | 'image/gif';

  // Read and encode image as base64
  const imageData = fs.readFileSync(imagePath);
  const base64Image = imageData.toString('base64');

  // Determine media type from file extension. Typing the map with the
  // SDK's literal union (instead of plain `string`) keeps the
  // `media_type` field type-safe without a cast.
  const ext = path.extname(imagePath).toLowerCase();
  const mediaTypeMap: Record<string, ImageMediaType> = {
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.png': 'image/png',
    '.webp': 'image/webp',
    '.gif': 'image/gif',
  };
  // Unknown extensions fall back to JPEG (matches prior behavior);
  // use validateImage() beforehand for a hard error instead.
  const mediaType = mediaTypeMap[ext] ?? 'image/jpeg';

  const message = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image',
            source: {
              type: 'base64',
              media_type: mediaType,
              data: base64Image,
            },
          },
          {
            type: 'text',
            text: 'What is in this image? Describe it in detail.',
          },
        ],
      },
    ],
  });

  const textContent = message.content.find(block => block.type === 'text');
  if (textContent && textContent.type === 'text') {
    console.log('Claude:', textContent.text);
  }
  return message;
}
// Example 2: Multiple images comparison
/**
 * Sends two local images in a single user turn and asks Claude to compare
 * them. The media type is derived from each file's extension — previously
 * both were hard-coded to image/jpeg, which mislabeled PNG/WebP/GIF input.
 */
async function compareImages(image1Path: string, image2Path: string) {
  type ImageMediaType = 'image/jpeg' | 'image/png' | 'image/webp' | 'image/gif';

  // Map a path's extension to the API media type; unknown → JPEG fallback.
  const mediaTypeFor = (p: string): ImageMediaType => {
    const byExt: Record<string, ImageMediaType> = {
      '.jpg': 'image/jpeg',
      '.jpeg': 'image/jpeg',
      '.png': 'image/png',
      '.webp': 'image/webp',
      '.gif': 'image/gif',
    };
    return byExt[path.extname(p).toLowerCase()] ?? 'image/jpeg';
  };

  const image1Data = fs.readFileSync(image1Path).toString('base64');
  const image2Data = fs.readFileSync(image2Path).toString('base64');

  const message = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'text',
            text: 'Compare these two images. What are the similarities and differences?',
          },
          {
            type: 'image',
            source: {
              type: 'base64',
              media_type: mediaTypeFor(image1Path),
              data: image1Data,
            },
          },
          {
            type: 'image',
            source: {
              type: 'base64',
              media_type: mediaTypeFor(image2Path),
              data: image2Data,
            },
          },
        ],
      },
    ],
  });

  const textContent = message.content.find(block => block.type === 'text');
  if (textContent && textContent.type === 'text') {
    console.log('Comparison:', textContent.text);
  }
}
// Example 3: Vision with tools
// Tool schema handed to the Messages API: lets Claude request a product
// search by supplying a list of keyword strings (required: `keywords`).
const searchTool: Anthropic.Tool = {
  name: 'search_product',
  description: 'Search for similar products',
  input_schema: {
    type: 'object',
    properties: {
      keywords: {
        type: 'array',
        items: { type: 'string' },
        description: 'Keywords to search for',
      },
    },
    required: ['keywords'],
  },
};
// Combines an image input with a tool definition, so Claude can both see
// the picture and request a product search based on what it identifies.
async function visionWithTools(imagePath: string) {
  const encodedImage = fs.readFileSync(imagePath).toString('base64');

  const message = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    tools: [searchTool],
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image',
            source: {
              type: 'base64',
              media_type: 'image/jpeg',
              data: encodedImage,
            },
          },
          {
            type: 'text',
            text: 'Identify the objects in this image and search for similar products',
          },
        ],
      },
    ],
  });

  console.log('Stop reason:', message.stop_reason);

  // Nothing more to do unless Claude asked to call the tool.
  if (message.stop_reason !== 'tool_use') {
    return;
  }
  for (const block of message.content) {
    if (block.type !== 'tool_use') continue;
    console.log('Tool requested:', block.name);
    console.log('Search keywords:', block.input);
  }
}
// Example 4: Multi-turn conversation with images
// The image is sent once in the first user turn and remains in context
// for follow-up questions; later turns append text only.
async function multiTurnVision(imagePath: string) {
  const encodedImage = fs.readFileSync(imagePath).toString('base64');

  const conversation: Anthropic.MessageParam[] = [
    {
      role: 'user',
      content: [
        {
          type: 'image',
          source: {
            type: 'base64',
            media_type: 'image/jpeg',
            data: encodedImage,
          },
        },
        {
          type: 'text',
          text: 'What objects are visible in this image?',
        },
      ],
    },
  ];

  // Send the conversation as-is and return the first text block, if any.
  const send = async (): Promise<string | undefined> => {
    const response = await anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages: conversation,
    });
    const text = response.content.find(b => b.type === 'text');
    return text && text.type === 'text' ? text.text : undefined;
  };

  // First turn
  const firstAnswer = await send();
  if (firstAnswer !== undefined) {
    console.log('Claude:', firstAnswer);
    conversation.push({ role: 'assistant', content: firstAnswer });
  }

  // Second turn - follow-up question (image still in context)
  conversation.push({
    role: 'user',
    content: 'What color is the largest object?',
  });

  const secondAnswer = await send();
  if (secondAnswer !== undefined) {
    console.log('Claude:', secondAnswer);
  }
}
// Example 5: Vision with prompt caching
// Both requests share the identical cached image block; only the text
// prompt differs, so the second call can read the image from the cache.
async function visionWithCaching(imagePath: string) {
  const encodedImage = fs.readFileSync(imagePath).toString('base64');

  // One place builds the request: same image + cache_control every time,
  // with just the question swapped out.
  const askAboutCachedImage = (prompt: string) =>
    anthropic.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      max_tokens: 1024,
      messages: [
        {
          role: 'user',
          content: [
            {
              type: 'image',
              source: {
                type: 'base64',
                media_type: 'image/jpeg',
                data: encodedImage,
              },
              cache_control: { type: 'ephemeral' },
            },
            {
              type: 'text',
              text: prompt,
            },
          ],
        },
      ],
    });

  // First request - cache the image
  const response1 = await askAboutCachedImage('Describe the main objects in this image');
  console.log('First request - cache creation:', response1.usage.cache_creation_input_tokens);
  const text1 = response1.content.find(b => b.type === 'text');
  if (text1 && text1.type === 'text') {
    console.log('Response 1:', text1.text);
  }

  // Second request - use cached image (within 5 minutes)
  const response2 = await askAboutCachedImage('What colors are prominent in this image?');
  console.log('Second request - cache read:', response2.usage.cache_read_input_tokens);
  console.log('Token savings: ~90%');
}
// Example 6: Image URL (if accessible)
// Uses the URL image source instead of base64 upload.
async function analyzeImageFromURL(imageUrl: string) {
  // Note: Image must be publicly accessible
  const message = await anthropic.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image',
            source: {
              type: 'url',
              url: imageUrl,
            },
          },
          {
            type: 'text',
            text: 'Analyze this image',
          },
        ],
      },
    ],
  });

  const answer = message.content.find(block => block.type === 'text');
  if (answer?.type === 'text') {
    console.log('Analysis:', answer.text);
  }
}
// Example 7: Image validation helper
/**
 * Pre-flight check for a local image file: existence, size (≤ 5MB), and a
 * supported extension. Reports the first failed check in `error`.
 */
function validateImage(filePath: string): { valid: boolean; error?: string } {
  const SUPPORTED = ['.jpg', '.jpeg', '.png', '.webp', '.gif'];
  const MAX_BYTES = 5 * 1024 * 1024;

  if (!fs.existsSync(filePath)) {
    return { valid: false, error: 'File does not exist' };
  }
  if (fs.statSync(filePath).size > MAX_BYTES) {
    return { valid: false, error: 'Image exceeds 5MB limit' };
  }
  if (!SUPPORTED.includes(path.extname(filePath).toLowerCase())) {
    return { valid: false, error: `Unsupported format. Use: ${SUPPORTED.join(', ')}` };
  }
  return { valid: true };
}
// Example 8: Batch image analysis
/**
 * Validates and analyzes each image in turn, pausing between API calls as
 * a crude rate limit. Invalid files are skipped (with an error log) and
 * individual analysis failures do not abort the batch.
 *
 * @param imagePaths - Local image files to analyze.
 * @returns One { imagePath, result } entry per successfully analyzed image.
 */
async function analyzeMultipleImages(imagePaths: string[]) {
  // Explicit element type: `const results = []` inferred any[] before,
  // which is rejected under noImplicitAny. Derive the result type from
  // analyzeSingleImage so the two stay in sync.
  const results: Array<{
    imagePath: string;
    result: Awaited<ReturnType<typeof analyzeSingleImage>>;
  }> = [];

  for (const imagePath of imagePaths) {
    console.log(`\nAnalyzing: ${imagePath}`);

    // Skip files that fail pre-flight checks (missing, too big, bad format).
    // Note: `continue` also skips the rate-limit pause below.
    const validation = validateImage(imagePath);
    if (!validation.valid) {
      console.error(`Error: ${validation.error}`);
      continue;
    }

    try {
      const result = await analyzeSingleImage(imagePath);
      results.push({ imagePath, result });
    } catch (error) {
      console.error(`Failed to analyze ${imagePath}:`, error);
    }

    // Rate limiting pause
    await new Promise(resolve => setTimeout(resolve, 1000));
  }

  return results;
}
// Run examples (with placeholder paths) when executed directly
// (CommonJS entry-point check; does nothing when imported as a module).
if (require.main === module) {
  const exampleImagePath = './example-image.jpg';
  // Check if example image exists
  if (fs.existsSync(exampleImagePath)) {
    console.log('=== Single Image Analysis ===\n');
    // Run the two image demos sequentially; one .catch reports failures.
    analyzeSingleImage(exampleImagePath)
      .then(() => {
        console.log('\n=== Vision with Caching ===\n');
        return visionWithCaching(exampleImagePath);
      })
      .catch(console.error);
  } else {
    // No sample image on disk: demonstrate the validation helper instead
    console.log('Example image not found. Create example-image.jpg to test.');
    console.log('\nValidation example:');
    const validation = validateImage('./non-existent.jpg');
    console.log(validation);
  }
}
// Public API of this template module
export {
  analyzeSingleImage,
  compareImages,
  visionWithTools,
  multiTurnVision,
  visionWithCaching,
  analyzeImageFromURL,
  validateImage,
  analyzeMultipleImages,
};

80
templates/wrangler.jsonc Normal file
View File

@@ -0,0 +1,80 @@
// wrangler.jsonc - Cloudflare Workers configuration for Claude API
{
"name": "claude-api-worker",
"main": "src/index.ts",
"compatibility_date": "2025-01-01",
// Bindings
"vars": {
"ENVIRONMENT": "production"
},
// Environment variables (secrets)
// Set with: wrangler secret put ANTHROPIC_API_KEY
// "ANTHROPIC_API_KEY": "sk-ant-..." // Never commit this!
// KV namespace (optional - for caching)
"kv_namespaces": [
{
"binding": "CACHE",
"id": "your-kv-namespace-id",
"preview_id": "your-kv-preview-id"
}
],
// D1 database (optional - for conversation storage)
"d1_databases": [
{
"binding": "DB",
"database_name": "claude-conversations",
"database_id": "your-d1-database-id"
}
],
// Durable Objects (optional - for rate limiting)
"durable_objects": {
"bindings": [
{
"name": "RATE_LIMITER",
"class_name": "RateLimiter",
"script_name": "claude-api-worker"
}
]
},
"migrations": [
{
"tag": "v1",
"new_classes": ["RateLimiter"]
}
],
// Routes (if deploying to custom domain)
"routes": [
{
"pattern": "api.example.com/chat/*",
"zone_name": "example.com"
}
],
// Limits and features
"limits": {
"cpu_ms": 50 // 50ms CPU time
},
"compatibility_flags": [
"nodejs_compat" // If using Node.js APIs
],
// Observability
"observability": {
"enabled": true
},
// Tail consumers (for logging)
"tail_consumers": [
{
"service": "logging-worker"
}
]
}