Initial commit

This commit is contained in:
Zhongwei Li
2025-11-30 08:23:50 +08:00
commit 5135e7aaf4
24 changed files with 4973 additions and 0 deletions

View File

@@ -0,0 +1,86 @@
// Agent class with multiple tools
// AI SDK Core - Agent class for multi-step execution
// NOTE(review): in ai@5.x the agent abstraction is exported as
// `Experimental_Agent`; there is no stable `Agent` export.
import { Experimental_Agent as Agent, tool } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';
import { z } from 'zod';

// Weather assistant agent: the model may chain getWeather -> convertTemp ->
// getAirQuality across several steps before producing the final answer.
const weatherAgent = new Agent({
  model: anthropic('claude-sonnet-4-5'),
  system: 'You are a weather assistant. Always provide temperature in the user\'s preferred unit.',
  tools: {
    // Returns mocked current conditions for a location (always 72°F/sunny).
    getWeather: tool({
      description: 'Get current weather for a location',
      inputSchema: z.object({
        location: z.string(),
      }),
      execute: async ({ location }) => {
        console.log(`[Tool] Getting weather for ${location}...`);
        // Simulate API call
        return {
          location,
          temperature: 72,
          condition: 'sunny',
          humidity: 65,
          unit: 'fahrenheit',
        };
      },
    }),
    // Fahrenheit -> Celsius, rounded to one decimal place.
    convertTemp: tool({
      description: 'Convert temperature between Fahrenheit and Celsius',
      inputSchema: z.object({
        fahrenheit: z.number(),
      }),
      execute: async ({ fahrenheit }) => {
        console.log(`[Tool] Converting ${fahrenheit}°F to Celsius...`);
        const celsius = Math.round(((fahrenheit - 32) * 5 / 9) * 10) / 10;
        return { celsius };
      },
    }),
    // Returns a mocked air-quality reading for a location.
    getAirQuality: tool({
      description: 'Get air quality index for a location',
      inputSchema: z.object({
        location: z.string(),
      }),
      execute: async ({ location }) => {
        console.log(`[Tool] Getting air quality for ${location}...`);
        // Simulate API call
        return {
          location,
          aqi: 42,
          level: 'good',
          pollutants: {
            pm25: 8,
            pm10: 15,
            o3: 35,
          },
        };
      },
    }),
  },
});

async function main() {
  console.log('Starting agent conversation...\n');
  // The agent exposes generate()/stream(); there is no run() method.
  const result = await weatherAgent.generate({
    messages: [
      {
        role: 'user',
        content: 'What is the weather in San Francisco? Tell me in Celsius and include air quality.',
      },
    ],
  });
  console.log('\n--- Agent Response ---');
  console.log(result.text);
  console.log('\n--- Execution Summary ---');
  // result.steps is an array of StepResult objects, not a number.
  console.log('Total steps:', result.steps.length);
  console.log('Tools used:', result.toolCalls?.map(tc => tc.toolName).join(', ') || 'none');
}

main().catch(console.error);

View File

@@ -0,0 +1,77 @@
// Anthropic provider configuration
// AI SDK Core - Anthropic (Claude) setup and usage
import { generateText } from 'ai';
import { anthropic, createAnthropic } from '@ai-sdk/anthropic';

async function main() {
  console.log('=== Anthropic (Claude) Provider Setup ===\n');
  // Method 1: Use environment variable (recommended)
  // ANTHROPIC_API_KEY=sk-ant-...
  const model1 = anthropic('claude-sonnet-4-5');
  // Method 2: Explicit API key.
  // The model factory itself does not take an apiKey option; build a
  // custom provider instance with createAnthropic() instead.
  const customAnthropic = createAnthropic({
    apiKey: process.env.ANTHROPIC_API_KEY,
  });
  const model2 = customAnthropic('claude-sonnet-4-5');
  // Available models (Claude 4.x family - current)
  const models = {
    sonnet45: anthropic('claude-sonnet-4-5'), // Latest, recommended
    opus4: anthropic('claude-opus-4-0'),      // Highest intelligence
    haiku45: anthropic('claude-haiku-4-5'),   // Fastest
  };
  // Legacy models (Claude 3.x - deprecated, use Claude 4.x instead)
  // const legacyModels = {
  //   sonnet35: anthropic('claude-3-5-sonnet-20241022'),
  //   opus3: anthropic('claude-3-opus-20240229'),
  //   haiku3: anthropic('claude-3-haiku-20240307'),
  // };

  // Example: Generate text with Claude
  console.log('Generating text with Claude Sonnet 4.5...\n');
  const result = await generateText({
    model: models.sonnet45,
    prompt: 'Explain what makes Claude different from other AI assistants in 2 sentences.',
    maxOutputTokens: 150,
  });
  console.log('Response:', result.text);
  console.log('\nUsage:');
  // AI SDK v5 renamed usage fields: promptTokens/completionTokens are
  // now inputTokens/outputTokens.
  console.log('- Prompt tokens:', result.usage.inputTokens);
  console.log('- Completion tokens:', result.usage.outputTokens);
  console.log('- Total tokens:', result.usage.totalTokens);

  // Example: Long context handling
  console.log('\n=== Long Context Example ===\n');
  const longContextResult = await generateText({
    model: models.sonnet45,
    messages: [
      {
        role: 'user',
        content: 'I will give you a long document to analyze. Here it is: ' + 'Lorem ipsum '.repeat(1000),
      },
      {
        role: 'user',
        content: 'Now summarize the key points.',
      },
    ],
    maxOutputTokens: 200,
  });
  console.log('Long context summary:', longContextResult.text);

  // Model selection guide
  console.log('\n=== Model Selection Guide ===');
  console.log('- Claude Sonnet 4.5: Latest model, best balance (recommended)');
  console.log('- Claude Opus 4.0: Highest intelligence for complex reasoning');
  console.log('- Claude Haiku 4.5: Fastest and most cost-effective');
  console.log('\nAll Claude 4.x models support extended context windows');
  console.log('Note: Claude 3.x models deprecated in 2025, use Claude 4.x instead');
}

main().catch(console.error);

View File

@@ -0,0 +1,119 @@
// Cloudflare Workers with workers-ai-provider
// AI SDK Core - Cloudflare Workers AI integration
import { Hono } from 'hono';
import { generateText, streamText } from 'ai';
import { createWorkersAI } from 'workers-ai-provider';

// Environment interface for Workers AI binding
interface Env {
  AI: Ai;
}

const app = new Hono<{ Bindings: Env }>();

// Example 1: Basic text generation
app.post('/chat', async (c) => {
  // IMPORTANT: Create provider inside handler to avoid startup overhead
  const workersai = createWorkersAI({ binding: c.env.AI });
  const { message } = await c.req.json();
  const result = await generateText({
    model: workersai('@cf/meta/llama-3.1-8b-instruct'),
    prompt: message,
    maxOutputTokens: 500,
  });
  return c.json({ response: result.text });
});

// Example 2: Streaming response
app.post('/chat/stream', async (c) => {
  const workersai = createWorkersAI({ binding: c.env.AI });
  const { message } = await c.req.json();
  const stream = streamText({
    model: workersai('@cf/meta/llama-3.1-8b-instruct'),
    prompt: message,
  });
  // Return stream to client.
  // AI SDK v5 replaced toDataStreamResponse() with
  // toUIMessageStreamResponse() (the protocol used by useChat).
  return stream.toUIMessageStreamResponse();
});

// Example 3: Structured output
app.post('/extract', async (c) => {
  const workersai = createWorkersAI({ binding: c.env.AI });
  // Dynamic imports keep heavy modules off the Worker startup path.
  const { generateObject } = await import('ai');
  const { z } = await import('zod');
  const { text } = await c.req.json();
  const result = await generateObject({
    model: workersai('@cf/meta/llama-3.1-8b-instruct'),
    schema: z.object({
      summary: z.string(),
      keyPoints: z.array(z.string()),
    }),
    prompt: `Extract key information from: ${text}`,
  });
  return c.json(result.object);
});

// Example 4: Health check
app.get('/health', (c) => {
  return c.json({ status: 'ok', ai: 'ready' });
});

export default app;

/*
 * wrangler.jsonc configuration:
 *
 * {
 *   "name": "ai-sdk-worker",
 *   "compatibility_date": "2025-10-21",
 *   "main": "src/index.ts",
 *   "ai": {
 *     "binding": "AI"
 *   }
 * }
 */

/*
 * IMPORTANT NOTES:
 *
 * 1. Startup Optimization:
 *    - Move `createWorkersAI` inside handlers (not top-level)
 *    - Avoid importing complex Zod schemas at top level
 *    - Monitor startup time (must be <400ms)
 *
 * 2. Available Models:
 *    - @cf/meta/llama-3.1-8b-instruct (recommended)
 *    - @cf/meta/llama-3.1-70b-instruct
 *    - @cf/mistral/mistral-7b-instruct-v0.1
 *    - See: https://developers.cloudflare.com/workers-ai/models/
 *
 * 3. When to use workers-ai-provider:
 *    - Multi-provider scenarios (OpenAI + Workers AI)
 *    - Using AI SDK UI hooks
 *    - Need consistent API across providers
 *
 * 4. When to use native binding:
 *    - Cloudflare-only deployment
 *    - Maximum performance
 *    - See: cloudflare-workers-ai skill
 *
 * 5. Testing:
 *    npx wrangler dev
 *    curl -X POST http://localhost:8787/chat \
 *      -H "Content-Type: application/json" \
 *      -d '{"message": "Hello!"}'
 *
 * 6. Deployment:
 *    npx wrangler deploy
 */

View File

@@ -0,0 +1,37 @@
// Structured output with Zod schema validation
// AI SDK Core - generateObject() with Zod
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Shape of the profile we ask the model to produce.
// .describe() hints are forwarded to the model as field documentation.
const PersonSchema = z.object({
  name: z.string().describe('Person full name'),
  age: z.number().describe('Person age in years'),
  role: z.enum(['engineer', 'designer', 'manager', 'other']).describe('Job role'),
  skills: z.array(z.string()).describe('List of technical skills'),
  experience: z.object({
    years: z.number(),
    companies: z.array(z.string()),
  }),
});

async function main() {
  // generateObject validates the model output against the schema and
  // returns a fully-typed object — no manual parsing or casting.
  const { object } = await generateObject({
    model: openai('gpt-4'),
    schema: PersonSchema,
    prompt: 'Generate a profile for a senior software engineer with 8 years of experience.',
  });

  console.log('Generated object:');
  console.log(JSON.stringify(object, null, 2));

  // TypeScript knows the exact type
  console.log('\nAccessing typed properties:');
  console.log('Name:', object.name);
  console.log('Skills:', object.skills.join(', '));
  console.log('Years of experience:', object.experience.years);
}

main().catch(console.error);

View File

@@ -0,0 +1,20 @@
// Simple text generation with OpenAI
// AI SDK Core - generateText() basic example
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  // One-shot prompt: temperature 0.7 allows mild variation,
  // maxOutputTokens caps the completion length.
  const { text, usage, finishReason } = await generateText({
    model: openai('gpt-4-turbo'),
    prompt: 'What is TypeScript? Explain in 2 sentences.',
    maxOutputTokens: 100,
    temperature: 0.7,
  });

  console.log('Generated text:', text);
  console.log('Tokens used:', usage.totalTokens);
  console.log('Finish reason:', finishReason);
}

main().catch(console.error);

87
templates/google-setup.ts Normal file
View File

@@ -0,0 +1,87 @@
// Google provider configuration
// AI SDK Core - Google (Gemini) setup and usage
import { generateText } from 'ai';
import { google, createGoogleGenerativeAI } from '@ai-sdk/google';

async function main() {
  console.log('=== Google (Gemini) Provider Setup ===\n');
  // Method 1: Use environment variable (recommended)
  // GOOGLE_GENERATIVE_AI_API_KEY=...
  const model1 = google('gemini-2.5-pro');
  // Method 2: Explicit API key.
  // The model factory does not accept an apiKey option; configure
  // credentials on a custom provider via createGoogleGenerativeAI().
  const customGoogle = createGoogleGenerativeAI({
    apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
  });
  const model2 = customGoogle('gemini-2.5-pro');
  // Available models
  const models = {
    pro: google('gemini-2.5-pro'),              // Best for reasoning
    flash: google('gemini-2.5-flash'),          // Fast and efficient
    flashLite: google('gemini-2.5-flash-lite'), // Ultra-fast (if available)
  };

  // Example: Generate text with Gemini
  console.log('Generating text with Gemini 2.5 Pro...\n');
  const result = await generateText({
    model: models.pro,
    prompt: 'Explain what makes Gemini good at multimodal tasks in 2 sentences.',
    maxOutputTokens: 150,
  });
  console.log('Response:', result.text);
  console.log('\nUsage:');
  // AI SDK v5 usage fields are inputTokens/outputTokens/totalTokens.
  console.log('- Prompt tokens:', result.usage.inputTokens);
  console.log('- Completion tokens:', result.usage.outputTokens);
  console.log('- Total tokens:', result.usage.totalTokens);

  // Example: Structured output with Gemini
  console.log('\n=== Structured Output Example ===\n');
  const { generateObject } = await import('ai');
  const { z } = await import('zod');
  const structuredResult = await generateObject({
    model: models.pro,
    schema: z.object({
      title: z.string(),
      summary: z.string(),
      keyPoints: z.array(z.string()),
    }),
    prompt: 'Summarize the benefits of using Gemini AI.',
  });
  console.log('Structured output:');
  console.log(JSON.stringify(structuredResult.object, null, 2));

  // Error handling example: catch as unknown and narrow before use.
  console.log('\n=== Error Handling ===\n');
  try {
    const result2 = await generateText({
      model: google('gemini-2.5-pro'),
      prompt: 'Hello',
    });
    console.log('Success:', result2.text);
  } catch (error: unknown) {
    const message = error instanceof Error ? error.message : String(error);
    if (message.includes('SAFETY')) {
      console.error('Error: Content filtered by safety settings');
    } else if (message.includes('QUOTA_EXCEEDED')) {
      console.error('Error: API quota exceeded');
    } else {
      console.error('Error:', message);
    }
  }

  // Model selection guide
  console.log('\n=== Model Selection Guide ===');
  console.log('- Gemini 2.5 Pro: Best for complex reasoning and analysis');
  console.log('- Gemini 2.5 Flash: Fast and cost-effective for most tasks');
  console.log('- Gemini 2.5 Flash Lite: Ultra-fast for simple tasks');
  console.log('\nGemini has generous free tier limits and excels at multimodal tasks');
}

main().catch(console.error);

View File

@@ -0,0 +1,112 @@
// Multi-step execution with stopWhen conditions
// AI SDK Core - Control multi-step workflows
// NOTE: 'stopWhen' is a generateText option, not an export of 'ai';
// only the condition helpers (stepCountIs, hasToolCall) are imported.
import { generateText, tool, stepCountIs, hasToolCall } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Example 1: hard step limit via the stepCountIs() helper.
async function example1_stepCount() {
  console.log('=== Example 1: Stop after N steps ===\n');
  const result = await generateText({
    model: openai('gpt-4'),
    tools: {
      research: tool({
        description: 'Research a topic',
        inputSchema: z.object({ topic: z.string() }),
        execute: async ({ topic }) => {
          console.log(`[Tool] Researching ${topic}...`);
          return { info: `Research data about ${topic}` };
        },
      }),
      analyze: tool({
        description: 'Analyze research data',
        inputSchema: z.object({ data: z.string() }),
        execute: async ({ data }) => {
          console.log(`[Tool] Analyzing data...`);
          return { analysis: `Analysis of ${data}` };
        },
      }),
    },
    prompt: 'Research TypeScript and analyze the findings.',
    stopWhen: stepCountIs(3), // Stop after 3 steps
  });
  console.log('\nResult:', result.text);
  // result.steps is an array of StepResult objects.
  console.log('Steps taken:', result.steps.length);
}

// Example 2: stop as soon as a designated "terminal" tool is invoked.
async function example2_specificTool() {
  console.log('\n=== Example 2: Stop when specific tool called ===\n');
  const result = await generateText({
    model: openai('gpt-4'),
    tools: {
      search: tool({
        description: 'Search for information',
        inputSchema: z.object({ query: z.string() }),
        execute: async ({ query }) => {
          console.log(`[Tool] Searching for: ${query}`);
          return { results: `Search results for ${query}` };
        },
      }),
      summarize: tool({
        description: 'Create final summary',
        inputSchema: z.object({ content: z.string() }),
        execute: async ({ content }) => {
          console.log(`[Tool] Creating summary...`);
          return { summary: `Summary of ${content}` };
        },
      }),
    },
    prompt: 'Search for information about AI and create a summary.',
    stopWhen: hasToolCall('summarize'), // Stop when summarize is called
  });
  console.log('\nResult:', result.text);
  console.log('Final tool called:', result.toolCalls?.[result.toolCalls.length - 1]?.toolName);
}

// Example 3: a hand-written stop condition combining both criteria.
async function example3_customCondition() {
  console.log('\n=== Example 3: Custom stop condition ===\n');
  const result = await generateText({
    model: openai('gpt-4'),
    tools: {
      calculate: tool({
        description: 'Perform calculation',
        inputSchema: z.object({ expression: z.string() }),
        execute: async ({ expression }) => {
          console.log(`[Tool] Calculating: ${expression}`);
          return { result: 42 };
        },
      }),
      finish: tool({
        description: 'Mark task as complete',
        inputSchema: z.object({ status: z.string() }),
        execute: async ({ status }) => {
          console.log(`[Tool] Finishing with status: ${status}`);
          return { done: true };
        },
      }),
    },
    prompt: 'Solve a math problem and finish.',
    // A custom condition receives `{ steps }` (the StepResult array so
    // far) and returns true to stop. Stop if:
    // - More than 5 steps have run, OR
    // - the 'finish' tool was called in any step.
    stopWhen: ({ steps }) =>
      steps.length > 5 ||
      steps.some((step) => step.toolCalls.some((call) => call.toolName === 'finish')),
  });
  console.log('\nResult:', result.text);
  console.log('Stopped at step:', result.steps.length);
}

async function main() {
  await example1_stepCount();
  await example2_specificTool();
  await example3_customCondition();
}

main().catch(console.error);

View File

@@ -0,0 +1,150 @@
// Next.js Server Action with AI SDK
// AI SDK Core - Server Actions for Next.js App Router
'use server';
import { generateObject, generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';
// Example 1: Simple text generation
// Server Action: runs only on the server; callable directly from
// client components. Returns the generated story text.
export async function generateStory(theme: string) {
  const { text } = await generateText({
    model: openai('gpt-4-turbo'),
    prompt: `Write a short story about: ${theme}`,
    maxOutputTokens: 500,
  });
  return text;
}
// Example 2: Structured output (recipe generation)
// Builds a typed recipe object from a list of ingredient names.
export async function generateRecipe(ingredients: string[]) {
  // Schema is declared inside the action so it stays private to it.
  const RecipeSchema = z.object({
    name: z.string(),
    description: z.string(),
    ingredients: z.array(
      z.object({ name: z.string(), amount: z.string() })
    ),
    instructions: z.array(z.string()),
    cookingTime: z.number().describe('Cooking time in minutes'),
    servings: z.number(),
  });

  const { object } = await generateObject({
    model: openai('gpt-4'),
    schema: RecipeSchema,
    prompt: `Create a recipe using these ingredients: ${ingredients.join(', ')}`,
  });
  return object;
}
// Example 3: Data extraction
// Pulls contact details out of free-form text; every field is optional
// because the text may not mention all of them.
export async function extractContactInfo(text: string) {
  const ContactSchema = z.object({
    name: z.string().optional(),
    email: z.string().email().optional(),
    phone: z.string().optional(),
    company: z.string().optional(),
  });

  const { object } = await generateObject({
    model: openai('gpt-4'),
    schema: ContactSchema,
    prompt: `Extract contact information from this text: ${text}`,
  });
  return object;
}
// Example 4: Error handling in Server Action
// Returns a serializable result object instead of throwing, so the
// client never sees a raw provider error.
export async function generateWithErrorHandling(prompt: string) {
  try {
    const result = await generateText({
      model: openai('gpt-4-turbo'),
      prompt,
      maxOutputTokens: 200,
    });
    return { success: true, data: result.text };
  } catch (error: unknown) {
    // Catch as unknown (strict-mode idiom); log server-side with context
    // and hand the client a generic message.
    console.error('AI generation error:', error);
    return {
      success: false,
      error: 'Failed to generate response. Please try again.',
    };
  }
}
/*
* Usage in Client Component:
*
* 'use client';
*
* import { useState } from 'react';
* import { generateStory, generateRecipe } from './actions';
*
* export default function AIForm() {
* const [result, setResult] = useState('');
* const [loading, setLoading] = useState(false);
*
* async function handleGenerateStory(formData: FormData) {
* setLoading(true);
* const theme = formData.get('theme') as string;
* const story = await generateStory(theme);
* setResult(story);
* setLoading(false);
* }
*
* async function handleGenerateRecipe(formData: FormData) {
* setLoading(true);
* const ingredients = (formData.get('ingredients') as string).split(',');
* const recipe = await generateRecipe(ingredients);
* setResult(JSON.stringify(recipe, null, 2));
* setLoading(false);
* }
*
* return (
* <div>
* <form action={handleGenerateStory}>
* <input name="theme" placeholder="Story theme" required />
* <button disabled={loading}>Generate Story</button>
* </form>
*
* <form action={handleGenerateRecipe}>
* <input name="ingredients" placeholder="flour, eggs, sugar" required />
* <button disabled={loading}>Generate Recipe</button>
* </form>
*
* {result && <pre>{result}</pre>}
* </div>
* );
* }
*/
/*
* File Structure:
*
* app/
* ├── actions.ts # This file (Server Actions)
* ├── page.tsx # Client component using actions
* └── api/
* └── chat/
* └── route.ts # Alternative: API Route for streaming
*
* Note: Server Actions are recommended for mutations and non-streaming AI calls.
* For streaming, use API Routes with streamText().toDataStreamResponse()
*/
/*
* Environment Variables (.env.local):
*
* OPENAI_API_KEY=sk-...
* ANTHROPIC_API_KEY=sk-ant-...
* GOOGLE_GENERATIVE_AI_API_KEY=...
*/

81
templates/openai-setup.ts Normal file
View File

@@ -0,0 +1,81 @@
// OpenAI provider configuration
// AI SDK Core - OpenAI setup and usage
import { generateText } from 'ai';
import { openai, createOpenAI } from '@ai-sdk/openai';

async function main() {
  console.log('=== OpenAI Provider Setup ===\n');
  // Method 1: Use environment variable (recommended)
  // OPENAI_API_KEY=sk-...
  const model1 = openai('gpt-4-turbo');
  // Method 2: Explicit API key.
  // The model factory does not take an apiKey option; configure
  // credentials on a custom provider instance via createOpenAI().
  const customOpenAI = createOpenAI({
    apiKey: process.env.OPENAI_API_KEY,
  });
  const model2 = customOpenAI('gpt-4');
  // Available models (latest)
  const models = {
    gpt51: openai('gpt-5.1'),           // Latest flagship model (Nov 2025)
    gpt5Pro: openai('gpt-5-pro'),       // Advanced reasoning
    gpt41: openai('gpt-4.1'),           // Latest GPT-4 series
    o3: openai('o3'),                   // Reasoning model
    gpt4Turbo: openai('gpt-4-turbo'),   // Previous generation (still excellent)
    gpt35Turbo: openai('gpt-3.5-turbo'),// Fast, cost-effective
  };
  // Older models (still functional)
  // const olderModels = {
  //   gpt5: openai('gpt-5'), // Superseded by gpt-5.1
  //   gpt4: openai('gpt-4'), // Use gpt-4-turbo instead
  // };

  // Example: Generate text with GPT-4
  console.log('Generating text with GPT-4 Turbo...\n');
  const result = await generateText({
    model: models.gpt4Turbo,
    prompt: 'Explain the difference between GPT-3.5 and GPT-4 in one sentence.',
    maxOutputTokens: 100,
  });
  console.log('Response:', result.text);
  console.log('\nUsage:');
  // AI SDK v5 usage fields are inputTokens/outputTokens/totalTokens.
  console.log('- Prompt tokens:', result.usage.inputTokens);
  console.log('- Completion tokens:', result.usage.outputTokens);
  console.log('- Total tokens:', result.usage.totalTokens);

  // Example: Error handling — catch as unknown and narrow to the fields
  // AI SDK API errors expose (statusCode, message).
  console.log('\n=== Error Handling ===\n');
  try {
    const result2 = await generateText({
      model: openai('gpt-4-turbo'),
      prompt: 'Hello',
    });
    console.log('Success:', result2.text);
  } catch (error: unknown) {
    const { statusCode, message } = (error ?? {}) as { statusCode?: number; message?: string };
    if (statusCode === 401) {
      console.error('Error: Invalid API key');
    } else if (statusCode === 429) {
      console.error('Error: Rate limit exceeded');
    } else if (statusCode !== undefined && statusCode >= 500) {
      console.error('Error: OpenAI server issue');
    } else {
      console.error('Error:', message);
    }
  }

  // Model selection guide
  console.log('\n=== Model Selection Guide ===');
  console.log('- gpt-5.1: Latest flagship model (November 2025)');
  console.log('- gpt-5-pro: Advanced reasoning and complex tasks');
  console.log('- o3: Specialized reasoning model');
  console.log('- gpt-4.1: Latest GPT-4 series, excellent quality');
  console.log('- gpt-4-turbo: Previous generation, still very capable');
  console.log('- gpt-3.5-turbo: Fast and cost-effective for simple tasks');
}

main().catch(console.error);

42
templates/package.json Normal file
View File

@@ -0,0 +1,42 @@
{
"name": "ai-sdk-core-example",
"version": "1.0.0",
"type": "module",
"description": "AI SDK Core examples - Backend AI with generateText, streamText, generateObject, and tools",
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"type-check": "tsc --noEmit"
},
"dependencies": {
"ai": "^5.0.95",
"@ai-sdk/openai": "^2.0.68",
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/google": "^2.0.38",
"workers-ai-provider": "^2.0.0",
"zod": "^3.23.8"
},
"devDependencies": {
"@types/node": "^20.11.0",
"tsx": "^4.7.0",
"typescript": "^5.3.3"
},
"keywords": [
"ai",
"ai-sdk",
"vercel",
"openai",
"anthropic",
"google",
"gemini",
"claude",
"gpt-4",
"llm",
"text-generation",
"structured-output",
"zod"
],
"author": "",
"license": "MIT"
}

View File

@@ -0,0 +1,52 @@
// Streaming structured output with partial updates
// AI SDK Core - streamObject() with Zod
import { streamObject } from 'ai';
import { google } from '@ai-sdk/google';
import { z } from 'zod';

// Define schema for RPG characters
const CharacterSchema = z.object({
  characters: z.array(
    z.object({
      name: z.string(),
      class: z.enum(['warrior', 'mage', 'rogue', 'cleric']),
      level: z.number(),
      stats: z.object({
        hp: z.number(),
        mana: z.number(),
        strength: z.number(),
        intelligence: z.number(),
      }),
      inventory: z.array(z.string()),
    })
  ),
});

async function main() {
  const stream = streamObject({
    model: google('gemini-2.5-pro'),
    schema: CharacterSchema,
    prompt: 'Generate 3 diverse RPG characters with complete stats and starting inventory.',
  });

  console.log('Streaming structured object (partial updates):');
  console.log('---\n');
  // Stream partial object updates
  for await (const partialObject of stream.partialObjectStream) {
    console.clear(); // Clear console for visual effect
    console.log('Current partial object:');
    console.log(JSON.stringify(partialObject, null, 2));
  }

  // Get final complete object.
  // The streamObject result has no `.result` property; the validated
  // final object is exposed as the `object` promise.
  const finalObject = await stream.object;
  console.log('\n---');
  console.log('Final complete object:');
  console.log(JSON.stringify(finalObject, null, 2));
  console.log('\nCharacter count:', finalObject.characters.length);
}

main().catch(console.error);

View File

@@ -0,0 +1,39 @@
// Streaming chat with messages
// AI SDK Core - streamText() with chat messages
import { streamText } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';

async function main() {
  const stream = streamText({
    model: anthropic('claude-sonnet-4-5'),
    messages: [
      {
        role: 'system',
        content: 'You are a helpful assistant that writes concise responses.',
      },
      {
        role: 'user',
        content: 'Tell me a short story about AI and humanity working together.',
      },
    ],
    maxOutputTokens: 500,
  });

  console.log('Streaming response:');
  console.log('---');
  // Stream text chunks to console
  for await (const chunk of stream.textStream) {
    process.stdout.write(chunk);
  }
  console.log('\n---');

  // Final metadata: the streamText result has no `.result` property;
  // usage and finishReason are promise-valued fields that resolve once
  // the stream completes.
  const usage = await stream.usage;
  const finishReason = await stream.finishReason;
  console.log('\nTokens used:', usage.totalTokens);
  console.log('Finish reason:', finishReason);
}

main().catch(console.error);

75
templates/tools-basic.ts Normal file
View File

@@ -0,0 +1,75 @@
// Basic tool calling example
// AI SDK Core - Tool calling with generateText()
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

async function main() {
  const result = await generateText({
    model: openai('gpt-4'),
    tools: {
      weather: tool({
        description: 'Get the current weather for a location',
        inputSchema: z.object({
          location: z.string().describe('City name, e.g. "San Francisco"'),
          unit: z.enum(['celsius', 'fahrenheit']).optional().describe('Temperature unit'),
        }),
        execute: async ({ location, unit = 'fahrenheit' }) => {
          // Simulate API call to weather service
          console.log(`[Tool] Fetching weather for ${location}...`);
          // In production, call real weather API here
          const mockWeather = {
            location,
            temperature: unit === 'celsius' ? 22 : 72,
            condition: 'sunny',
            humidity: 65,
            unit,
          };
          return mockWeather;
        },
      }),
      convertTemperature: tool({
        description: 'Convert temperature between Celsius and Fahrenheit',
        inputSchema: z.object({
          value: z.number(),
          from: z.enum(['celsius', 'fahrenheit']),
          to: z.enum(['celsius', 'fahrenheit']),
        }),
        execute: async ({ value, from, to }) => {
          console.log(`[Tool] Converting ${value}°${from} to ${to}...`);
          if (from === to) return { value, unit: to };
          let result: number;
          if (from === 'celsius' && to === 'fahrenheit') {
            result = (value * 9 / 5) + 32;
          } else {
            result = (value - 32) * 5 / 9;
          }
          return { value: Math.round(result * 10) / 10, unit: to };
        },
      }),
    },
    prompt: 'What is the weather in Tokyo? Please tell me in Celsius.',
    maxOutputTokens: 200,
  });

  console.log('\n--- AI Response ---');
  console.log(result.text);
  console.log('\n--- Tool Calls ---');
  console.log('Number of tool calls:', result.toolCalls?.length || 0);
  if (result.toolCalls) {
    // Tool call objects carry only the model's `input`; the values
    // returned by execute() live on result.toolResults, matched here
    // by toolCallId.
    result.toolCalls.forEach((call, i) => {
      console.log(`\n${i + 1}. ${call.toolName}`);
      console.log(' Input:', JSON.stringify(call.input));
      const match = result.toolResults?.find((r) => r.toolCallId === call.toolCallId);
      console.log(' Output:', JSON.stringify(match?.output));
    });
  }
}

main().catch(console.error);