Initial commit
templates/audio-transcription.ts (Normal file, 229 lines)
@@ -0,0 +1,229 @@
/**
 * OpenAI Audio API - Whisper Transcription Examples
 *
 * This template demonstrates:
 * - Basic audio transcription
 * - Supported audio formats
 * - Both SDK and fetch approaches
 * - Error handling
 */

import OpenAI from 'openai';
import fs from 'fs';
import FormData from 'form-data';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// BASIC TRANSCRIPTION (SDK)
// =============================================================================

async function basicTranscription() {
  const transcription = await openai.audio.transcriptions.create({
    file: fs.createReadStream('./audio.mp3'),
    model: 'whisper-1',
  });

  console.log('Transcription:', transcription.text);

  return transcription.text;
}

// =============================================================================
// TRANSCRIPTION WITH FETCH
// =============================================================================

async function transcriptionFetch() {
  const formData = new FormData();
  formData.append('file', fs.createReadStream('./audio.mp3'));
  formData.append('model', 'whisper-1');

  const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`,
      ...formData.getHeaders(),
    },
    body: formData,
  });

  const data: any = await response.json();
  console.log('Transcription:', data.text);

  return data.text;
}
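
// ---------------------------------------------------------------------------
// Added sketch (not part of the original template): the 'form-data' package
// used above pairs with node-fetch v2; the fetch built into Node 18+ expects
// the web-standard FormData/Blob instead. Assuming Node 18+, the same request
// looks like this. globalThis.FormData is used because the 'form-data' import
// above shadows the global name.
// ---------------------------------------------------------------------------

async function transcriptionFetchNative() {
  const form = new globalThis.FormData();
  form.append('file', new Blob([fs.readFileSync('./audio.mp3')]), 'audio.mp3');
  form.append('model', 'whisper-1');

  const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
    method: 'POST',
    // No manual Content-Type header: fetch sets the multipart boundary itself.
    headers: { 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}` },
    body: form,
  });

  const data: any = await response.json();
  return data.text;
}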

// =============================================================================
// MULTIPLE AUDIO FORMATS
// =============================================================================

async function multipleFormats() {
  const formats = ['mp3', 'wav', 'm4a', 'webm'];

  for (const format of formats) {
    const filename = `./audio.${format}`;

    if (fs.existsSync(filename)) {
      console.log(`Transcribing ${format}...`);

      const transcription = await openai.audio.transcriptions.create({
        file: fs.createReadStream(filename),
        model: 'whisper-1',
      });

      console.log(`${format.toUpperCase()}: ${transcription.text}`);
    } else {
      console.log(`${filename} not found, skipping...`);
    }
  }
}

// =============================================================================
// ERROR HANDLING
// =============================================================================

async function withErrorHandling(audioFilePath: string) {
  try {
    // Check if file exists
    if (!fs.existsSync(audioFilePath)) {
      throw new Error(`Audio file not found: ${audioFilePath}`);
    }

    // Check file size (Whisper has limits)
    const stats = fs.statSync(audioFilePath);
    const fileSizeMB = stats.size / (1024 * 1024);

    console.log(`File size: ${fileSizeMB.toFixed(2)} MB`);

    if (fileSizeMB > 25) {
      console.warn('Warning: File larger than 25MB may be rejected');
    }

    // Transcribe
    const transcription = await openai.audio.transcriptions.create({
      file: fs.createReadStream(audioFilePath),
      model: 'whisper-1',
    });

    return transcription.text;
  } catch (error: any) {
    if (error.message.includes('file not found')) {
      console.error('Audio file not found');
    } else if (error.message.includes('file too large')) {
      console.error('Audio file exceeds size limit');
    } else if (error.message.includes('unsupported format')) {
      console.error('Audio format not supported');
    } else {
      console.error('Transcription error:', error.message);
    }

    throw error;
  }
}

// =============================================================================
// BATCH TRANSCRIPTION
// =============================================================================

async function batchTranscription(audioFiles: string[]) {
  const results = [];

  for (const filePath of audioFiles) {
    console.log(`Transcribing: ${filePath}`);

    try {
      const transcription = await openai.audio.transcriptions.create({
        file: fs.createReadStream(filePath),
        model: 'whisper-1',
      });

      results.push({
        file: filePath,
        text: transcription.text,
        success: true,
      });

      console.log(`✓ ${filePath}: ${transcription.text.substring(0, 50)}...`);
    } catch (error: any) {
      results.push({
        file: filePath,
        error: error.message,
        success: false,
      });

      console.error(`✗ ${filePath}: ${error.message}`);
    }

    // Wait 1 second between requests to avoid rate limits
    await new Promise(resolve => setTimeout(resolve, 1000));
  }

  console.log(`\nCompleted: ${results.filter(r => r.success).length}/${results.length}`);

  return results;
}

// =============================================================================
// SAVE TRANSCRIPTION TO FILE
// =============================================================================

async function transcribeAndSave(audioFilePath: string, outputFilePath: string) {
  const transcription = await openai.audio.transcriptions.create({
    file: fs.createReadStream(audioFilePath),
    model: 'whisper-1',
  });

  fs.writeFileSync(outputFilePath, transcription.text);

  console.log(`Transcription saved to: ${outputFilePath}`);
  console.log(`Content: ${transcription.text}`);

  return transcription.text;
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Whisper Transcription Examples ===\n');

  console.log('Note: This script requires audio files to run.');
  console.log('Supported formats: mp3, mp4, mpeg, mpga, m4a, wav, webm\n');

  // Example 1: Basic transcription (uncomment when you have audio.mp3)
  // console.log('1. Basic Transcription:');
  // await basicTranscription();
  // console.log();

  // Example 2: Transcription with fetch
  // console.log('2. Transcription with Fetch:');
  // await transcriptionFetch();
  // console.log();

  // Example 3: Multiple formats
  // console.log('3. Multiple Formats:');
  // await multipleFormats();
  // console.log();

  // Example 4: Save to file
  // console.log('4. Transcribe and Save:');
  // await transcribeAndSave('./audio.mp3', './transcription.txt');
  // console.log();
}

// Run if executed directly
if (require.main === module) {
  main().catch(console.error);
}

export {
  basicTranscription,
  transcriptionFetch,
  multipleFormats,
  withErrorHandling,
  batchTranscription,
  transcribeAndSave,
};
templates/chat-completion-basic.ts (Normal file, 24 lines)
@@ -0,0 +1,24 @@
// Basic Chat Completion with GPT-5
// Simple example showing the minimal setup for chat completions

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

async function basicChatCompletion() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-5',
    messages: [
      {
        role: 'user',
        content: 'What are the three laws of robotics?'
      }
    ],
  });

  console.log(completion.choices[0].message.content);
}

basicChatCompletion();
templates/chat-completion-nodejs.ts (Normal file, 73 lines)
@@ -0,0 +1,73 @@
// Complete Chat Completion Example (Node.js SDK)
// Shows multi-turn conversation, GPT-5 parameters, and error handling

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

async function chatWithGPT5() {
  try {
    // Multi-turn conversation
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
      {
        role: 'system',
        content: 'You are a helpful assistant that explains complex topics simply.'
      },
      {
        role: 'user',
        content: 'Explain quantum computing to a 10-year-old'
      }
    ];

    // First turn with GPT-5 specific parameters
    const completion1 = await openai.chat.completions.create({
      model: 'gpt-5',
      messages: messages,
      reasoning_effort: 'medium', // GPT-5 parameter
      verbosity: 'high', // GPT-5 parameter
      max_completion_tokens: 500, // reasoning models such as gpt-5 reject the older max_tokens
    });

    const assistantMessage = completion1.choices[0].message;
    console.log('Assistant:', assistantMessage.content);

    // Add assistant response to conversation
    messages.push(assistantMessage);

    // Follow-up question
    messages.push({
      role: 'user',
      content: 'Can you give me an example?'
    });

    // Second turn
    const completion2 = await openai.chat.completions.create({
      model: 'gpt-5',
      messages: messages,
      reasoning_effort: 'medium',
      verbosity: 'medium',
      max_completion_tokens: 300,
    });

    console.log('Assistant:', completion2.choices[0].message.content);

    // Token usage
    console.log('\nToken usage:');
    console.log('- Prompt tokens:', completion2.usage?.prompt_tokens);
    console.log('- Completion tokens:', completion2.usage?.completion_tokens);
    console.log('- Total tokens:', completion2.usage?.total_tokens);

  } catch (error: any) {
    if (error.status === 401) {
      console.error('Invalid API key. Check OPENAI_API_KEY environment variable.');
    } else if (error.status === 429) {
      console.error('Rate limit exceeded. Please wait and try again.');
    } else {
      console.error('Error:', error.message);
    }
  }
}

chatWithGPT5();
templates/cloudflare-worker.ts (Normal file, 101 lines)
@@ -0,0 +1,101 @@
// Complete Cloudflare Worker with OpenAI Integration
// Supports both streaming and non-streaming chat completions

interface Env {
  OPENAI_API_KEY: string;
}

interface ChatRequest {
  message: string;
  stream?: boolean;
}

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    // CORS headers
    const corsHeaders = {
      'Access-Control-Allow-Origin': '*',
      'Access-Control-Allow-Methods': 'POST, OPTIONS',
      'Access-Control-Allow-Headers': 'Content-Type',
    };

    // Handle CORS preflight
    if (request.method === 'OPTIONS') {
      return new Response(null, { headers: corsHeaders });
    }

    if (request.method !== 'POST') {
      return new Response('Method not allowed', { status: 405 });
    }

    try {
      const { message, stream } = await request.json() as ChatRequest;

      if (!message) {
        return new Response(
          JSON.stringify({ error: 'Message is required' }),
          { status: 400, headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
        );
      }

      // Call OpenAI
      const openaiResponse = await fetch('https://api.openai.com/v1/chat/completions', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${env.OPENAI_API_KEY}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: 'gpt-5',
          messages: [
            { role: 'user', content: message }
          ],
          stream: stream || false,
          reasoning_effort: 'medium',
          max_completion_tokens: 500, // reasoning models such as gpt-5 reject the older max_tokens
        }),
      });

      if (!openaiResponse.ok) {
        const error = await openaiResponse.text();
        return new Response(
          JSON.stringify({ error: `OpenAI API error: ${error}` }),
          { status: openaiResponse.status, headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
        );
      }

      // Streaming response
      if (stream) {
        return new Response(openaiResponse.body, {
          headers: {
            ...corsHeaders,
            'Content-Type': 'text/event-stream',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
          },
        });
      }

      // Non-streaming response
      const data: any = await openaiResponse.json();
      return new Response(
        JSON.stringify({
          response: data.choices[0].message.content,
          usage: data.usage,
        }),
        {
          headers: {
            ...corsHeaders,
            'Content-Type': 'application/json',
          },
        }
      );

    } catch (error: any) {
      return new Response(
        JSON.stringify({ error: error.message || 'Internal server error' }),
        { status: 500, headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
      );
    }
  },
};
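
// ---------------------------------------------------------------------------
// Added sketch (not part of the original worker): consuming the worker's SSE
// stream from a TypeScript client. The URL and request shape follow the
// worker above; the line-by-line parsing is simplified and assumes each read
// delivers whole "data: {...}" lines.
// ---------------------------------------------------------------------------

async function consumeWorkerStream(workerUrl: string, message: string): Promise<string> {
  const res = await fetch(workerUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message, stream: true }),
  });

  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let text = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    for (const line of decoder.decode(value, { stream: true }).split('\n')) {
      if (!line.startsWith('data: ') || line.includes('[DONE]')) continue;
      const delta = JSON.parse(line.slice(6)).choices?.[0]?.delta?.content;
      if (delta) text += delta;
    }
  }

  return text;
}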
templates/embeddings.ts (Normal file, 267 lines)
@@ -0,0 +1,267 @@
/**
 * OpenAI Embeddings API - Complete Examples
 *
 * This template demonstrates:
 * - Basic embeddings generation
 * - Custom dimensions for storage optimization
 * - Batch processing multiple texts
 * - RAG (Retrieval-Augmented Generation) pattern
 * - Cosine similarity for semantic search
 */

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// BASIC EMBEDDINGS
// =============================================================================

async function basicEmbedding() {
  const embedding = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: 'The food was delicious and the waiter was friendly.',
  });

  console.log('Embedding dimensions:', embedding.data[0].embedding.length);
  console.log('First 5 values:', embedding.data[0].embedding.slice(0, 5));
  console.log('Token usage:', embedding.usage);

  return embedding.data[0].embedding;
}

// =============================================================================
// CUSTOM DIMENSIONS (Storage Optimization)
// =============================================================================

async function customDimensions() {
  // Default: 1536 dimensions
  const fullEmbedding = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: 'Sample text',
  });

  console.log('Full dimensions:', fullEmbedding.data[0].embedding.length);

  // Reduced: 256 dimensions (6x storage reduction)
  const reducedEmbedding = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: 'Sample text',
    dimensions: 256,
  });

  console.log('Reduced dimensions:', reducedEmbedding.data[0].embedding.length);

  return reducedEmbedding.data[0].embedding;
}

// =============================================================================
// BATCH PROCESSING
// =============================================================================

async function batchEmbeddings() {
  const texts = [
    'First document about TypeScript',
    'Second document about Python',
    'Third document about JavaScript',
  ];

  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: texts,
    dimensions: 512, // Optional: reduce dimensions
  });

  // Process results
  const embeddings = response.data.map((item, index) => ({
    text: texts[index],
    embedding: item.embedding,
  }));

  console.log(`Generated ${embeddings.length} embeddings`);
  console.log('Total tokens used:', response.usage.total_tokens);

  return embeddings;
}

// =============================================================================
// COSINE SIMILARITY
// =============================================================================

function cosineSimilarity(a: number[], b: number[]): number {
  const dotProduct = a.reduce((sum, val, i) => sum + val * b[i], 0);
  const magnitudeA = Math.sqrt(a.reduce((sum, val) => sum + val * val, 0));
  const magnitudeB = Math.sqrt(b.reduce((sum, val) => sum + val * val, 0));
  return dotProduct / (magnitudeA * magnitudeB);
}

// =============================================================================
// L2 NORMALIZATION
// =============================================================================

function normalizeL2(vector: number[]): number[] {
  const magnitude = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));
  return vector.map(val => val / magnitude);
}
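
// Added worked check (not in the original file) for the two helpers above:
//   cosineSimilarity([1, 0], [0, 1]) === 0   // orthogonal vectors
//   cosineSimilarity([1, 2], [2, 4]) === 1   // parallel vectors
//   normalizeL2([3, 4]) -> [0.6, 0.8]        // unit length: 0.36 + 0.64 = 1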

// =============================================================================
// SEMANTIC SEARCH
// =============================================================================

interface Document {
  text: string;
  embedding: number[];
}

async function semanticSearch(query: string, documents: Document[]) {
  // Embed the query
  const queryEmbedding = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: query,
  });

  const queryVector = queryEmbedding.data[0].embedding;

  // Calculate similarity scores
  const results = documents.map(doc => ({
    text: doc.text,
    similarity: cosineSimilarity(queryVector, doc.embedding),
  }));

  // Sort by similarity (highest first)
  results.sort((a, b) => b.similarity - a.similarity);

  return results;
}

// =============================================================================
// RAG (Retrieval-Augmented Generation)
// =============================================================================

async function ragExample() {
  // 1. Create knowledge base
  const knowledgeBase = [
    'TypeScript is a superset of JavaScript that adds static typing.',
    'Python is a high-level programming language known for readability.',
    'React is a JavaScript library for building user interfaces.',
    'Node.js is a JavaScript runtime built on Chrome\'s V8 engine.',
  ];

  // 2. Generate embeddings for knowledge base
  const embeddingsResponse = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: knowledgeBase,
  });

  const documents: Document[] = knowledgeBase.map((text, index) => ({
    text,
    embedding: embeddingsResponse.data[index].embedding,
  }));

  // 3. User query
  const userQuery = 'What is TypeScript?';

  // 4. Find relevant documents
  const searchResults = await semanticSearch(userQuery, documents);
  const topResults = searchResults.slice(0, 2); // Top 2 most relevant

  console.log('Most relevant documents:');
  topResults.forEach(result => {
    console.log(`- [${result.similarity.toFixed(3)}] ${result.text}`);
  });

  // 5. Generate answer using retrieved context
  const context = topResults.map(r => r.text).join('\n\n');

  const completion = await openai.chat.completions.create({
    model: 'gpt-5',
    messages: [
      {
        role: 'system',
        content: `Answer the question using the following context:\n\n${context}`,
      },
      {
        role: 'user',
        content: userQuery,
      },
    ],
  });

  console.log('\nAnswer:', completion.choices[0].message.content);

  return completion.choices[0].message.content;
}

// =============================================================================
// DIMENSION REDUCTION (Post-Generation)
// =============================================================================

async function manualDimensionReduction() {
  // Get full embedding
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: 'Testing 123',
  });

  const fullEmbedding = response.data[0].embedding;
  console.log('Full dimensions:', fullEmbedding.length);

  // Truncate to 256 dimensions
  const truncated = fullEmbedding.slice(0, 256);
  console.log('Truncated dimensions:', truncated.length);

  // Normalize (recommended after truncation)
  const normalized = normalizeL2(truncated);

  return normalized;
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Embeddings Examples ===\n');

  // Example 1: Basic embedding
  console.log('1. Basic Embedding:');
  await basicEmbedding();
  console.log();

  // Example 2: Custom dimensions
  console.log('2. Custom Dimensions:');
  await customDimensions();
  console.log();

  // Example 3: Batch processing
  console.log('3. Batch Processing:');
  await batchEmbeddings();
  console.log();

  // Example 4: RAG pattern
  console.log('4. RAG (Retrieval-Augmented Generation):');
  await ragExample();
  console.log();

  // Example 5: Manual dimension reduction
  console.log('5. Manual Dimension Reduction:');
  await manualDimensionReduction();
  console.log();
}

// Run if executed directly
if (require.main === module) {
  main().catch(console.error);
}

export {
  basicEmbedding,
  customDimensions,
  batchEmbeddings,
  semanticSearch,
  ragExample,
  cosineSimilarity,
  normalizeL2,
};
templates/function-calling.ts (Normal file, 151 lines)
@@ -0,0 +1,151 @@
// Function Calling (Tool Use) with GPT-5
// Complete example showing tool definition, execution, and multi-turn flow

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// Define available tools/functions
const tools: OpenAI.Chat.ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city name, e.g., San Francisco'
          },
          unit: {
            type: 'string',
            enum: ['celsius', 'fahrenheit'],
            description: 'Temperature unit'
          }
        },
        required: ['location']
      }
    }
  },
  {
    type: 'function',
    function: {
      name: 'search_web',
      description: 'Search the web for information',
      parameters: {
        type: 'object',
        properties: {
          query: {
            type: 'string',
            description: 'The search query'
          }
        },
        required: ['query']
      }
    }
  }
];

// Implement the actual functions
async function getWeather(location: string, unit: string = 'fahrenheit'): Promise<string> {
  // In production, call a real weather API
  return JSON.stringify({
    location,
    temperature: 72,
    unit,
    condition: 'sunny',
    forecast: 'Clear skies throughout the day'
  });
}

async function searchWeb(query: string): Promise<string> {
  // In production, call a real search API
  return JSON.stringify({
    query,
    results: [
      { title: 'Example Result 1', snippet: 'This is a sample search result...' },
      { title: 'Example Result 2', snippet: 'Another sample result...' }
    ]
  });
}

// Execute function based on name
async function executeFunction(name: string, argumentsJson: string): Promise<string> {
  const args = JSON.parse(argumentsJson);

  switch (name) {
    case 'get_weather':
      return await getWeather(args.location, args.unit);
    case 'search_web':
      return await searchWeb(args.query);
    default:
      throw new Error(`Unknown function: ${name}`);
  }
}

async function chatWithTools(userMessage: string) {
  const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
    {
      role: 'user',
      content: userMessage
    }
  ];

  console.log('User:', userMessage);

  // Keep looping until model doesn't call any tools
  while (true) {
    const completion = await openai.chat.completions.create({
      model: 'gpt-5',
      messages: messages,
      tools: tools,
    });

    const message = completion.choices[0].message;
    messages.push(message);

    // If no tool calls, we're done
    if (!message.tool_calls) {
      console.log('Assistant:', message.content);
      return message.content;
    }

    // Execute all tool calls
    console.log(`\nCalling ${message.tool_calls.length} tool(s)...`);

    for (const toolCall of message.tool_calls) {
      console.log(`- ${toolCall.function.name}(${toolCall.function.arguments})`);

      const result = await executeFunction(
        toolCall.function.name,
        toolCall.function.arguments
      );

      console.log(`  Result: ${result}`);

      // Add tool result to conversation
      messages.push({
        role: 'tool',
        tool_call_id: toolCall.id,
        content: result
      });
    }

    console.log('\nModel processing tool results...\n');
  }
}
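
// Added note (not part of the original template): tool_choice is a standard
// Chat Completions parameter. The default 'auto' lets the model decide; you
// can instead force a specific tool, e.g.:
//
//   const completion = await openai.chat.completions.create({
//     model: 'gpt-5',
//     messages,
//     tools,
//     tool_choice: { type: 'function', function: { name: 'get_weather' } },
//   });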

// Example usage
async function main() {
  await chatWithTools('What is the weather in San Francisco in celsius?');

  console.log('\n---\n');

  await chatWithTools('Search for the latest TypeScript features');
}

main();
templates/image-editing.ts (Normal file, 336 lines)
@@ -0,0 +1,336 @@
/**
 * OpenAI Images API - Image Editing Examples (GPT-Image-1)
 *
 * This template demonstrates:
 * - Basic image editing
 * - Compositing multiple images
 * - Transparent backgrounds
 * - Different output formats
 * - Compression settings
 *
 * NOTE: Image editing uses multipart/form-data, not JSON
 */

import fs from 'fs';
import FormData from 'form-data';

const OPENAI_API_KEY = process.env.OPENAI_API_KEY;

// =============================================================================
// BASIC IMAGE EDITING
// =============================================================================

async function basicEdit() {
  const formData = new FormData();
  formData.append('model', 'gpt-image-1');
  formData.append('image', fs.createReadStream('./input-image.jpg'));
  formData.append('prompt', 'Change the sky to a sunset with orange and pink colors');
  formData.append('size', '1024x1024');

  const response = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...formData.getHeaders(),
    },
    body: formData,
  });

  const data: any = await response.json();
  console.log('Edited image URL:', data.data[0].url);

  return data.data[0].url;
}

// =============================================================================
// COMPOSITE TWO IMAGES
// =============================================================================

async function compositeImages() {
  const formData = new FormData();
  formData.append('model', 'gpt-image-1');
  formData.append('image', fs.createReadStream('./woman.jpg'));
  formData.append('image_2', fs.createReadStream('./logo.png'));
  formData.append('prompt', 'Add the logo to the woman\'s top, as if stamped into the fabric.');
  formData.append('input_fidelity', 'high'); // Stay close to original
  formData.append('size', '1024x1024');

  const response = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...formData.getHeaders(),
    },
    body: formData,
  });

  const data: any = await response.json();
  console.log('Composite image URL:', data.data[0].url);

  return data.data[0].url;
}

// =============================================================================
// REMOVE BACKGROUND (Transparent)
// =============================================================================

async function removeBackground() {
  const formData = new FormData();
  formData.append('model', 'gpt-image-1');
  formData.append('image', fs.createReadStream('./product.jpg'));
  formData.append('prompt', 'Remove the background, keeping only the product.');
  formData.append('format', 'png'); // Required for transparency
  formData.append('background', 'transparent');
  formData.append('size', '1024x1024');

  const response = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...formData.getHeaders(),
    },
    body: formData,
  });

  const data: any = await response.json();
  console.log('Transparent background URL:', data.data[0].url);

  // Download and save
  const imageResponse = await fetch(data.data[0].url);
  const buffer = Buffer.from(await imageResponse.arrayBuffer());
  fs.writeFileSync('product-no-bg.png', buffer);
  console.log('Saved to: product-no-bg.png');

  return data.data[0].url;
}

// =============================================================================
// INPUT FIDELITY OPTIONS
// =============================================================================

async function fidelityComparison() {
  const baseFormData = () => {
    const formData = new FormData();
    formData.append('model', 'gpt-image-1');
    formData.append('image', fs.createReadStream('./portrait.jpg'));
    formData.append('prompt', 'Add sunglasses to the person');
    return formData;
  };

  // Low fidelity (more creative freedom)
  const lowFidelity = baseFormData();
  lowFidelity.append('input_fidelity', 'low');

  const lowResponse = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...lowFidelity.getHeaders(),
    },
    body: lowFidelity,
  });

  const lowData: any = await lowResponse.json();
  console.log('Low fidelity URL:', lowData.data[0].url);

  // High fidelity (stay closer to original)
  const highFidelity = baseFormData();
  highFidelity.append('input_fidelity', 'high');

  const highResponse = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...highFidelity.getHeaders(),
    },
    body: highFidelity,
  });

  const highData: any = await highResponse.json();
  console.log('High fidelity URL:', highData.data[0].url);

  return { low: lowData.data[0].url, high: highData.data[0].url };
}

// =============================================================================
// OUTPUT FORMATS AND COMPRESSION
// =============================================================================

async function formatComparison() {
  const basePrompt = 'Add a blue sky to the background';

  // PNG (supports transparency, larger file)
  const pngFormData = new FormData();
  pngFormData.append('model', 'gpt-image-1');
  pngFormData.append('image', fs.createReadStream('./scene.jpg'));
  pngFormData.append('prompt', basePrompt);
  pngFormData.append('format', 'png');

  const pngResponse = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...pngFormData.getHeaders(),
    },
    body: pngFormData,
  });

  const pngData: any = await pngResponse.json();
  console.log('PNG format URL:', pngData.data[0].url);

  // JPEG (smaller file, no transparency)
  const jpegFormData = new FormData();
  jpegFormData.append('model', 'gpt-image-1');
  jpegFormData.append('image', fs.createReadStream('./scene.jpg'));
  jpegFormData.append('prompt', basePrompt);
  jpegFormData.append('format', 'jpeg');
  jpegFormData.append('output_compression', '80'); // 0-100

  const jpegResponse = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...jpegFormData.getHeaders(),
    },
    body: jpegFormData,
  });

  const jpegData: any = await jpegResponse.json();
  console.log('JPEG format URL:', jpegData.data[0].url);

  // WebP (best compression, supports transparency)
  const webpFormData = new FormData();
  webpFormData.append('model', 'gpt-image-1');
  webpFormData.append('image', fs.createReadStream('./scene.jpg'));
  webpFormData.append('prompt', basePrompt);
  webpFormData.append('format', 'webp');
  webpFormData.append('output_compression', '85');

  const webpResponse = await fetch('https://api.openai.com/v1/images/edits', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${OPENAI_API_KEY}`,
      ...webpFormData.getHeaders(),
    },
    body: webpFormData,
  });

  const webpData: any = await webpResponse.json();
  console.log('WebP format URL:', webpData.data[0].url);

  return { png: pngData.data[0].url, jpeg: jpegData.data[0].url, webp: webpData.data[0].url };
}

// =============================================================================
// COMMON EDITING TASKS
// =============================================================================

async function commonEdits() {
  // 1. Color correction
  const colorCorrect = new FormData();
  colorCorrect.append('model', 'gpt-image-1');
  colorCorrect.append('image', fs.createReadStream('./photo.jpg'));
  colorCorrect.append('prompt', 'Increase brightness and saturation, make colors more vibrant');

  // 2. Object removal
  const objectRemoval = new FormData();
  objectRemoval.append('model', 'gpt-image-1');
  objectRemoval.append('image', fs.createReadStream('./scene.jpg'));
  objectRemoval.append('prompt', 'Remove the person from the background');

  // 3. Style transfer
  const styleTransfer = new FormData();
  styleTransfer.append('model', 'gpt-image-1');
  styleTransfer.append('image', fs.createReadStream('./photo.jpg'));
  styleTransfer.append('prompt', 'Transform this photo into a watercolor painting style');

  // 4. Add text/overlay
  const addText = new FormData();
  addText.append('model', 'gpt-image-1');
  addText.append('image', fs.createReadStream('./poster.jpg'));
  addText.append('prompt', 'Add the text "SALE" in large bold letters at the top');

  console.log('Common editing tasks prepared');

  return { colorCorrect, objectRemoval, styleTransfer, addText };
}

// =============================================================================
// ERROR HANDLING
// =============================================================================

async function withErrorHandling() {
  try {
    const formData = new FormData();
    formData.append('model', 'gpt-image-1');
    formData.append('image', fs.createReadStream('./input.jpg'));
    formData.append('prompt', 'Edit the image');

    const response = await fetch('https://api.openai.com/v1/images/edits', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${OPENAI_API_KEY}`,
        ...formData.getHeaders(),
      },
      body: formData,
    });

    if (!response.ok) {
      const error = await response.json();
      throw new Error(`API error: ${error.error?.message}`);
    }

    const data: any = await response.json();
    return data.data[0].url;
  } catch (error: any) {
    if (error.message.includes('file not found')) {
      console.error('Input image file not found');
    } else if (error.message.includes('rate limit')) {
      console.error('Rate limit exceeded - wait and retry');
    } else {
      console.error('Unexpected error:', error.message);
    }

    throw error;
  }
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Image Editing (GPT-Image-1) Examples ===\n');

  console.log('Note: This script requires input images to run.');
  console.log('Create test images first or modify the file paths.\n');

  // Uncomment the examples you want to run:

  // console.log('1. Basic Edit:');
  // await basicEdit();
  // console.log();

  // console.log('2. Composite Images:');
  // await compositeImages();
  // console.log();

  // console.log('3. Remove Background:');
  // await removeBackground();
  // console.log();
}

// Run if executed directly
if (require.main === module) {
  main().catch(console.error);
}

export {
  basicEdit,
  compositeImages,
  removeBackground,
  fidelityComparison,
  formatComparison,
  commonEdits,
  withErrorHandling,
};
templates/image-generation.ts (Normal file, 349 lines)
@@ -0,0 +1,349 @@
/**
 * OpenAI Images API - DALL-E 3 Generation Examples
 *
 * This template demonstrates:
 * - Basic image generation
 * - Quality settings (standard vs HD)
 * - Style options (vivid vs natural)
 * - Different sizes and formats
 * - Base64 output
 * - Saving images to disk
 */

import OpenAI from 'openai';
import fs from 'fs';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// BASIC IMAGE GENERATION
// =============================================================================

async function basicGeneration() {
  const image = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'A white siamese cat with striking blue eyes',
    size: '1024x1024',
    quality: 'standard',
    style: 'vivid',
    n: 1,
  });

  console.log('Generated image URL:', image.data[0].url);
  console.log('Revised prompt:', image.data[0].revised_prompt);

  return image.data[0].url;
}

// =============================================================================
// QUALITY COMPARISON
// =============================================================================

async function qualityComparison() {
  const prompt = 'A futuristic city at sunset with flying cars';

  // Standard quality (faster, cheaper)
  console.log('Generating standard quality image...');
  const standard = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    quality: 'standard',
  });

  console.log('Standard quality URL:', standard.data[0].url);

  // HD quality (finer details, more expensive)
  console.log('Generating HD quality image...');
  const hd = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    quality: 'hd',
  });

  console.log('HD quality URL:', hd.data[0].url);

  return { standard: standard.data[0].url, hd: hd.data[0].url };
}

// =============================================================================
// STYLE COMPARISON
// =============================================================================

async function styleComparison() {
  const prompt = 'A mountain landscape with a lake';

  // Vivid style (hyper-real, dramatic)
  console.log('Generating vivid style image...');
  const vivid = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    style: 'vivid',
  });

  console.log('Vivid style URL:', vivid.data[0].url);

  // Natural style (more realistic, less dramatic)
  console.log('Generating natural style image...');
  const natural = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    style: 'natural',
  });

  console.log('Natural style URL:', natural.data[0].url);

  return { vivid: vivid.data[0].url, natural: natural.data[0].url };
}

// =============================================================================
// DIFFERENT SIZES
// =============================================================================

async function differentSizes() {
  const prompt = 'A minimalist logo for a tech company';

  // Square
  const square = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    size: '1024x1024',
  });

  console.log('Square (1024x1024):', square.data[0].url);

  // Portrait
  const portrait = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    size: '1024x1792',
  });

  console.log('Portrait (1024x1792):', portrait.data[0].url);

  // Landscape
  const landscape = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    size: '1792x1024',
  });

  console.log('Landscape (1792x1024):', landscape.data[0].url);

  return { square, portrait, landscape };
}

// =============================================================================
// BASE64 OUTPUT
// =============================================================================

async function base64Output() {
  const image = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'A cyberpunk street scene at night',
    response_format: 'b64_json',
  });

  const base64Data = image.data[0].b64_json;

  console.log('Base64 data length:', base64Data?.length);
  console.log('First 100 chars:', base64Data?.substring(0, 100));

  // Convert to buffer and save
  if (base64Data) {
    const buffer = Buffer.from(base64Data, 'base64');
    fs.writeFileSync('generated-image.png', buffer);
    console.log('Image saved to: generated-image.png');
  }

  return base64Data;
}

// =============================================================================
// DOWNLOAD AND SAVE IMAGE
// =============================================================================

async function downloadAndSave(url: string, filename: string) {
  const response = await fetch(url);
  const arrayBuffer = await response.arrayBuffer();
  const buffer = Buffer.from(arrayBuffer);

  fs.writeFileSync(filename, buffer);
  console.log(`Image saved to: ${filename}`);
}

async function generateAndSave() {
  const image = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'A serene Japanese garden with cherry blossoms',
    size: '1024x1024',
    quality: 'hd',
    style: 'natural',
  });

  const url = image.data[0].url;
  await downloadAndSave(url, 'japanese-garden.png');

  return url;
}

// =============================================================================
// DETAILED PROMPT EXAMPLES
// =============================================================================

async function detailedPrompts() {
  // Example 1: Specific art style
  const artStyle = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'An oil painting of a sunset over the ocean in the style of Claude Monet',
    style: 'natural',
  });

  console.log('Art style result:', artStyle.data[0].url);

  // Example 2: Detailed composition
  const detailed = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'A professional product photo of a smartwatch on a white marble surface, with soft studio lighting from the left, shallow depth of field, commercial photography style',
    quality: 'hd',
    style: 'natural',
  });

  console.log('Detailed composition:', detailed.data[0].url);

  // Example 3: Character design
  const character = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'A friendly robot character with round edges, bright blue and white colors, large expressive eyes, modern minimalist design, 3D render style',
    style: 'vivid',
  });

  console.log('Character design:', character.data[0].url);

  return { artStyle, detailed, character };
}

// =============================================================================
// ERROR HANDLING
// =============================================================================

async function withErrorHandling() {
  try {
    const image = await openai.images.generate({
      model: 'dall-e-3',
      prompt: 'A beautiful landscape',
    });

    return image.data[0].url;
  } catch (error: any) {
    if (error.status === 400) {
      console.error('Bad request - check your prompt for policy violations');
    } else if (error.status === 401) {
      console.error('Invalid API key');
    } else if (error.status === 429) {
      console.error('Rate limit exceeded - wait and retry');
    } else {
      console.error('Unexpected error:', error.message);
    }

    throw error;
  }
}
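
// ---------------------------------------------------------------------------
// Added sketch (not part of the original template): a small exponential-
// backoff wrapper for the 429 case handled above. The attempt count and
// delays are arbitrary choices.
// ---------------------------------------------------------------------------

async function withRetry<T>(fn: () => Promise<T>, attempts = 3): Promise<T> {
  for (let i = 0; ; i++) {
    try {
      return await fn();
    } catch (error: any) {
      if (error.status !== 429 || i >= attempts - 1) throw error;
      const delayMs = 1000 * 2 ** i; // 1s, 2s, 4s, ...
      console.warn(`Rate limited, retrying in ${delayMs} ms`);
      await new Promise(resolve => setTimeout(resolve, delayMs));
    }
  }
}

// Usage: const url = await withRetry(() => basicGeneration());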

// =============================================================================
// BATCH GENERATION (Sequential)
// =============================================================================

async function batchGeneration() {
  const prompts = [
    'A red apple on a wooden table',
    'A blue butterfly on a flower',
    'A green forest path in autumn',
  ];

  const results = [];

  for (const prompt of prompts) {
    console.log(`Generating: ${prompt}`);

    const image = await openai.images.generate({
      model: 'dall-e-3',
      prompt,
      size: '1024x1024',
      quality: 'standard',
    });

    results.push({
      prompt,
      url: image.data[0].url,
      revised_prompt: image.data[0].revised_prompt,
    });

    // Wait 1 second between requests to avoid rate limits
    await new Promise(resolve => setTimeout(resolve, 1000));
  }

  console.log(`Generated ${results.length} images`);
  return results;
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI DALL-E 3 Image Generation Examples ===\n');

  // Example 1: Basic generation
  console.log('1. Basic Image Generation:');
  await basicGeneration();
  console.log();

  // Example 2: Quality comparison
  console.log('2. Quality Comparison (Standard vs HD):');
  await qualityComparison();
  console.log();

  // Example 3: Style comparison
  console.log('3. Style Comparison (Vivid vs Natural):');
  await styleComparison();
  console.log();

  // Example 4: Different sizes
  console.log('4. Different Sizes:');
  await differentSizes();
  console.log();

  // Example 5: Base64 output
  console.log('5. Base64 Output and Save:');
  await base64Output();
  console.log();

  // Example 6: Generate and save
  console.log('6. Generate and Save:');
  await generateAndSave();
  console.log();

  // Example 7: Detailed prompts
  console.log('7. Detailed Prompt Examples:');
  await detailedPrompts();
  console.log();
}

// Run if executed directly
if (require.main === module) {
  main().catch(console.error);
}

export {
  basicGeneration,
  qualityComparison,
  styleComparison,
  differentSizes,
  base64Output,
  generateAndSave,
  detailedPrompts,
  batchGeneration,
  withErrorHandling,
};
templates/moderation.ts (Normal file, 399 lines)
@@ -0,0 +1,399 @@
/**
 * OpenAI Moderation API - Content Safety Examples
 *
 * This template demonstrates:
 * - Basic content moderation
 * - All 11 safety categories
 * - Category scoring and thresholds
 * - Batch moderation
 * - Production patterns for UGC
 */

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// BASIC MODERATION
// =============================================================================

async function basicModeration() {
  const moderation = await openai.moderations.create({
    model: 'omni-moderation-latest',
    input: 'I want to hurt someone.',
  });

  const result = moderation.results[0];

  console.log('Flagged:', result.flagged);
  console.log('Categories flagged:');
  Object.entries(result.categories).forEach(([category, flagged]) => {
    if (flagged) {
      // Cast needed: string-indexing the typed scores object
      console.log(`  - ${category}: ${(result.category_scores as any)[category].toFixed(4)}`);
    }
  });

  return result;
}

// =============================================================================
// ALL SAFETY CATEGORIES
// =============================================================================

async function allCategories() {
  const examples = {
    sexual: 'Explicit sexual content example',
    hate: 'Hateful speech based on identity',
    harassment: 'Bullying and intimidation example',
    'self-harm': 'Content promoting self-harm',
    'sexual/minors': 'Any sexualization of minors',
    'hate/threatening': 'Violent threats based on identity',
    'violence/graphic': 'Graphic violence description',
    'self-harm/intent': 'Active suicidal ideation',
    'self-harm/instructions': 'How to self-harm instructions',
    'harassment/threatening': 'Violent threat toward individual',
    violence: 'General violent content',
  };

  console.log('Safety Categories:\n');

  for (const [category, description] of Object.entries(examples)) {
    console.log(`${category}:`);
    console.log(`  Description: ${description}`);
  }

  console.log('\nNote: Use actual content for real moderation, not descriptions');
}

// =============================================================================
// CATEGORY SCORES AND THRESHOLDS
// =============================================================================

interface Thresholds {
  [key: string]: number;
}

const recommendedThresholds: Thresholds = {
  sexual: 0.5,
  hate: 0.4,
  harassment: 0.5,
  'self-harm': 0.3,
  'sexual/minors': 0.1, // Lower threshold for child safety
  'hate/threatening': 0.3,
  'violence/graphic': 0.5,
  'self-harm/intent': 0.2,
  'self-harm/instructions': 0.2,
  'harassment/threatening': 0.3,
  violence: 0.5,
};

function checkThresholds(result: any, thresholds: Thresholds): boolean {
  return Object.entries(result.category_scores).some(
    ([category, score]) => score > (thresholds[category] || 0.5)
  );
}

async function withCustomThresholds(text: string) {
  const moderation = await openai.moderations.create({
    model: 'omni-moderation-latest',
    input: text,
  });

  const result = moderation.results[0];

  const isFlagged = checkThresholds(result, recommendedThresholds);

  console.log('Content:', text);
  console.log('API flagged:', result.flagged);
  console.log('Custom thresholds flagged:', isFlagged);

  if (isFlagged) {
    console.log('Flagged categories:');
    Object.entries(result.category_scores).forEach(([category, score]) => {
      const threshold = recommendedThresholds[category] || 0.5;
      if (score > threshold) {
        console.log(`  - ${category}: ${score.toFixed(4)} (threshold: ${threshold})`);
      }
    });
  }

  return { result, isFlagged };
}

// =============================================================================
// BATCH MODERATION
// =============================================================================

async function batchModeration() {
  const texts = [
    'This is a normal, safe comment',
    'Potentially harmful content example',
    'Another safe piece of text',
  ];

  const moderation = await openai.moderations.create({
    model: 'omni-moderation-latest',
    input: texts,
  });

  moderation.results.forEach((result, index) => {
    console.log(`\nInput ${index + 1}: "${texts[index]}"`);
    console.log('Flagged:', result.flagged);

    if (result.flagged) {
      const flaggedCategories = Object.keys(result.categories).filter(
        cat => (result.categories as any)[cat]
      );
      console.log('Categories:', flaggedCategories.join(', '));
    }
  });

  return moderation.results;
}

// =============================================================================
// PRODUCTION PATTERN - UGC MODERATION
// =============================================================================

interface ModerationDecision {
  allowed: boolean;
  reason?: string;
  severity?: 'low' | 'medium' | 'high' | 'error';
  scores?: any;
}

async function moderateUserContent(userInput: string): Promise<ModerationDecision> {
  try {
    const moderation = await openai.moderations.create({
      model: 'omni-moderation-latest',
      input: userInput,
    });

    const result = moderation.results[0];

    // Immediate block for severe categories
    const severeCategories = [
      'sexual/minors',
      'self-harm/intent',
      'hate/threatening',
      'harassment/threatening',
    ];

    for (const category of severeCategories) {
      if ((result.categories as any)[category]) {
        return {
          allowed: false,
          reason: `Content violates policy: ${category}`,
          severity: 'high',
        };
      }
    }

    // High-confidence violence check
    if (result.category_scores.violence > 0.8) {
      return {
        allowed: false,
        reason: 'High-confidence violence detected',
        severity: 'medium',
      };
    }

    // Self-harm content requires human review
    if (result.categories['self-harm']) {
      return {
        allowed: false,
        reason: 'Content flagged for human review',
        severity: 'medium',
      };
    }

    // Allow content
    return {
      allowed: true,
      scores: result.category_scores,
    };
  } catch (error: any) {
    console.error('Moderation error:', error);

    // Fail closed: block on error
    return {
      allowed: false,
      reason: 'Moderation service unavailable',
      severity: 'error',
    };
  }
}
|
||||
// =============================================================================
|
||||
// CATEGORY-SPECIFIC FILTERING
|
||||
// =============================================================================
|
||||
|
||||
async function filterByCategory(text: string, categoriesToCheck: string[]) {
|
||||
const moderation = await openai.moderations.create({
|
||||
model: 'omni-moderation-latest',
|
||||
input: text,
|
||||
});
|
||||
|
||||
const result = moderation.results[0];
|
||||
|
||||
const violations = categoriesToCheck.filter(
|
||||
category => result.categories[category]
|
||||
);
|
||||
|
||||
if (violations.length > 0) {
|
||||
console.log('Content violates:', violations.join(', '));
|
||||
return false;
|
||||
}
|
||||
|
||||
console.log('Content passed specified category checks');
|
||||
return true;
|
||||
}
|
||||
|
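// Usage sketch (assumed policy, illustrative names): a surface aimed at kids
// might check only the categories relevant to that audience rather than
// relying on the global `flagged` bit.
async function moderateForKidsApp(text: string) {
  return filterByCategory(text, ['sexual', 'sexual/minors', 'violence', 'violence/graphic']);
}
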
// =============================================================================
// LOGGING AND AUDIT TRAIL
// =============================================================================

interface ModerationLog {
  timestamp: string;
  content: string;
  flagged: boolean;
  categories: string[];
  scores: any;
  action: 'allowed' | 'blocked' | 'review';
}

async function moderateWithLogging(content: string): Promise<ModerationLog> {
  const moderation = await openai.moderations.create({
    model: 'omni-moderation-latest',
    input: content,
  });

  const result = moderation.results[0];

  const flaggedCategories = Object.keys(result.categories).filter(
    cat => result.categories[cat]
  );

  const log: ModerationLog = {
    timestamp: new Date().toISOString(),
    content: content.substring(0, 100), // Truncate for logging
    flagged: result.flagged,
    categories: flaggedCategories,
    scores: result.category_scores,
    action: result.flagged ? 'blocked' : 'allowed',
  };

  // In production: save to a database or logging service
  console.log('Moderation log:', JSON.stringify(log, null, 2));

  return log;
}

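// Persistence sketch (assumes a Node runtime where a local file is acceptable):
// append each log as one JSON line so the audit trail stays grep-able.
async function persistLog(log: ModerationLog) {
  const { appendFile } = await import('node:fs/promises');
  await appendFile('moderation-audit.jsonl', JSON.stringify(log) + '\n');
}
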
// =============================================================================
// USER FEEDBACK PATTERN
// =============================================================================

function getUserFriendlyMessage(result: any): string {
  if (!result.flagged) {
    return 'Content approved';
  }

  const flaggedCategories = Object.keys(result.categories).filter(
    cat => result.categories[cat]
  );

  // Don't reveal exact detection details
  if (flaggedCategories.some(cat => cat.includes('harm'))) { // matches the self-harm/* categories
    return 'Your content appears to contain concerning material. Please review our community guidelines.';
  }

  if (flaggedCategories.includes('harassment') || flaggedCategories.includes('hate')) {
    return 'Your content may be disrespectful or harmful to others. Please rephrase.';
  }

  if (flaggedCategories.includes('violence')) {
    return 'Your content contains violent themes that violate our policies.';
  }

  return 'Your content doesn\'t meet our community guidelines. Please revise and try again.';
}

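// Usage sketch: run moderation and surface only the user-safe message, keeping
// the raw category scores on the server side.
async function reviewAndRespond(text: string): Promise<string> {
  const moderation = await openai.moderations.create({
    model: 'omni-moderation-latest',
    input: text,
  });
  return getUserFriendlyMessage(moderation.results[0]);
}
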
// =============================================================================
// ERROR HANDLING
// =============================================================================

async function withErrorHandling(text: string) {
  try {
    const moderation = await openai.moderations.create({
      model: 'omni-moderation-latest',
      input: text,
    });

    return moderation.results[0];
  } catch (error: any) {
    if (error.status === 401) {
      console.error('Invalid API key');
    } else if (error.status === 429) {
      console.error('Rate limit exceeded - implement retry logic');
    } else if (error.status === 500) {
      console.error('OpenAI service error - fail closed and block content');
    } else {
      console.error('Unexpected error:', error.message);
    }

    throw error;
  }
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Moderation API Examples ===\n');

  // Example 1: Basic moderation
  console.log('1. Basic Moderation:');
  await basicModeration();
  console.log();

  // Example 2: All categories
  console.log('2. All Safety Categories:');
  await allCategories();
  console.log();

  // Example 3: Custom thresholds
  console.log('3. Custom Thresholds:');
  await withCustomThresholds('This is a test message');
  console.log();

  // Example 4: Batch moderation
  console.log('4. Batch Moderation:');
  await batchModeration();
  console.log();

  // Example 5: Production pattern
  console.log('5. Production UGC Moderation:');
  const decision = await moderateUserContent('Safe user comment');
  console.log('Decision:', decision);
  console.log();
}

// Run if executed directly (ESM-safe check: package.json sets "type": "module",
// so CommonJS require.main is not available here)
if (import.meta.url === `file://${process.argv[1]}`) {
  main().catch(console.error);
}

export {
  basicModeration,
  allCategories,
  withCustomThresholds,
  batchModeration,
  moderateUserContent,
  filterByCategory,
  moderateWithLogging,
  getUserFriendlyMessage,
  withErrorHandling,
};
23
templates/package.json
Normal file
@@ -0,0 +1,23 @@
{
  "name": "openai-api-examples",
  "version": "1.0.0",
  "description": "OpenAI API examples for Chat Completions, Embeddings, Images, Audio, and Moderation",
  "type": "module",
  "scripts": {
    "chat-basic": "tsx templates/chat-completion-basic.ts",
    "chat-nodejs": "tsx templates/chat-completion-nodejs.ts",
    "stream": "tsx templates/streaming-chat.ts",
    "functions": "tsx templates/function-calling.ts"
  },
  "dependencies": {
    "openai": "^6.7.0"
  },
  "devDependencies": {
    "@types/node": "^20.11.0",
    "tsx": "^4.7.0",
    "typescript": "^5.3.3"
  },
  "engines": {
    "node": ">=18.0.0"
  }
}
415
templates/rate-limit-handling.ts
Normal file
@@ -0,0 +1,415 @@
/**
 * OpenAI Rate Limit Handling - Production Patterns
 *
 * This template demonstrates:
 * - Exponential backoff
 * - Rate limit header monitoring
 * - Request queuing
 * - Retry logic
 * - Circuit breaker pattern
 * - Token bucket algorithm
 */

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// EXPONENTIAL BACKOFF
// =============================================================================

async function exponentialBackoff<T>(
  fn: () => Promise<T>,
  maxRetries: number = 3,
  baseDelay: number = 1000
): Promise<T> {
  for (let i = 0; i < maxRetries; i++) {
    try {
      return await fn();
    } catch (error: any) {
      // Only retry on rate limit errors
      if (error.status === 429 && i < maxRetries - 1) {
        const delay = baseDelay * Math.pow(2, i); // 1s, 2s, 4s
        console.log(`Rate limit hit. Retrying in ${delay}ms...`);

        await new Promise(resolve => setTimeout(resolve, delay));
        continue;
      }

      throw error;
    }
  }

  throw new Error('Max retries exceeded');
}

// Usage example
async function chatWithRetry() {
  return exponentialBackoff(async () => {
    return await openai.chat.completions.create({
      model: 'gpt-5',
      messages: [{ role: 'user', content: 'Hello!' }],
    });
  });
}

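// Variant sketch: "full jitter" (a delay drawn uniformly from [0, cap]) spreads
// retries from many concurrent clients better than fixed 1s/2s/4s steps.
function jitteredDelay(attempt: number, baseDelay: number = 1000): number {
  return Math.random() * baseDelay * Math.pow(2, attempt);
}
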
// =============================================================================
// RATE LIMIT HEADER MONITORING
// =============================================================================

interface RateLimitInfo {
  limitRequests: number;
  remainingRequests: number;
  resetRequests: string;
  limitTokens: number;
  remainingTokens: number;
  resetTokens: string;
}

async function checkRateLimits(): Promise<RateLimitInfo> {
  const response = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model: 'gpt-5',
      messages: [{ role: 'user', content: 'ping' }],
      max_tokens: 1,
    }),
  });

  const rateLimits: RateLimitInfo = {
    limitRequests: parseInt(response.headers.get('x-ratelimit-limit-requests') || '0'),
    remainingRequests: parseInt(response.headers.get('x-ratelimit-remaining-requests') || '0'),
    resetRequests: response.headers.get('x-ratelimit-reset-requests') || '',
    limitTokens: parseInt(response.headers.get('x-ratelimit-limit-tokens') || '0'),
    remainingTokens: parseInt(response.headers.get('x-ratelimit-remaining-tokens') || '0'),
    resetTokens: response.headers.get('x-ratelimit-reset-tokens') || '',
  };

  console.log('Rate limits:', rateLimits);

  return rateLimits;
}

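// Helper sketch: the reset headers arrive as short duration strings (observed
// values look like "1s", "250ms", or "6m0s" - format assumed, not guaranteed);
// converting them to milliseconds lets them feed a setTimeout directly.
function parseResetDuration(value: string): number {
  let ms = 0;
  for (const [, num, unit] of value.matchAll(/(\d+(?:\.\d+)?)(ms|s|m|h)/g)) {
    const n = parseFloat(num);
    ms += unit === 'ms' ? n : unit === 's' ? n * 1000 : unit === 'm' ? n * 60_000 : n * 3_600_000;
  }
  return ms;
}
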
// =============================================================================
// REQUEST QUEUE
// =============================================================================

class RequestQueue {
  private queue: Array<() => Promise<any>> = [];
  private processing = false;
  private requestsPerMinute: number;
  private lastRequestTime: number = 0;

  constructor(requestsPerMinute: number) {
    this.requestsPerMinute = requestsPerMinute;
  }

  async enqueue<T>(fn: () => Promise<T>): Promise<T> {
    return new Promise((resolve, reject) => {
      this.queue.push(async () => {
        try {
          const result = await fn();
          resolve(result);
        } catch (error) {
          reject(error);
        }
      });

      this.processQueue();
    });
  }

  private async processQueue() {
    if (this.processing || this.queue.length === 0) {
      return;
    }

    this.processing = true;

    while (this.queue.length > 0) {
      const now = Date.now();
      const minInterval = 60000 / this.requestsPerMinute;
      const timeSinceLastRequest = now - this.lastRequestTime;

      if (timeSinceLastRequest < minInterval) {
        await new Promise(resolve =>
          setTimeout(resolve, minInterval - timeSinceLastRequest)
        );
      }

      const fn = this.queue.shift();
      if (fn) {
        this.lastRequestTime = Date.now();
        await fn();
      }
    }

    this.processing = false;
  }
}

// Usage example
const queue = new RequestQueue(50); // 50 requests per minute

async function queuedRequest() {
  return queue.enqueue(async () => {
    return await openai.chat.completions.create({
      model: 'gpt-5',
      messages: [{ role: 'user', content: 'Hello!' }],
    });
  });
}

// =============================================================================
// CIRCUIT BREAKER
// =============================================================================

class CircuitBreaker {
  private failures = 0;
  private successCount = 0;
  private lastFailureTime = 0;
  private state: 'CLOSED' | 'OPEN' | 'HALF_OPEN' = 'CLOSED';

  constructor(
    private failureThreshold: number = 5,
    private successThreshold: number = 2,
    private timeout: number = 60000 // 1 minute
  ) {}

  async execute<T>(fn: () => Promise<T>): Promise<T> {
    if (this.state === 'OPEN') {
      const now = Date.now();
      if (now - this.lastFailureTime < this.timeout) {
        throw new Error('Circuit breaker is OPEN');
      }

      this.state = 'HALF_OPEN';
    }

    try {
      const result = await fn();

      if (this.state === 'HALF_OPEN') {
        this.successCount++;

        if (this.successCount >= this.successThreshold) {
          this.state = 'CLOSED';
          this.failures = 0;
          this.successCount = 0;
        }
      }

      return result;
    } catch (error) {
      this.failures++;
      this.lastFailureTime = Date.now();

      if (this.failures >= this.failureThreshold) {
        this.state = 'OPEN';
        console.error('Circuit breaker tripped to OPEN');
      }

      throw error;
    }
  }

  getState() {
    return this.state;
  }
}

// Usage example
const breaker = new CircuitBreaker(5, 2, 60000);

async function protectedRequest() {
  return breaker.execute(async () => {
    return await openai.chat.completions.create({
      model: 'gpt-5',
      messages: [{ role: 'user', content: 'Hello!' }],
    });
  });
}

// =============================================================================
// TOKEN BUCKET ALGORITHM
// =============================================================================

class TokenBucket {
  private tokens: number;
  private lastRefill: number;

  constructor(
    private capacity: number,
    private refillRate: number // tokens per second
  ) {
    this.tokens = capacity;
    this.lastRefill = Date.now();
  }

  private refill() {
    const now = Date.now();
    const elapsed = (now - this.lastRefill) / 1000;
    const tokensToAdd = elapsed * this.refillRate;

    this.tokens = Math.min(this.capacity, this.tokens + tokensToAdd);
    this.lastRefill = now;
  }

  async consume(tokens: number = 1): Promise<void> {
    this.refill();

    if (this.tokens >= tokens) {
      this.tokens -= tokens;
      return;
    }

    // Wait until enough tokens are available
    const deficit = tokens - this.tokens;
    const waitTime = (deficit / this.refillRate) * 1000;

    await new Promise(resolve => setTimeout(resolve, waitTime));

    // Credit the wait via refill() (which also advances lastRefill), then
    // consume; zeroing the balance without refilling would double-credit the
    // wait on the next call
    this.refill();
    this.tokens = Math.max(0, this.tokens - tokens);
  }
}

// Usage example
const bucket = new TokenBucket(10, 2); // 10 tokens, refill 2 per second

async function rateLimitedRequest() {
  await bucket.consume(1);

  return await openai.chat.completions.create({
    model: 'gpt-5',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
}

// =============================================================================
// COMBINED PRODUCTION PATTERN
// =============================================================================

class RateLimitedClient {
  private queue: RequestQueue;
  private breaker: CircuitBreaker;
  private bucket: TokenBucket;

  constructor() {
    this.queue = new RequestQueue(50); // 50 RPM
    this.breaker = new CircuitBreaker(5, 2, 60000);
    this.bucket = new TokenBucket(50, 1); // 50 tokens, 1 per second
  }

  async chatCompletion(params: any, maxRetries: number = 3) {
    return this.queue.enqueue(async () => {
      return exponentialBackoff(async () => {
        return this.breaker.execute(async () => {
          await this.bucket.consume(1);

          return await openai.chat.completions.create(params);
        });
      }, maxRetries);
    });
  }
}

// Usage
const client = new RateLimitedClient();

async function productionRequest() {
  return client.chatCompletion({
    model: 'gpt-5',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
}

// =============================================================================
// MONITORING AND LOGGING
// =============================================================================

interface RequestLog {
  timestamp: string;
  success: boolean;
  retries: number;
  error?: string;
  latency: number;
}

async function monitoredRequest(): Promise<RequestLog> {
  const startTime = Date.now();
  let retries = 0;

  try {
    const result = await exponentialBackoff(async () => {
      retries++;
      return await openai.chat.completions.create({
        model: 'gpt-5',
        messages: [{ role: 'user', content: 'Hello!' }],
      });
    });

    const latency = Date.now() - startTime;

    return {
      timestamp: new Date().toISOString(),
      success: true,
      retries: retries - 1,
      latency,
    };
  } catch (error: any) {
    const latency = Date.now() - startTime;

    return {
      timestamp: new Date().toISOString(),
      success: false,
      retries: retries - 1,
      error: error.message,
      latency,
    };
  }
}

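// Aggregation sketch: fold request logs into a success rate and median latency
// for dashboards or alerting.
function summarizeLogs(logs: RequestLog[]) {
  const succeeded = logs.filter(log => log.success).length;
  const latencies = logs.map(log => log.latency).sort((a, b) => a - b);
  return {
    successRate: logs.length > 0 ? succeeded / logs.length : 0,
    medianLatency: latencies[Math.floor(latencies.length / 2)] ?? 0,
  };
}
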
// =============================================================================
|
||||
// MAIN EXECUTION
|
||||
// =============================================================================
|
||||
|
||||
async function main() {
|
||||
console.log('=== OpenAI Rate Limit Handling Examples ===\n');
|
||||
|
||||
// Example 1: Exponential backoff
|
||||
console.log('1. Exponential Backoff:');
|
||||
await chatWithRetry();
|
||||
console.log('Request successful with retry logic');
|
||||
console.log();
|
||||
|
||||
// Example 2: Check rate limits
|
||||
console.log('2. Check Rate Limits:');
|
||||
await checkRateLimits();
|
||||
console.log();
|
||||
|
||||
// Example 3: Production pattern
|
||||
console.log('3. Production Rate-Limited Client:');
|
||||
await productionRequest();
|
||||
console.log('Request processed through production pipeline');
|
||||
console.log();
|
||||
}
|
||||
|
// Run if executed directly (ESM-safe check: package.json sets "type": "module",
// so CommonJS require.main is not available here)
if (import.meta.url === `file://${process.argv[1]}`) {
  main().catch(console.error);
}

export {
  exponentialBackoff,
  checkRateLimits,
  RequestQueue,
  CircuitBreaker,
  TokenBucket,
  RateLimitedClient,
  monitoredRequest,
};
38
templates/streaming-chat.ts
Normal file
@@ -0,0 +1,38 @@
// Streaming Chat Completion (Node.js SDK)
// Real-time token-by-token delivery for better UX

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

async function streamingChat() {
  console.log('Streaming response:\n');

  const stream = await openai.chat.completions.create({
    model: 'gpt-5',
    messages: [
      {
        role: 'system',
        content: 'You are a creative writer.'
      },
      {
        role: 'user',
        content: 'Write a short poem about coding'
      }
    ],
    stream: true,
    max_tokens: 200,
  });

  // Process stream chunks as they arrive
  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta?.content || '';
    process.stdout.write(content);
  }

  console.log('\n\nStream complete!');
}

streamingChat().catch(console.error);
86
templates/streaming-fetch.ts
Normal file
@@ -0,0 +1,86 @@
// Streaming Chat Completion (Fetch API - Cloudflare Workers)
// Server-Sent Events (SSE) parsing for edge runtimes

interface Env {
  OPENAI_API_KEY: string;
}

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    if (request.method !== 'POST') {
      return new Response('Method not allowed', { status: 405 });
    }

    const { message } = await request.json() as { message: string };

    // Call OpenAI with streaming enabled
    const response = await fetch('https://api.openai.com/v1/chat/completions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${env.OPENAI_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'gpt-5',
        messages: [
          { role: 'user', content: message }
        ],
        stream: true,
      }),
    });

    // Create a transformed stream for the client
    const { readable, writable } = new TransformStream();
    const writer = writable.getWriter();
    const encoder = new TextEncoder();

    // Process the upstream SSE stream in the background while the client
    // begins reading from `readable`
    (async () => {
      const reader = response.body?.getReader();
      const decoder = new TextDecoder();

      try {
        while (true) {
          const { done, value } = await reader!.read();
          if (done) break;

          const chunk = decoder.decode(value);
          const lines = chunk.split('\n').filter(line => line.trim() !== '');

          for (const line of lines) {
            if (line.startsWith('data: ')) {
              const data = line.slice(6);

              if (data === '[DONE]') {
                await writer.write(encoder.encode('data: [DONE]\n\n'));
                // Stop processing this chunk; the upstream reader reports done
                // on the next read
                break;
              }

              try {
                const json = JSON.parse(data);
                const content = json.choices[0]?.delta?.content || '';

                if (content) {
                  // Forward only the token text to the client
                  await writer.write(encoder.encode(`data: ${JSON.stringify({ content })}\n\n`));
                }
              } catch (e) {
                // Skip invalid or partial JSON lines
              }
            }
          }
        }
      } finally {
        await writer.close();
      }
    })();

    return new Response(readable, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      },
    });
  },
};
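
// Client-side sketch (hypothetical page wired to this Worker's route): consume
// the forwarded SSE stream from a browser with fetch + ReadableStream.
//
//   const res = await fetch('/chat', { method: 'POST', body: JSON.stringify({ message: 'Hi' }) });
//   const reader = res.body!.getReader();
//   const decoder = new TextDecoder();
//   while (true) {
//     const { done, value } = await reader.read();
//     if (done) break;
//     for (const line of decoder.decode(value).split('\n')) {
//       if (line.startsWith('data: ') && !line.includes('[DONE]')) {
//         const { content } = JSON.parse(line.slice(6));
//         outputEl.textContent += content; // outputEl: any DOM node of your choosing
//       }
//     }
//   }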
440
templates/structured-output.ts
Normal file
@@ -0,0 +1,440 @@
/**
 * OpenAI Structured Outputs - JSON Schema Examples
 *
 * This template demonstrates:
 * - JSON schema with strict mode
 * - Complex nested schemas
 * - Type-safe responses
 * - Validation patterns
 * - Common use cases (extraction, classification, formatting)
 */

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// BASIC STRUCTURED OUTPUT
// =============================================================================

async function basicStructuredOutput() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o', // Best for structured outputs
    messages: [
      { role: 'user', content: 'Generate a person profile' }
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'person_profile',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            name: { type: 'string' },
            age: { type: 'number' },
            email: { type: 'string' },
          },
          required: ['name', 'age', 'email'],
          additionalProperties: false,
        },
      },
    },
  });

  const person = JSON.parse(completion.choices[0].message.content!);
  console.log('Person:', person);

  return person;
}

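// Helper sketch (illustrative, not part of the SDK): a generic wrapper so call
// sites get a typed result from any strict JSON schema.
async function completeWithSchema<T>(
  prompt: string,
  name: string,
  schema: Record<string, unknown>
): Promise<T> {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: prompt }],
    response_format: {
      type: 'json_schema',
      json_schema: { name, strict: true, schema },
    },
  });
  return JSON.parse(completion.choices[0].message.content!) as T;
}
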
// =============================================================================
// COMPLEX NESTED SCHEMA
// =============================================================================

async function complexSchema() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      { role: 'user', content: 'Generate a company organizational structure' }
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'org_structure',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            company: { type: 'string' },
            founded: { type: 'number' },
            departments: {
              type: 'array',
              items: {
                type: 'object',
                properties: {
                  name: { type: 'string' },
                  head: { type: 'string' },
                  employees: {
                    type: 'array',
                    items: {
                      type: 'object',
                      properties: {
                        name: { type: 'string' },
                        role: { type: 'string' },
                        years: { type: 'number' },
                      },
                      required: ['name', 'role', 'years'],
                      additionalProperties: false,
                    },
                  },
                },
                required: ['name', 'head', 'employees'],
                additionalProperties: false,
              },
            },
          },
          required: ['company', 'founded', 'departments'],
          additionalProperties: false,
        },
      },
    },
  });

  const org = JSON.parse(completion.choices[0].message.content!);
  console.log('Organization:', JSON.stringify(org, null, 2));

  return org;
}

// =============================================================================
// DATA EXTRACTION
// =============================================================================

async function extractData() {
  const text = `
    John Doe is a 35-year-old software engineer living in San Francisco.
    He works at TechCorp and has been there for 5 years.
    His email is john.doe@example.com and his phone is (555) 123-4567.
  `;

  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'system',
        content: 'Extract structured information from the provided text.',
      },
      {
        role: 'user',
        content: text,
      },
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'extracted_info',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            name: { type: 'string' },
            age: { type: 'number' },
            occupation: { type: 'string' },
            location: { type: 'string' },
            company: { type: 'string' },
            tenure_years: { type: 'number' },
            contact: {
              type: 'object',
              properties: {
                email: { type: 'string' },
                phone: { type: 'string' },
              },
              required: ['email', 'phone'],
              additionalProperties: false,
            },
          },
          required: ['name', 'age', 'occupation', 'location', 'company', 'tenure_years', 'contact'],
          additionalProperties: false,
        },
      },
    },
  });

  const extracted = JSON.parse(completion.choices[0].message.content!);
  console.log('Extracted:', JSON.stringify(extracted, null, 2));

  return extracted;
}

// =============================================================================
// CLASSIFICATION
// =============================================================================

async function classifyText() {
  const text = 'This product is absolutely terrible. It broke after one day of use. Very disappointed!';

  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'system',
        content: 'Classify the sentiment and extract key information from product reviews.',
      },
      {
        role: 'user',
        content: text,
      },
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'review_classification',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            sentiment: {
              type: 'string',
              enum: ['positive', 'negative', 'neutral'],
            },
            confidence: { type: 'number' },
            category: {
              type: 'string',
              enum: ['product_quality', 'customer_service', 'shipping', 'pricing', 'other'],
            },
            issues: {
              type: 'array',
              items: { type: 'string' },
            },
            rating_estimate: { type: 'number' },
          },
          required: ['sentiment', 'confidence', 'category', 'issues', 'rating_estimate'],
          additionalProperties: false,
        },
      },
    },
  });

  const classification = JSON.parse(completion.choices[0].message.content!);
  console.log('Classification:', JSON.stringify(classification, null, 2));

  return classification;
}

// =============================================================================
// SIMPLE JSON MODE (Without Strict Schema)
// =============================================================================

async function simpleJsonMode() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-5',
    messages: [
      // Note: json_object mode requires the word "JSON" to appear in the prompt
      { role: 'user', content: 'List 3 programming languages and their use cases as JSON' }
    ],
    response_format: { type: 'json_object' },
  });

  const data = JSON.parse(completion.choices[0].message.content!);
  console.log('JSON output:', data);

  return data;
}

// =============================================================================
// ENUM VALUES
// =============================================================================

async function withEnums() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      { role: 'user', content: 'Categorize this as a bug report: The app crashes on startup' }
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'issue_categorization',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            type: {
              type: 'string',
              enum: ['bug', 'feature_request', 'question', 'documentation'],
            },
            severity: {
              type: 'string',
              enum: ['critical', 'high', 'medium', 'low'],
            },
            component: {
              type: 'string',
              enum: ['frontend', 'backend', 'database', 'infrastructure', 'unknown'],
            },
          },
          required: ['type', 'severity', 'component'],
          additionalProperties: false,
        },
      },
    },
  });

  const categorization = JSON.parse(completion.choices[0].message.content!);
  console.log('Categorization:', categorization);

  return categorization;
}

// =============================================================================
// VALIDATION EXAMPLE
// =============================================================================

// With strict mode the API already guarantees the declared shape, so this
// shallow check is a last line of defense against truncated or malformed output
function validateSchema<T>(data: any, expectedFields: string[]): T {
  for (const field of expectedFields) {
    if (!(field in data)) {
      throw new Error(`Missing required field: ${field}`);
    }
  }

  return data as T;
}

async function withValidation() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      { role: 'user', content: 'Generate a user profile with name and email' }
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'user_profile',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            name: { type: 'string' },
            email: { type: 'string' },
          },
          required: ['name', 'email'],
          additionalProperties: false,
        },
      },
    },
  });

  const raw = JSON.parse(completion.choices[0].message.content!);

  // Validate before using
  interface UserProfile {
    name: string;
    email: string;
  }

  const validated = validateSchema<UserProfile>(raw, ['name', 'email']);
  console.log('Validated user:', validated);

  return validated;
}

// =============================================================================
// BATCH EXTRACTION
// =============================================================================

async function batchExtraction(texts: string[]) {
  const results = [];

  for (const text of texts) {
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [
        { role: 'system', content: 'Extract key information as structured data' },
        { role: 'user', content: text },
      ],
      response_format: {
        type: 'json_schema',
        json_schema: {
          name: 'extracted_data',
          strict: true,
          schema: {
            type: 'object',
            properties: {
              summary: { type: 'string' },
              key_points: {
                type: 'array',
                items: { type: 'string' },
              },
            },
            required: ['summary', 'key_points'],
            additionalProperties: false,
          },
        },
      },
    });

    const extracted = JSON.parse(completion.choices[0].message.content!);
    results.push({ text, extracted });

    // Simple rate limit protection between sequential requests
    await new Promise(resolve => setTimeout(resolve, 1000));
  }

  console.log(`Extracted ${results.length} items`);
  return results;
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Structured Outputs Examples ===\n');

  // Example 1: Basic
  console.log('1. Basic Structured Output:');
  await basicStructuredOutput();
  console.log();

  // Example 2: Complex schema
  console.log('2. Complex Nested Schema:');
  await complexSchema();
  console.log();

  // Example 3: Data extraction
  console.log('3. Data Extraction:');
  await extractData();
  console.log();

  // Example 4: Classification
  console.log('4. Text Classification:');
  await classifyText();
  console.log();

  // Example 5: Simple JSON mode
  console.log('5. Simple JSON Mode:');
  await simpleJsonMode();
  console.log();

  // Example 6: Enums
  console.log('6. Enum Values:');
  await withEnums();
  console.log();
}

// Run if executed directly (ESM-safe check: package.json sets "type": "module",
// so CommonJS require.main is not available here)
if (import.meta.url === `file://${process.argv[1]}`) {
  main().catch(console.error);
}

export {
  basicStructuredOutput,
  complexSchema,
  extractData,
  classifyText,
  simpleJsonMode,
  withEnums,
  withValidation,
  batchExtraction,
};
372
templates/text-to-speech.ts
Normal file
@@ -0,0 +1,372 @@
/**
 * OpenAI Audio API - Text-to-Speech Examples
 *
 * This template demonstrates:
 * - Basic TTS with all 11 voices
 * - Different models (tts-1, tts-1-hd, gpt-4o-mini-tts)
 * - Voice instructions (gpt-4o-mini-tts only)
 * - Speed control
 * - Different audio formats
 * - Streaming TTS
 */

import OpenAI from 'openai';
import fs from 'fs';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// BASIC TTS
// =============================================================================

async function basicTTS() {
  const mp3 = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: 'The quick brown fox jumped over the lazy dog.',
  });

  const buffer = Buffer.from(await mp3.arrayBuffer());
  fs.writeFileSync('speech.mp3', buffer);

  console.log('Speech saved to: speech.mp3');
}

// =============================================================================
// ALL 11 VOICES
// =============================================================================

async function allVoices() {
  const voices = [
    'alloy',   // Neutral, balanced
    'ash',     // Clear, professional
    'ballad',  // Warm, storytelling
    'coral',   // Soft, friendly
    'echo',    // Calm, measured
    'fable',   // Expressive, narrative
    'onyx',    // Deep, authoritative
    'nova',    // Bright, energetic
    'sage',    // Wise, thoughtful
    'shimmer', // Gentle, soothing
    'verse',   // Poetic, rhythmic
  ] as const;

  const text = 'Hello, this is a voice sample.';

  for (const voice of voices) {
    console.log(`Generating ${voice} voice...`);

    const mp3 = await openai.audio.speech.create({
      model: 'tts-1',
      voice,
      input: text,
    });

    const buffer = Buffer.from(await mp3.arrayBuffer());
    fs.writeFileSync(`speech-${voice}.mp3`, buffer);

    // Wait 500ms between requests
    await new Promise(resolve => setTimeout(resolve, 500));
  }

  console.log('All voice samples generated!');
}

// =============================================================================
// MODEL COMPARISON
// =============================================================================

async function modelComparison() {
  const text = 'This is a test of different TTS models.';

  // tts-1 (standard quality, fastest)
  console.log('Generating with tts-1...');
  const tts1 = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'nova',
    input: text,
  });

  const buffer1 = Buffer.from(await tts1.arrayBuffer());
  fs.writeFileSync('tts-1-output.mp3', buffer1);

  // tts-1-hd (high quality)
  console.log('Generating with tts-1-hd...');
  const tts1Hd = await openai.audio.speech.create({
    model: 'tts-1-hd',
    voice: 'nova',
    input: text,
  });

  const buffer2 = Buffer.from(await tts1Hd.arrayBuffer());
  fs.writeFileSync('tts-1-hd-output.mp3', buffer2);

  console.log('Model comparison complete!');
  console.log('tts-1 file size:', buffer1.length, 'bytes');
  console.log('tts-1-hd file size:', buffer2.length, 'bytes');
}

// =============================================================================
// VOICE INSTRUCTIONS (gpt-4o-mini-tts)
// =============================================================================

async function voiceInstructions() {
  // Example 1: Calm and professional
  const professional = await openai.audio.speech.create({
    model: 'gpt-4o-mini-tts',
    voice: 'nova',
    input: 'Welcome to our customer support line. How can I help you today?',
    instructions: 'Speak in a calm, professional, and friendly tone suitable for customer service.',
  });

  const buffer1 = Buffer.from(await professional.arrayBuffer());
  fs.writeFileSync('professional-tone.mp3', buffer1);

  // Example 2: Energetic and enthusiastic
  const energetic = await openai.audio.speech.create({
    model: 'gpt-4o-mini-tts',
    voice: 'nova',
    input: 'Get ready for the biggest sale of the year! Don\'t miss out!',
    instructions: 'Use an enthusiastic, energetic tone perfect for marketing and advertisements.',
  });

  const buffer2 = Buffer.from(await energetic.arrayBuffer());
  fs.writeFileSync('energetic-tone.mp3', buffer2);

  // Example 3: Calm and soothing
  const soothing = await openai.audio.speech.create({
    model: 'gpt-4o-mini-tts',
    voice: 'shimmer',
    input: 'Take a deep breath. Relax your shoulders. Let all tension fade away.',
    instructions: 'Adopt a calm, soothing voice suitable for meditation and relaxation guidance.',
  });

  const buffer3 = Buffer.from(await soothing.arrayBuffer());
  fs.writeFileSync('soothing-tone.mp3', buffer3);

  console.log('Voice instruction examples generated!');
}

// =============================================================================
// SPEED CONTROL
// =============================================================================

async function speedControl() {
  const text = 'This sentence will be spoken at different speeds.';

  const speeds = [0.5, 0.75, 1.0, 1.25, 1.5, 2.0];

  for (const speed of speeds) {
    console.log(`Generating at ${speed}x speed...`);

    const mp3 = await openai.audio.speech.create({
      model: 'tts-1',
      voice: 'alloy',
      input: text,
      speed,
    });

    const buffer = Buffer.from(await mp3.arrayBuffer());
    fs.writeFileSync(`speech-${speed}x.mp3`, buffer);

    await new Promise(resolve => setTimeout(resolve, 500));
  }

  console.log('Speed variations generated!');
}

// =============================================================================
// DIFFERENT AUDIO FORMATS
// =============================================================================

async function differentFormats() {
  const text = 'Testing different audio formats.';

  const formats = ['mp3', 'opus', 'aac', 'flac', 'wav', 'pcm'] as const;

  for (const format of formats) {
    console.log(`Generating ${format} format...`);

    const audio = await openai.audio.speech.create({
      model: 'tts-1',
      voice: 'alloy',
      input: text,
      response_format: format,
    });

    const buffer = Buffer.from(await audio.arrayBuffer());
    const extension = format === 'pcm' ? 'raw' : format;
    fs.writeFileSync(`speech.${extension}`, buffer);

    console.log(`  ${format}: ${buffer.length} bytes`);

    await new Promise(resolve => setTimeout(resolve, 500));
  }

  console.log('All format examples generated!');
}

// =============================================================================
// LONG TEXT HANDLING
// =============================================================================

async function longText() {
  const longText = `
    This is a longer piece of text that demonstrates how TTS handles extended content.
    The model can process up to 4096 characters in a single request.
    You can use this for narrating articles, generating audiobooks, or creating voice-overs.
    The speech will maintain natural pacing and intonation throughout.
  `.trim();

  const mp3 = await openai.audio.speech.create({
    model: 'tts-1-hd',
    voice: 'fable', // Good for narration
    input: longText,
  });

  const buffer = Buffer.from(await mp3.arrayBuffer());
  fs.writeFileSync('long-narration.mp3', buffer);

  console.log('Long narration generated!');
  console.log('Text length:', longText.length, 'characters');
  console.log('Audio size:', buffer.length, 'bytes');
}

// =============================================================================
// STREAMING TTS (Server-Sent Events)
// =============================================================================

async function streamingTTS() {
  const response = await fetch('https://api.openai.com/v1/audio/speech', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model: 'gpt-4o-mini-tts',
      voice: 'nova',
      input: 'This is a streaming audio example. The audio will be generated and delivered in chunks.',
      stream_format: 'sse', // Server-Sent Events: JSON events, not raw audio bytes
    }),
  });

  console.log('Streaming TTS...');

  // With stream_format 'sse' the body is a text stream of `data: {...}` events
  // whose payload carries base64-encoded audio (event shape assumed here), so
  // the events must be decoded rather than written to disk as-is
  const reader = response.body?.getReader();
  const decoder = new TextDecoder();
  const audioParts: Buffer[] = [];
  let buffered = '';

  while (true) {
    const { done, value } = await reader!.read();
    if (done) break;

    buffered += decoder.decode(value, { stream: true });
    const lines = buffered.split('\n');
    buffered = lines.pop() ?? ''; // keep any partial line for the next chunk

    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const data = line.slice(6);
      if (data === '[DONE]') continue;

      try {
        const event = JSON.parse(data);
        if (event.audio) {
          audioParts.push(Buffer.from(event.audio, 'base64'));
          console.log('Received audio chunk:', event.audio.length, 'base64 chars');
        }
      } catch {
        // Skip keep-alive or partial lines
      }
    }
  }

  fs.writeFileSync('streaming-output.mp3', Buffer.concat(audioParts));
  console.log('Streaming TTS saved to: streaming-output.mp3');
}

// =============================================================================
// ERROR HANDLING
// =============================================================================

async function withErrorHandling() {
  try {
    const mp3 = await openai.audio.speech.create({
      model: 'tts-1',
      voice: 'alloy',
      input: 'Hello world',
    });

    const buffer = Buffer.from(await mp3.arrayBuffer());
    fs.writeFileSync('output.mp3', buffer);

    return 'output.mp3';
  } catch (error: any) {
    if (error.message.includes('input too long')) {
      console.error('Text exceeds the 4096-character limit');
    } else if (error.message.includes('invalid voice')) {
      console.error('Voice not recognized - use one of the 11 supported voices');
    } else if (error.status === 429) {
      console.error('Rate limit exceeded - wait and retry');
    } else {
      console.error('TTS error:', error.message);
    }

    throw error;
  }
}

// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Text-to-Speech Examples ===\n');

  // Example 1: Basic TTS
  console.log('1. Basic TTS:');
  await basicTTS();
  console.log();

  // Example 2: All voices (uncomment to generate all)
  // console.log('2. All 11 Voices:');
  // await allVoices();
  // console.log();

  // Example 3: Model comparison
  console.log('3. Model Comparison:');
  await modelComparison();
  console.log();

  // Example 4: Voice instructions
  console.log('4. Voice Instructions (gpt-4o-mini-tts):');
  await voiceInstructions();
  console.log();

  // Example 5: Speed control
  console.log('5. Speed Control:');
  await speedControl();
  console.log();

  // Example 6: Different formats
  console.log('6. Different Audio Formats:');
  await differentFormats();
  console.log();

  // Example 7: Long text
  console.log('7. Long Text Narration:');
  await longText();
  console.log();
}

// Run if executed directly (ESM-safe check: package.json sets "type": "module",
// so CommonJS require.main is not available here)
if (import.meta.url === `file://${process.argv[1]}`) {
  main().catch(console.error);
}

export {
  basicTTS,
  allVoices,
  modelComparison,
  voiceInstructions,
  speedControl,
  differentFormats,
  longText,
  streamingTTS,
  withErrorHandling,
};
443
templates/vision-gpt4o.ts
Normal file
@@ -0,0 +1,443 @@
/**
 * OpenAI Vision API - GPT-4o Image Understanding
 *
 * This template demonstrates:
 * - Image via URL
 * - Image via base64
 * - Multiple images in one request
 * - Detailed image analysis
 * - OCR / text extraction
 * - Object detection
 */

import OpenAI from 'openai';
import fs from 'fs';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// =============================================================================
// IMAGE VIA URL
// =============================================================================

async function imageViaUrl() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'What is in this image?' },
          {
            type: 'image_url',
            image_url: {
              url: 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg',
            },
          },
        ],
      },
    ],
  });

  console.log('Image description:', completion.choices[0].message.content);

  return completion.choices[0].message.content;
}

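// Option sketch: the image_url object also accepts a `detail` hint
// ('low' | 'high' | 'auto') to trade cost for fidelity; 'low' uses a small
// fixed token budget, 'high' tiles the image for closer inspection.
async function imageWithLowDetail(url: string) {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Briefly, what is in this image?' },
          { type: 'image_url', image_url: { url, detail: 'low' } },
        ],
      },
    ],
  });
  return completion.choices[0].message.content;
}
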
|
||||
// =============================================================================
|
||||
// IMAGE VIA BASE64
|
||||
// =============================================================================
|
||||
|
||||
async function imageViaBase64() {
|
||||
// Read image file
|
||||
const imageBuffer = fs.readFileSync('./image.jpg');
|
||||
const base64Image = imageBuffer.toString('base64');
|
||||
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Describe this image in detail' },
|
||||
{
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: `data:image/jpeg;base64,${base64Image}`,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
console.log('Description:', completion.choices[0].message.content);
|
||||
|
||||
return completion.choices[0].message.content;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// MULTIPLE IMAGES
|
||||
// =============================================================================
|
||||
|
||||
async function multipleImages() {
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Compare these two images. What are the differences?' },
|
||||
{
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: 'https://example.com/image1.jpg',
|
||||
},
|
||||
},
|
||||
{
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: 'https://example.com/image2.jpg',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
console.log('Comparison:', completion.choices[0].message.content);
|
||||
|
||||
return completion.choices[0].message.content;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// DETAILED IMAGE ANALYSIS
|
||||
// =============================================================================
|
||||
|
||||
async function detailedAnalysis(imageUrl: string) {
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
content: 'You are an expert image analyst. Provide detailed, structured analysis of images.',
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Analyze this image in detail. Include:
|
||||
1. Main subject/objects
|
||||
2. Colors and composition
|
||||
3. Lighting and mood
|
||||
4. Background elements
|
||||
5. Any text visible
|
||||
6. Estimated context/setting`,
|
||||
},
|
||||
{
|
||||
type: 'image_url',
|
||||
image_url: { url: imageUrl },
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
console.log('Detailed analysis:', completion.choices[0].message.content);
|
||||
|
||||
return completion.choices[0].message.content;
|
||||
}
|
||||
|
||||
// =============================================================================
// OCR / TEXT EXTRACTION
// =============================================================================

async function extractText(imageUrl: string) {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Extract all text visible in this image' },
          {
            type: 'image_url',
            image_url: { url: imageUrl },
          },
        ],
      },
    ],
  });

  console.log('Extracted text:', completion.choices[0].message.content);

  return completion.choices[0].message.content;
}

// =============================================================================
// OBJECT DETECTION
// =============================================================================

async function detectObjects(imageUrl: string) {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'List all objects visible in this image with their approximate locations' },
          {
            type: 'image_url',
            image_url: { url: imageUrl },
          },
        ],
      },
    ],
  });

  console.log('Objects detected:', completion.choices[0].message.content);

  return completion.choices[0].message.content;
}

// =============================================================================
// IMAGE CLASSIFICATION
// =============================================================================

async function classifyImage(imageUrl: string) {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'text',
            text: 'Classify this image into categories: nature, urban, people, objects, abstract, other',
          },
          {
            type: 'image_url',
            image_url: { url: imageUrl },
          },
        ],
      },
    ],
  });

  console.log('Classification:', completion.choices[0].message.content);

  return completion.choices[0].message.content;
}

// =============================================================================
// STRUCTURED OUTPUT WITH VISION
// =============================================================================

async function structuredVisionOutput(imageUrl: string) {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Analyze this image and return structured data' },
          {
            type: 'image_url',
            image_url: { url: imageUrl },
          },
        ],
      },
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'image_analysis',
        strict: true,
        schema: {
          type: 'object',
          properties: {
            main_subject: { type: 'string' },
            objects: {
              type: 'array',
              items: { type: 'string' },
            },
            colors: {
              type: 'array',
              items: { type: 'string' },
            },
            mood: { type: 'string' },
            setting: { type: 'string' },
            has_text: { type: 'boolean' },
          },
          required: ['main_subject', 'objects', 'colors', 'mood', 'setting', 'has_text'],
          additionalProperties: false,
        },
      },
    },
  });

  const analysis = JSON.parse(completion.choices[0].message.content!);
  console.log('Structured analysis:', JSON.stringify(analysis, null, 2));

  return analysis;
}

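// A hand-written TypeScript shape mirroring the JSON schema above (a
// convenience assumption, kept in sync manually) lets callers avoid `any`:

interface ImageAnalysis {
  main_subject: string;
  objects: string[];
  colors: string[];
  mood: string;
  setting: string;
  has_text: boolean;
}

// Usage: const analysis = JSON.parse(content!) as ImageAnalysis;
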
// =============================================================================
// MULTI-TURN CONVERSATION WITH VISION
// =============================================================================

async function conversationWithVision() {
  const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this image?' },
        {
          type: 'image_url',
          image_url: {
            url: 'https://example.com/image.jpg',
          },
        },
      ],
    },
  ];

  // First turn
  const response1 = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages,
  });

  console.log('Turn 1:', response1.choices[0].message.content);

  // Append the assistant reply so the next turn has full context
  messages.push({
    role: 'assistant',
    content: response1.choices[0].message.content,
  });

  // Follow-up question
  messages.push({
    role: 'user',
    content: 'Can you describe the colors in more detail?',
  });

  const response2 = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages,
  });

  console.log('Turn 2:', response2.choices[0].message.content);

  return messages;
}

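// Note: each follow-up call resends the full message history, including the
// image, so every turn pays the image's token cost again. For long
// conversations it can be cheaper to swap the image part for a short text
// summary after the first turn.
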
// =============================================================================
// BATCH IMAGE ANALYSIS
// =============================================================================

async function batchAnalysis(imageUrls: string[]) {
  const results: { url: string; description: string | null }[] = [];

  for (const url of imageUrls) {
    console.log(`Analyzing: ${url}`);

    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Briefly describe this image' },
            { type: 'image_url', image_url: { url } },
          ],
        },
      ],
    });

    results.push({
      url,
      description: completion.choices[0].message.content,
    });

    // Rate limit protection: pause between requests
    await new Promise(resolve => setTimeout(resolve, 1000));
  }

  console.log(`Analyzed ${results.length} images`);
  return results;
}

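// =============================================================================
// CONCURRENT BATCH ANALYSIS (SUPPLEMENTARY SKETCH)
// =============================================================================

// The sequential loop above is the safest default. If your rate limits allow
// some parallelism, processing the URLs in chunks keeps at most `chunkSize`
// requests in flight at once. A minimal sketch; the default chunk size of 3 is
// an assumption to tune against your own account's limits.

async function batchAnalysisConcurrent(imageUrls: string[], chunkSize = 3) {
  const results: { url: string; description: string | null }[] = [];

  for (let i = 0; i < imageUrls.length; i += chunkSize) {
    const chunk = imageUrls.slice(i, i + chunkSize);

    // Fire off one request per URL in the chunk and wait for all of them
    const chunkResults = await Promise.all(
      chunk.map(async url => {
        const completion = await openai.chat.completions.create({
          model: 'gpt-4o',
          messages: [
            {
              role: 'user',
              content: [
                { type: 'text', text: 'Briefly describe this image' },
                { type: 'image_url', image_url: { url } },
              ],
            },
          ],
        });

        return { url, description: completion.choices[0].message.content };
      }),
    );

    results.push(...chunkResults);
  }

  return results;
}
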
// =============================================================================
// ERROR HANDLING
// =============================================================================

async function withErrorHandling(imageUrl: string) {
  try {
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'What is in this image?' },
            { type: 'image_url', image_url: { url: imageUrl } },
          ],
        },
      ],
    });

    return completion.choices[0].message.content;
  } catch (error: any) {
    // Guard against errors without a message (e.g. network failures), and
    // check the HTTP status first since it is more reliable than message text
    const message = error?.message ?? '';

    if (error?.status === 429) {
      console.error('Rate limit exceeded');
    } else if (message.includes('invalid image')) {
      console.error('Image URL is invalid or inaccessible');
    } else if (message.includes('base64')) {
      console.error('Base64 encoding error');
    } else {
      console.error('Vision API error:', message);
    }

    throw error;
  }
}

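// =============================================================================
// RETRY WITH BACKOFF (SUPPLEMENTARY SKETCH)
// =============================================================================

// Transient failures (429s, 5xx, network blips) are often worth retrying. A
// minimal generic wrapper with exponential backoff; the attempt count and
// delays are assumptions, not API requirements. The SDK also has a built-in
// `maxRetries` client option, so this is only needed for custom policies.

async function withRetry<T>(fn: () => Promise<T>, maxAttempts = 3): Promise<T> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await fn();
    } catch (error: any) {
      // Retry rate limits, server errors, and errors with no HTTP status
      // (network failures); rethrow client errors immediately
      const status = error?.status;
      const retryable = status === 429 || status === undefined || status >= 500;

      if (!retryable || attempt === maxAttempts) throw error;

      const delayMs = 1000 * 2 ** (attempt - 1);
      console.warn(`Attempt ${attempt} failed, retrying in ${delayMs}ms...`);
      await new Promise(resolve => setTimeout(resolve, delayMs));
    }
  }

  // Unreachable, but satisfies TypeScript's return-type analysis
  throw new Error('withRetry: exhausted attempts');
}

// Usage: const text = await withRetry(() => withErrorHandling(imageUrl));
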
// =============================================================================
// MAIN EXECUTION
// =============================================================================

async function main() {
  console.log('=== OpenAI Vision (GPT-4o) Examples ===\n');

  // Example 1: Image via URL
  console.log('1. Image via URL:');
  await imageViaUrl();
  console.log();

  // Example 2: Image via base64 (uncomment when you have image.jpg)
  // console.log('2. Image via Base64:');
  // await imageViaBase64();
  // console.log();

  // Example 3: Multiple images
  // console.log('3. Multiple Images:');
  // await multipleImages();
  // console.log();
}

// Run if executed directly
if (require.main === module) {
  main().catch(console.error);
}

export {
  imageViaUrl,
  imageViaBase64,
  multipleImages,
  detailedAnalysis,
  extractText,
  detectObjects,
  classifyImage,
  structuredVisionOutput,
  conversationWithVision,
  batchAnalysis,
  withErrorHandling,
};