Initial commit

This commit is contained in:
Zhongwei Li
2025-11-30 08:25:17 +08:00
commit 07f3f3c71c
22 changed files with 5007 additions and 0 deletions

View File

@@ -0,0 +1,273 @@
/**
* Background Mode Example
*
* Demonstrates long-running tasks with background mode (up to 10 minutes).
* Standard mode timeout: 60 seconds
* Background mode timeout: 10 minutes
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/**
 * Basic background-mode flow: start a long-running task, then poll until
 * it reaches a terminal state and print the result or failure.
 */
async function basicBackgroundMode() {
  console.log('=== Basic Background Mode ===\n');
  // Start background task
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Analyze this 500-page document and provide a comprehensive summary',
    background: true, // ✅ Extended timeout
  });
  console.log('Task started:', response.id);
  console.log('Status:', response.status); // "queued" or "in_progress"
  // Poll for completion. Background responses may start in the "queued"
  // state before moving to "in_progress"; the original loop only handled
  // "in_progress" and would exit immediately on a queued task.
  let result = await openai.responses.retrieve(response.id);
  while (result.status === 'queued' || result.status === 'in_progress') {
    console.log('Still processing...');
    await new Promise((resolve) => setTimeout(resolve, 5000)); // Check every 5 seconds
    result = await openai.responses.retrieve(response.id);
  }
  if (result.status === 'completed') {
    console.log('\nCompleted!');
    console.log('Result:', result.output_text);
  } else if (result.status === 'failed') {
    console.error('Task failed:', result.error);
  }
}
/**
 * Background mode combined with code_interpreter: starts a long data
 * analysis and polls with a per-check progress counter.
 */
async function backgroundWithCodeInterpreter() {
  console.log('=== Background Mode + Code Interpreter ===\n');
  // Long-running data analysis
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Process this large dataset and generate detailed statistical analysis',
    background: true,
    tools: [{ type: 'code_interpreter' }],
  });
  console.log('Analysis started:', response.id);
  // Poll with progress updates. Treat "queued" as non-terminal too —
  // background tasks can sit queued before they become "in_progress".
  let checks = 0;
  let result = await openai.responses.retrieve(response.id);
  while (result.status === 'queued' || result.status === 'in_progress') {
    checks++;
    console.log(`Check ${checks}: Still processing...`);
    await new Promise((resolve) => setTimeout(resolve, 10000)); // Check every 10 seconds
    result = await openai.responses.retrieve(response.id);
  }
  if (result.status === 'completed') {
    console.log(`\nCompleted after ${checks} checks`);
    console.log('Analysis:', result.output_text);
  }
}
/**
 * Uploads a document, then runs a long file-analysis task in background
 * mode and waits for it via the shared waitForCompletion helper.
 */
async function backgroundWithFileSearch() {
  console.log('=== Background Mode + File Search ===\n');

  // Upload large document
  const uploadedFile = await openai.files.create({
    file: Buffer.from('Large document content...'),
    purpose: 'assistants',
  });

  // Kick off the long-running analysis against the uploaded file
  const task = await openai.responses.create({
    model: 'gpt-5',
    input: 'Read this entire document and extract all key insights, metrics, and action items',
    background: true,
    tools: [{ type: 'file_search', file_ids: [uploadedFile.id] }],
  });
  console.log('File analysis started:', task.id);

  // Block until the task reaches a terminal state
  const completed = await waitForCompletion(task.id);
  console.log('Insights:', completed.output_text);
}
/** Long-running web research executed in background mode. */
async function backgroundWithWebSearch() {
  console.log('=== Background Mode + Web Search ===\n');

  const task = await openai.responses.create({
    model: 'gpt-5',
    input: 'Research the top 50 AI companies and create a comprehensive comparison report',
    background: true,
    tools: [{ type: 'web_search' }],
  });
  console.log('Research started:', task.id);

  const completed = await waitForCompletion(task.id);
  console.log('Report:', completed.output_text);
}
/**
 * Starts three independent background tasks concurrently and waits for
 * all of them to finish.
 */
async function multipleBackgroundTasks() {
  console.log('=== Multiple Background Tasks ===\n');
  // Start multiple tasks in parallel
  const task1 = openai.responses.create({
    model: 'gpt-5',
    input: 'Analyze Q1 financial data',
    background: true,
    tools: [{ type: 'code_interpreter' }],
  });
  const task2 = openai.responses.create({
    model: 'gpt-5',
    input: 'Research competitor landscape',
    background: true,
    tools: [{ type: 'web_search' }],
  });
  const task3 = openai.responses.create({
    model: 'gpt-5',
    input: 'Summarize customer feedback documents',
    background: true,
    tools: [{ type: 'file_search', file_ids: ['file_123'] }],
  });
  // Wait for all creations
  const [response1, response2, response3] = await Promise.all([task1, task2, task3]);
  console.log('All tasks started:');
  console.log('Task 1:', response1.id);
  console.log('Task 2:', response2.id);
  console.log('Task 3:', response3.id);
  // Wait for completion — the three polls are independent, so run them
  // concurrently instead of serially (the original awaited each in turn,
  // making total wait time the sum rather than the max).
  const [result1, result2, result3] = await Promise.all([
    waitForCompletion(response1.id),
    waitForCompletion(response2.id),
    waitForCompletion(response3.id),
  ]);
  console.log('\nAll tasks completed!');
  console.log('Q1 Analysis:', result1.output_text);
  console.log('Competitor Research:', result2.output_text);
  console.log('Customer Feedback:', result3.output_text);
}
/**
 * Polls a background task while logging status transitions and any
 * metadata attached to the response.
 */
async function backgroundWithStatusTracking() {
  console.log('=== Background Mode with Status Tracking ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Complex multi-step research task',
    background: true,
  });
  console.log('Task ID:', response.id);
  // Track status with detailed logging. Poll on every non-terminal state
  // (e.g. queued → in_progress) so transitions are actually observable;
  // the original looped only while status === 'in_progress', which made
  // the "status changed" branch unable to fire more than once.
  const terminalStates = ['completed', 'failed', 'cancelled', 'incomplete'];
  let previousStatus = '';
  let result = await openai.responses.retrieve(response.id);
  while (!terminalStates.includes(result.status)) {
    if (result.status !== previousStatus) {
      console.log(`Status changed: ${previousStatus} → ${result.status}`);
      previousStatus = result.status;
    }
    // Log additional info if available
    if (result.metadata) {
      console.log('Metadata:', result.metadata);
    }
    await new Promise((resolve) => setTimeout(resolve, 5000));
    result = await openai.responses.retrieve(response.id);
  }
  console.log('Final status:', result.status);
}
/**
 * Error handling around waitForCompletion: distinguishes a client-side
 * polling timeout from a server-side task failure.
 */
async function handleBackgroundErrors() {
  console.log('=== Error Handling ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Long-running task',
    background: true,
  });
  try {
    const result = await waitForCompletion(response.id, {
      maxWaitTime: 5 * 60 * 1000, // 5 minutes max
      checkInterval: 5000,
    });
    console.log('Success:', result.output_text);
  } catch (error: any) {
    // waitForCompletion throws Error('TIMEOUT') when maxWaitTime elapses
    if (error.message === 'TIMEOUT') {
      console.error('Task exceeded maximum wait time');
      console.error('Task ID:', response.id);
      console.error('Check status later or increase timeout');
    } else if (error.status === 'failed') {
      // waitForCompletion rethrows failed results; they carry status/error
      console.error('Task failed:', error.error);
    } else {
      console.error('Unexpected error:', error);
    }
  }
}
/** Starts a background task, waits briefly, then requests cancellation. */
async function cancelBackgroundTask() {
  console.log('=== Cancel Background Task ===\n');

  const task = await openai.responses.create({
    model: 'gpt-5',
    input: 'Long task',
    background: true,
  });
  console.log('Task started:', task.id);

  // Give the task a 10-second head start before cancelling it
  const headStartMs = 10000;
  await new Promise((resolve) => setTimeout(resolve, headStartMs));

  try {
    await openai.responses.cancel(task.id);
    console.log('Task cancelled:', task.id);
  } catch (error: any) {
    console.error('Cancellation error:', error.message);
  }
}
/**
 * Helper: poll a background response until it reaches a terminal state.
 *
 * @param responseId - id of the background response to poll
 * @param options.maxWaitTime - overall budget in ms (default 10 minutes)
 * @param options.checkInterval - delay between polls in ms (default 5s)
 * @returns the terminal response object
 * @throws Error('TIMEOUT') when maxWaitTime elapses; an Error carrying
 *         `status: 'failed'` and `error` when the task fails.
 */
async function waitForCompletion(
  responseId: string,
  options: { maxWaitTime?: number; checkInterval?: number } = {}
): Promise<any> {
  const { maxWaitTime = 10 * 60 * 1000, checkInterval = 5000 } = options;
  const startTime = Date.now();
  let result = await openai.responses.retrieve(responseId);
  // Background responses can be "queued" before "in_progress"; both are
  // non-terminal (the original loop exited early on a queued task).
  while (result.status === 'queued' || result.status === 'in_progress') {
    if (Date.now() - startTime > maxWaitTime) {
      throw new Error('TIMEOUT');
    }
    await new Promise((resolve) => setTimeout(resolve, checkInterval));
    result = await openai.responses.retrieve(responseId);
  }
  if (result.status === 'failed') {
    // Throw a proper Error (not the raw result object), while keeping the
    // `status` / `error` fields that callers inspect (handleBackgroundErrors)
    // for backward compatibility.
    const failure: any = new Error(result.error?.message ?? 'Background task failed');
    failure.status = 'failed';
    failure.error = result.error;
    throw failure;
  }
  return result;
}
// Run examples
// basicBackgroundMode();
// backgroundWithCodeInterpreter();
// multipleBackgroundTasks();
// handleBackgroundErrors();

View File

@@ -0,0 +1,64 @@
/**
* Basic Response Example
*
* Simple text generation using the OpenAI Responses API.
* This is the simplest way to use the Responses API.
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/** Simplest possible Responses API call: plain string in, text out. */
async function basicResponse() {
  const response = await openai.responses.create({
    model: 'gpt-5', // or 'gpt-5-mini', 'gpt-4o'
    input: 'What are the 5 Ds of dodgeball?',
  });

  // Convenience accessor for the concatenated text output
  console.log(response.output_text);

  // Or walk the structured output array item by item
  for (const item of response.output) {
    console.log('Type:', item.type);
    if (item.type === 'message') {
      console.log('Content:', item.content);
    }
  }

  // Token accounting
  console.log('Tokens used:', response.usage.total_tokens);
}
/** Same call, but using the message-array input shape (mirrors Chat Completions). */
async function basicResponseWithMessages() {
  const reply = await openai.responses.create({
    model: 'gpt-5',
    input: [
      { role: 'developer', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Explain quantum computing in one sentence.' },
    ],
  });
  console.log(reply.output_text);
}
/** Demonstrates per-request options: storage opt-out and sampling temperature. */
async function basicResponseWithOptions() {
  const reply = await openai.responses.create({
    model: 'gpt-5',
    input: 'Write a haiku about coding',
    store: false, // Don't store conversation (saves costs)
    temperature: 0.7, // Creativity level (0-2)
  });
  console.log(reply.output_text);
}
// Run examples.
// Attach a catch handler so an async failure surfaces instead of becoming
// an unhandled promise rejection (the original call was fire-and-forget).
basicResponse().catch(console.error);
// basicResponseWithMessages();
// basicResponseWithOptions();

View File

@@ -0,0 +1,337 @@
/**
* Cloudflare Workers Example
*
* Demonstrates using the Responses API in Cloudflare Workers without the SDK.
* Uses native fetch API for zero dependencies.
*/
/** Worker bindings (configured via wrangler vars/secrets). */
export interface Env {
  OPENAI_API_KEY: string;
  /**
   * OAuth token for the Stripe MCP server, read by mcpWorker below.
   * Optional so workers that don't use MCP don't require the binding.
   * (Without this declaration, `env.STRIPE_OAUTH_TOKEN` fails to type-check.)
   */
  STRIPE_OAUTH_TOKEN?: string;
}
export default {
  /**
   * Basic proxy worker: POST { input } → OpenAI Responses API.
   * Handles CORS preflight and returns JSON on both success and error.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    // Handle CORS preflight
    if (request.method === 'OPTIONS') {
      return new Response(null, {
        headers: {
          'Access-Control-Allow-Origin': '*',
          'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
          'Access-Control-Allow-Headers': 'Content-Type',
        },
      });
    }
    if (request.method !== 'POST') {
      return new Response('Method not allowed', { status: 405 });
    }
    try {
      const { input } = await request.json<{ input: string }>();
      // Basic response
      const response = await createResponse(env.OPENAI_API_KEY, {
        model: 'gpt-5',
        input,
      });
      return new Response(JSON.stringify(response), {
        headers: {
          'Content-Type': 'application/json',
          'Access-Control-Allow-Origin': '*',
        },
      });
    } catch (error: any) {
      return new Response(JSON.stringify({ error: error.message }), {
        status: 500,
        // CORS header here too — the original omitted it on the error path,
        // so browser callers could not read the error body.
        headers: {
          'Content-Type': 'application/json',
          'Access-Control-Allow-Origin': '*',
        },
      });
    }
  },
};
// Helper: Create a response via the raw REST endpoint (no SDK dependency).
// Throws an Error carrying the API's error `type` and the HTTP `status`
// so callers (e.g. errorHandlingWorker) can branch on them.
async function createResponse(apiKey: string, params: any) {
  const response = await fetch('https://api.openai.com/v1/responses', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(params),
  });
  if (!response.ok) {
    // The error body is usually JSON, but guard against HTML/plain-text
    // gateway errors that would make response.json() itself throw and
    // mask the real failure.
    const error: any = await response.json().catch(() => null);
    const failure: any = new Error(error?.error?.message || 'OpenAI API error');
    failure.type = error?.error?.type;
    failure.status = response.status;
    throw failure;
  }
  return response.json();
}
// Example: Stateful conversation
export const conversationWorker = {
async fetch(request: Request, env: Env): Promise<Response> {
const { conversationId, input } = await request.json<{
conversationId?: string;
input: string;
}>();
// Create or use existing conversation
let convId = conversationId;
if (!convId) {
const conv = await createConversation(env.OPENAI_API_KEY);
convId = conv.id;
}
// Create response with conversation
const response = await createResponse(env.OPENAI_API_KEY, {
model: 'gpt-5',
conversation: convId,
input,
});
return new Response(
JSON.stringify({
conversationId: convId,
output: response.output_text,
}),
{
headers: { 'Content-Type': 'application/json' },
}
);
},
};
// Helper: Create a new conversation object.
// Mirrors createResponse's error handling — the original returned the raw
// JSON without checking response.ok, silently handing back an error payload
// as if it were a conversation.
async function createConversation(apiKey: string) {
  const response = await fetch('https://api.openai.com/v1/conversations', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({}),
  });
  if (!response.ok) {
    const error: any = await response.json().catch(() => null);
    throw new Error(error?.error?.message || 'OpenAI API error');
  }
  return response.json();
}
// Example: With MCP tools
export const mcpWorker = {
  /**
   * POST { input } — forwards the prompt with a remote MCP tool attached
   * (Stripe's hosted MCP server).
   * NOTE(review): requires a STRIPE_OAUTH_TOKEN binding on Env — confirm
   * it is declared on the interface and configured as a worker secret.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    const { input } = await request.json<{ input: string }>();
    const response = await createResponse(env.OPENAI_API_KEY, {
      model: 'gpt-5',
      input,
      tools: [
        {
          type: 'mcp',
          server_label: 'stripe',
          server_url: 'https://mcp.stripe.com',
          // OAuth token passed through to the MCP server
          authorization: env.STRIPE_OAUTH_TOKEN,
        },
      ],
    });
    return new Response(JSON.stringify(response), {
      headers: { 'Content-Type': 'application/json' },
    });
  },
};
// Example: With Code Interpreter
export const codeInterpreterWorker = {
  /** POST { input } — runs the prompt with the code_interpreter tool enabled. */
  async fetch(request: Request, env: Env): Promise<Response> {
    const body = await request.json<{ input: string }>();
    const result = await createResponse(env.OPENAI_API_KEY, {
      model: 'gpt-5',
      input: body.input,
      tools: [{ type: 'code_interpreter' }],
    });
    return new Response(JSON.stringify(result), {
      headers: { 'Content-Type': 'application/json' },
    });
  },
};
// Example: With File Search
export const fileSearchWorker = {
  /** POST { input, fileIds } — searches the given uploaded files for the answer. */
  async fetch(request: Request, env: Env): Promise<Response> {
    const payload = await request.json<{
      input: string;
      fileIds: string[];
    }>();
    const result = await createResponse(env.OPENAI_API_KEY, {
      model: 'gpt-5',
      input: payload.input,
      tools: [{ type: 'file_search', file_ids: payload.fileIds }],
    });
    return new Response(JSON.stringify(result), {
      headers: { 'Content-Type': 'application/json' },
    });
  },
};
// Example: With Web Search
export const webSearchWorker = {
  /** POST { input } — runs the prompt with the web_search tool enabled. */
  async fetch(request: Request, env: Env): Promise<Response> {
    const body = await request.json<{ input: string }>();
    const result = await createResponse(env.OPENAI_API_KEY, {
      model: 'gpt-5',
      input: body.input,
      tools: [{ type: 'web_search' }],
    });
    return new Response(JSON.stringify(result), {
      headers: { 'Content-Type': 'application/json' },
    });
  },
};
// Example: Background mode
export const backgroundWorker = {
  /**
   * Two-phase endpoint for long-running tasks:
   *  - POST { input }      → starts a background response, returns its id/status
   *  - POST { responseId } → fetches the current state of an existing response
   * Returns 400 when neither field is supplied.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    const { input, responseId } = await request.json<{
      input?: string;
      responseId?: string;
    }>();
    // Start background task
    if (input) {
      const response = await createResponse(env.OPENAI_API_KEY, {
        model: 'gpt-5',
        input,
        background: true,
      });
      return new Response(
        JSON.stringify({
          responseId: response.id,
          status: response.status,
        }),
        {
          headers: { 'Content-Type': 'application/json' },
        }
      );
    }
    // Check status of a previously started task via the raw REST endpoint
    if (responseId) {
      const response = await fetch(
        `https://api.openai.com/v1/responses/${responseId}`,
        {
          headers: {
            'Authorization': `Bearer ${env.OPENAI_API_KEY}`,
          },
        }
      );
      // Pass the retrieved response straight through to the caller
      const data = await response.json();
      return new Response(JSON.stringify(data), {
        headers: { 'Content-Type': 'application/json' },
      });
    }
    return new Response('Invalid request', { status: 400 });
  },
};
// Example: Error handling
export const errorHandlingWorker = {
  /**
   * Maps known OpenAI error types to HTTP statuses:
   * rate limits → 429, MCP connectivity → 502, everything else → 500.
   * NOTE(review): these branches rely on the thrower attaching a `.type`
   * field to the error — verify createResponse preserves the API error
   * type, otherwise only the generic 500 path is reachable.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    try {
      const { input } = await request.json<{ input: string }>();
      const response = await createResponse(env.OPENAI_API_KEY, {
        model: 'gpt-5',
        input,
      });
      return new Response(JSON.stringify(response), {
        headers: { 'Content-Type': 'application/json' },
      });
    } catch (error: any) {
      // Handle specific errors
      if (error.type === 'rate_limit_error') {
        return new Response(
          JSON.stringify({ error: 'Rate limit exceeded', retry_after: error.retry_after }),
          {
            status: 429,
            headers: { 'Content-Type': 'application/json' },
          }
        );
      }
      if (error.type === 'mcp_connection_error') {
        return new Response(
          JSON.stringify({ error: 'MCP server connection failed' }),
          {
            status: 502,
            headers: { 'Content-Type': 'application/json' },
          }
        );
      }
      // Generic error
      return new Response(
        JSON.stringify({ error: error.message || 'Internal error' }),
        {
          status: 500,
          headers: { 'Content-Type': 'application/json' },
        }
      );
    }
  },
};
// Example: Polymorphic outputs
export const polymorphicWorker = {
  /**
   * POST { input } — runs with code_interpreter + web_search, then flattens
   * the polymorphic output array into { text, reasoning, toolCalls }.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    const { input } = await request.json<{ input: string }>();
    const response = await createResponse(env.OPENAI_API_KEY, {
      model: 'gpt-5',
      input,
      tools: [{ type: 'code_interpreter' }, { type: 'web_search' }],
    });
    // Process different output types
    const processedOutput: any = {
      text: response.output_text,
      reasoning: [],
      toolCalls: [],
    };
    response.output.forEach((item: any) => {
      if (item.type === 'reasoning') {
        // `summary` can be empty or absent — guard instead of crashing on
        // the original unconditional item.summary[0].text access.
        const summaryText = item.summary?.[0]?.text;
        if (summaryText !== undefined) {
          processedOutput.reasoning.push(summaryText);
        }
      }
      if (item.type === 'code_interpreter_call') {
        processedOutput.toolCalls.push({
          type: 'code_interpreter',
          input: item.input,
          output: item.output,
        });
      }
      if (item.type === 'web_search_call') {
        processedOutput.toolCalls.push({
          type: 'web_search',
          query: item.query,
          results: item.results,
        });
      }
    });
    return new Response(JSON.stringify(processedOutput), {
      headers: { 'Content-Type': 'application/json' },
    });
  },
};

View File

@@ -0,0 +1,227 @@
/**
* Code Interpreter Example
*
* Demonstrates server-side Python code execution for data analysis,
* calculations, and visualizations.
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/** Simple stats question answered via server-side Python execution. */
async function basicCalculation() {
  console.log('=== Basic Calculation ===\n');

  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Calculate the mean, median, and mode of: 10, 20, 30, 40, 50',
    tools: [{ type: 'code_interpreter' }],
  });
  console.log('Response:', response.output_text);

  // Show the Python the model actually ran, plus its result
  for (const item of response.output) {
    if (item.type === 'code_interpreter_call') {
      console.log('\nCode executed:');
      console.log(item.input);
      console.log('\nResult:', item.output);
    }
  }
}
/**
 * Feeds a small in-memory dataset to the model as JSON inside the prompt
 * and asks for computed insights via code_interpreter.
 */
async function dataAnalysis() {
  console.log('=== Data Analysis ===\n');
  const salesData = [
    { month: 'Jan', revenue: 10000 },
    { month: 'Feb', revenue: 12000 },
    { month: 'Mar', revenue: 11500 },
    { month: 'Apr', revenue: 13000 },
    { month: 'May', revenue: 14500 },
    { month: 'Jun', revenue: 16000 },
  ];
  const response = await openai.responses.create({
    model: 'gpt-5',
    // Template-literal content below is part of the prompt — left as-is
    input: `Analyze this sales data and provide insights:
${JSON.stringify(salesData, null, 2)}
Calculate:
1. Total revenue
2. Average monthly revenue
3. Growth rate from Jan to Jun
4. Best performing month`,
    tools: [{ type: 'code_interpreter' }],
  });
  console.log('Analysis:', response.output_text);
}
/**
 * Asks the model to build a bar chart; generated charts are expected as
 * file outputs attached to the code_interpreter call item.
 */
async function chartGeneration() {
  console.log('=== Chart Generation ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    // Template-literal content below is part of the prompt — left as-is
    input: `Create a bar chart showing monthly revenue:
- Jan: $10,000
- Feb: $12,000
- Mar: $11,500
- Apr: $13,000
- May: $14,500
- Jun: $16,000`,
    tools: [{ type: 'code_interpreter' }],
  });
  console.log('Response:', response.output_text);
  // Find chart output
  response.output.forEach((item) => {
    if (item.type === 'code_interpreter_call') {
      console.log('\nChart code:');
      console.log(item.input);
      // Check for file outputs (charts saved as files).
      // NOTE(review): other functions in this file read `item.output`
      // (singular) — confirm whether the API field here is `outputs` or
      // `output`; if it's the latter this branch never runs.
      if (item.outputs) {
        item.outputs.forEach((output) => {
          if (output.type === 'image') {
            console.log('Chart URL:', output.url);
          }
        });
      }
    }
  });
}
/** Uploads an in-memory CSV and has code_interpreter compute over it. */
async function fileProcessing() {
  console.log('=== File Processing ===\n');

  // Upload file first
  const csvUpload = await openai.files.create({
    file: Buffer.from('name,age,city\nAlice,30,NYC\nBob,25,LA\nCharlie,35,Chicago'),
    purpose: 'assistants',
  });

  // Ask for an aggregate over the uploaded data
  const analysis = await openai.responses.create({
    model: 'gpt-5',
    input: 'Analyze the CSV file and tell me the average age',
    tools: [
      {
        type: 'code_interpreter',
        file_ids: [csvUpload.id], // ✅ Access uploaded file
      },
    ],
  });
  console.log('Analysis:', analysis.output_text);
}
/**
 * Multi-part compound-growth word problem; also shows surfacing the
 * model's reasoning summaries alongside the executed code.
 */
async function complexCalculation() {
  console.log('=== Complex Calculation ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    // Template-literal content below is part of the prompt — left as-is
    input: `Solve this math problem step by step:
A company's revenue grows by 15% each year. If the revenue in year 1 is $100,000:
1. What will the revenue be in year 5?
2. What is the total revenue across all 5 years?
3. What year will the revenue first exceed $200,000?`,
    tools: [{ type: 'code_interpreter' }],
  });
  console.log('Solution:', response.output_text);
  // Show step-by-step reasoning
  response.output.forEach((item) => {
    if (item.type === 'reasoning') {
      // NOTE(review): assumes summary is non-empty — verify, or guard [0]
      console.log('\nReasoning:', item.summary[0].text);
    }
    if (item.type === 'code_interpreter_call') {
      console.log('\nCode:', item.input);
      console.log('Result:', item.output);
    }
  });
}
/** Descriptive statistics over a fixed 10-element dataset. */
async function statisticalAnalysis() {
  console.log('=== Statistical Analysis ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    // Template-literal content below is part of the prompt — left as-is
    input: `Perform statistical analysis on this dataset:
[12, 15, 18, 20, 22, 25, 28, 30, 35, 40]
Calculate:
1. Standard deviation
2. Variance
3. 25th, 50th, 75th percentiles
4. Outliers (if any)`,
    tools: [{ type: 'code_interpreter' }],
  });
  console.log('Analysis:', response.output_text);
}
/**
 * Long-running code execution using background mode; polls until the
 * response leaves its non-terminal states.
 */
async function codeInterpreterWithTimeout() {
  console.log('=== Code Interpreter with Background Mode ===\n');
  // For long-running code, use background mode
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Process this large dataset and generate a comprehensive report',
    background: true, // ✅ Extended timeout for long-running code
    tools: [{ type: 'code_interpreter' }],
  });
  // Poll for completion. Background tasks can start "queued" before
  // "in_progress"; the original loop only handled the latter.
  let result = await openai.responses.retrieve(response.id);
  while (result.status === 'queued' || result.status === 'in_progress') {
    console.log('Still processing...');
    await new Promise((resolve) => setTimeout(resolve, 5000));
    result = await openai.responses.retrieve(response.id);
  }
  if (result.status === 'completed') {
    console.log('Result:', result.output_text);
  } else {
    console.error('Failed:', result.error);
  }
}
/**
 * Error handling for code_interpreter: in-band execution errors appear on
 * the call item, while timeouts surface as thrown API errors.
 */
async function handleCodeInterpreterErrors() {
  console.log('=== Error Handling ===\n');
  try {
    const response = await openai.responses.create({
      model: 'gpt-5',
      input: 'Run this Python code: import invalid_module',
      tools: [{ type: 'code_interpreter' }],
    });
    // Check for execution errors in output
    response.output.forEach((item) => {
      if (item.type === 'code_interpreter_call' && item.error) {
        console.error('Code execution error:', item.error);
      }
    });
  } catch (error: any) {
    if (error.type === 'code_interpreter_timeout') {
      console.error('Code execution timed out. Use background mode for long tasks.');
    } else {
      console.error('Error:', error.message);
    }
  }
}
// Run examples.
// Attach a catch handler so an async failure surfaces instead of becoming
// an unhandled promise rejection (the original call was fire-and-forget).
basicCalculation().catch(console.error);
// dataAnalysis();
// chartGeneration();
// fileProcessing();
// complexCalculation();
// statisticalAnalysis();
// codeInterpreterWithTimeout();

271
templates/file-search.ts Normal file
View File

@@ -0,0 +1,271 @@
/**
* File Search Example
*
* Demonstrates RAG (Retrieval-Augmented Generation) without building
* your own vector store. OpenAI handles embeddings and search automatically.
*/
import OpenAI from 'openai';
import fs from 'fs';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/**
 * Minimal RAG flow: upload a document once, then ask questions against it
 * with the file_search tool. Also shows how to inspect retrieved chunks.
 */
async function basicFileSearch() {
  console.log('=== Basic File Search ===\n');
  // 1. Upload file (one-time setup)
  const file = await openai.files.create({
    file: fs.createReadStream('./knowledge-base.pdf'),
    purpose: 'assistants',
  });
  console.log('File uploaded:', file.id);
  // 2. Search file for information
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'What does the document say about pricing?',
    tools: [
      {
        type: 'file_search',
        file_ids: [file.id],
      },
    ],
  });
  console.log('Answer:', response.output_text);
  // 3. Inspect search results (query, matched chunks, relevance scores)
  response.output.forEach((item) => {
    if (item.type === 'file_search_call') {
      console.log('\nSearch query:', item.query);
      console.log('Relevant chunks:', item.results.length);
      item.results.forEach((result, idx) => {
        console.log(`\nChunk ${idx + 1}:`);
        // Truncate chunk text for readable console output
        console.log('Text:', result.text.substring(0, 200) + '...');
        console.log('Score:', result.score);
        console.log('File:', result.file_id);
      });
    }
  });
}
/**
 * Searches across several uploaded documents in one call. The three
 * uploads are independent, so they run concurrently via Promise.all
 * instead of the original three sequential round-trips.
 */
async function multipleFileSearch() {
  console.log('=== Multiple File Search ===\n');
  // Upload multiple files in parallel
  const [file1, file2, file3] = await Promise.all([
    openai.files.create({
      file: fs.createReadStream('./product-guide.pdf'),
      purpose: 'assistants',
    }),
    openai.files.create({
      file: fs.createReadStream('./pricing-doc.pdf'),
      purpose: 'assistants',
    }),
    openai.files.create({
      file: fs.createReadStream('./faq.pdf'),
      purpose: 'assistants',
    }),
  ]);
  // Search across all files
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'What are the key features and how much does the premium plan cost?',
    tools: [
      {
        type: 'file_search',
        file_ids: [file1.id, file2.id, file3.id], // ✅ Multiple files
      },
    ],
  });
  console.log('Answer (synthesized from all files):', response.output_text);
}
/**
 * Multi-turn RAG: a conversation object carries context between turns, so
 * follow-up pronouns ("it") resolve against the earlier answer.
 */
async function conversationalFileSearch() {
  console.log('=== Conversational File Search ===\n');
  // Upload knowledge base
  const file = await openai.files.create({
    file: fs.createReadStream('./company-handbook.pdf'),
    purpose: 'assistants',
  });
  // Create conversation
  const conv = await openai.conversations.create();
  // First question
  const response1 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id,
    input: 'What is the PTO policy?',
    tools: [{ type: 'file_search', file_ids: [file.id] }],
  });
  console.log('Q1:', response1.output_text);
  // Follow-up question (model remembers previous answer)
  const response2 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id,
    input: 'How do I request it?',
    tools: [{ type: 'file_search', file_ids: [file.id] }],
  });
  console.log('Q2:', response2.output_text);
  // Model knows "it" refers to PTO from previous turn
}
/**
 * Summarizes a paper and lists which file/page each retrieved chunk came
 * from, as lightweight citations.
 */
async function fileSearchWithCitations() {
  console.log('=== File Search with Citations ===\n');
  const file = await openai.files.create({
    file: fs.createReadStream('./research-paper.pdf'),
    purpose: 'assistants',
  });
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Summarize the key findings and provide citations',
    tools: [{ type: 'file_search', file_ids: [file.id] }],
  });
  console.log('Summary:', response.output_text);
  // Extract citations from the search-call items
  response.output.forEach((item) => {
    if (item.type === 'file_search_call') {
      console.log('\nCitations:');
      item.results.forEach((result, idx) => {
        // Page may be absent for non-paginated formats
        console.log(`[${idx + 1}] File: ${result.file_id}, Page: ${result.page || 'N/A'}`);
      });
    }
  });
}
/** Filters file_search results down to high-confidence matches (score > 0.7). */
async function filterSearchResults() {
  console.log('=== Filter Search Results by Relevance ===\n');

  const uploaded = await openai.files.create({
    file: fs.createReadStream('./large-document.pdf'),
    purpose: 'assistants',
  });

  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Find all mentions of "quarterly revenue" in the document',
    tools: [{ type: 'file_search', file_ids: [uploaded.id] }],
  });

  // Keep only chunks the search scored above the confidence threshold
  for (const item of response.output) {
    if (item.type === 'file_search_call') {
      const highConfidence = item.results.filter((r) => r.score > 0.7);
      console.log(`Found ${highConfidence.length} high-confidence matches:`);
      for (const result of highConfidence) {
        console.log('Text:', result.text);
        console.log('Score:', result.score);
        console.log('---');
      }
    }
  }
}
/**
 * Uploads a mix of file formats and searches across all of them at once.
 * The four uploads are independent, so they run concurrently via
 * Promise.all rather than the original sequential awaits.
 */
async function supportedFileTypes() {
  console.log('=== Supported File Types ===\n');
  // Upload different file types in parallel
  const [pdfFile, textFile, markdownFile, codeFile] = await Promise.all([
    openai.files.create({
      file: fs.createReadStream('./document.pdf'),
      purpose: 'assistants',
    }),
    openai.files.create({
      file: fs.createReadStream('./notes.txt'),
      purpose: 'assistants',
    }),
    openai.files.create({
      file: fs.createReadStream('./README.md'),
      purpose: 'assistants',
    }),
    openai.files.create({
      file: fs.createReadStream('./main.ts'),
      purpose: 'assistants',
    }),
  ]);
  // Search across different file types
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Find information about the authentication system',
    tools: [
      {
        type: 'file_search',
        file_ids: [pdfFile.id, textFile.id, markdownFile.id, codeFile.id],
      },
    ],
  });
  console.log('Answer:', response.output_text);
}
/**
 * Demonstrates error handling for file_search with an invalid file ID.
 * The response itself is irrelevant — we only care whether the call
 * throws — so the result is not bound (the original assigned an unused
 * `response` const).
 */
async function handleFileSearchErrors() {
  console.log('=== Error Handling ===\n');
  try {
    await openai.responses.create({
      model: 'gpt-5',
      input: 'Search for information',
      tools: [
        {
          type: 'file_search',
          file_ids: ['file_invalid'], // ❌ Invalid file ID
        },
      ],
    });
  } catch (error: any) {
    if (error.type === 'invalid_request_error') {
      console.error('File not found. Upload file first.');
    } else {
      console.error('Error:', error.message);
    }
  }
}
/** Prints id, name, size, and creation date for every 'assistants' file. */
async function listUploadedFiles() {
  console.log('=== List Uploaded Files ===\n');

  const files = await openai.files.list({
    purpose: 'assistants',
  });

  console.log(`Found ${files.data.length} files:`);
  for (const file of files.data) {
    console.log('ID:', file.id);
    console.log('Filename:', file.filename);
    console.log('Size:', file.bytes, 'bytes');
    // created_at is epoch seconds
    console.log('Created:', new Date(file.created_at * 1000));
    console.log('---');
  }
}
/**
 * Deletes an uploaded file by id (cleanup).
 * @param fileId - id of a previously uploaded file
 */
async function deleteFile(fileId: string) {
  // Delete file (cleanup)
  await openai.files.delete(fileId);
  console.log('File deleted:', fileId);
}
// Run examples
// basicFileSearch();
// multipleFileSearch();
// conversationalFileSearch();
// fileSearchWithCitations();
// filterSearchResults();
// listUploadedFiles();

View File

@@ -0,0 +1,241 @@
/**
* Image Generation Example
*
* Demonstrates integrated DALL-E image generation in the Responses API.
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/**
 * Single-shot image generation via the image_generation tool. Generated
 * images arrive as image_generation_call items in the output array.
 */
async function basicImageGeneration() {
  console.log('=== Basic Image Generation ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Create an image of a futuristic cityscape at sunset',
    tools: [{ type: 'image_generation' }],
  });
  console.log('Response:', response.output_text);
  // Find image in output
  response.output.forEach((item) => {
    if (item.type === 'image_generation_call') {
      console.log('\nPrompt used:', item.prompt);
      // NOTE(review): assumes the result is URL-shaped (item.output.url) —
      // confirm against the current API, which may return base64 instead.
      console.log('Image URL:', item.output.url);
      console.log('Image expires in 1 hour');
    }
  });
}
/**
 * Two-turn image generation: the shared conversation lets the second
 * prompt modify the image produced in the first turn.
 */
async function conversationalImageGeneration() {
  console.log('=== Conversational Image Generation ===\n');
  // Create conversation
  const conv = await openai.conversations.create();
  // First request
  const response1 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id,
    input: 'Create an image of a cartoon cat wearing a wizard hat',
    tools: [{ type: 'image_generation' }],
  });
  console.log('Turn 1:', response1.output_text);
  // Modification request (model remembers previous image)
  const response2 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id,
    input: 'Make it more colorful and add a magic wand',
    tools: [{ type: 'image_generation' }],
  });
  console.log('Turn 2:', response2.output_text);
  // Model generates new image with modifications
}
/** Requests several designs in one prompt and collects every image URL. */
async function multipleImages() {
  console.log('=== Multiple Images ===\n');

  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Create 3 different logo designs for a tech startup',
    tools: [{ type: 'image_generation' }],
  });
  console.log('Response:', response.output_text);

  // Gather the URL of every generated image
  const images: string[] = [];
  for (const item of response.output) {
    if (item.type === 'image_generation_call') {
      images.push(item.output.url);
    }
  }

  console.log(`\nGenerated ${images.length} images:`);
  images.forEach((url, idx) => {
    console.log(`Image ${idx + 1}: ${url}`);
  });
}
/** Shows prompt-engineering a detailed image spec in a single input string. */
async function imageWithSpecifications() {
  console.log('=== Image with Specifications ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    // Template-literal content below is part of the prompt — left as-is
    input: `Create an image with these specifications:
- Subject: Modern minimalist office space
- Style: Photorealistic
- Lighting: Natural daylight from large windows
- Colors: Neutral tones (white, gray, wood)
- Details: Include plants and modern furniture`,
    tools: [{ type: 'image_generation' }],
  });
  console.log('Response:', response.output_text);
}
/** Generates a single presentation-ready infographic image. */
async function imageForPresentation() {
  console.log('=== Image for Presentation ===\n');

  const infographic = await openai.responses.create({
    model: 'gpt-5',
    input: 'Create a professional infographic showing the growth of AI adoption from 2020 to 2025',
    tools: [{ type: 'image_generation' }],
  });
  console.log('Response:', infographic.output_text);
}
/**
 * Generates an image, downloads it, and writes it to disk.
 * Note: if several images are produced they all overwrite the same path,
 * matching the original behavior.
 */
async function saveImageToFile() {
  console.log('=== Save Image to File ===\n');
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'Create an image of a mountain landscape',
    tools: [{ type: 'image_generation' }],
  });
  // Import fs once, up front, instead of on every loop iteration
  const fs = await import('fs');
  // Find and download image
  for (const item of response.output) {
    if (item.type === 'image_generation_call') {
      const imageUrl = item.output.url;
      console.log('Downloading image from:', imageUrl);
      // Download image
      const imageResponse = await fetch(imageUrl);
      // Guard against writing an HTML error page into a .png — the
      // original saved the body unconditionally.
      if (!imageResponse.ok) {
        console.error(`Download failed: HTTP ${imageResponse.status}`);
        continue;
      }
      const imageBuffer = await imageResponse.arrayBuffer();
      // Save to file
      fs.writeFileSync('./generated-image.png', Buffer.from(imageBuffer));
      console.log('Image saved to: ./generated-image.png');
    }
  }
}
/**
 * Refines one image across three turns of a single conversation; each
 * request builds on the previous design instead of starting over.
 */
async function iterativeImageRefinement() {
  console.log('=== Iterative Image Refinement ===\n');

  const conversation = await openai.conversations.create();

  // Each entry is one refinement step: [log label, prompt].
  const steps: Array<[string, string]> = [
    ['Initial design', 'Create a logo for a coffee shop'],
    ['Refinement 1', 'Make the colors warmer and add a coffee bean illustration'],
    ['Final design', 'Perfect! Can you make it circular instead of square?'],
  ];

  // Turns must run sequentially so each one sees the prior result.
  for (const [label, prompt] of steps) {
    const result = await openai.responses.create({
      model: 'gpt-5',
      conversation: conversation.id,
      input: prompt,
      tools: [{ type: 'image_generation' }],
    });
    console.log(`${label}:`, result.output_text);
  }
}
/**
 * Demonstrates recovering from image-generation failures: waits out a
 * rate limit using the server's Retry-After header, and reports
 * content-policy violations separately.
 *
 * Fix over the original: `parseInt` now passes an explicit radix so
 * the header value cannot be parsed under legacy/implicit base rules.
 */
async function handleImageGenerationErrors() {
  console.log('=== Error Handling ===\n');
  try {
    const response = await openai.responses.create({
      model: 'gpt-5',
      input: 'Create an image [multiple requests]',
      tools: [{ type: 'image_generation' }],
    });
    console.log('Success:', response.output_text);
  } catch (error: any) {
    if (error.type === 'rate_limit_error') {
      console.error('DALL-E rate limit exceeded');
      console.error('Retry after:', error.headers?.['retry-after']);
      // Wait the server-specified interval (fixed delay, default 5s).
      const delay = parseInt(error.headers?.['retry-after'] || '5', 10) * 1000;
      console.log(`Waiting ${delay}ms before retry...`);
      await new Promise((resolve) => setTimeout(resolve, delay));
      // Retry once after backing off.
      const retryResponse = await openai.responses.create({
        model: 'gpt-5',
        input: 'Create an image',
        tools: [{ type: 'image_generation' }],
      });
      console.log('Retry success:', retryResponse.output_text);
    } else if (error.type === 'content_policy_violation') {
      console.error('Image prompt violates content policy');
      console.error('Please revise prompt to comply with guidelines');
    } else {
      console.error('Error:', error.message);
    }
  }
}
/**
 * Registers the code interpreter and image generation together; the
 * model can compute chart data first, then render a visualization.
 */
async function combinedImageAndAnalysis() {
  console.log('=== Image Generation + Code Interpreter ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Create a chart showing sales growth from 2020-2025, then generate an image visualization',
    tools: [
      { type: 'code_interpreter' },
      { type: 'image_generation' },
    ],
  });

  console.log('Response:', result.output_text);
}
// Run examples
// Attach a rejection handler so an API failure is logged instead of
// killing the process with an unhandled promise rejection.
basicImageGeneration().catch(console.error);
// conversationalImageGeneration();
// multipleImages();
// imageWithSpecifications();
// saveImageToFile();
// iterativeImageRefinement();

View File

@@ -0,0 +1,203 @@
/**
* MCP Server Integration Example
*
* Demonstrates how to connect to external MCP (Model Context Protocol) servers
* for tool integration. MCP is built into the Responses API.
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/**
 * Points a response at a remote MCP server and logs both the tools the
 * model discovered and the tool calls it actually made.
 */
async function basicMCPIntegration() {
  console.log('=== Basic MCP Integration ===\n');

  // A single `mcp` tool entry connects the model to an external server.
  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Roll 2d6 dice for me',
    tools: [
      {
        type: 'mcp',
        server_label: 'dice',
        server_url: 'https://dmcp.example.com', // Replace with real MCP server
      },
    ],
  });

  console.log('Response:', result.output_text);

  // Walk the output items: tool discovery first, then invocations.
  for (const item of result.output) {
    if (item.type === 'mcp_list_tools') {
      console.log('\nDiscovered tools:', item.tools);
    }
    if (item.type === 'mcp_call') {
      console.log('\nTool called:', item.name);
      console.log('Arguments:', item.arguments);
      console.log('Output:', item.output);
    }
  }
}
/**
 * Passes an OAuth access token to an authenticated MCP server (Stripe)
 * and surfaces the payment link returned by the tool call.
 */
async function mcpWithAuthentication() {
  console.log('=== MCP with OAuth Authentication ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Create a payment link for $20',
    tools: [
      {
        type: 'mcp',
        server_label: 'stripe',
        server_url: 'https://mcp.stripe.com',
        authorization: process.env.STRIPE_OAUTH_ACCESS_TOKEN, // ✅ OAuth token
      },
    ],
  });

  console.log('Response:', result.output_text);

  // Surface the link produced by the `create_payment_link` tool.
  for (const item of result.output) {
    if (item.type === 'mcp_call' && item.name === 'create_payment_link') {
      console.log('\nPayment link created:', item.output);
    }
  }
}
/**
 * Registers two MCP servers in one request so the model can chain
 * calls across both (check a balance, then create a payment link).
 */
async function multipleMCPServers() {
  console.log('=== Multiple MCP Servers ===\n');

  // Each server entry carries its own label, URL and credentials.
  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Check my Stripe balance and create a payment link for the remaining amount',
    tools: [
      {
        type: 'mcp',
        server_label: 'stripe',
        server_url: 'https://mcp.stripe.com',
        authorization: process.env.STRIPE_OAUTH_TOKEN,
      },
      {
        type: 'mcp',
        server_label: 'database',
        server_url: 'https://db-mcp.example.com',
        authorization: process.env.DB_API_KEY,
      },
    ],
  });

  console.log('Response:', result.output_text);
}
/**
 * Combines MCP tool use with a stateful conversation: the second turn
 * refers back to the payment link created in the first turn.
 */
async function mcpWithConversation() {
  console.log('=== MCP with Stateful Conversation ===\n');

  // The conversation id carries context between the two turns below.
  const conversation = await openai.conversations.create();

  // Turn 1: the model invokes the Stripe MCP tool.
  const firstTurn = await openai.responses.create({
    model: 'gpt-5',
    conversation: conversation.id,
    input: 'Create a $50 payment link for premium subscription',
    tools: [
      {
        type: 'mcp',
        server_label: 'stripe',
        server_url: 'https://mcp.stripe.com',
        authorization: process.env.STRIPE_OAUTH_TOKEN,
      },
    ],
  });
  console.log('Turn 1:', firstTurn.output_text);

  // Turn 2: no tools needed — the model recalls the link from turn 1.
  const secondTurn = await openai.responses.create({
    model: 'gpt-5',
    conversation: conversation.id,
    input: 'Can you show me the details of that payment link?',
  });
  console.log('Turn 2:', secondTurn.output_text);
}
/**
 * Distinguishes MCP connection failures from authentication failures
 * when a request throws.
 */
async function handleMCPErrors() {
  console.log('=== MCP Error Handling ===\n');
  try {
    const result = await openai.responses.create({
      model: 'gpt-5',
      input: 'Use the Stripe tool',
      tools: [
        {
          type: 'mcp',
          server_label: 'stripe',
          server_url: 'https://mcp.stripe.com',
          authorization: process.env.STRIPE_OAUTH_TOKEN,
        },
      ],
    });
    console.log('Success:', result.output_text);
  } catch (error: any) {
    // Branch on the error's `type` discriminator.
    switch (error.type) {
      case 'mcp_connection_error':
        console.error('MCP server connection failed:', error.message);
        console.error('Check server URL and network connectivity');
        break;
      case 'mcp_authentication_error':
        console.error('MCP authentication failed:', error.message);
        console.error('Verify authorization token is valid and not expired');
        break;
      default:
        console.error('Unexpected error:', error);
    }
  }
}
/**
* Custom MCP Server Example
*
* If you want to build your own MCP server, it needs to implement:
* 1. POST /mcp/list_tools - Return available tools
* 2. POST /mcp/call_tool - Execute tool and return result
*
* Example MCP server response format:
*/
// Shape of a custom MCP server's reply to POST /mcp/list_tools: each
// tool has a name, a human-readable description, and a JSON Schema
// (`input_schema`) describing its expected arguments.
const exampleMCPListToolsResponse = {
  tools: [
    {
      name: 'get_weather',
      description: 'Get current weather for a city',
      input_schema: {
        type: 'object',
        properties: {
          city: { type: 'string' },
          units: { type: 'string', enum: ['celsius', 'fahrenheit'] },
        },
        required: ['city'],
      },
    },
  ],
};
// Shape of a reply to POST /mcp/call_tool: the tool's output is
// wrapped in a `result` object.
const exampleMCPCallToolResponse = {
  result: {
    temperature: 72,
    condition: 'sunny',
    humidity: 45,
  },
};
// Run examples
// Attach a rejection handler so an API failure is logged instead of
// killing the process with an unhandled promise rejection.
basicMCPIntegration().catch(console.error);
// mcpWithAuthentication();
// multipleMCPServers();
// mcpWithConversation();
// handleMCPErrors();

30
templates/package.json Normal file
View File

@@ -0,0 +1,30 @@
{
"name": "openai-responses-examples",
"version": "1.0.0",
"description": "OpenAI Responses API Examples",
"type": "module",
"scripts": {
"basic": "tsx templates/basic-response.ts",
"conversation": "tsx templates/stateful-conversation.ts",
"mcp": "tsx templates/mcp-integration.ts",
"code": "tsx templates/code-interpreter.ts",
"file": "tsx templates/file-search.ts",
"web": "tsx templates/web-search.ts",
"image": "tsx templates/image-generation.ts",
"background": "tsx templates/background-mode.ts",
"worker": "wrangler dev templates/cloudflare-worker.ts"
},
"dependencies": {
"openai": "^5.19.1"
},
"devDependencies": {
"@cloudflare/workers-types": "^5.0.0",
"@types/node": "^20.0.0",
"tsx": "^4.7.1",
"typescript": "^5.3.3",
"wrangler": "^3.95.0"
},
"engines": {
"node": ">=18.0.0"
}
}

View File

@@ -0,0 +1,120 @@
/**
* Stateful Conversation Example
*
* Demonstrates automatic state management using conversation IDs.
* The model remembers previous turns automatically.
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/**
 * Lets the API manage state: every turn that reuses the same
 * conversation id automatically sees the full prior history.
 */
async function automaticStateManagement() {
  console.log('=== Automatic State Management ===\n');

  // 1. Create a conversation; metadata is free-form context.
  const conv = await openai.conversations.create({
    metadata: {
      user_id: 'user_123',
      session_type: 'support',
    },
  });
  console.log('Conversation ID:', conv.id);

  // 2. First turn.
  const turn1 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id, // ✅ Reuse this ID
    input: 'What are the 5 Ds of dodgeball?',
  });
  console.log('Turn 1:', turn1.output_text);
  console.log('');

  // 3. Second turn — "the first one" resolves against turn 1's answer.
  const turn2 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id, // ✅ Same ID
    input: 'Tell me more about the first one',
  });
  console.log('Turn 2:', turn2.output_text);
  console.log('');

  // 4. Third turn — the model can still quote the original question.
  const turn3 = await openai.responses.create({
    model: 'gpt-5',
    conversation: conv.id, // ✅ Same ID
    input: 'What was my original question?',
  });
  console.log('Turn 3:', turn3.output_text);
}
/**
 * Manages history by hand: the caller keeps an array of turns and
 * resends the whole transcript with every request.
 */
async function manualStateManagement() {
  console.log('=== Manual State Management ===\n');

  // The transcript we maintain ourselves.
  const history: { role: string; content: string }[] = [
    { role: 'user', content: 'Tell me a joke' },
  ];

  // First turn.
  const first = await openai.responses.create({
    model: 'gpt-5',
    input: history,
    store: true, // Optional: store for retrieval later
  });
  console.log('Turn 1:', first.output_text);

  // Append the model's output items to the transcript.
  for (const el of first.output) {
    history.push({ role: el.role, content: el.content });
  }

  // Second turn — resend the full history plus the new user message.
  history.push({ role: 'user', content: 'Tell me another' });
  const second = await openai.responses.create({
    model: 'gpt-5',
    input: history, // ✅ Full history
  });
  console.log('Turn 2:', second.output_text);
}
/**
 * Lists recent conversations, e.g. to power a user-facing dashboard.
 */
async function listConversations() {
  const page = await openai.conversations.list({
    limit: 10,
  });

  console.log('=== Recent Conversations ===');
  for (const conv of page.data) {
    console.log('ID:', conv.id);
    // `created_at` is a unix timestamp in seconds.
    console.log('Created:', new Date(conv.created_at * 1000));
    console.log('Metadata:', conv.metadata);
    console.log('');
  }
}
/**
 * Permanently removes a conversation and its stored history.
 *
 * @param conversationId - id of the conversation to delete
 */
async function deleteConversation(conversationId: string) {
  await openai.conversations.delete(conversationId);
  console.log(`Conversation deleted: ${conversationId}`);
}
// Run examples
// Attach a rejection handler so an API failure is logged instead of
// killing the process with an unhandled promise rejection.
automaticStateManagement().catch(console.error);
// manualStateManagement();
// listConversations();

195
templates/web-search.ts Normal file
View File

@@ -0,0 +1,195 @@
/**
* Web Search Example
*
* Demonstrates real-time web search for current information.
* No cutoff date limitations.
*/
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
/**
 * Asks a question that needs live data and prints the search queries
 * and sources the model consulted.
 */
async function basicWebSearch() {
  console.log('=== Basic Web Search ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'What are the latest updates on GPT-5?',
    tools: [{ type: 'web_search' }],
  });

  console.log('Answer:', result.output_text);

  // Dump each search call's query and its result list.
  for (const item of result.output) {
    if (item.type !== 'web_search_call') continue;

    console.log('\nSearch query:', item.query);
    console.log('Sources:', item.results.length);
    for (const [idx, source] of item.results.entries()) {
      console.log(`\nSource ${idx + 1}:`);
      console.log('Title:', source.title);
      console.log('URL:', source.url);
      console.log('Snippet:', source.snippet);
    }
  }
}
/**
 * Summarizes today's tech headlines via live web search.
 */
async function currentEvents() {
  console.log('=== Current Events ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'What are the top tech news stories today?',
    tools: [{ type: 'web_search' }],
  });

  console.log('News summary:', result.output_text);
}
/**
 * Asks the model to verify a claim and lists the URLs it cited.
 */
async function factChecking() {
  console.log('=== Fact Checking ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Is it true that GPT-5 was released in 2025? Find recent sources.',
    tools: [{ type: 'web_search' }],
  });

  console.log('Fact check:', result.output_text);

  // Print the URL of every source behind the answer.
  for (const item of result.output) {
    if (item.type === 'web_search_call') {
      console.log('\nSources:');
      for (const source of item.results) {
        console.log('-', source.url);
      }
    }
  }
}
/**
 * Uses web search to research an open-ended pros/cons question.
 */
async function researchQuestion() {
  console.log('=== Research Question ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'What are the pros and cons of using Cloudflare Workers for serverless applications?',
    tools: [{ type: 'web_search' }],
  });

  console.log('Research findings:', result.output_text);
}
/**
 * Pairs web search with a conversation id so the follow-up question
 * ("how has it changed?") is understood in context.
 */
async function conversationalWebSearch() {
  console.log('=== Conversational Web Search ===\n');

  const conversation = await openai.conversations.create();

  // Q1 — fresh search.
  const firstAnswer = await openai.responses.create({
    model: 'gpt-5',
    conversation: conversation.id,
    input: 'What is the current price of Bitcoin?',
    tools: [{ type: 'web_search' }],
  });
  console.log('Q1:', firstAnswer.output_text);

  // Q2 — "it" resolves to Bitcoin via the shared conversation.
  const secondAnswer = await openai.responses.create({
    model: 'gpt-5',
    conversation: conversation.id,
    input: 'How has it changed in the last 24 hours?',
    tools: [{ type: 'web_search' }],
  });
  console.log('Q2:', secondAnswer.output_text);
}
/**
 * Compares two products using live search results.
 */
async function comparisonResearch() {
  console.log('=== Comparison Research ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Compare the features and pricing of OpenAI GPT-5 vs Anthropic Claude 3.5 Sonnet',
    tools: [{ type: 'web_search' }],
  });

  console.log('Comparison:', result.output_text);
}
/**
 * Fetches location-specific recommendations via web search.
 */
async function localInformation() {
  console.log('=== Local Information ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'What are the best restaurants in San Francisco for Italian food?',
    tools: [{ type: 'web_search' }],
  });

  console.log('Recommendations:', result.output_text);
}
/**
 * Aggregates recent product reviews via web search.
 */
async function productReviews() {
  console.log('=== Product Reviews ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'What are people saying about the iPhone 16 Pro? Find recent reviews.',
    tools: [{ type: 'web_search' }],
  });

  console.log('Review summary:', result.output_text);
}
/**
 * Registers web search and the code interpreter together: the model
 * searches for the current price, then computes with it.
 */
async function combinedTools() {
  console.log('=== Combined Tools (Web Search + Code Interpreter) ===\n');

  const result = await openai.responses.create({
    model: 'gpt-5',
    input: 'Find the current Bitcoin price and calculate what $1000 would be worth',
    tools: [
      { type: 'web_search' },
      { type: 'code_interpreter' },
    ],
  });

  console.log('Answer:', result.output_text);
}
/**
 * Combines file search over an uploaded internal document with web
 * search: the model consults internal policy first, then may search
 * the web for external information.
 */
async function webSearchWithFileSearch() {
  console.log('=== Web Search + File Search ===\n');
  // Upload internal document
  // NOTE(review): passing a raw Buffer may not satisfy the SDK's
  // Uploadable type in current `openai` versions — consider wrapping
  // with `toFile(buffer, 'policy.txt')`; confirm against the installed
  // SDK version.
  const file = await openai.files.create({
    file: Buffer.from('Internal policy: Always check external sources for pricing info'),
    purpose: 'assistants',
  });
  const response = await openai.responses.create({
    model: 'gpt-5',
    input: 'What is our policy on competitor pricing research?',
    tools: [
      // Restrict file search to the document uploaded above.
      { type: 'file_search', file_ids: [file.id] },
      { type: 'web_search' },
    ],
  });
  console.log('Answer:', response.output_text);
  // Model checks internal policy, then searches web if needed
}
// Run examples
basicWebSearch();
// currentEvents();
// factChecking();
// researchQuestion();
// conversationalWebSearch();
// comparisonResearch();