Initial commit

Zhongwei Li
2025-11-30 08:25:37 +08:00
commit 13df4850f7
29 changed files with 6729 additions and 0 deletions

app/api/chat/route.ts

@@ -0,0 +1,175 @@
/**
* Next.js App Router - API Route for Chat
*
* File: app/api/chat/route.ts
*
* Handles streaming chat completions with TheSys C1 API.
*
* Features:
* - Streaming responses
* - OpenAI SDK integration
* - Error handling
* - CORS headers
*/
import { NextRequest, NextResponse } from "next/server";
import OpenAI from "openai";
import { transformStream } from "@crayonai/stream";
const client = new OpenAI({
baseURL: "https://api.thesys.dev/v1/embed",
apiKey: process.env.THESYS_API_KEY,
});
// System prompt for the AI
const SYSTEM_PROMPT = `You are a helpful AI assistant that generates interactive user interfaces.
When responding:
- Use clear, concise language
- Generate appropriate UI components (charts, tables, forms) when beneficial
- Ask clarifying questions when needed
- Be friendly and professional`;
export async function POST(req: NextRequest) {
try {
const { prompt, previousC1Response } = await req.json();
if (!prompt || typeof prompt !== "string") {
return NextResponse.json(
{ error: "Invalid prompt" },
{ status: 400 }
);
}
// Check API key
if (!process.env.THESYS_API_KEY) {
console.error("THESYS_API_KEY is not set");
return NextResponse.json(
{ error: "Server configuration error" },
{ status: 500 }
);
}
// Build messages array
const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: SYSTEM_PROMPT },
{ role: "user", content: prompt },
];
// Insert the previous C1 response as assistant context, between the system prompt and the new user message
if (previousC1Response) {
messages.splice(1, 0, {
role: "assistant",
content: previousC1Response,
});
}
// Create streaming completion
const stream = await client.chat.completions.create({
model: "c1/openai/gpt-5/v-20250930", // or claude-sonnet-4/v-20250930
messages,
stream: true,
temperature: 0.7,
max_tokens: 2000,
});
// Transform OpenAI stream to C1 format
const responseStream = transformStream(stream, (chunk) => {
return chunk.choices[0]?.delta?.content || "";
}) as ReadableStream<string>;
return new NextResponse(responseStream, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "POST, OPTIONS",
"Access-Control-Allow-Headers": "Content-Type",
},
});
} catch (error) {
console.error("Chat API Error:", error);
// Handle specific OpenAI errors
if (error instanceof OpenAI.APIError) {
return NextResponse.json(
{
error: error.message,
type: error.type,
code: error.code,
},
{ status: error.status || 500 }
);
}
return NextResponse.json(
{ error: "Internal server error" },
{ status: 500 }
);
}
}
// Handle preflight requests
export async function OPTIONS() {
return new NextResponse(null, {
headers: {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "POST, OPTIONS",
"Access-Control-Allow-Headers": "Content-Type",
},
});
}
/**
* Alternative: Using Anthropic (Claude) models
*
* const stream = await client.chat.completions.create({
* model: "c1/anthropic/claude-sonnet-4/v-20250617",
* messages,
* stream: true,
* temperature: 0.8,
* max_tokens: 4096,
* });
*/
/**
* Alternative: With message persistence
*
* import { db } from "@/lib/db";
*
* export async function POST(req: NextRequest) {
* const { userId } = auth(); // Clerk, NextAuth, etc.
* const { prompt, threadId } = await req.json();
*
* // Save user message
* await db.insert(messages).values({
* threadId,
* userId,
* role: "user",
* content: prompt,
* });
*
* // Get conversation history
* const history = await db
* .select()
* .from(messages)
* .where(eq(messages.threadId, threadId))
* .orderBy(messages.createdAt);
*
* const llmMessages = history.map((m) => ({
* role: m.role,
* content: m.content,
* }));
*
* const stream = await client.chat.completions.create({
* model: "c1/openai/gpt-5/v-20250930",
* messages: [{ role: "system", content: SYSTEM_PROMPT }, ...llmMessages],
* stream: true,
* });
*
* // ... transform and return stream
*
* // Save assistant response after streaming completes
* // (You'd need to handle this in the client or use a callback)
* }
*/
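/**
* Consuming the stream from a custom client
*
* A minimal sketch; C1Chat (see app/page.tsx) already handles this for you,
* so a raw fetch like the one below is only needed when building your own UI.
*
* async function sendPrompt(prompt: string): Promise<string> {
* const res = await fetch("/api/chat", {
* method: "POST",
* headers: { "Content-Type": "application/json" },
* body: JSON.stringify({ prompt }),
* });
* if (!res.ok || !res.body) throw new Error(`Request failed: ${res.status}`);
*
* const reader = res.body.getReader();
* const decoder = new TextDecoder();
* let text = "";
* while (true) {
* const { done, value } = await reader.read();
* if (done) break;
* text += decoder.decode(value, { stream: true });
* // Render `text` incrementally here (e.g. via a React state setter)
* }
* return text;
* }
*/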

app/page.tsx

@@ -0,0 +1,128 @@
/**
* Next.js App Router - Page Component with C1Chat
*
* File: app/page.tsx
*
* Simplest possible integration: drop in C1Chat and point it at the API route.
*
* Features:
* - Pre-built C1Chat component
* - Automatic state management
* - Thread support (optional)
* - Responsive design
*/
"use client";
import { C1Chat } from "@thesysai/genui-sdk";
import { themePresets } from "@crayonai/react-ui";
import "@crayonai/react-ui/styles/index.css";
export default function Home() {
return (
<main className="min-h-screen bg-gray-50 dark:bg-gray-900">
<div className="container mx-auto p-4">
<C1Chat
apiUrl="/api/chat"
agentName="AI Assistant"
logoUrl="https://placehold.co/100x100/3b82f6/ffffff?text=AI"
theme={themePresets.default}
/>
</div>
</main>
);
}
/**
* Alternative: With custom theme and dark mode
*
* import { useState, useEffect } from "react";
*
* function useSystemTheme() {
* const [theme, setTheme] = useState<"light" | "dark">("light");
*
* useEffect(() => {
* const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)");
* setTheme(mediaQuery.matches ? "dark" : "light");
*
* const handler = (e: MediaQueryListEvent) => {
* setTheme(e.matches ? "dark" : "light");
* };
*
* mediaQuery.addEventListener("change", handler);
* return () => mediaQuery.removeEventListener("change", handler);
* }, []);
*
* return theme;
* }
*
* export default function Home() {
* const systemTheme = useSystemTheme();
*
* return (
* <C1Chat
* apiUrl="/api/chat"
* theme={{ ...themePresets.candy, mode: systemTheme }}
* />
* );
* }
*/
/**
* Alternative: With thread management
*
* import {
* useThreadListManager,
* useThreadManager,
* } from "@thesysai/genui-sdk";
*
* export default function Home() {
* const threadListManager = useThreadListManager({
* fetchThreadList: async () => {
* const res = await fetch("/api/threads");
* return res.json();
* },
* deleteThread: async (threadId: string) => {
* await fetch(`/api/threads/${threadId}`, { method: "DELETE" });
* },
* updateThread: async (thread) => {
* const res = await fetch(`/api/threads/${thread.threadId}`, {
* method: "PUT",
* body: JSON.stringify(thread),
* });
* return res.json();
* },
* createThread: async (firstMessage) => {
* const res = await fetch("/api/threads", {
* method: "POST",
* body: JSON.stringify({ title: firstMessage.message }),
* });
* return res.json();
* },
* onSwitchToNew: () => {
* window.history.replaceState(null, "", "/");
* },
* onSelectThread: (threadId) => {
* window.history.replaceState(null, "", `/?threadId=${threadId}`);
* },
* });
*
* const threadManager = useThreadManager({
* threadListManager,
* loadThread: async (threadId) => {
* const res = await fetch(`/api/threads/${threadId}/messages`);
* return res.json();
* },
* onUpdateMessage: async ({ message }) => {
* // Handle message updates
* },
* });
*
* return (
* <C1Chat
* threadManager={threadManager}
* threadListManager={threadListManager}
* />
* );
* }
*/
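/**
* Note: the thread-management alternative above assumes /api/threads
* endpoints that this example does not include. A minimal in-memory
* sketch of the list/create handlers (the file path, shapes, and Map
* storage are illustrative assumptions, not SDK requirements):
*
* // File: app/api/threads/route.ts
* import { NextRequest, NextResponse } from "next/server";
*
* type Thread = { threadId: string; title: string; createdAt: string };
* const threads = new Map<string, Thread>(); // swap for a real database
*
* export async function GET() {
* return NextResponse.json([...threads.values()]);
* }
*
* export async function POST(req: NextRequest) {
* const { title } = await req.json();
* const thread: Thread = {
* threadId: crypto.randomUUID(),
* title,
* createdAt: new Date().toISOString(),
* };
* threads.set(thread.threadId, thread);
* return NextResponse.json(thread);
* }
*/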

package.json

@@ -0,0 +1,43 @@
{
"name": "thesys-nextjs-example",
"version": "1.0.0",
"private": true,
"description": "Next.js App Router integration with TheSys Generative UI",
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@thesysai/genui-sdk": "^0.6.40",
"@crayonai/react-ui": "^0.8.42",
"@crayonai/react-core": "^0.7.6",
"@crayonai/stream": "^0.1.0",
"next": "^15.1.4",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-error-boundary": "^5.0.0",
"openai": "^4.73.0",
"zod": "^3.24.1",
"zod-to-json-schema": "^3.24.1"
},
"devDependencies": {
"@types/node": "^22.0.0",
"@types/react": "^19.0.0",
"@types/react-dom": "^19.0.0",
"typescript": "^5.7.3",
"eslint": "^9.0.0",
"eslint-config-next": "^15.1.4",
"tailwindcss": "^4.1.14",
"postcss": "^8.4.49",
"autoprefixer": "^10.4.20"
},
"optionalDependencies": {
"@tavily/core": "^1.0.0",
"@clerk/nextjs": "^6.10.0"
},
"engines": {
"node": ">=20.0.0"
}
}
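Both API routes read their credentials from the environment. A minimal
.env.local sketch (placeholder values, not real key formats):

THESYS_API_KEY=your-thesys-api-key
TAVILY_API_KEY=your-tavily-api-key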

app/api/chat-with-tools/route.ts

@@ -0,0 +1,325 @@
/**
* Next.js API Route with Tool Calling
*
* File: app/api/chat-with-tools/route.ts
*
* Demonstrates tool calling integration with TheSys C1.
* Includes:
* - Zod schema definitions
* - Web search tool (Tavily)
* - Product inventory tool
* - Order creation tool
* - Streaming with tool execution
*/
import { NextRequest, NextResponse } from "next/server";
import OpenAI from "openai";
import { z } from "zod";
import zodToJsonSchema from "zod-to-json-schema";
import { transformStream } from "@crayonai/stream";
import { tavily } from "@tavily/core";
const client = new OpenAI({
baseURL: "https://api.thesys.dev/v1/embed",
apiKey: process.env.THESYS_API_KEY,
});
const tavilyClient = tavily({
apiKey: process.env.TAVILY_API_KEY || "",
});
// ============================================================================
// Tool Schemas
// ============================================================================
const webSearchSchema = z.object({
query: z.string().describe("The search query"),
max_results: z
.number()
.int()
.min(1)
.max(10)
.default(5)
.describe("Maximum number of results"),
});
const productLookupSchema = z.object({
product_type: z
.enum(["gloves", "hat", "scarf", "all"])
.optional()
.describe("Type of product to lookup, or 'all' for everything"),
});
const orderItemSchema = z.discriminatedUnion("type", [
z.object({
type: z.literal("gloves"),
size: z.enum(["S", "M", "L", "XL"]),
color: z.string(),
quantity: z.number().int().min(1),
}),
z.object({
type: z.literal("hat"),
style: z.enum(["beanie", "baseball", "fedora"]),
color: z.string(),
quantity: z.number().int().min(1),
}),
z.object({
type: z.literal("scarf"),
length: z.enum(["short", "medium", "long"]),
material: z.enum(["wool", "cotton", "silk"]),
quantity: z.number().int().min(1),
}),
]);
const createOrderSchema = z.object({
customer_email: z.string().email().describe("Customer email address"),
items: z.array(orderItemSchema).min(1).describe("Items to order"),
});
// ============================================================================
// Tool Definitions
// ============================================================================
// Note: runTools requires each tool to carry a callable. `parse` receives
// the raw JSON argument string, and `function` receives its parsed result;
// the runner invokes the callable and sends the return value back to the
// model automatically. The execute* function declarations below are
// hoisted, so referencing them here is safe.
const webSearchTool = {
type: "function" as const,
function: {
name: "web_search",
description: "Search the web for current information using Tavily API",
parameters: zodToJsonSchema(webSearchSchema),
parse: (input: string) => webSearchSchema.parse(JSON.parse(input)),
function: executeWebSearch,
},
};
const productLookupTool = {
type: "function" as const,
function: {
name: "lookup_product",
description: "Look up products in inventory",
parameters: zodToJsonSchema(productLookupSchema),
parse: (input: string) => productLookupSchema.parse(JSON.parse(input)),
function: executeProductLookup,
},
};
const createOrderTool = {
type: "function" as const,
function: {
name: "create_order",
description: "Create a new product order",
parameters: zodToJsonSchema(createOrderSchema),
parse: (input: string) => createOrderSchema.parse(JSON.parse(input)),
function: executeCreateOrder,
},
};
// ============================================================================
// Tool Execution Functions
// ============================================================================
async function executeWebSearch(args: z.infer<typeof webSearchSchema>) {
const validated = webSearchSchema.parse(args);
const results = await tavilyClient.search(validated.query, {
maxResults: validated.max_results,
includeAnswer: true,
});
return {
query: validated.query,
answer: results.answer,
results: results.results.map((r) => ({
title: r.title,
url: r.url,
snippet: r.content,
})),
};
}
async function executeProductLookup(
args: z.infer<typeof productLookupSchema>
) {
const validated = productLookupSchema.parse(args);
// Mock inventory - replace with actual database query
const inventory = {
gloves: [
{ id: 1, size: "M", color: "blue", price: 29.99, stock: 15 },
{ id: 2, size: "L", color: "red", price: 29.99, stock: 8 },
],
hat: [
{ id: 3, style: "beanie", color: "black", price: 19.99, stock: 20 },
{ id: 4, style: "baseball", color: "navy", price: 24.99, stock: 12 },
],
scarf: [
{ id: 5, length: "medium", material: "wool", price: 34.99, stock: 10 },
],
};
if (validated.product_type && validated.product_type !== "all") {
return {
type: validated.product_type,
products: inventory[validated.product_type],
};
}
return { type: "all", inventory };
}
async function executeCreateOrder(args: z.infer<typeof createOrderSchema>) {
const validated = createOrderSchema.parse(args);
// Mock order creation - replace with actual database insert
const orderId = `ORD-${Date.now()}`;
// Simulate saving to database
console.log("Creating order:", {
orderId,
customer: validated.customer_email,
items: validated.items,
});
return {
success: true,
orderId,
customer_email: validated.customer_email,
items: validated.items,
total: validated.items.reduce(
(sum, item) => sum + item.quantity * 29.99,
0
), // Mock price
message: `Order ${orderId} created successfully`,
};
}
// ============================================================================
// API Route Handler
// ============================================================================
export async function POST(req: NextRequest) {
try {
const { prompt, previousC1Response } = await req.json();
if (!prompt || typeof prompt !== "string") {
return NextResponse.json(
{ error: "Invalid prompt" },
{ status: 400 }
);
}
const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{
role: "system",
content: `You are a helpful shopping assistant with access to tools.
You can:
1. Search the web for product information
2. Look up products in our inventory
3. Create orders for customers
Always use tools when appropriate. Be friendly and helpful.`,
},
{ role: "user", content: prompt },
];
if (previousC1Response) {
messages.splice(1, 0, {
role: "assistant",
content: previousC1Response,
});
}
// Create streaming completion with tools
const llmStream = await client.beta.chat.completions.runTools({
model: "c1/anthropic/claude-sonnet-4/v-20250617",
messages,
stream: true,
tools: [webSearchTool, productLookupTool, createOrderTool],
tool_choice: "auto", // Let the model decide when to use tools
temperature: 0.7,
});
// runTools executes the tool callables defined above and routes each
// result back to the model automatically, so no manual dispatch is
// needed here. This listener is only for observability.
llmStream.on("message", (message) => {
if ("tool_calls" in message && message.tool_calls) {
for (const toolCall of message.tool_calls) {
if (toolCall.type === "function") {
console.log(`Tool requested: ${toolCall.function.name}`);
}
}
}
});
// Transform stream to C1 format
const responseStream = transformStream(llmStream, (chunk) => {
return chunk.choices[0]?.delta?.content || "";
}) as ReadableStream<string>;
return new NextResponse(responseStream, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*",
},
});
} catch (error) {
console.error("Chat API Error:", error);
if (error instanceof z.ZodError) {
return NextResponse.json(
{
error: "Validation error",
details: error.errors,
},
{ status: 400 }
);
}
if (error instanceof OpenAI.APIError) {
return NextResponse.json(
{
error: error.message,
type: error.type,
},
{ status: error.status || 500 }
);
}
return NextResponse.json(
{ error: "Internal server error" },
{ status: 500 }
);
}
}
export async function OPTIONS() {
return new NextResponse(null, {
headers: {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "POST, OPTIONS",
"Access-Control-Allow-Headers": "Content-Type",
},
});
}
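/**
* Quick check of the order schema
*
* An illustrative snippet for a test file or REPL, not part of the route;
* it exercises the zod schemas defined above.
*
* const ok = createOrderSchema.safeParse({
* customer_email: "jane@example.com",
* items: [
* { type: "gloves", size: "M", color: "blue", quantity: 2 },
* { type: "scarf", length: "long", material: "wool", quantity: 1 },
* ],
* });
* console.log(ok.success); // true
*
* const bad = createOrderSchema.safeParse({
* customer_email: "not-an-email",
* items: [],
* });
* console.log(bad.success); // false: invalid email and empty items array
* // bad.error.errors lists both issues; the route surfaces them as a 400
*/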