Initial commit

This commit is contained in:
Zhongwei Li
2025-11-30 08:25:37 +08:00
commit 13df4850f7
29 changed files with 6729 additions and 0 deletions

View File

@@ -0,0 +1,191 @@
/**
* Cloudflare Workers + Vite Frontend Setup
*
* File: src/App.tsx
*
* Frontend configuration for Vite + React app deployed with Cloudflare Workers.
* Uses relative paths since Worker and frontend run on same origin.
*
* Key Differences from standalone Vite:
* - API URLs are relative (not absolute)
* - No CORS issues (same origin)
* - Worker handles routing, serves static assets
*/
import "@crayonai/react-ui/styles/index.css";
import { ThemeProvider, C1Component } from "@thesysai/genui-sdk";
import { useState } from "react";
import "./App.css";
/**
 * Root application component.
 *
 * Single-prompt chat UI: the user submits a question, the Worker proxies it
 * to the TheSys C1 API at the same origin, and the returned C1 payload is
 * rendered as generative UI via <C1Component>.
 */
export default function App() {
  const [isLoading, setIsLoading] = useState(false);
  const [c1Response, setC1Response] = useState("");
  const [question, setQuestion] = useState("");
  const [error, setError] = useState<string | null>(null);

  /**
   * POST the prompt (plus any previous C1 response, so the model can refine
   * the UI it already produced) to the Worker and store the result.
   */
  const makeApiCall = async (query: string, previousResponse?: string) => {
    if (!query.trim()) return;
    setIsLoading(true);
    setError(null);
    try {
      // NOTE: Using relative path - Worker handles this on same domain
      const response = await fetch("/api/chat", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          prompt: query,
          previousC1Response: previousResponse || c1Response,
        }),
      });
      if (!response.ok) {
        // The Worker returns JSON errors, but intermediaries (e.g. Cloudflare
        // error pages) can return HTML — don't let a failed JSON parse mask
        // the real HTTP status.
        let message = `HTTP ${response.status}`;
        try {
          const errorData = await response.json();
          if (errorData?.error) message = errorData.error;
        } catch {
          /* non-JSON error body: keep the HTTP status message */
        }
        throw new Error(message);
      }
      const data = await response.json();
      setC1Response(data.response);
      setQuestion("");
    } catch (err) {
      console.error("API Error:", err);
      setError(err instanceof Error ? err.message : "Failed to get response");
    } finally {
      setIsLoading(false);
    }
  };

  // Form submit handler: prevent navigation and fire the API call.
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    makeApiCall(question);
  };

  return (
    <div className="app-container">
      <header className="app-header">
        <h1>Cloudflare AI Assistant</h1>
        <p>Powered by Workers + TheSys C1</p>
      </header>
      <form onSubmit={handleSubmit} className="input-form">
        <input
          type="text"
          value={question}
          onChange={(e) => setQuestion(e.target.value)}
          placeholder="Ask me anything..."
          disabled={isLoading}
          className="question-input"
          autoFocus
        />
        <button
          type="submit"
          disabled={isLoading || !question.trim()}
          className="submit-button"
        >
          {isLoading ? "Processing..." : "Send"}
        </button>
      </form>
      {error && (
        <div className="error-message">
          <strong>Error:</strong> {error}
        </div>
      )}
      {c1Response && (
        <div className="response-container">
          <ThemeProvider>
            <C1Component
              c1Response={c1Response}
              isStreaming={isLoading}
              updateMessage={(message) => setC1Response(message)}
              onAction={({ llmFriendlyMessage }) => {
                if (!isLoading) {
                  makeApiCall(llmFriendlyMessage, c1Response);
                }
              }}
            />
          </ThemeProvider>
        </div>
      )}
    </div>
  );
}
/**
* vite.config.ts Configuration
*
* IMPORTANT: When using @cloudflare/vite-plugin, the Worker runs
* alongside Vite on the same port, so use relative API paths.
*
* import { defineConfig } from "vite";
* import react from "@vitejs/plugin-react";
* import { cloudflare } from "@cloudflare/vite-plugin";
*
* export default defineConfig({
* plugins: [
* react(),
* cloudflare({
* configPath: "./wrangler.jsonc",
* }),
* ],
* build: {
* outDir: "dist",
* },
* });
*/
/**
* Alternative: Streaming Setup
*
* For streaming responses, modify the API call:
*
* const makeStreamingApiCall = async (query: string) => {
* setIsLoading(true);
* setC1Response("");
*
* const response = await fetch("/api/chat/stream", {
* method: "POST",
* headers: { "Content-Type": "application/json" },
* body: JSON.stringify({ prompt: query }),
* });
*
* if (!response.ok) {
* throw new Error("Stream failed");
* }
*
* const reader = response.body?.getReader();
* if (!reader) return;
*
* const decoder = new TextDecoder();
* let accumulated = "";
*
* while (true) {
* const { done, value } = await reader.read();
* if (done) break;
*
* const chunk = decoder.decode(value);
* accumulated += chunk;
* setC1Response(accumulated);
* }
*
* setIsLoading(false);
* };
*/
/**
* Deployment Steps:
*
* 1. Build frontend:
* npm run build
*
* 2. Deploy to Cloudflare:
* npx wrangler deploy
*
* 3. Set secrets:
* npx wrangler secret put THESYS_API_KEY
*
* 4. Test:
* Visit your-worker.workers.dev
*/

View File

@@ -0,0 +1,247 @@
/**
* Cloudflare Worker Backend with Hono + TheSys C1
*
* File: backend/src/index.ts
*
* Features:
* - Hono routing
* - TheSys C1 API proxy
* - Streaming support
* - Static assets serving
* - CORS handling
*/
import { Hono } from "hono";
import { cors } from "hono/cors";
import { serveStatic } from "hono/cloudflare-workers";
// Worker environment bindings, configured in wrangler.jsonc.
type Bindings = {
  THESYS_API_KEY: string; // secret: npx wrangler secret put THESYS_API_KEY
  ASSETS: Fetcher; // static assets binding (Vite build output)
};
const app = new Hono<{ Bindings: Bindings }>();
// CORS middleware
// NOTE(review): origin "*" is fully permissive; since the frontend is served
// same-origin by this Worker, consider restricting origins in production.
app.use("/*", cors({
  origin: "*",
  allowMethods: ["GET", "POST", "OPTIONS"],
  allowHeaders: ["Content-Type", "Authorization"],
}));
// ============================================================================
// Chat API Endpoint
// ============================================================================
/**
 * POST /api/chat — non-streaming chat completion proxied to TheSys C1.
 *
 * Body:    { prompt: string, previousC1Response?: string }
 * Returns: { response: string, usage?: object } on success,
 *          { error: string } with 400/500/502 on failure.
 */
app.post("/api/chat", async (c) => {
  try {
    const { prompt, previousC1Response } = await c.req.json();
    if (!prompt || typeof prompt !== "string") {
      return c.json({ error: "Invalid prompt" }, 400);
    }
    // Check API key binding
    if (!c.env.THESYS_API_KEY) {
      console.error("THESYS_API_KEY binding not found");
      return c.json({ error: "Server configuration error" }, 500);
    }
    // Build messages in order: system, (optional assistant context), user.
    const messages = [
      {
        role: "system",
        content: "You are a helpful AI assistant that generates interactive UI.",
      },
      {
        role: "user",
        content: prompt,
      },
    ];
    if (previousC1Response) {
      // Insert the prior C1 response so the model can refine the existing UI.
      messages.splice(1, 0, {
        role: "assistant",
        content: previousC1Response,
      });
    }
    // Call TheSys C1 API
    const response = await fetch(
      "https://api.thesys.dev/v1/embed/chat/completions",
      {
        method: "POST",
        headers: {
          "Authorization": `Bearer ${c.env.THESYS_API_KEY}`,
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model: "c1/openai/gpt-5/v-20250930",
          messages,
          stream: false, // Or handle streaming
          temperature: 0.7,
          max_tokens: 2000,
        }),
      }
    );
    if (!response.ok) {
      const error = await response.text();
      console.error("TheSys API Error:", error);
      // Upstream failures (including auth errors caused by *our* key) are not
      // the client's fault — surface them as 502 Bad Gateway rather than
      // forwarding a misleading upstream status code.
      return c.json({ error: "Failed to get AI response" }, 502);
    }
    // Defensive access: an ok response without `choices` must not crash.
    const data = (await response.json()) as {
      choices?: { message?: { content?: string } }[];
      usage?: unknown;
    };
    return c.json({
      response: data.choices?.[0]?.message?.content || "",
      usage: data.usage,
    });
  } catch (error) {
    console.error("Chat endpoint error:", error);
    return c.json(
      { error: error instanceof Error ? error.message : "Internal error" },
      500
    );
  }
});
// ============================================================================
// Streaming Chat Endpoint
// ============================================================================
/**
 * POST /api/chat/stream — proxy a streaming (SSE) completion from TheSys C1.
 *
 * Body: { prompt: string }
 * The upstream response body is passed through unchanged.
 */
app.post("/api/chat/stream", async (c) => {
  try {
    const { prompt } = await c.req.json();
    // Validate input and configuration (kept consistent with /api/chat).
    if (!prompt || typeof prompt !== "string") {
      return c.json({ error: "Invalid prompt" }, 400);
    }
    if (!c.env.THESYS_API_KEY) {
      console.error("THESYS_API_KEY binding not found");
      return c.json({ error: "Server configuration error" }, 500);
    }
    const response = await fetch(
      "https://api.thesys.dev/v1/embed/chat/completions",
      {
        method: "POST",
        headers: {
          "Authorization": `Bearer ${c.env.THESYS_API_KEY}`,
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model: "c1/openai/gpt-5/v-20250930",
          messages: [
            { role: "system", content: "You are a helpful assistant." },
            { role: "user", content: prompt },
          ],
          stream: true,
        }),
      }
    );
    if (!response.ok) {
      // Upstream failure — report as Bad Gateway instead of echoing its status.
      return c.json({ error: "Stream failed" }, 502);
    }
    // Return the stream directly
    return new Response(response.body, {
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
      },
    });
  } catch (error) {
    console.error("Stream error:", error);
    return c.json({ error: "Stream failed" }, 500);
  }
});
// ============================================================================
// Health Check
// ============================================================================
// Lightweight liveness probe; includes server time so callers can spot drift.
app.get("/api/health", (c) =>
  c.json({
    status: "ok",
    timestamp: new Date().toISOString(),
  })
);
// ============================================================================
// Serve Static Assets (Vite build output)
// ============================================================================
// Fallback route: serve the Vite build for anything not matched above.
// NOTE(review): with an `assets` binding configured in wrangler.jsonc,
// unmatched requests are typically served by the ASSETS binding automatically,
// and `serveStatic` from "hono/cloudflare-workers" historically requires a
// site manifest — confirm this call actually serves files in this setup.
app.get("/*", serveStatic({ root: "./", mimes: {} }));
export default app;
/**
* Alternative: Using Workers AI directly (cheaper for some models)
*
* type Bindings = {
* AI: any; // Cloudflare AI binding
* };
*
* app.post("/api/chat", async (c) => {
* const { prompt } = await c.req.json();
*
* const aiResponse = await c.env.AI.run('@cf/meta/llama-3-8b-instruct', {
* messages: [
* { role: "system", content: "You are a helpful assistant." },
* { role: "user", content: prompt },
* ],
* });
*
* // Then optionally send to TheSys C1 for UI generation
* const c1Response = await fetch("https://api.thesys.dev/v1/embed/chat/completions", {
* method: "POST",
* headers: {
* "Authorization": `Bearer ${c.env.THESYS_API_KEY}`,
* "Content-Type": "application/json",
* },
* body: JSON.stringify({
* model: "c1/openai/gpt-5/v-20250930",
* messages: [
* {
* role: "system",
* content: "Generate a UI for this content: " + aiResponse.response,
* },
* ],
* }),
* });
*
* // ... return c1Response
* });
*/
/**
* Alternative: With D1 Database for message persistence
*
* type Bindings = {
* THESYS_API_KEY: string;
* DB: D1Database; // D1 binding
* };
*
* app.post("/api/chat", async (c) => {
* const { userId, threadId, prompt } = await c.req.json();
*
* // Save user message
* await c.env.DB.prepare(
* "INSERT INTO messages (thread_id, user_id, role, content) VALUES (?, ?, ?, ?)"
* )
* .bind(threadId, userId, "user", prompt)
* .run();
*
* // Get conversation history
* const { results } = await c.env.DB.prepare(
* "SELECT role, content FROM messages WHERE thread_id = ? ORDER BY created_at"
* )
* .bind(threadId)
* .all();
*
* const messages = [
* { role: "system", content: "You are a helpful assistant." },
* ...results,
* ];
*
* // Call TheSys API with full history...
* });
*/

View File

@@ -0,0 +1,106 @@
{
  // Cloudflare Worker Configuration with Static Assets
  //
  // This configures a Worker that serves a Vite+React frontend
  // and handles API routes for TheSys C1 integration.
  //
  // Prerequisites:
  // 1. Set THESYS_API_KEY secret: npx wrangler secret put THESYS_API_KEY
  // 2. Build frontend: npm run build
  // 3. Deploy: npx wrangler deploy
  "name": "thesys-chat-worker",
  "compatibility_date": "2025-10-26",
  // Node.js compatibility for packages like the OpenAI SDK.
  // NOTE: the legacy top-level "node_compat" option must NOT be set alongside
  // this flag — wrangler rejects the combination.
  "compatibility_flags": ["nodejs_compat"],
  // Main worker file (Hono backend)
  "main": "backend/src/index.ts",
  // Static assets configuration (Vite build output)
  "assets": {
    "directory": "dist",
    "binding": "ASSETS",
    "html_handling": "auto-trailing-slash",
    "not_found_handling": "single-page-application"
  },
  // Environment variables (non-sensitive)
  "vars": {
    "ENVIRONMENT": "production",
    "LOG_LEVEL": "info"
  },
  // Secrets (set via CLI, not in this file!)
  // npx wrangler secret put THESYS_API_KEY
  // npx wrangler secret put TAVILY_API_KEY (optional, for tool calling)
  // Optional: D1 Database binding for message persistence
  // "d1_databases": [
  //   {
  //     "binding": "DB",
  //     "database_name": "thesys-chat-db",
  //     "database_id": "your-database-id"
  //   }
  // ],
  // Optional: KV namespace for caching
  // "kv_namespaces": [
  //   {
  //     "binding": "KV",
  //     "id": "your-kv-id"
  //   }
  // ],
  // Optional: Workers AI binding (for hybrid approach)
  // "ai": {
  //   "binding": "AI"
  // },
  // Optional: Durable Objects for real-time features
  // "durable_objects": {
  //   "bindings": [
  //     {
  //       "name": "CHAT_SESSION",
  //       "class_name": "ChatSession",
  //       "script_name": "thesys-chat-worker"
  //     }
  //   ]
  // },
  // Build configuration
  "build": {
    "command": "npm run build"
  },
  // Development settings
  "dev": {
    "port": 8787,
    "local_protocol": "http"
  },
  // Observability
  "observability": {
    "enabled": true
  },
  // Routes (optional - for custom domains)
  // "routes": [
  //   {
  //     "pattern": "chat.yourdomain.com/*",
  //     "zone_name": "yourdomain.com"
  //   }
  // ],
  // Workers Limits
  "limits": {
    "cpu_ms": 50000
  },
  // Placement (optional - for closer to users)
  // "placement": {
  //   "mode": "smart"
  // }
}

View File

@@ -0,0 +1,175 @@
/**
* Next.js App Router - API Route for Chat
*
* File: app/api/chat/route.ts
*
* Handles streaming chat completions with TheSys C1 API.
*
* Features:
* - Streaming responses
* - OpenAI SDK integration
* - Error handling
* - CORS headers
*/
import { NextRequest, NextResponse } from "next/server";
import OpenAI from "openai";
import { transformStream } from "@crayonai/stream";
// OpenAI-compatible client pointed at the TheSys C1 embed endpoint.
// NOTE(review): THESYS_API_KEY may be undefined at module load; the POST
// handler below checks for it before making any request.
const client = new OpenAI({
  baseURL: "https://api.thesys.dev/v1/embed",
  apiKey: process.env.THESYS_API_KEY,
});
// System prompt for the AI
const SYSTEM_PROMPT = `You are a helpful AI assistant that generates interactive user interfaces.
When responding:
- Use clear, concise language
- Generate appropriate UI components (charts, tables, forms) when beneficial
- Ask clarifying questions when needed
- Be friendly and professional`;
/**
 * POST /api/chat
 *
 * Streams a C1-formatted chat completion back to the client as SSE.
 * Accepts { prompt, previousC1Response? }; the previous C1 response is
 * threaded into the conversation so the model can refine the existing UI.
 */
export async function POST(req: NextRequest) {
  try {
    const { prompt, previousC1Response } = await req.json();

    // Reject missing or non-string prompts up front.
    if (!prompt || typeof prompt !== "string") {
      return NextResponse.json(
        { error: "Invalid prompt" },
        { status: 400 }
      );
    }

    // Fail fast when the server credential is missing.
    if (!process.env.THESYS_API_KEY) {
      console.error("THESYS_API_KEY is not set");
      return NextResponse.json(
        { error: "Server configuration error" },
        { status: 500 }
      );
    }

    // Conversation order: system, then (optionally) the prior assistant
    // turn, then the new user prompt.
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] =
      previousC1Response
        ? [
            { role: "system", content: SYSTEM_PROMPT },
            { role: "assistant", content: previousC1Response },
            { role: "user", content: prompt },
          ]
        : [
            { role: "system", content: SYSTEM_PROMPT },
            { role: "user", content: prompt },
          ];

    // Kick off the streaming completion.
    const completion = await client.chat.completions.create({
      model: "c1/openai/gpt-5/v-20250930", // or claude-sonnet-4/v-20250930
      messages,
      stream: true,
      temperature: 0.7,
      max_tokens: 2000,
    });

    // Adapt the OpenAI chunk stream into the C1 text stream format.
    const responseStream = transformStream(
      completion,
      (chunk) => chunk.choices[0]?.delta?.content || ""
    ) as ReadableStream<string>;

    const streamHeaders = {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache, no-transform",
      "Connection": "keep-alive",
      "Access-Control-Allow-Origin": "*",
      "Access-Control-Allow-Methods": "POST, OPTIONS",
      "Access-Control-Allow-Headers": "Content-Type",
    };
    return new NextResponse(responseStream, { headers: streamHeaders });
  } catch (error) {
    console.error("Chat API Error:", error);

    // Surface OpenAI API errors with their own status/type/code.
    if (error instanceof OpenAI.APIError) {
      return NextResponse.json(
        {
          error: error.message,
          type: error.type,
          code: error.code,
        },
        { status: error.status || 500 }
      );
    }

    return NextResponse.json(
      { error: "Internal server error" },
      { status: 500 }
    );
  }
}
// Handle preflight requests
export async function OPTIONS() {
  // CORS preflight: advertise allowed methods/headers; no response body.
  const corsHeaders = {
    "Access-Control-Allow-Origin": "*",
    "Access-Control-Allow-Methods": "POST, OPTIONS",
    "Access-Control-Allow-Headers": "Content-Type",
  };
  return new NextResponse(null, { headers: corsHeaders });
}
/**
* Alternative: Using Anthropic (Claude) models
*
* const stream = await client.chat.completions.create({
* model: "c1/anthropic/claude-sonnet-4/v-20250617",
* messages,
* stream: true,
* temperature: 0.8,
* max_tokens: 4096,
* });
*/
/**
* Alternative: With message persistence
*
* import { db } from "@/lib/db";
*
* export async function POST(req: NextRequest) {
* const { userId } = auth(); // Clerk, NextAuth, etc.
* const { prompt, threadId } = await req.json();
*
* // Save user message
* await db.insert(messages).values({
* threadId,
* userId,
* role: "user",
* content: prompt,
* });
*
* // Get conversation history
* const history = await db
* .select()
* .from(messages)
* .where(eq(messages.threadId, threadId))
* .orderBy(messages.createdAt);
*
* const llmMessages = history.map((m) => ({
* role: m.role,
* content: m.content,
* }));
*
* const stream = await client.chat.completions.create({
* model: "c1/openai/gpt-5/v-20250930",
* messages: [{ role: "system", content: SYSTEM_PROMPT }, ...llmMessages],
* stream: true,
* });
*
* // ... transform and return stream
*
* // Save assistant response after streaming completes
* // (You'd need to handle this in the client or use a callback)
* }
*/

View File

@@ -0,0 +1,128 @@
/**
* Next.js App Router - Page Component with C1Chat
*
* File: app/page.tsx
*
* Simplest possible integration - just drop in C1Chat and point to API route.
*
* Features:
* - Pre-built C1Chat component
* - Automatic state management
* - Thread support (optional)
* - Responsive design
*/
"use client";
import { C1Chat } from "@thesysai/genui-sdk";
import { themePresets } from "@crayonai/react-ui";
import "@crayonai/react-ui/styles/index.css";
// Route ("/") — renders the pre-built C1Chat widget against /api/chat.
export default function Home() {
  const chatApiUrl = "/api/chat";
  const agentLogoUrl = "https://placehold.co/100x100/3b82f6/ffffff?text=AI";

  return (
    <main className="min-h-screen bg-gray-50 dark:bg-gray-900">
      <div className="container mx-auto p-4">
        <C1Chat
          apiUrl={chatApiUrl}
          agentName="AI Assistant"
          logoUrl={agentLogoUrl}
          theme={themePresets.default}
        />
      </div>
    </main>
  );
}
/**
* Alternative: With custom theme and dark mode
*
* import { useState, useEffect } from "react";
*
* function useSystemTheme() {
* const [theme, setTheme] = useState<"light" | "dark">("light");
*
* useEffect(() => {
* const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)");
* setTheme(mediaQuery.matches ? "dark" : "light");
*
* const handler = (e: MediaQueryListEvent) => {
* setTheme(e.matches ? "dark" : "light");
* };
*
* mediaQuery.addEventListener("change", handler);
* return () => mediaQuery.removeEventListener("change", handler);
* }, []);
*
* return theme;
* }
*
* export default function Home() {
* const systemTheme = useSystemTheme();
*
* return (
* <C1Chat
* apiUrl="/api/chat"
* theme={{ ...themePresets.candy, mode: systemTheme }}
* />
* );
* }
*/
/**
* Alternative: With thread management
*
* import {
* useThreadListManager,
* useThreadManager,
* } from "@thesysai/genui-sdk";
*
* export default function Home() {
* const threadListManager = useThreadListManager({
* fetchThreadList: async () => {
* const res = await fetch("/api/threads");
* return res.json();
* },
* deleteThread: async (threadId: string) => {
* await fetch(`/api/threads/${threadId}`, { method: "DELETE" });
* },
* updateThread: async (thread) => {
* const res = await fetch(`/api/threads/${thread.threadId}`, {
* method: "PUT",
* body: JSON.stringify(thread),
* });
* return res.json();
* },
* createThread: async (firstMessage) => {
* const res = await fetch("/api/threads", {
* method: "POST",
* body: JSON.stringify({ title: firstMessage.message }),
* });
* return res.json();
* },
* onSwitchToNew: () => {
* window.history.replaceState(null, "", "/");
* },
* onSelectThread: (threadId) => {
* window.history.replaceState(null, "", `/?threadId=${threadId}`);
* },
* });
*
* const threadManager = useThreadManager({
* threadListManager,
* loadThread: async (threadId) => {
* const res = await fetch(`/api/threads/${threadId}/messages`);
* return res.json();
* },
* onUpdateMessage: async ({ message }) => {
* // Handle message updates
* },
* });
*
* return (
* <C1Chat
* threadManager={threadManager}
* threadListManager={threadListManager}
* />
* );
* }
*/

View File

@@ -0,0 +1,43 @@
{
"name": "thesys-nextjs-example",
"version": "1.0.0",
"private": true,
"description": "Next.js App Router integration with TheSys Generative UI",
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@thesysai/genui-sdk": "^0.6.40",
"@crayonai/react-ui": "^0.8.42",
"@crayonai/react-core": "^0.7.6",
"@crayonai/stream": "^0.1.0",
"next": "^15.1.4",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-error-boundary": "^5.0.0",
"openai": "^4.73.0",
"zod": "^3.24.1",
"zod-to-json-schema": "^3.24.1"
},
"devDependencies": {
"@types/node": "^22.0.0",
"@types/react": "^19.0.0",
"@types/react-dom": "^19.0.0",
"typescript": "^5.7.3",
"eslint": "^9.0.0",
"eslint-config-next": "^15.1.4",
"tailwindcss": "^4.1.14",
"postcss": "^8.4.49",
"autoprefixer": "^10.4.20"
},
"optionalDependencies": {
"@tavily/core": "^1.0.0",
"@clerk/nextjs": "^6.10.0"
},
"engines": {
"node": ">=20.0.0"
}
}

View File

@@ -0,0 +1,325 @@
/**
* Next.js API Route with Tool Calling
*
* File: app/api/chat-with-tools/route.ts
*
* Demonstrates tool calling integration with TheSys C1.
* Includes:
* - Zod schema definitions
* - Web search tool (Tavily)
* - Product inventory tool
* - Order creation tool
* - Streaming with tool execution
*/
import { NextRequest, NextResponse } from "next/server";
import OpenAI from "openai";
import { z } from "zod";
import zodToJsonSchema from "zod-to-json-schema";
import { transformStream } from "@crayonai/stream";
import { TavilySearchAPIClient } from "@tavily/core";
// OpenAI-compatible client targeting the TheSys C1 embed API.
const client = new OpenAI({
  baseURL: "https://api.thesys.dev/v1/embed",
  apiKey: process.env.THESYS_API_KEY,
});
// NOTE(review): confirm the exported client name — recent @tavily/core
// versions expose a `tavily({ apiKey })` factory rather than a
// `TavilySearchAPIClient` class; verify against the installed version.
const tavily = new TavilySearchAPIClient({
  apiKey: process.env.TAVILY_API_KEY || "",
});
// ============================================================================
// Tool Schemas
// ============================================================================
// Arguments for the Tavily web-search tool.
const webSearchSchema = z.object({
  query: z.string().describe("The search query"),
  max_results: z
    .number()
    .int()
    .min(1)
    .max(10)
    .default(5)
    .describe("Maximum number of results"),
});
// Arguments for the inventory-lookup tool.
const productLookupSchema = z.object({
  product_type: z
    .enum(["gloves", "hat", "scarf", "all"])
    .optional()
    .describe("Type of product to lookup, or 'all' for everything"),
});
// One order line item; "type" discriminates the variant and determines which
// attributes (size/style/length, material, ...) are required.
const orderItemSchema = z.discriminatedUnion("type", [
  z.object({
    type: z.literal("gloves"),
    size: z.enum(["S", "M", "L", "XL"]),
    color: z.string(),
    quantity: z.number().int().min(1),
  }),
  z.object({
    type: z.literal("hat"),
    style: z.enum(["beanie", "baseball", "fedora"]),
    color: z.string(),
    quantity: z.number().int().min(1),
  }),
  z.object({
    type: z.literal("scarf"),
    length: z.enum(["short", "medium", "long"]),
    material: z.enum(["wool", "cotton", "silk"]),
    quantity: z.number().int().min(1),
  }),
]);
// Arguments for the order-creation tool; at least one item is required.
const createOrderSchema = z.object({
  customer_email: z.string().email().describe("Customer email address"),
  items: z.array(orderItemSchema).min(1).describe("Items to order"),
});
// ============================================================================
// Tool Definitions
// ============================================================================
// OpenAI function-tool wrappers; `parameters` are the JSON Schema forms of
// the zod schemas above (via zod-to-json-schema).
const webSearchTool = {
  type: "function" as const,
  function: {
    name: "web_search",
    description: "Search the web for current information using Tavily API",
    parameters: zodToJsonSchema(webSearchSchema),
  },
};
const productLookupTool = {
  type: "function" as const,
  function: {
    name: "lookup_product",
    description: "Look up products in inventory",
    parameters: zodToJsonSchema(productLookupSchema),
  },
};
const createOrderTool = {
  type: "function" as const,
  function: {
    name: "create_order",
    description: "Create a new product order",
    parameters: zodToJsonSchema(createOrderSchema),
  },
};
// ============================================================================
// Tool Execution Functions
// ============================================================================
// Run a Tavily web search and shape the response for the LLM.
async function executeWebSearch(args: z.infer<typeof webSearchSchema>) {
  // Re-validate at the execution boundary (arguments originate from the LLM).
  const params = webSearchSchema.parse(args);
  const searchResponse = await tavily.search(params.query, {
    maxResults: params.max_results,
    includeAnswer: true,
  });
  const hits = searchResponse.results.map((hit) => ({
    title: hit.title,
    url: hit.url,
    snippet: hit.content,
  }));
  return {
    query: params.query,
    answer: searchResponse.answer,
    results: hits,
  };
}
// Look up mock inventory, optionally filtered to a single product type.
async function executeProductLookup(
  args: z.infer<typeof productLookupSchema>
) {
  const { product_type } = productLookupSchema.parse(args);
  // Mock inventory - replace with actual database query
  const inventory = {
    gloves: [
      { id: 1, size: "M", color: "blue", price: 29.99, stock: 15 },
      { id: 2, size: "L", color: "red", price: 29.99, stock: 8 },
    ],
    hat: [
      { id: 3, style: "beanie", color: "black", price: 19.99, stock: 20 },
      { id: 4, style: "baseball", color: "navy", price: 24.99, stock: 12 },
    ],
    scarf: [
      { id: 5, length: "medium", material: "wool", price: 34.99, stock: 10 },
    ],
  };
  // No filter (or explicit "all") returns the whole inventory.
  if (!product_type || product_type === "all") {
    return { type: "all", inventory };
  }
  return { type: product_type, products: inventory[product_type] };
}
// Create a mock order and return a confirmation payload for the LLM.
async function executeCreateOrder(args: z.infer<typeof createOrderSchema>) {
  const order = createOrderSchema.parse(args);
  // Mock order creation - replace with actual database insert
  const orderId = `ORD-${Date.now()}`;
  // Simulate saving to database
  console.log("Creating order:", {
    orderId,
    customer: order.customer_email,
    items: order.items,
  });
  // Mock price
  let total = 0;
  for (const item of order.items) {
    total += item.quantity * 29.99;
  }
  return {
    success: true,
    orderId,
    customer_email: order.customer_email,
    items: order.items,
    total,
    message: `Order ${orderId} created successfully`,
  };
}
// ============================================================================
// API Route Handler
// ============================================================================
/**
 * POST /api/chat-with-tools
 *
 * Streams a C1 completion while exposing web-search, inventory-lookup, and
 * order-creation tools to the model.
 */
export async function POST(req: NextRequest) {
  try {
    const { prompt, previousC1Response } = await req.json();
    // Basic request validation.
    if (!prompt || typeof prompt !== "string") {
      return NextResponse.json(
        { error: "Invalid prompt" },
        { status: 400 }
      );
    }
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
      {
        role: "system",
        content: `You are a helpful shopping assistant with access to tools.
You can:
1. Search the web for product information
2. Look up products in our inventory
3. Create orders for customers
Always use tools when appropriate. Be friendly and helpful.`,
      },
      { role: "user", content: prompt },
    ];
    // Thread the prior C1 response in as assistant context so the model can
    // refine the UI it already produced.
    if (previousC1Response) {
      messages.splice(1, 0, {
        role: "assistant",
        content: previousC1Response,
      });
    }
    // Create streaming completion with tools
    // NOTE(review): the OpenAI SDK's option is snake_case `tool_choice`;
    // confirm `toolChoice` is actually honored by the installed SDK version.
    const llmStream = await client.beta.chat.completions.runTools({
      model: "c1/anthropic/claude-sonnet-4/v-20250617",
      messages,
      stream: true,
      tools: [webSearchTool, productLookupTool, createOrderTool],
      toolChoice: "auto", // Let AI decide when to use tools
      temperature: 0.7,
    });
    // Handle tool execution
    // NOTE(review): `runTools` normally auto-executes tools supplied as
    // runnable functions; these tool definitions carry only JSON schemas, and
    // results computed in this listener are not fed back to the model by this
    // code — verify tool round-tripping actually happens as intended.
    llmStream.on("message", async (event) => {
      if (event.tool_calls) {
        for (const toolCall of event.tool_calls) {
          try {
            let result;
            // Dispatch on the tool name chosen by the model.
            switch (toolCall.function.name) {
              case "web_search":
                const searchArgs = JSON.parse(toolCall.function.arguments);
                result = await executeWebSearch(searchArgs);
                break;
              case "lookup_product":
                const lookupArgs = JSON.parse(toolCall.function.arguments);
                result = await executeProductLookup(lookupArgs);
                break;
              case "create_order":
                const orderArgs = JSON.parse(toolCall.function.arguments);
                result = await executeCreateOrder(orderArgs);
                break;
              default:
                throw new Error(`Unknown tool: ${toolCall.function.name}`);
            }
            console.log(`Tool ${toolCall.function.name} executed:`, result);
            // Tool results are automatically sent back to the LLM
            // by the runTools method
          } catch (error) {
            console.error(`Tool execution error:`, error);
            // Error will be sent back to LLM
          }
        }
      }
    });
    // Transform stream to C1 format
    const responseStream = transformStream(llmStream, (chunk) => {
      return chunk.choices[0]?.delta?.content || "";
    }) as ReadableStream<string>;
    return new NextResponse(responseStream, {
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache, no-transform",
        "Connection": "keep-alive",
        "Access-Control-Allow-Origin": "*",
      },
    });
  } catch (error) {
    console.error("Chat API Error:", error);
    // Tool-argument validation failures surface as 400s with details.
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        {
          error: "Validation error",
          details: error.errors,
        },
        { status: 400 }
      );
    }
    // Upstream API failures keep their own status/type.
    if (error instanceof OpenAI.APIError) {
      return NextResponse.json(
        {
          error: error.message,
          type: error.type,
        },
        { status: error.status || 500 }
      );
    }
    return NextResponse.json(
      { error: "Internal server error" },
      { status: 500 }
    );
  }
}
// CORS preflight handler: advertise allowed methods/headers; no body.
export async function OPTIONS() {
  const corsHeaders = {
    "Access-Control-Allow-Origin": "*",
    "Access-Control-Allow-Methods": "POST, OPTIONS",
    "Access-Control-Allow-Headers": "Content-Type",
  };
  return new NextResponse(null, { headers: corsHeaders });
}

View File

@@ -0,0 +1,267 @@
# Python Backend Templates for TheSys Generative UI
This directory contains production-ready Python backend templates for integrating TheSys C1 Generative UI API.
## Available Templates
### 1. FastAPI Backend (`fastapi-chat.py`)
Modern async web framework with automatic API documentation.
**Features**:
- Async streaming support
- Built-in request validation with Pydantic
- Automatic OpenAPI docs
- CORS middleware configured
- Type hints throughout
**Run**:
```bash
# Install dependencies
pip install -r requirements.txt
# Set environment variable
export THESYS_API_KEY=sk-th-your-key-here
# Run server
python fastapi-chat.py
# Or with uvicorn directly
uvicorn fastapi-chat:app --reload --port 8000
```
**API Docs**: Visit `http://localhost:8000/docs` for interactive API documentation
---
### 2. Flask Backend (`flask-chat.py`)
Lightweight and flexible web framework.
**Features**:
- Simple and familiar Flask API
- CORS support with flask-cors
- Streaming response handling
- Easy to customize and extend
**Run**:
```bash
# Install dependencies
pip install -r requirements.txt
# Set environment variable
export THESYS_API_KEY=sk-th-your-key-here
# Run server
python flask-chat.py
# Or with flask CLI
export FLASK_APP=flask-chat.py
flask run --port 5000
```
---
## Setup
### 1. Install Dependencies
```bash
# Create virtual environment
python -m venv venv
source venv/bin/activate # On Windows: venv\Scripts\activate
# Install all dependencies
pip install -r requirements.txt
# OR install only what you need
pip install thesys-genui-sdk openai python-dotenv
# For FastAPI
pip install fastapi uvicorn
# For Flask
pip install flask flask-cors
```
### 2. Environment Variables
Create a `.env` file:
```bash
THESYS_API_KEY=sk-th-your-api-key-here
```
Get your API key from: https://console.thesys.dev/keys
### 3. Choose Your Model
Both templates use different models by default to show variety:
**FastAPI**: Uses Claude Sonnet 4
```python
model="c1/anthropic/claude-sonnet-4/v-20250930"
```
**Flask**: Uses GPT 5
```python
model="c1/openai/gpt-5/v-20250930"
```
Change to any supported model:
- `c1/anthropic/claude-sonnet-4/v-20250930` - Claude Sonnet 4 (stable)
- `c1/openai/gpt-5/v-20250930` - GPT 5 (stable)
- `c1-exp/openai/gpt-4.1/v-20250617` - GPT 4.1 (experimental)
- `c1-exp/anthropic/claude-3.5-haiku/v-20250709` - Claude 3.5 Haiku (experimental)
---
## Frontend Integration
### React + Vite Example
```typescript
const makeApiCall = async (prompt: string) => {
const response = await fetch("http://localhost:8000/api/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ prompt })
});
  const reader = response.body?.getReader();
  if (!reader) return;
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    const chunk = decoder.decode(value);
    setC1Response(prev => prev + chunk);
  }
};
```
### Next.js API Route (Proxy)
```typescript
// app/api/chat/route.ts
export async function POST(req: Request) {
const { prompt } = await req.json();
const response = await fetch("http://localhost:8000/api/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ prompt })
});
return new Response(response.body, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache"
}
});
}
```
---
## Production Deployment
### Environment Variables
```bash
# Production
THESYS_API_KEY=sk-th-production-key
HOST=0.0.0.0
PORT=8000
ENVIRONMENT=production
ALLOWED_ORIGINS=https://your-frontend.com
```
### FastAPI (Recommended for Production)
```bash
# Install production server
pip install gunicorn
# Run with Gunicorn
gunicorn fastapi-chat:app \
--workers 4 \
--worker-class uvicorn.workers.UvicornWorker \
--bind 0.0.0.0:8000 \
--timeout 120
```
### Flask Production
```bash
# Install production server
pip install gunicorn
# Run with Gunicorn
gunicorn flask-chat:app \
--workers 4 \
--bind 0.0.0.0:5000 \
--timeout 120
```
### Docker Example
```dockerfile
FROM python:3.12-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY fastapi-chat.py .
ENV THESYS_API_KEY=""
ENV PORT=8000
CMD ["uvicorn", "fastapi-chat:app", "--host", "0.0.0.0", "--port", "8000"]
```
---
## Troubleshooting
### Common Issues
**1. Import Error: `thesys_genui_sdk` not found**
```bash
pip install thesys-genui-sdk
```
**2. CORS Errors**
Update CORS configuration in the template to match your frontend URL:
```python
allow_origins=["http://localhost:5173"] # Vite default
```
**3. Streaming Not Working**
Ensure:
- `stream=True` in the API call
- Using `@with_c1_response` decorator
- Proper response headers set
**4. Authentication Failed (401)**
Check that `THESYS_API_KEY` is set correctly:
```python
import os
print(os.getenv("THESYS_API_KEY")) # Should not be None
```
---
## Next Steps
1. Copy the template you want to use
2. Install dependencies from `requirements.txt`
3. Set your `THESYS_API_KEY` in `.env`
4. Run the server
5. Connect your React frontend
6. Customize the system prompt and model as needed
For more examples, see the main SKILL.md documentation.

View File

@@ -0,0 +1,125 @@
"""
TheSys Generative UI - FastAPI Backend Example
This example demonstrates how to set up a FastAPI backend that integrates
with TheSys C1 API for streaming generative UI responses.
Dependencies:
- fastapi
- uvicorn
- thesys-genui-sdk
- openai
- python-dotenv
"""
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from thesys_genui_sdk import with_c1_response, write_content
import openai
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Initialize FastAPI app with OpenAPI metadata (shown at /docs)
app = FastAPI(
    title="TheSys C1 API Backend",
    description="FastAPI backend for TheSys Generative UI",
    version="1.0.0"
)
# Configure CORS.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers -- the wildcard origin is not permitted on credentialed
# requests. List explicit origins (or drop credentials) in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure for your frontend URL in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# OpenAI SDK client pointed at the TheSys C1 OpenAI-compatible endpoint;
# the key is read from the THESYS_API_KEY environment variable (.env).
client = openai.OpenAI(
    base_url="https://api.thesys.dev/v1/embed",
    api_key=os.getenv("THESYS_API_KEY")
)
# Request body model for POST /api/chat
class ChatRequest(BaseModel):
    prompt: str                    # user's message (required)
    thread_id: str | None = None   # conversation thread, if continuing one
    response_id: str | None = None # previous response id, if any
@app.get("/")
async def root():
"""Health check endpoint"""
return {
"status": "ok",
"message": "TheSys C1 API Backend is running"
}
@app.post("/api/chat")
@with_c1_response # Automatically handles streaming headers
async def chat_endpoint(request: ChatRequest):
"""
Streaming chat endpoint that generates UI components.
Args:
request: ChatRequest with prompt and optional thread/response IDs
Returns:
StreamingResponse with C1-formatted UI chunks
"""
try:
# Create streaming completion request
stream = client.chat.completions.create(
model="c1/anthropic/claude-sonnet-4/v-20250930",
messages=[
{
"role": "system",
"content": "You are a helpful AI assistant that creates interactive user interfaces."
},
{
"role": "user",
"content": request.prompt
}
],
stream=True,
temperature=0.7,
max_tokens=4096
)
# Stream chunks to frontend
async def generate():
for chunk in stream:
content = chunk.choices[0].delta.content
if content:
yield write_content(content)
return StreamingResponse(
generate(),
media_type="text/event-stream"
)
except Exception as e:
return {
"error": str(e),
"message": "Failed to generate response"
}
if __name__ == "__main__":
import uvicorn
# Run the server
uvicorn.run(
"fastapi-chat:app",
host="0.0.0.0",
port=8000,
reload=True,
log_level="info"
)

View File

@@ -0,0 +1,119 @@
"""
TheSys Generative UI - Flask Backend Example
This example demonstrates how to set up a Flask backend that integrates
with TheSys C1 API for streaming generative UI responses.
Dependencies:
- flask
- flask-cors
- thesys-genui-sdk
- openai
- python-dotenv
"""
from flask import Flask, request, Response, jsonify
from flask_cors import CORS
from thesys_genui_sdk import with_c1_response, write_content
import openai
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Initialize Flask app
app = Flask(__name__)
# Configure CORS for the /api/* routes only.
# "*" origins are acceptable for local development; restrict to the real
# frontend URL in production.
CORS(app, resources={
    r"/api/*": {
        "origins": "*",  # Configure for your frontend URL in production
        "allow_headers": "*",
        "expose_headers": "*"
    }
})
# OpenAI SDK client pointed at the TheSys C1 OpenAI-compatible endpoint;
# the key is read from the THESYS_API_KEY environment variable (.env).
client = openai.OpenAI(
    base_url="https://api.thesys.dev/v1/embed",
    api_key=os.getenv("THESYS_API_KEY")
)
@app.route("/")
def root():
"""Health check endpoint"""
return jsonify({
"status": "ok",
"message": "TheSys C1 API Backend is running"
})
@app.route("/api/chat", methods=["POST"])
@with_c1_response # Automatically handles streaming headers
def chat():
"""
Streaming chat endpoint that generates UI components.
Request JSON:
{
"prompt": str,
"thread_id": str (optional),
"response_id": str (optional)
}
Returns:
StreamingResponse with C1-formatted UI chunks
"""
try:
data = request.get_json()
prompt = data.get("prompt")
if not prompt:
return jsonify({"error": "Prompt is required"}), 400
# Create streaming completion request
stream = client.chat.completions.create(
model="c1/openai/gpt-5/v-20250930",
messages=[
{
"role": "system",
"content": "You are a helpful AI assistant that creates interactive user interfaces."
},
{
"role": "user",
"content": prompt
}
],
stream=True,
temperature=0.7,
max_tokens=4096
)
# Stream chunks to frontend
def generate():
for chunk in stream:
content = chunk.choices[0].delta.content
if content:
yield write_content(content)
return Response(
generate(),
mimetype="text/event-stream"
)
except Exception as e:
return jsonify({
"error": str(e),
"message": "Failed to generate response"
}), 500
if __name__ == "__main__":
# Run the server
app.run(
host="0.0.0.0",
port=5000,
debug=True
)

View File

@@ -0,0 +1,18 @@
# TheSys Generative UI - Python Backend Dependencies
# Core dependencies
thesys-genui-sdk>=0.1.0
openai>=1.59.5
python-dotenv>=1.0.1
# FastAPI dependencies (for fastapi-chat.py)
fastapi>=0.115.6
uvicorn[standard]>=0.34.0
pydantic>=2.10.5
# Flask dependencies (for flask-chat.py)
flask>=3.1.0
flask-cors>=5.0.0
# Optional: For enhanced error handling
python-multipart>=0.0.20

View File

@@ -0,0 +1,409 @@
/**
* Streaming Utilities for TheSys C1
*
* Helper functions for handling streaming responses from
* OpenAI SDK, TheSys API, and transforming streams for C1.
*
* Works with any framework (Vite, Next.js, Cloudflare Workers).
*/
/**
 * Convert a ReadableStream to a string.
 *
 * Accepts text chunks, binary chunks, or a mix: string values are appended
 * directly, while Uint8Array values are decoded as UTF-8 with a streaming
 * TextDecoder (so multi-byte characters split across chunks decode
 * correctly, and any buffered bytes are flushed at the end).
 *
 * Fix: the previous signature claimed ReadableStream<string> even though
 * the body already handled Uint8Array chunks; the parameter type now
 * matches the runtime behavior (widening is backward compatible).
 */
export async function streamToString(
  stream: ReadableStream<string | Uint8Array>
): Promise<string> {
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let result = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      if (typeof value === "string") {
        result += value;
      } else {
        // stream: true keeps incomplete multi-byte sequences buffered
        result += decoder.decode(value, { stream: true });
      }
    }
    // Final decode flushes whatever the decoder still holds
    result += decoder.decode();
    return result;
  } finally {
    reader.releaseLock();
  }
}
/**
 * Drain a ReadableStream and collect every chunk into an array.
 * The reader lock is always released, even if reading throws.
 */
export async function streamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
  const reader = stream.getReader();
  const collected: T[] = [];
  try {
    for (let step = await reader.read(); !step.done; step = await reader.read()) {
      collected.push(step.value);
    }
  } finally {
    reader.releaseLock();
  }
  return collected;
}
/**
 * Create a pass-through channel: whatever is written to `writable`
 * comes out of `readable` unchanged (an identity TransformStream).
 */
export function createPassThroughStream<T>(): {
  readable: ReadableStream<T>;
  writable: WritableStream<T>;
} {
  const channel = new TransformStream<T, T>();
  return { readable: channel.readable, writable: channel.writable };
}
/**
 * Transform a stream with a callback function.
 * Similar to @crayonai/stream's transformStream.
 *
 * Each chunk passes through `transformer`; returning null drops the chunk.
 * Every emitted chunk is also appended to `accumulated`, which is handed
 * to `onEnd` when the source finishes.
 *
 * NOTE(review): all reading happens eagerly inside start(), so the source
 * is consumed as fast as possible regardless of consumer backpressure,
 * and `accumulated` retains every output chunk in memory.
 */
export function transformStream<TInput, TOutput>(
  source: ReadableStream<TInput>,
  transformer: (chunk: TInput) => TOutput | null,
  options?: {
    onStart?: () => void;   // called once before reading begins
    onEnd?: (data: { accumulated: TOutput[] }) => void; // called on clean end
    onError?: (error: Error) => void; // called before the stream is errored
  }
): ReadableStream<TOutput> {
  const accumulated: TOutput[] = [];
  return new ReadableStream<TOutput>({
    async start(controller) {
      options?.onStart?.();
      const reader = source.getReader();
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) {
            options?.onEnd?.({ accumulated });
            controller.close();
            break;
          }
          const transformed = transformer(value);
          // null means "filter this chunk out"
          if (transformed !== null) {
            accumulated.push(transformed);
            controller.enqueue(transformed);
          }
        }
      } catch (error) {
        // Normalize non-Error throwables before reporting
        const err = error instanceof Error ? error : new Error(String(error));
        options?.onError?.(err);
        controller.error(err);
      } finally {
        reader.releaseLock();
      }
    },
  });
}
/**
 * Merge multiple streams into one.
 *
 * All sources are read concurrently, so chunks are emitted in arrival
 * order and the interleaving is nondeterministic. The merged stream
 * closes once every source has finished, and errors if any source errors.
 */
export function mergeStreams<T>(...streams: ReadableStream<T>[]): ReadableStream<T> {
  return new ReadableStream<T>({
    async start(controller) {
      try {
        // One pump per source, all racing in parallel
        await Promise.all(
          streams.map(async (stream) => {
            const reader = stream.getReader();
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;
                controller.enqueue(value);
              }
            } finally {
              reader.releaseLock();
            }
          })
        );
        controller.close();
      } catch (error) {
        controller.error(error);
      }
    },
  });
}
/**
 * Split a stream into multiple identical streams (tee into `count` copies).
 *
 * Every chunk from `source` is enqueued on all output streams. The output
 * controllers are captured synchronously when the ReadableStreams are
 * constructed; a detached async loop then pumps the source.
 *
 * NOTE(review): outputs are not backpressure-aware -- a slow consumer
 * simply buffers chunks in its queue while faster consumers race ahead.
 */
export function splitStream<T>(
  source: ReadableStream<T>,
  count: number
): ReadableStream<T>[] {
  if (count < 2) throw new Error("Count must be at least 2");
  const readers: ReadableStreamDefaultController<T>[] = [];
  const streams = Array.from({ length: count }, () => {
    return new ReadableStream<T>({
      start(controller) {
        // start() runs synchronously during construction, so all
        // controllers are collected before the pump below begins
        readers.push(controller);
      },
    });
  });
  // Fire-and-forget pump: fan each chunk out to every output
  (async () => {
    const reader = source.getReader();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) {
          readers.forEach((r) => r.close());
          break;
        }
        readers.forEach((r) => r.enqueue(value));
      }
    } catch (error) {
      readers.forEach((r) => r.error(error));
    } finally {
      reader.releaseLock();
    }
  })();
  return streams;
}
/**
 * Group chunks from a stream into arrays.
 *
 * Chunks accumulate in an internal buffer; whenever shouldFlush(buffer)
 * returns true, a copy of the buffer is emitted and the buffer resets.
 * Any leftover chunks are flushed as a final array when the source ends.
 */
export function bufferStream<T>(
  source: ReadableStream<T>,
  shouldFlush: (buffer: T[]) => boolean
): ReadableStream<T[]> {
  return new ReadableStream<T[]>({
    async start(controller) {
      const reader = source.getReader();
      let pending: T[] = [];
      try {
        for (;;) {
          const step = await reader.read();
          if (step.done) {
            // Emit whatever is left before closing
            if (pending.length > 0) controller.enqueue([...pending]);
            controller.close();
            return;
          }
          pending.push(step.value);
          if (shouldFlush(pending)) {
            controller.enqueue([...pending]);
            pending = [];
          }
        }
      } catch (err) {
        controller.error(err);
      } finally {
        reader.releaseLock();
      }
    },
  });
}
/**
 * Re-emit chunks from a source stream, pausing delayMs between chunks.
 * A delay of 0 (or less) passes chunks through without waiting.
 */
export function rateLimit<T>(
  source: ReadableStream<T>,
  delayMs: number
): ReadableStream<T> {
  const sleep = (ms: number) => new Promise((wake) => setTimeout(wake, ms));
  return new ReadableStream<T>({
    async start(controller) {
      const reader = source.getReader();
      try {
        for (;;) {
          const step = await reader.read();
          if (step.done) {
            controller.close();
            return;
          }
          controller.enqueue(step.value);
          // Throttle: wait before pulling the next chunk
          if (delayMs > 0) await sleep(delayMs);
        }
      } catch (err) {
        controller.error(err);
      } finally {
        reader.releaseLock();
      }
    },
  });
}
/**
 * Attempt to create a stream, retrying failures with exponential backoff.
 *
 * @param createStream Factory that produces the stream (may reject).
 * @param maxRetries   Total attempts before giving up (default 3).
 * @param delayMs      Base backoff delay in ms; doubles per attempt (default 1000).
 * @throws The last error encountered once all attempts are exhausted.
 */
export async function retryStream<T>(
  createStream: () => Promise<ReadableStream<T>>,
  maxRetries: number = 3,
  delayMs: number = 1000
): Promise<ReadableStream<T>> {
  let failure: Error | null = null;
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await createStream();
    } catch (raw) {
      failure = raw instanceof Error ? raw : new Error(String(raw));
      console.error(`Stream creation attempt ${attempt + 1} failed:`, failure);
      const isLastAttempt = attempt === maxRetries - 1;
      if (!isLastAttempt) {
        // Exponential backoff: delayMs, 2*delayMs, 4*delayMs, ...
        const waitTime = delayMs * 2 ** attempt;
        await new Promise((wake) => setTimeout(wake, waitTime));
      }
    }
  }
  throw failure ?? new Error("Failed to create stream");
}
/**
 * Parse a Server-Sent Events (SSE) byte stream into { event?, data } messages.
 *
 * Fixes over the previous version:
 * - The in-progress `event`/`data` state now lives OUTSIDE the read loop,
 *   so a message split across network chunks no longer loses its event
 *   name or partial data (previously the state reset on every chunk).
 * - Multiple `data:` lines within one message are joined with "\n", as the
 *   SSE specification requires, instead of being concatenated directly.
 *
 * An incomplete message at end-of-stream (no trailing blank line) is
 * discarded, matching the SSE spec. Messages with empty data are not
 * dispatched (also per spec, and matching the previous behavior).
 */
export function parseSSE(
  source: ReadableStream<Uint8Array>
): ReadableStream<{ event?: string; data: string }> {
  const decoder = new TextDecoder();
  let buffer = "";
  return new ReadableStream({
    async start(controller) {
      const reader = source.getReader();
      // Message state persists across chunk boundaries
      let event = "";
      let dataLines: string[] = [];
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) {
            controller.close();
            break;
          }
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split("\n");
          buffer = lines.pop() || ""; // keep the trailing partial line
          for (const line of lines) {
            if (line.startsWith("event:")) {
              event = line.slice(6).trim();
            } else if (line.startsWith("data:")) {
              dataLines.push(line.slice(5).trim());
            } else if (line === "") {
              // Empty line signals end of message
              const data = dataLines.join("\n");
              if (data) {
                controller.enqueue({ event: event || undefined, data });
              }
              event = "";
              dataLines = [];
            }
          }
        }
      } catch (error) {
        controller.error(error);
      } finally {
        reader.releaseLock();
      }
    },
  });
}
/**
 * Forward chunks from a source stream while yielding to the consumer
 * whenever the internal queue fills up (desiredSize <= 0).
 *
 * @param highWaterMark Queue size before the producer starts pausing.
 */
export function handleBackpressure<T>(
  source: ReadableStream<T>,
  highWaterMark: number = 10
): ReadableStream<T> {
  return new ReadableStream<T>(
    {
      async start(controller) {
        const reader = source.getReader();
        try {
          for (;;) {
            const step = await reader.read();
            if (step.done) {
              controller.close();
              return;
            }
            controller.enqueue(step.value);
            // Queue full? Pause briefly so the consumer can drain it.
            const room = controller.desiredSize;
            if (room !== null && room <= 0) {
              await new Promise((wake) => setTimeout(wake, 10));
            }
          }
        } catch (err) {
          controller.error(err);
        } finally {
          reader.releaseLock();
        }
      },
    },
    { highWaterMark }
  );
}
/**
 * Wrap a stream so every chunk (plus start/end/error events) is logged
 * to the console under the given label. Chunks pass through unchanged.
 */
export function debugStream<T>(
  source: ReadableStream<T>,
  label: string = "Stream"
): ReadableStream<T> {
  let seen = 0;
  const logChunk = (chunk: T): T => {
    console.log(`[${label}] Chunk ${++seen}:`, chunk);
    return chunk;
  };
  return transformStream(source, logChunk, {
    onStart: () => console.log(`[${label}] Stream started`),
    onEnd: ({ accumulated }) =>
      console.log(`[${label}] Stream ended. Total chunks: ${accumulated.length}`),
    onError: (error) => console.error(`[${label}] Stream error:`, error),
  });
}

View File

@@ -0,0 +1,318 @@
/**
* Reusable Theme Configurations for TheSys C1
*
* Collection of custom theme objects that can be used across
* any framework (Vite, Next.js, Cloudflare Workers).
*
* Usage:
* import { darkTheme, lightTheme, oceanTheme } from "./theme-config";
*
* <C1Chat theme={oceanTheme} />
*/
/**
 * Theme contract consumed by TheSys C1 components (e.g. <C1Chat theme={...}>).
 * All color/font values are plain CSS strings.
 */
export interface C1Theme {
  mode: "light" | "dark"; // base color scheme
  colors: {
    primary: string;      // main brand / action color
    secondary: string;    // supporting accent
    background: string;   // page background
    foreground: string;   // default text color
    border: string;       // dividers and outlines
    muted: string;        // subdued surfaces (cards, inputs)
    accent: string;       // highlight color
    destructive?: string; // errors / dangerous actions
    success?: string;     // confirmations
    warning?: string;     // cautions
  };
  fonts: {
    body: string;         // CSS font-family for body text
    heading: string;      // CSS font-family for headings
    mono?: string;        // CSS font-family for code
  };
  borderRadius: string;   // CSS length, e.g. "8px"
  spacing: {
    base: string;         // base spacing unit, e.g. "16px"
  };
}
// ============================================================================
// Light Themes
// ============================================================================

// Default light theme: blue primary, purple secondary, Inter typography.
export const lightTheme: C1Theme = {
  mode: "light",
  colors: {
    primary: "#3b82f6", // Blue
    secondary: "#8b5cf6", // Purple
    background: "#ffffff",
    foreground: "#1f2937",
    border: "#e5e7eb",
    muted: "#f3f4f6",
    accent: "#10b981", // Green
    destructive: "#ef4444", // Red
    success: "#10b981", // Green
    warning: "#f59e0b", // Amber
  },
  fonts: {
    body: "'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif",
    heading: "'Inter', sans-serif",
    mono: "'Fira Code', 'Courier New', monospace",
  },
  borderRadius: "8px",
  spacing: {
    base: "16px",
  },
};

// Cool sky-blue/cyan palette with rounder corners and Nunito typography.
export const oceanTheme: C1Theme = {
  mode: "light",
  colors: {
    primary: "#0ea5e9", // Sky blue
    secondary: "#06b6d4", // Cyan
    background: "#f0f9ff",
    foreground: "#0c4a6e",
    border: "#bae6fd",
    muted: "#e0f2fe",
    accent: "#0891b2",
    destructive: "#dc2626",
    success: "#059669",
    warning: "#d97706",
  },
  fonts: {
    body: "'Nunito', sans-serif",
    heading: "'Nunito', sans-serif",
    mono: "'JetBrains Mono', monospace",
  },
  borderRadius: "12px",
  spacing: {
    base: "16px",
  },
};

// Warm amber/orange palette with Poppins typography and tighter corners.
export const sunsetTheme: C1Theme = {
  mode: "light",
  colors: {
    primary: "#f59e0b", // Amber
    secondary: "#f97316", // Orange
    background: "#fffbeb",
    foreground: "#78350f",
    border: "#fed7aa",
    muted: "#fef3c7",
    accent: "#ea580c",
    destructive: "#dc2626",
    success: "#16a34a",
    warning: "#f59e0b",
  },
  fonts: {
    body: "'Poppins', sans-serif",
    heading: "'Poppins', sans-serif",
    mono: "'Source Code Pro', monospace",
  },
  borderRadius: "6px",
  spacing: {
    base: "16px",
  },
};
// ============================================================================
// Dark Themes
// ============================================================================

// Default dark theme: lightened blue/purple accents on near-black gray.
export const darkTheme: C1Theme = {
  mode: "dark",
  colors: {
    primary: "#60a5fa", // Light blue
    secondary: "#a78bfa", // Light purple
    background: "#111827",
    foreground: "#f9fafb",
    border: "#374151",
    muted: "#1f2937",
    accent: "#34d399",
    destructive: "#f87171",
    success: "#34d399",
    warning: "#fbbf24",
  },
  fonts: {
    body: "'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif",
    heading: "'Inter', sans-serif",
    mono: "'Fira Code', 'Courier New', monospace",
  },
  borderRadius: "8px",
  spacing: {
    base: "16px",
  },
};

// Deep navy background with indigo/purple accents and Roboto typography.
export const midnightTheme: C1Theme = {
  mode: "dark",
  colors: {
    primary: "#818cf8", // Indigo
    secondary: "#c084fc", // Purple
    background: "#0f172a",
    foreground: "#e2e8f0",
    border: "#334155",
    muted: "#1e293b",
    accent: "#8b5cf6",
    destructive: "#f87171",
    success: "#4ade80",
    warning: "#facc15",
  },
  fonts: {
    body: "'Roboto', sans-serif",
    heading: "'Roboto', sans-serif",
    mono: "'IBM Plex Mono', monospace",
  },
  borderRadius: "10px",
  spacing: {
    base: "16px",
  },
};

// Green-on-green palette with slightly larger base spacing.
export const forestTheme: C1Theme = {
  mode: "dark",
  colors: {
    primary: "#4ade80", // Green
    secondary: "#22d3ee", // Cyan
    background: "#064e3b",
    foreground: "#d1fae5",
    border: "#065f46",
    muted: "#047857",
    accent: "#10b981",
    destructive: "#fca5a5",
    success: "#6ee7b7",
    warning: "#fde047",
  },
  fonts: {
    body: "'Lato', sans-serif",
    heading: "'Lato', sans-serif",
    mono: "'Consolas', monospace",
  },
  borderRadius: "8px",
  spacing: {
    base: "18px",
  },
};
// ============================================================================
// High Contrast Themes (Accessibility)
// ============================================================================

// Maximum-contrast light theme: pure black on white with saturated
// primaries, minimal rounding, and generous spacing.
export const highContrastLight: C1Theme = {
  mode: "light",
  colors: {
    primary: "#0000ff", // Pure blue
    secondary: "#ff00ff", // Pure magenta
    background: "#ffffff",
    foreground: "#000000",
    border: "#000000",
    muted: "#f5f5f5",
    accent: "#008000", // Pure green
    destructive: "#ff0000",
    success: "#008000",
    warning: "#ff8800",
  },
  fonts: {
    body: "'Arial', sans-serif",
    heading: "'Arial', bold, sans-serif",
    mono: "'Courier New', monospace",
  },
  borderRadius: "2px",
  spacing: {
    base: "20px",
  },
};

// Maximum-contrast dark theme: pure white on black with bright accents.
export const highContrastDark: C1Theme = {
  mode: "dark",
  colors: {
    primary: "#00ccff", // Bright cyan
    secondary: "#ff00ff", // Bright magenta
    background: "#000000",
    foreground: "#ffffff",
    border: "#ffffff",
    muted: "#1a1a1a",
    accent: "#00ff00", // Bright green
    destructive: "#ff0000",
    success: "#00ff00",
    warning: "#ffaa00",
  },
  fonts: {
    body: "'Arial', sans-serif",
    heading: "'Arial', bold, sans-serif",
    mono: "'Courier New', monospace",
  },
  borderRadius: "2px",
  spacing: {
    base: "20px",
  },
};
// ============================================================================
// Theme Utilities
// ============================================================================
/**
 * Detect the operating-system color-scheme preference.
 * Falls back to "light" outside the browser (SSR / tests).
 */
export function getSystemTheme(): "light" | "dark" {
  if (typeof window === "undefined") return "light";
  const prefersDark = window.matchMedia("(prefers-color-scheme: dark)").matches;
  return prefersDark ? "dark" : "light";
}
/**
 * Subscribe to OS color-scheme changes.
 * Returns an unsubscribe function; outside the browser it is a no-op.
 */
export function onSystemThemeChange(callback: (theme: "light" | "dark") => void) {
  if (typeof window === "undefined") return () => {};
  const query = window.matchMedia("(prefers-color-scheme: dark)");
  const onChange = (e: MediaQueryListEvent) =>
    callback(e.matches ? "dark" : "light");
  query.addEventListener("change", onChange);
  return () => query.removeEventListener("change", onChange);
}
/**
 * Resolve a theme from a user preference.
 *
 * "system" consults the OS preference via getSystemTheme(); otherwise the
 * matching light/dark config is returned directly.
 */
export function getTheme(
  preference: "light" | "dark" | "system",
  lightThemeConfig: C1Theme = lightTheme,
  darkThemeConfig: C1Theme = darkTheme
): C1Theme {
  const resolved = preference === "system" ? getSystemTheme() : preference;
  return resolved === "dark" ? darkThemeConfig : lightThemeConfig;
}
/**
 * All available themes by name.
 * `as const` keeps the keys as literal types so ThemeName is a precise union.
 */
export const themes = {
  light: lightTheme,
  dark: darkTheme,
  ocean: oceanTheme,
  sunset: sunsetTheme,
  midnight: midnightTheme,
  forest: forestTheme,
  "high-contrast-light": highContrastLight,
  "high-contrast-dark": highContrastDark,
} as const;

// Union of the registry keys: "light" | "dark" | "ocean" | ...
export type ThemeName = keyof typeof themes;

/**
 * Get theme by name. The ThemeName parameter type guarantees the key
 * exists in the registry above.
 */
export function getThemeByName(name: ThemeName): C1Theme {
  return themes[name];
}

View File

@@ -0,0 +1,327 @@
/**
* Common Zod Schemas for Tool Calling
*
* Reusable schemas for common tools across any framework.
* These schemas provide runtime validation and type safety.
*
* Usage:
* import { webSearchTool, createOrderTool } from "./tool-schemas";
* import zodToJsonSchema from "zod-to-json-schema";
*
* const tools = [webSearchTool, createOrderTool];
*
* await client.beta.chat.completions.runTools({
* model: "c1/openai/gpt-5/v-20250930",
* messages: [...],
* tools,
* });
*/
import { z } from "zod";
import zodToJsonSchema from "zod-to-json-schema";
// ============================================================================
// Web Search Tool
// ============================================================================

// Arguments accepted by the web_search tool (validated at runtime by zod).
export const webSearchSchema = z.object({
  query: z.string().min(1).describe("The search query"),
  max_results: z
    .number()
    .int()
    .min(1)
    .max(10)
    .default(5)
    .describe("Maximum number of results to return (1-10)"),
  include_answer: z
    .boolean()
    .default(true)
    .describe("Include AI-generated answer summary"),
});

// Static type inferred from the schema (what the tool handler receives).
export type WebSearchArgs = z.infer<typeof webSearchSchema>;

// OpenAI-compatible tool definition; `parameters` is the zod schema
// converted to JSON Schema so the model knows the expected argument shape.
export const webSearchTool = {
  type: "function" as const,
  function: {
    name: "web_search",
    description:
      "Search the web for current information using a search API. Use this for recent events, news, or information that may have changed recently.",
    parameters: zodToJsonSchema(webSearchSchema),
  },
};

// ============================================================================
// Product/Inventory Tools
// ============================================================================

// Arguments for inventory lookups; both product_type and filter are optional.
export const productLookupSchema = z.object({
  product_type: z
    .enum(["gloves", "hat", "scarf", "all"])
    .optional()
    .describe("Type of product to lookup, or 'all' for entire inventory"),
  filter: z
    .object({
      min_price: z.number().optional(),
      max_price: z.number().optional(),
      in_stock_only: z.boolean().default(true),
    })
    .optional()
    .describe("Optional filters for product search"),
});

export type ProductLookupArgs = z.infer<typeof productLookupSchema>;

export const productLookupTool = {
  type: "function" as const,
  function: {
    name: "lookup_product",
    description:
      "Look up products in the inventory database. Returns product details including price, availability, and specifications.",
    parameters: zodToJsonSchema(productLookupSchema),
  },
};
// ============================================================================
// Order Creation Tool
// ============================================================================

// One line item; the "type" literal discriminates which extra fields apply
// (size for gloves, style for hats, length/material for scarves).
const orderItemSchema = z.discriminatedUnion("type", [
  z.object({
    type: z.literal("gloves"),
    size: z.enum(["XS", "S", "M", "L", "XL", "XXL"]),
    color: z.string().min(1),
    quantity: z.number().int().min(1).max(100),
  }),
  z.object({
    type: z.literal("hat"),
    style: z.enum(["beanie", "baseball", "fedora", "bucket"]),
    color: z.string().min(1),
    quantity: z.number().int().min(1).max(100),
  }),
  z.object({
    type: z.literal("scarf"),
    length: z.enum(["short", "medium", "long"]),
    material: z.enum(["wool", "cotton", "silk", "cashmere"]),
    quantity: z.number().int().min(1).max(100),
  }),
]);

// Full order payload: customer contact, 1-20 items, and a US-style address.
export const createOrderSchema = z.object({
  customer_email: z
    .string()
    .email()
    .describe("Customer's email address for order confirmation"),
  items: z
    .array(orderItemSchema)
    .min(1)
    .max(20)
    .describe("Array of items to include in the order (max 20)"),
  shipping_address: z.object({
    street: z.string().min(1),
    city: z.string().min(1),
    state: z.string().length(2), // US state code
    zip: z.string().regex(/^\d{5}(-\d{4})?$/), // ZIP or ZIP+4
    country: z.string().default("US"),
  }),
  notes: z.string().optional().describe("Optional order notes or instructions"),
});

export type CreateOrderArgs = z.infer<typeof createOrderSchema>;
export type OrderItem = z.infer<typeof orderItemSchema>;

export const createOrderTool = {
  type: "function" as const,
  function: {
    name: "create_order",
    description:
      "Create a new product order with customer information, items, and shipping address. Returns order ID and confirmation details.",
    parameters: zodToJsonSchema(createOrderSchema),
  },
};
// ============================================================================
// Database Query Tool
// ============================================================================

// Arguments for generic database reads.
// NOTE(review): z.record(z.any()) disables type checking of filter values;
// consider z.record(z.unknown()) so handlers must narrow before use.
export const databaseQuerySchema = z.object({
  query_type: z
    .enum(["select", "aggregate", "search"])
    .describe("Type of database query to perform"),
  table: z
    .string()
    .describe("Database table name (e.g., 'users', 'products', 'orders')"),
  filters: z
    .record(z.any())
    .optional()
    .describe("Filter conditions as key-value pairs"),
  limit: z.number().int().min(1).max(100).default(20).describe("Result limit"),
});

export type DatabaseQueryArgs = z.infer<typeof databaseQuerySchema>;

export const databaseQueryTool = {
  type: "function" as const,
  function: {
    name: "query_database",
    description:
      "Query the database for information. Supports select, aggregate, and search operations on various tables.",
    parameters: zodToJsonSchema(databaseQuerySchema),
  },
};

// ============================================================================
// Data Visualization Tool
// ============================================================================

// Arguments for building a simple label/value chart.
export const createVisualizationSchema = z.object({
  chart_type: z
    .enum(["bar", "line", "pie", "scatter", "area"])
    .describe("Type of chart to create"),
  data: z
    .array(
      z.object({
        label: z.string(),
        value: z.number(),
      })
    )
    .min(1)
    .describe("Data points for the visualization"),
  title: z.string().min(1).describe("Chart title"),
  x_label: z.string().optional().describe("X-axis label"),
  y_label: z.string().optional().describe("Y-axis label"),
});

export type CreateVisualizationArgs = z.infer<typeof createVisualizationSchema>;

export const createVisualizationTool = {
  type: "function" as const,
  function: {
    name: "create_visualization",
    description:
      "Create a data visualization chart. Returns chart configuration that will be rendered in the UI.",
    parameters: zodToJsonSchema(createVisualizationSchema),
  },
};
// ============================================================================
// Email Tool
// ============================================================================

// Arguments for sending a single email; cc/bcc lists are optional.
export const sendEmailSchema = z.object({
  to: z.string().email().describe("Recipient email address"),
  subject: z.string().min(1).max(200).describe("Email subject line"),
  body: z.string().min(1).describe("Email body content (supports HTML)"),
  cc: z.array(z.string().email()).optional().describe("CC recipients"),
  bcc: z.array(z.string().email()).optional().describe("BCC recipients"),
});

export type SendEmailArgs = z.infer<typeof sendEmailSchema>;

export const sendEmailTool = {
  type: "function" as const,
  function: {
    name: "send_email",
    description:
      "Send an email to one or more recipients. Use this to send notifications, confirmations, or responses to customers.",
    parameters: zodToJsonSchema(sendEmailSchema),
  },
};

// ============================================================================
// Calendar/Scheduling Tool
// ============================================================================

// Arguments for creating a calendar event; times are ISO 8601 strings.
export const scheduleEventSchema = z.object({
  title: z.string().min(1).describe("Event title"),
  start_time: z.string().datetime().describe("Event start time (ISO 8601)"),
  end_time: z.string().datetime().describe("Event end time (ISO 8601)"),
  description: z.string().optional().describe("Event description"),
  attendees: z
    .array(z.string().email())
    .optional()
    .describe("List of attendee email addresses"),
  location: z.string().optional().describe("Event location or meeting link"),
  reminder_minutes: z
    .number()
    .int()
    .min(0)
    .default(15)
    .describe("Minutes before event to send reminder"),
});

export type ScheduleEventArgs = z.infer<typeof scheduleEventSchema>;

export const scheduleEventTool = {
  type: "function" as const,
  function: {
    name: "schedule_event",
    description:
      "Schedule a calendar event with attendees, location, and reminders.",
    parameters: zodToJsonSchema(scheduleEventSchema),
  },
};
// ============================================================================
// File Upload Tool
// ============================================================================

// Metadata describing a file to upload (the bytes travel out of band).
export const uploadFileSchema = z.object({
  file_name: z.string().min(1).describe("Name of the file"),
  file_type: z
    .string()
    .describe("MIME type (e.g., 'image/png', 'application/pdf')"),
  file_size: z.number().int().min(1).describe("File size in bytes"),
  description: z.string().optional().describe("File description or metadata"),
});

export type UploadFileArgs = z.infer<typeof uploadFileSchema>;

export const uploadFileTool = {
  type: "function" as const,
  function: {
    name: "upload_file",
    description:
      "Upload a file to cloud storage. Returns storage URL and file metadata.",
    parameters: zodToJsonSchema(uploadFileSchema),
  },
};

// ============================================================================
// Export All Tools
// ============================================================================

// Every tool defined in this module, ready to pass to runTools().
export const allTools = [
  webSearchTool,
  productLookupTool,
  createOrderTool,
  databaseQueryTool,
  createVisualizationTool,
  sendEmailTool,
  scheduleEventTool,
  uploadFileTool,
];
/**
 * Return the tool definitions belonging to a category.
 * "all" returns the shared allTools array; the other categories return a
 * freshly-built array on each call.
 */
export function getToolsByCategory(category: "ecommerce" | "data" | "communication" | "all") {
  switch (category) {
    case "ecommerce":
      return [productLookupTool, createOrderTool];
    case "data":
      return [databaseQueryTool, createVisualizationTool];
    case "communication":
      return [sendEmailTool, scheduleEventTool];
    case "all":
      return allTools;
  }
}
/**
 * Validation helper: parse raw tool-call arguments against a zod schema.
 * Returns the parsed, typed value; throws a ZodError on invalid input.
 */
export function validateToolArgs<T extends z.ZodType>(
  schema: T,
  args: unknown
): z.infer<T> {
  return schema.parse(args);
}

View File

@@ -0,0 +1,118 @@
/**
* Basic C1Chat Integration for Vite + React
*
* Minimal setup showing how to integrate TheSys Generative UI
* into a Vite + React application with custom backend.
*
* Features:
* - Simple form input
* - C1Component for custom UI control
* - Manual state management
* - Basic error handling
*
* Prerequisites:
* - Backend API endpoint at /api/chat
* - Environment variable: VITE_API_URL (optional, defaults to relative path)
*/
import "@crayonai/react-ui/styles/index.css";
import { ThemeProvider, C1Component } from "@thesysai/genui-sdk";
import { useState } from "react";
import "./App.css";
/**
 * Root component: a single-question form whose answer is rendered as a
 * generative UI via C1Component. All state is local; there is no chat
 * history -- each response replaces the previous one.
 */
export default function App() {
  const [isLoading, setIsLoading] = useState(false);       // request in flight
  const [c1Response, setC1Response] = useState("");        // latest C1 payload
  const [question, setQuestion] = useState("");            // controlled input value
  const [error, setError] = useState<string | null>(null); // last error message

  // Backend endpoint; falls back to a relative path (same-origin deploys).
  const apiUrl = import.meta.env.VITE_API_URL || "/api/chat";

  // POST the prompt (plus the previous C1 response for context) and store
  // the new response. Called by the form and by onAction callbacks below.
  const makeApiCall = async (query: string, previousResponse?: string) => {
    if (!query.trim()) return;
    setIsLoading(true);
    setError(null);
    try {
      const response = await fetch(apiUrl, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          prompt: query,
          previousC1Response: previousResponse || c1Response,
        }),
      });
      if (!response.ok) {
        throw new Error(`API Error: ${response.status} ${response.statusText}`);
      }
      // The backend may name the payload field either way; accept both.
      const data = await response.json();
      setC1Response(data.response || data.c1Response);
      setQuestion(""); // Clear input after successful request
    } catch (err) {
      console.error("Error calling API:", err);
      setError(err instanceof Error ? err.message : "Failed to get response");
    } finally {
      setIsLoading(false);
    }
  };

  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    makeApiCall(question);
  };

  return (
    <div className="app-container">
      <header>
        <h1>TheSys AI Assistant</h1>
        <p>Ask me anything and I'll generate an interactive response</p>
      </header>
      <form onSubmit={handleSubmit} className="input-form">
        <input
          type="text"
          value={question}
          onChange={(e) => setQuestion(e.target.value)}
          placeholder="Ask me anything..."
          className="question-input"
          disabled={isLoading}
          autoFocus
        />
        <button
          type="submit"
          className="submit-button"
          disabled={isLoading || !question.trim()}
        >
          {isLoading ? "Processing..." : "Send"}
        </button>
      </form>
      {error && (
        <div className="error-message">
          <strong>Error:</strong> {error}
        </div>
      )}
      {/* Render the generated UI once a response exists */}
      {c1Response && (
        <div className="response-container">
          <ThemeProvider>
            <C1Component
              c1Response={c1Response}
              isStreaming={isLoading}
              updateMessage={(message) => setC1Response(message)}
              onAction={({ llmFriendlyMessage }) => {
                // Handle interactive actions from generated UI
                if (!isLoading) {
                  makeApiCall(llmFriendlyMessage, c1Response);
                }
              }}
            />
          </ThemeProvider>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,208 @@
/**
* Custom C1Component Integration with Advanced State Management
*
* Shows how to use C1Component with full control over:
* - Message history
* - Conversation state
* - Custom UI layout
* - Error boundaries
*
* Use this when you need more control than C1Chat provides.
*/
import "@crayonai/react-ui/styles/index.css";
import { ThemeProvider, C1Component } from "@thesysai/genui-sdk";
import { useState, useRef, useEffect } from "react";
import { ErrorBoundary } from "react-error-boundary";
import "./App.css";
// One entry in the conversation transcript kept in component state.
interface Message {
  id: string; // stable React list key, from crypto.randomUUID()
  role: "user" | "assistant";
  content: string; // plain text for "user"; raw C1 payload for "assistant"
  timestamp: Date; // when the message was added locally (client clock)
}
/**
 * Fallback UI rendered by react-error-boundary when a descendant throws.
 * Shows the error message and offers a button that resets the boundary.
 */
function ErrorFallback(props: {
  error: Error;
  resetErrorBoundary: () => void;
}) {
  const { error, resetErrorBoundary } = props;
  return (
    <div className="error-boundary">
      <h2>Something went wrong</h2>
      <pre className="error-details">{error.message}</pre>
      <button onClick={resetErrorBoundary} className="retry-button">
        Try again
      </button>
    </div>
  );
}
/**
 * Chat UI with explicit message-history state wrapped around C1Component.
 *
 * Keeps the full transcript in local state, posts it to /api/chat on every
 * turn, renders assistant turns through <C1Component>, and auto-scrolls as
 * messages arrive. Errors are appended to the transcript as assistant turns.
 */
export default function App() {
  const [messages, setMessages] = useState<Message[]>([]);
  const [currentResponse, setCurrentResponse] = useState("");
  const [isStreaming, setIsStreaming] = useState(false);
  const [inputValue, setInputValue] = useState("");
  const messagesEndRef = useRef<HTMLDivElement>(null);
  // Auto-scroll to bottom when new messages arrive
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  }, [messages, currentResponse]);
  const sendMessage = async (userMessage: string) => {
    if (!userMessage.trim() || isStreaming) return;
    // Add user message
    const userMsg: Message = {
      id: crypto.randomUUID(),
      role: "user",
      content: userMessage,
      timestamp: new Date(),
    };
    setMessages((prev) => [...prev, userMsg]);
    setInputValue("");
    setIsStreaming(true);
    setCurrentResponse("");
    try {
      const response = await fetch("/api/chat", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          // Include userMsg explicitly: the setMessages above has not
          // re-rendered yet, so `messages` does not contain it here.
          messages: [...messages, userMsg].map((m) => ({
            role: m.role,
            content: m.content,
          })),
        }),
      });
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
      }
      const data = await response.json();
      // Guard against a malformed 200 response: C1Component expects a
      // string payload, and `undefined` content would render broken UI.
      if (typeof data.response !== "string") {
        throw new Error("Malformed API response: missing 'response' field");
      }
      // Add assistant response
      const assistantMsg: Message = {
        id: crypto.randomUUID(),
        role: "assistant",
        content: data.response,
        timestamp: new Date(),
      };
      setCurrentResponse(data.response);
      setMessages((prev) => [...prev, assistantMsg]);
    } catch (error) {
      console.error("Error sending message:", error);
      // Add error message to the transcript so the user sees what failed.
      // NOTE(review): this plain-text error is rendered through
      // C1Component like any assistant turn — confirm it displays sanely.
      const errorMsg: Message = {
        id: crypto.randomUUID(),
        role: "assistant",
        content: `Error: ${error instanceof Error ? error.message : "Failed to get response"}`,
        timestamp: new Date(),
      };
      setMessages((prev) => [...prev, errorMsg]);
    } finally {
      setIsStreaming(false);
    }
  };
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    sendMessage(inputValue);
  };
  // Reset the conversation to a blank slate.
  const clearConversation = () => {
    setMessages([]);
    setCurrentResponse("");
  };
  return (
    <ErrorBoundary FallbackComponent={ErrorFallback}>
      <div className="chat-container">
        <div className="chat-header">
          <h1>AI Assistant</h1>
          <button onClick={clearConversation} className="clear-button">
            Clear
          </button>
        </div>
        <div className="messages-container">
          {messages.map((message, index) => (
            <div
              key={message.id}
              className={`message message-${message.role}`}
            >
              <div className="message-header">
                <span className="message-role">
                  {message.role === "user" ? "You" : "AI"}
                </span>
                <span className="message-time">
                  {message.timestamp.toLocaleTimeString()}
                </span>
              </div>
              {message.role === "assistant" ? (
                <ThemeProvider>
                  <C1Component
                    c1Response={message.content}
                    isStreaming={
                      // Only the newest assistant turn can be streaming.
                      index === messages.length - 1 && isStreaming
                    }
                    updateMessage={(updatedContent) => {
                      setCurrentResponse(updatedContent);
                      setMessages((prev) =>
                        prev.map((m) =>
                          m.id === message.id
                            ? { ...m, content: updatedContent }
                            : m
                        )
                      );
                    }}
                    onAction={({ llmFriendlyMessage }) => {
                      sendMessage(llmFriendlyMessage);
                    }}
                  />
                </ThemeProvider>
              ) : (
                <div className="message-content">{message.content}</div>
              )}
            </div>
          ))}
          {isStreaming && !currentResponse && (
            <div className="loading-indicator">
              <div className="spinner" />
              <span>AI is thinking...</span>
            </div>
          )}
          <div ref={messagesEndRef} />
        </div>
        <form onSubmit={handleSubmit} className="input-container">
          <input
            type="text"
            value={inputValue}
            onChange={(e) => setInputValue(e.target.value)}
            placeholder="Type your message..."
            disabled={isStreaming}
            className="message-input"
            autoFocus
          />
          <button
            type="submit"
            disabled={!inputValue.trim() || isStreaming}
            className="send-button"
          >
            {isStreaming ? "..." : "Send"}
          </button>
        </form>
      </div>
    </ErrorBoundary>
  );
}

View File

@@ -0,0 +1,40 @@
{
"name": "thesys-vite-react-example",
"private": true,
"version": "1.0.0",
"type": "module",
"description": "Vite + React integration with TheSys Generative UI",
"scripts": {
"dev": "vite",
"build": "tsc && vite build",
"preview": "vite preview",
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0"
},
"dependencies": {
"@thesysai/genui-sdk": "^0.6.40",
"@crayonai/react-ui": "^0.8.42",
"@crayonai/react-core": "^0.7.6",
"@crayonai/stream": "^0.1.0",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-error-boundary": "^5.0.0",
"openai": "^4.73.0",
"zod": "^3.24.1",
"zod-to-json-schema": "^3.24.1"
},
"devDependencies": {
"@types/react": "^19.0.0",
"@types/react-dom": "^19.0.0",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitejs/plugin-react": "^4.3.4",
"eslint": "^9.0.0",
"eslint-plugin-react-hooks": "^5.0.0",
"eslint-plugin-react-refresh": "^0.4.16",
"typescript": "^5.7.3",
"vite": "^6.0.5"
},
"optionalDependencies": {
"@tavily/core": "^1.0.0"
}
}

View File

@@ -0,0 +1,220 @@
/**
* TheSys C1 with Custom Theming and Dark Mode
*
* Demonstrates:
* - Custom theme configuration
* - Dark mode toggle
* - System theme detection
* - Theme presets
* - CSS variable overrides
*/
import "@crayonai/react-ui/styles/index.css";
import { C1Chat, ThemeProvider } from "@thesysai/genui-sdk";
import { themePresets } from "@crayonai/react-ui";
import { useState, useEffect } from "react";
import "./App.css";
// Theme preference: explicit light/dark, or follow the OS setting.
type ThemeMode = "light" | "dark" | "system";
// Custom theme object (light variant) passed to ThemeProvider.
const customLightTheme = {
  mode: "light" as const,
  colors: {
    primary: "#3b82f6",
    secondary: "#8b5cf6",
    background: "#ffffff",
    foreground: "#1f2937",
    border: "#e5e7eb",
    muted: "#f3f4f6",
    accent: "#10b981",
  },
  fonts: {
    body: "'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif",
    heading: "'Poppins', sans-serif",
    mono: "'Fira Code', 'Courier New', monospace",
  },
  borderRadius: "8px",
  spacing: {
    base: "16px",
  },
};
// Dark variant: inherits fonts/radius/spacing from the light theme and
// overrides only the mode and color palette.
const customDarkTheme = {
  ...customLightTheme,
  mode: "dark" as const,
  colors: {
    primary: "#60a5fa",
    secondary: "#a78bfa",
    background: "#111827",
    foreground: "#f9fafb",
    border: "#374151",
    muted: "#1f2937",
    accent: "#34d399",
  },
};
/**
 * React hook that tracks the OS-level color-scheme preference via
 * matchMedia and re-renders the component whenever it changes.
 */
function useSystemTheme(): "light" | "dark" {
  const [systemTheme, setSystemTheme] = useState<"light" | "dark">(() =>
    window.matchMedia("(prefers-color-scheme: dark)").matches
      ? "dark"
      : "light"
  );
  useEffect(() => {
    const query = window.matchMedia("(prefers-color-scheme: dark)");
    const onChange = (event: MediaQueryListEvent) => {
      setSystemTheme(event.matches ? "dark" : "light");
    };
    query.addEventListener("change", onChange);
    return () => query.removeEventListener("change", onChange);
  }, []);
  return systemTheme;
}
/**
 * Chat page demonstrating TheSys C1 theming: light/dark/system mode
 * selection (persisted to localStorage), switching between a custom theme
 * object and a built-in preset, and a live JSON preview of the resolved
 * theme state.
 */
export default function ThemedChat() {
  // Preferred mode; lazily initialized from localStorage so the choice
  // survives reloads. Defaults to following the OS setting.
  const [themeMode, setThemeMode] = useState<ThemeMode>(
    () => (localStorage.getItem("theme-mode") as ThemeMode) || "system"
  );
  // When true, use the built-in "candy" preset instead of the custom theme.
  const [usePreset, setUsePreset] = useState(false);
  const systemTheme = useSystemTheme();
  // Determine actual theme to use ("system" resolves to the OS preference)
  const actualTheme =
    themeMode === "system" ? systemTheme : themeMode;
  // Choose theme object
  const theme = usePreset
    ? themePresets.candy // Use built-in preset
    : actualTheme === "dark"
    ? customDarkTheme
    : customLightTheme;
  // Persist theme preference
  useEffect(() => {
    localStorage.setItem("theme-mode", themeMode);
    // Apply to document for app-wide styling via [data-theme] CSS selectors
    document.documentElement.setAttribute("data-theme", actualTheme);
  }, [themeMode, actualTheme]);
  return (
    <div className="themed-app">
      <div className="theme-controls">
        <div className="theme-selector">
          <h3>Theme Mode</h3>
          <div className="button-group">
            <button
              className={themeMode === "light" ? "active" : ""}
              onClick={() => setThemeMode("light")}
            >
              Light
            </button>
            <button
              className={themeMode === "dark" ? "active" : ""}
              onClick={() => setThemeMode("dark")}
            >
              🌙 Dark
            </button>
            <button
              className={themeMode === "system" ? "active" : ""}
              onClick={() => setThemeMode("system")}
            >
              💻 System
            </button>
          </div>
        </div>
        <div className="theme-type">
          <h3>Theme Type</h3>
          <div className="button-group">
            <button
              className={!usePreset ? "active" : ""}
              onClick={() => setUsePreset(false)}
            >
              Custom
            </button>
            <button
              className={usePreset ? "active" : ""}
              onClick={() => setUsePreset(true)}
            >
              Preset (Candy)
            </button>
          </div>
        </div>
      </div>
      <div className="chat-wrapper">
        {/* mode is forced to the resolved value so presets follow the toggle */}
        <ThemeProvider theme={{ ...theme, mode: actualTheme }}>
          <C1Chat
            apiUrl="/api/chat"
            agentName="Themed AI Assistant"
            logoUrl="https://placehold.co/100x100/3b82f6/ffffff?text=AI"
          />
        </ThemeProvider>
      </div>
      <div className="theme-info">
        <h3>Current Theme</h3>
        <pre className="theme-preview">
          {JSON.stringify(
            {
              mode: actualTheme,
              usingPreset: usePreset,
              preferredMode: themeMode,
              systemPreference: systemTheme,
            },
            null,
            2
          )}
        </pre>
      </div>
    </div>
  );
}
/**
* CSS Example (App.css):
*
* [data-theme="light"] {
* --app-bg: #ffffff;
* --app-text: #1f2937;
* }
*
* [data-theme="dark"] {
* --app-bg: #111827;
* --app-text: #f9fafb;
* }
*
* .themed-app {
* background: var(--app-bg);
* color: var(--app-text);
* min-height: 100vh;
* transition: background-color 0.3s ease, color 0.3s ease;
* }
*
* .theme-controls {
* padding: 2rem;
* display: flex;
* gap: 2rem;
* border-bottom: 1px solid var(--app-text);
* }
*
* .button-group button {
* padding: 0.5rem 1rem;
* border: 1px solid var(--app-text);
* background: transparent;
* color: var(--app-text);
* cursor: pointer;
* transition: all 0.2s;
* }
*
* .button-group button.active {
* background: var(--app-text);
* color: var(--app-bg);
* }
*/

View File

@@ -0,0 +1,276 @@
/**
* Tool Calling Integration Example
*
* Demonstrates how to integrate tool calling (function calling) with TheSys C1.
* Shows:
* - Web search tool with Tavily API
* - Product inventory lookup
* - Order creation with Zod validation
* - Interactive UI for tool results
*
* Backend Requirements:
* - OpenAI SDK with runTools support
* - Zod for schema validation
* - Tool execution handlers
*/
import "@crayonai/react-ui/styles/index.css";
import { ThemeProvider, C1Component } from "@thesysai/genui-sdk";
import { useState } from "react";
import "./App.css";
// Example tool schemas (these match backend Zod schemas)

/** Call shape for the web search tool. */
interface WebSearchTool {
  name: "web_search";
  args: {
    query: string;
    max_results: number;
  };
}

/** Call shape for inventory lookup; omitting product_type lists everything. */
interface ProductLookupTool {
  name: "lookup_product";
  args: {
    product_type?: "gloves" | "hat" | "scarf";
  };
}

/** Call shape for order creation. */
interface CreateOrderTool {
  name: "create_order";
  args: {
    customer_email: string;
    items: Array<{
      type: "gloves" | "hat" | "scarf";
      quantity: number;
      // Extra per-item attributes (size, color, ...) pass through untyped;
      // `unknown` (not `any`) forces consumers to narrow before use.
      [key: string]: unknown;
    }>;
  };
}

/** Discriminated union over all tool calls, tagged by `name`. */
type ToolCall = WebSearchTool | ProductLookupTool | CreateOrderTool;
/**
 * Chat UI for a tool-calling backend that streams Server-Sent Events.
 *
 * Reads the /api/chat-with-tools SSE stream, tracking `tool_call` events in
 * a badge list and accumulating `content` events into the C1 response.
 *
 * Fixes over the naive reader:
 *  - decode with { stream: true } so multi-byte UTF-8 characters split
 *    across network chunks are not corrupted;
 *  - carry a partial line across reads so a "data: ..." line split between
 *    chunks is parsed once complete instead of being dropped;
 *  - flush the final line even if the stream does not end with a newline.
 */
export default function ToolCallingExample() {
  const [isLoading, setIsLoading] = useState(false);
  const [c1Response, setC1Response] = useState("");
  const [question, setQuestion] = useState("");
  const [activeTools, setActiveTools] = useState<string[]>([]);
  const makeApiCall = async (query: string, previousResponse?: string) => {
    if (!query.trim()) return;
    setIsLoading(true);
    setActiveTools([]);
    try {
      const response = await fetch("/api/chat-with-tools", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          prompt: query,
          previousC1Response: previousResponse,
        }),
      });
      if (!response.ok) {
        throw new Error(`API Error: ${response.status}`);
      }
      // Handle streaming response
      const reader = response.body?.getReader();
      if (!reader) throw new Error("No response body");
      const decoder = new TextDecoder();
      let accumulatedResponse = "";
      // Carries a trailing partial line from one read() into the next.
      let buffer = "";
      // Parse one complete "data: {...}" line from the event stream.
      const handleEventLine = (line: string) => {
        if (!line.startsWith("data: ")) return;
        try {
          const data = JSON.parse(line.slice(6));
          if (data.type === "tool_call") {
            // Track which tools are being called
            setActiveTools((prev) => [...prev, data.tool_name]);
          } else if (data.type === "content") {
            accumulatedResponse += data.content;
            setC1Response(accumulatedResponse);
          }
        } catch (e) {
          // Skip invalid JSON
        }
      };
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        // stream:true keeps multi-byte characters intact across chunks
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? ""; // last piece may be an incomplete line
        for (const line of lines) {
          handleEventLine(line);
        }
      }
      // Flush any decoder remainder and a final non-terminated line.
      buffer += decoder.decode();
      if (buffer) handleEventLine(buffer);
      setQuestion("");
    } catch (err) {
      console.error("Error:", err);
      // NOTE(review): this plain-text error string is rendered through
      // C1Component as if it were a C1 payload — confirm it displays sanely.
      setC1Response(
        `Error: ${err instanceof Error ? err.message : "Failed to get response"}`
      );
    } finally {
      setIsLoading(false);
    }
  };
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    makeApiCall(question);
  };
  // Example prompts to demonstrate tools
  const examplePrompts = [
    "Search the web for the latest AI news",
    "Show me available products in the inventory",
    "Create an order for 2 blue gloves size M and 1 red hat",
  ];
  return (
    <div className="tool-calling-container">
      <header>
        <h1>AI Assistant with Tools</h1>
        <p>Ask me to search the web, check inventory, or create orders</p>
      </header>
      <div className="example-prompts">
        <h3>Try these examples:</h3>
        {examplePrompts.map((prompt, index) => (
          <button
            key={index}
            onClick={() => {
              setQuestion(prompt);
              makeApiCall(prompt);
            }}
            className="example-button"
            disabled={isLoading}
          >
            {prompt}
          </button>
        ))}
      </div>
      <form onSubmit={handleSubmit} className="input-form">
        <input
          type="text"
          value={question}
          onChange={(e) => setQuestion(e.target.value)}
          placeholder="Ask me to use a tool..."
          className="question-input"
          disabled={isLoading}
        />
        <button
          type="submit"
          className="submit-button"
          disabled={isLoading || !question.trim()}
        >
          {isLoading ? "Processing..." : "Send"}
        </button>
      </form>
      {activeTools.length > 0 && (
        <div className="active-tools">
          <h4>Active Tools:</h4>
          <div className="tool-badges">
            {activeTools.map((tool, index) => (
              <span key={index} className="tool-badge">
                {tool}
              </span>
            ))}
          </div>
        </div>
      )}
      {c1Response && (
        <div className="response-container">
          <ThemeProvider>
            <C1Component
              c1Response={c1Response}
              isStreaming={isLoading}
              updateMessage={(message) => setC1Response(message)}
              onAction={({ llmFriendlyMessage, rawAction }) => {
                console.log("Tool action:", rawAction);
                if (!isLoading) {
                  makeApiCall(llmFriendlyMessage, c1Response);
                }
              }}
            />
          </ThemeProvider>
        </div>
      )}
      <div className="tool-info">
        <h3>Available Tools</h3>
        <ul>
          <li>
            <strong>web_search</strong> - Search the web for current information
          </li>
          <li>
            <strong>lookup_product</strong> - Check product inventory
          </li>
          <li>
            <strong>create_order</strong> - Create a new product order
          </li>
        </ul>
      </div>
    </div>
  );
}
/**
* Backend API Example (route.ts or server.ts):
*
* import { z } from "zod";
* import zodToJsonSchema from "zod-to-json-schema";
* import OpenAI from "openai";
* import { TavilySearchAPIClient } from "@tavily/core";
*
* const webSearchSchema = z.object({
* query: z.string(),
* max_results: z.number().int().min(1).max(10).default(5),
* });
*
* const webSearchTool = {
* type: "function" as const,
* function: {
* name: "web_search",
* description: "Search the web for current information",
* parameters: zodToJsonSchema(webSearchSchema),
* },
* };
*
* const client = new OpenAI({
* baseURL: "https://api.thesys.dev/v1/embed",
* apiKey: process.env.THESYS_API_KEY,
* });
*
* const tavily = new TavilySearchAPIClient({
* apiKey: process.env.TAVILY_API_KEY,
* });
*
* export async function POST(req) {
* const { prompt } = await req.json();
*
* const stream = await client.beta.chat.completions.runTools({
* model: "c1/openai/gpt-5/v-20250930",
* messages: [
* {
* role: "system",
* content: "You are a helpful assistant with access to tools.",
* },
* { role: "user", content: prompt },
* ],
* stream: true,
* tools: [webSearchTool, productLookupTool, createOrderTool],
* toolChoice: "auto",
* });
*
* // Handle tool execution and streaming...
* }
*/