Initial commit

templates/python-backend/README.md

# Python Backend Templates for TheSys Generative UI

This directory contains production-ready Python backend templates for integrating the TheSys C1 Generative UI API.

## Available Templates

### 1. FastAPI Backend (`fastapi-chat.py`)

A modern async web framework with automatic API documentation.

**Features**:
- Async streaming support
- Built-in request validation with Pydantic
- Automatic OpenAPI docs
- CORS middleware configured
- Type hints throughout

**Run**:
```bash
# Install dependencies
pip install -r requirements.txt

# Set environment variable
export THESYS_API_KEY=sk-th-your-key-here

# Run server
python fastapi-chat.py

# Or with uvicorn directly
uvicorn fastapi-chat:app --reload --port 8000
```

**API Docs**: Visit `http://localhost:8000/docs` for interactive API documentation.

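For a quick smoke test without a frontend, you can stream the endpoint from Python. A minimal sketch using the `requests` library (not in `requirements.txt`; `pip install requests`); the endpoint path and payload match the template above:

```python
# Minimal streaming client for the FastAPI template (smoke test).
# Assumes the server from fastapi-chat.py is running on localhost:8000.
import requests

response = requests.post(
    "http://localhost:8000/api/chat",
    json={"prompt": "Create a signup form"},
    stream=True,  # keep the connection open and read chunks as they arrive
)
response.raise_for_status()

for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
    if chunk:
        print(chunk, end="", flush=True)
```
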
---

### 2. Flask Backend (`flask-chat.py`)

A lightweight and flexible web framework.

**Features**:
- Simple and familiar Flask API
- CORS support with flask-cors
- Streaming response handling
- Easy to customize and extend

**Run**:
```bash
# Install dependencies
pip install -r requirements.txt

# Set environment variable
export THESYS_API_KEY=sk-th-your-key-here

# Run server
python flask-chat.py

# Or with the Flask CLI
export FLASK_APP=flask-chat.py
flask run --port 5000
```

---

## Setup

### 1. Install Dependencies

```bash
# Create virtual environment
python -m venv venv
source venv/bin/activate  # On Windows: venv\Scripts\activate

# Install all dependencies
pip install -r requirements.txt

# OR install only what you need
pip install thesys-genui-sdk openai python-dotenv

# For FastAPI
pip install fastapi uvicorn

# For Flask
pip install flask flask-cors
```

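To confirm the core install, a quick import check (a minimal sketch; the `thesys_genui_sdk` import name is the one used by the templates in this directory):

```python
# Sanity check: these should import cleanly after installation.
import openai
import thesys_genui_sdk  # package installed as thesys-genui-sdk
from dotenv import load_dotenv

print("Core imports OK; openai", openai.__version__)
```
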
### 2. Environment Variables

Create a `.env` file:

```bash
THESYS_API_KEY=sk-th-your-api-key-here
```

Get your API key from: https://console.thesys.dev/keys

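Both templates load this file via `python-dotenv`. A minimal sketch of failing fast at startup if the key is missing (the error message is illustrative):

```python
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory

if not os.getenv("THESYS_API_KEY"):
    raise RuntimeError("THESYS_API_KEY is not set; add it to .env or export it")
```
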
### 3. Choose Your Model

The two templates default to different models to show the range of options:

**FastAPI**: Claude Sonnet 4
```python
model="c1/anthropic/claude-sonnet-4/v-20250930"
```

**Flask**: GPT-5
```python
model="c1/openai/gpt-5/v-20250930"
```

Change to any supported model:
- `c1/anthropic/claude-sonnet-4/v-20250930` - Claude Sonnet 4 (stable)
- `c1/openai/gpt-5/v-20250930` - GPT-5 (stable)
- `c1-exp/openai/gpt-4.1/v-20250617` - GPT-4.1 (experimental)
- `c1-exp/anthropic/claude-3.5-haiku/v-20250709` - Claude 3.5 Haiku (experimental)

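To switch models without editing code, one option is to read the model id from the environment (a sketch; the `C1_MODEL` variable name is invented for this example):

```python
import os

# C1_MODEL is a hypothetical variable name; falls back to the stable Claude build.
MODEL = os.getenv("C1_MODEL", "c1/anthropic/claude-sonnet-4/v-20250930")

# then pass it through: client.chat.completions.create(model=MODEL, ...)
```
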
---

## Frontend Integration

### React + Vite Example

```typescript
const makeApiCall = async (prompt: string) => {
  const response = await fetch("http://localhost:8000/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt })
  });

  const reader = response.body?.getReader();
  if (!reader) return; // no body to stream (e.g. the request failed)

  const decoder = new TextDecoder();

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    // stream: true keeps multi-byte characters intact across chunk boundaries
    const chunk = decoder.decode(value, { stream: true });
    setC1Response(prev => prev + chunk);
  }
};
```

### Next.js API Route (Proxy)

```typescript
// app/api/chat/route.ts
export async function POST(req: Request) {
  const { prompt } = await req.json();

  const response = await fetch("http://localhost:8000/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt })
  });

  return new Response(response.body, {
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache"
    }
  });
}
```

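Routing through Next.js like this keeps the Python backend off the public internet and sidesteps browser CORS entirely, since the browser only ever talks to the Next.js origin.
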
---

## Production Deployment

### Environment Variables

```bash
# Production
THESYS_API_KEY=sk-th-production-key
HOST=0.0.0.0
PORT=8000
ENVIRONMENT=production
ALLOWED_ORIGINS=https://your-frontend.com
```

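The templates ship with `allow_origins=["*"]`; in production you can drive the allowlist from the `ALLOWED_ORIGINS` variable above. A sketch for the FastAPI template (the comma-separated format is an assumption of this example):

```python
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# e.g. ALLOWED_ORIGINS=https://a.com,https://b.com (comma-separated by this sketch's convention)
origins = [o.strip() for o in os.getenv("ALLOWED_ORIGINS", "http://localhost:5173").split(",")]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```
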
### FastAPI (Recommended for Production)

```bash
# Install production server
pip install gunicorn

# Run with Gunicorn
gunicorn fastapi-chat:app \
  --workers 4 \
  --worker-class uvicorn.workers.UvicornWorker \
  --bind 0.0.0.0:8000 \
  --timeout 120
```

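The `--worker-class uvicorn.workers.UvicornWorker` flag is what lets Gunicorn manage an ASGI app; Gunicorn's default sync workers cannot serve FastAPI.
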
### Flask Production

```bash
# Install production server
pip install gunicorn

# Run with Gunicorn
gunicorn flask-chat:app \
  --workers 4 \
  --bind 0.0.0.0:5000 \
  --timeout 120
```

### Docker Example

```dockerfile
FROM python:3.12-slim

WORKDIR /app

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY fastapi-chat.py .

ENV THESYS_API_KEY=""
ENV PORT=8000

# Shell form so ${PORT} is expanded at container start
CMD uvicorn fastapi-chat:app --host 0.0.0.0 --port ${PORT}
```

---

## Troubleshooting

### Common Issues

**1. Import Error: `thesys_genui_sdk` not found**
```bash
pip install thesys-genui-sdk
```

**2. CORS Errors**
Update the CORS configuration in the template to match your frontend URL:
```python
allow_origins=["http://localhost:5173"]  # Vite default
```

**3. Streaming Not Working**
Ensure:
- `stream=True` is set in the API call
- The endpoint uses the `@with_c1_response` decorator
- The proper response headers are set

**4. Authentication Failed (401)**
Check that `THESYS_API_KEY` is set correctly:
```python
import os
print(os.getenv("THESYS_API_KEY"))  # Should not be None
```

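To verify the key end to end, a minimal non-streaming call against the same base URL the templates use (a sketch; any model from the list above works):

```python
import os

import openai

client = openai.OpenAI(
    base_url="https://api.thesys.dev/v1/embed",
    api_key=os.getenv("THESYS_API_KEY"),
)

# A 401 here means the key is missing or wrong; a normal reply means auth is fine.
resp = client.chat.completions.create(
    model="c1/anthropic/claude-sonnet-4/v-20250930",
    messages=[{"role": "user", "content": "ping"}],
)
print(resp.choices[0].message.content)
```
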
---

## Next Steps

1. Copy the template you want to use
2. Install dependencies from `requirements.txt`
3. Set your `THESYS_API_KEY` in `.env`
4. Run the server
5. Connect your React frontend
6. Customize the system prompt and model as needed

For more examples, see the main SKILL.md documentation.


templates/python-backend/fastapi-chat.py

"""
|
||||
TheSys Generative UI - FastAPI Backend Example
|
||||
|
||||
This example demonstrates how to set up a FastAPI backend that integrates
|
||||
with TheSys C1 API for streaming generative UI responses.
|
||||
|
||||
Dependencies:
|
||||
- fastapi
|
||||
- uvicorn
|
||||
- thesys-genui-sdk
|
||||
- openai
|
||||
- python-dotenv
|
||||
"""
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import StreamingResponse
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from pydantic import BaseModel
|
||||
from thesys_genui_sdk import with_c1_response, write_content
|
||||
import openai
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Initialize FastAPI app
app = FastAPI(
    title="TheSys C1 API Backend",
    description="FastAPI backend for TheSys Generative UI",
    version="1.0.0",
)

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure for your frontend URL in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize OpenAI client pointed at the TheSys C1 API
client = openai.OpenAI(
    base_url="https://api.thesys.dev/v1/embed",
    api_key=os.getenv("THESYS_API_KEY"),
)


# Request model
class ChatRequest(BaseModel):
    prompt: str
    thread_id: str | None = None
    response_id: str | None = None


@app.get("/")
async def root():
    """Health check endpoint"""
    return {
        "status": "ok",
        "message": "TheSys C1 API Backend is running",
    }


@app.post("/api/chat")
@with_c1_response  # Automatically handles streaming headers
async def chat_endpoint(request: ChatRequest):
    """
    Streaming chat endpoint that generates UI components.

    Args:
        request: ChatRequest with prompt and optional thread/response IDs

    Returns:
        StreamingResponse with C1-formatted UI chunks
    """
    try:
        # Create streaming completion request
        stream = client.chat.completions.create(
            model="c1/anthropic/claude-sonnet-4/v-20250930",
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful AI assistant that creates interactive user interfaces.",
                },
                {
                    "role": "user",
                    "content": request.prompt,
                },
            ],
            stream=True,
            temperature=0.7,
            max_tokens=4096,
        )

        # Stream chunks to the frontend as they arrive
        async def generate():
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content:
                    yield write_content(content)

        return StreamingResponse(
            generate(),
            media_type="text/event-stream",
        )

    except Exception as e:
        # Return a proper 500 instead of a bare dict so clients see the failure
        return JSONResponse(
            status_code=500,
            content={
                "error": str(e),
                "message": "Failed to generate response",
            },
        )


if __name__ == "__main__":
    import uvicorn

    # Run the development server
    uvicorn.run(
        "fastapi-chat:app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        log_level="info",
    )

templates/python-backend/flask-chat.py

"""
|
||||
TheSys Generative UI - Flask Backend Example
|
||||
|
||||
This example demonstrates how to set up a Flask backend that integrates
|
||||
with TheSys C1 API for streaming generative UI responses.
|
||||
|
||||
Dependencies:
|
||||
- flask
|
||||
- flask-cors
|
||||
- thesys-genui-sdk
|
||||
- openai
|
||||
- python-dotenv
|
||||
"""
|
||||
|
||||
from flask import Flask, request, Response, jsonify
|
||||
from flask_cors import CORS
|
||||
from thesys_genui_sdk import with_c1_response, write_content
|
||||
import openai
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Initialize Flask app
app = Flask(__name__)

# Configure CORS
CORS(app, resources={
    r"/api/*": {
        "origins": "*",  # Configure for your frontend URL in production
        "allow_headers": "*",
        "expose_headers": "*",
    }
})

# Initialize OpenAI client pointed at the TheSys C1 API
client = openai.OpenAI(
    base_url="https://api.thesys.dev/v1/embed",
    api_key=os.getenv("THESYS_API_KEY"),
)


@app.route("/")
|
||||
def root():
|
||||
"""Health check endpoint"""
|
||||
return jsonify({
|
||||
"status": "ok",
|
||||
"message": "TheSys C1 API Backend is running"
|
||||
})
|
||||
|
||||
|
||||
@app.route("/api/chat", methods=["POST"])
|
||||
@with_c1_response # Automatically handles streaming headers
|
||||
def chat():
|
||||
"""
|
||||
Streaming chat endpoint that generates UI components.
|
||||
|
||||
Request JSON:
|
||||
{
|
||||
"prompt": str,
|
||||
"thread_id": str (optional),
|
||||
"response_id": str (optional)
|
||||
}
|
||||
|
||||
Returns:
|
||||
StreamingResponse with C1-formatted UI chunks
|
||||
"""
|
||||
try:
|
||||
data = request.get_json()
|
||||
prompt = data.get("prompt")
|
||||
|
||||
if not prompt:
|
||||
return jsonify({"error": "Prompt is required"}), 400
|
||||
|
||||
# Create streaming completion request
|
||||
stream = client.chat.completions.create(
|
||||
model="c1/openai/gpt-5/v-20250930",
|
||||
messages=[
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a helpful AI assistant that creates interactive user interfaces."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt
|
||||
}
|
||||
],
|
||||
stream=True,
|
||||
temperature=0.7,
|
||||
max_tokens=4096
|
||||
)
|
||||
|
||||
# Stream chunks to frontend
|
||||
def generate():
|
||||
for chunk in stream:
|
||||
content = chunk.choices[0].delta.content
|
||||
if content:
|
||||
yield write_content(content)
|
||||
|
||||
return Response(
|
||||
generate(),
|
||||
mimetype="text/event-stream"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
return jsonify({
|
||||
"error": str(e),
|
||||
"message": "Failed to generate response"
|
||||
}), 500
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the development server (use Gunicorn in production)
    app.run(
        host="0.0.0.0",
        port=5000,
        debug=True,
    )

templates/python-backend/requirements.txt

# TheSys Generative UI - Python Backend Dependencies

# Core dependencies
thesys-genui-sdk>=0.1.0
openai>=1.59.5
python-dotenv>=1.0.1

# FastAPI dependencies (for fastapi-chat.py)
fastapi>=0.115.6
uvicorn[standard]>=0.34.0
pydantic>=2.10.5

# Flask dependencies (for flask-chat.py)
flask>=3.1.0
flask-cors>=5.0.0

# Optional: for form/multipart request handling
python-multipart>=0.0.20