Initial commit

.claude-plugin/plugin.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
  "name": "cloudflare-hyperdrive",
  "description": "Connect Workers to PostgreSQL/MySQL with Hyperdrive's global pooling and caching. Use when: connecting to existing databases, setting up connection pools, using node-postgres/mysql2, integrating Drizzle/Prisma, or troubleshooting pool acquisition failures, TLS errors, nodejs_compat missing, or eval() disallowed.",
  "version": "1.0.0",
  "author": {
    "name": "Jeremy Dawes",
    "email": "jeremy@jezweb.net"
  },
  "skills": [
    "./"
  ]
}

README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# cloudflare-hyperdrive

Connect Workers to PostgreSQL/MySQL with Hyperdrive's global pooling and caching. Use when: connecting to existing databases, setting up connection pools, using node-postgres/mysql2, integrating Drizzle/Prisma, or troubleshooting pool acquisition failures, TLS errors, nodejs_compat missing, or eval() disallowed.

SKILL.md (new file, 493 lines)
@@ -0,0 +1,493 @@
---
name: cloudflare-hyperdrive
description: |
  Connect Workers to PostgreSQL/MySQL with Hyperdrive's global pooling and caching. Use when: connecting to existing databases, setting up connection pools, using node-postgres/mysql2, integrating Drizzle/Prisma, or troubleshooting pool acquisition failures, TLS errors, nodejs_compat missing, or eval() disallowed.
license: MIT
---

# Cloudflare Hyperdrive

**Status**: Production Ready ✅
**Last Updated**: 2025-11-23
**Dependencies**: cloudflare-worker-base (recommended for Worker setup)
**Latest Versions**: wrangler@4.50.0, pg@8.16.3+ (minimum), postgres@3.4.7, mysql2@3.15.3

**Recent Updates (2025)**:
- **July 2025**: Configurable connection counts (min 5, max ~20 Free/~100 Paid)
- **May 2025**: 5x faster cache hits (regional prepared statement caching), FedRAMP Moderate authorization
- **April 2025**: Free plan availability (10 configs), MySQL GA support
- **March 2025**: 90% latency reduction (pools near database), IP access control (standard CF IP ranges)
- **nodejs_compat_v2**: pg driver no longer requires node_compat mode (auto-enabled with compatibility_date 2024-09-23+)
- **Limits**: 25 Hyperdrive configurations per account (Paid), 10 per account (Free)

---

## Quick Start (5 Minutes)

### 1. Create Hyperdrive Configuration

```bash
# For PostgreSQL
npx wrangler hyperdrive create my-postgres-db \
  --connection-string="postgres://user:password@db-host.cloud:5432/database"

# For MySQL
npx wrangler hyperdrive create my-mysql-db \
  --connection-string="mysql://user:password@db-host.cloud:3306/database"

# Output:
# ✅ Successfully created Hyperdrive configuration
#
# [[hyperdrive]]
# binding = "HYPERDRIVE"
# id = "a76a99bc-7901-48c9-9c15-c4b11b559606"
```

**Save the `id` value** - you'll need it in the next step!

---

### 2. Configure Bindings in wrangler.jsonc

Add to your `wrangler.jsonc`:

```jsonc
{
  "name": "my-worker",
  "main": "src/index.ts",
  "compatibility_date": "2024-09-23",
  "compatibility_flags": ["nodejs_compat"], // REQUIRED for database drivers
  "hyperdrive": [
    {
      "binding": "HYPERDRIVE", // Available as env.HYPERDRIVE
      "id": "a76a99bc-7901-48c9-9c15-c4b11b559606" // From wrangler hyperdrive create
    }
  ]
}
```

**CRITICAL:**
- `nodejs_compat` flag is **REQUIRED** for all database drivers
- `binding` is how you access Hyperdrive in code (`env.HYPERDRIVE`)
- `id` is the Hyperdrive configuration ID (NOT your database ID)

---

### 3. Install Database Driver

```bash
# For PostgreSQL (choose one)
npm install pg        # node-postgres (most common)
npm install postgres  # postgres.js (modern, minimum v3.4.5)

# For MySQL
npm install mysql2    # mysql2 (minimum v3.13.0)
```

---

### 4. Query Your Database

**PostgreSQL with node-postgres (pg):**
```typescript
import { Client } from "pg";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(request: Request, env: Bindings, ctx: ExecutionContext) {
    const client = new Client({
      connectionString: env.HYPERDRIVE.connectionString
    });

    await client.connect();

    try {
      const result = await client.query('SELECT * FROM users LIMIT 10');
      return Response.json({ users: result.rows });
    } finally {
      // Clean up connection AFTER response is sent
      ctx.waitUntil(client.end());
    }
  }
};
```

**MySQL with mysql2:**
```typescript
import { createConnection } from "mysql2/promise";

export default {
  async fetch(request: Request, env: Bindings, ctx: ExecutionContext) {
    const connection = await createConnection({
      host: env.HYPERDRIVE.host,
      user: env.HYPERDRIVE.user,
      password: env.HYPERDRIVE.password,
      database: env.HYPERDRIVE.database,
      port: env.HYPERDRIVE.port,
      disableEval: true // REQUIRED for Workers (eval() not supported)
    });

    try {
      const [rows] = await connection.query('SELECT * FROM users LIMIT 10');
      return Response.json({ users: rows });
    } finally {
      ctx.waitUntil(connection.end());
    }
  }
};
```

---

### 5. Deploy

```bash
npx wrangler deploy
```

**That's it!** Your Worker now connects to your existing database via Hyperdrive with:
- ✅ Global connection pooling
- ✅ Automatic query caching
- ✅ Reduced latency (eliminates 7 round trips)

---

## How Hyperdrive Works

Hyperdrive eliminates 7 connection round trips (TCP + TLS + auth) by:
- Edge connection setup near Worker (low latency)
- Connection pooling near database (March 2025: 90% latency reduction)
- Query caching at edge (May 2025: 5x faster cache hits)

**Result**: Single-region databases feel globally distributed.

---

## Setup Steps

### Prerequisites

- Cloudflare account with Workers access
- PostgreSQL (v9.0-17.x) or MySQL (v5.7-8.x) database
- Database accessible via public internet (TLS/SSL required) or private network (Cloudflare Tunnel)
- **April 2025**: Available on Free plan (10 configs) and Paid plan (25 configs)

### Connection String Formats

```bash
# PostgreSQL
postgres://user:password@host:5432/database
postgres://user:password@host:5432/database?sslmode=require

# MySQL
mysql://user:password@host:3306/database

# URL-encode special chars: p@ssw$rd → p%40ssw%24rd
```
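
If credentials contain special characters, standard `encodeURIComponent` handles the escaping; a minimal sketch (the host, database, and credential values here are illustrative placeholders):

```typescript
// Build a connection string from raw credentials, URL-encoding the parts
// that may contain reserved characters. Values are illustrative.
const user = "app_user";
const password = "p@ssw$rd"; // encodes to p%40ssw%24rd
const connectionString =
  `postgres://${encodeURIComponent(user)}:${encodeURIComponent(password)}` +
  `@db-host.cloud:5432/database`;
```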

---

## Connection Patterns

### Single Connection (pg.Client)
```typescript
const client = new Client({ connectionString: env.HYPERDRIVE.connectionString });
await client.connect();
const result = await client.query('SELECT ...');
ctx.waitUntil(client.end()); // CRITICAL: Non-blocking cleanup
```
**Use for**: Simple queries, single query per request

### Connection Pool (pg.Pool)
```typescript
const pool = new Pool({
  connectionString: env.HYPERDRIVE.connectionString,
  max: 5 // CRITICAL: Workers limit is 6 connections (July 2025: configurable ~20 Free, ~100 Paid)
});
const [result1, result2] = await Promise.all([
  pool.query('SELECT ...'),
  pool.query('SELECT ...')
]);
ctx.waitUntil(pool.end());
```
**Use for**: Parallel queries in single request

### Connection Cleanup Rule
**ALWAYS use `ctx.waitUntil(client.end())`** - non-blocking cleanup after the response is sent
**NEVER use `await client.end()`** - blocks the response, adds latency

---

## ORM Integration

### Drizzle ORM
```typescript
import { drizzle } from "drizzle-orm/postgres-js";
import postgres from "postgres";
import { users } from "./db/schema"; // your Drizzle schema

const sql = postgres(env.HYPERDRIVE.connectionString, { max: 5 });
const db = drizzle(sql);
const allUsers = await db.select().from(users);
ctx.waitUntil(sql.end());
```

### Prisma ORM
```typescript
import { PrismaPg } from "@prisma/adapter-pg";
import { PrismaClient } from "@prisma/client";
import { Pool } from "pg";

const pool = new Pool({ connectionString: env.HYPERDRIVE.connectionString, max: 5 });
const adapter = new PrismaPg(pool);
const prisma = new PrismaClient({ adapter });
const users = await prisma.user.findMany();
ctx.waitUntil(pool.end());
```

**Note**: Prisma requires driver adapters (`@prisma/adapter-pg`).

---

## Local Development

**Option 1: Environment Variable (Recommended)**
```bash
export CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE="postgres://user:password@localhost:5432/local_db"
npx wrangler dev
```
Safe to commit your config - no credentials end up in wrangler.jsonc.

**Option 2: localConnectionString in wrangler.jsonc**
```jsonc
{ "hyperdrive": [{ "binding": "HYPERDRIVE", "id": "prod-id", "localConnectionString": "postgres://..." }] }
```
⚠️ Don't commit credentials to version control.

**Option 3: Remote Development**
```bash
npx wrangler dev --remote # ⚠️ Uses PRODUCTION database
```

---

## Query Caching

**Cached**: SELECT (non-mutating queries)
**NOT Cached**: INSERT, UPDATE, DELETE, volatile functions (LASTVAL, LAST_INSERT_ID)
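
Since volatile functions such as `LASTVAL()` bypass the cache (and cost a second query), one pattern worth preferring on PostgreSQL is `RETURNING`; a small sketch - the table and column are illustrative:

```typescript
// Instead of INSERT followed by SELECT LASTVAL() (volatile, never cached),
// have the INSERT itself return the generated id.
const result = await client.query(
  "INSERT INTO users (name) VALUES ($1) RETURNING id",
  ["Alice"]
);
const newId = result.rows[0].id;
```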

**May 2025**: 5x faster cache hits via regional prepared statement caching.

**Critical for postgres.js:**
```typescript
const sql = postgres(env.HYPERDRIVE.connectionString, {
  prepare: true // REQUIRED for caching
});
```

**Check cache status:**
```typescript
response.headers.get('cf-cache-status'); // HIT, MISS, BYPASS, EXPIRED
```

---

## TLS/SSL Configuration

**SSL Modes**: `require` (default), `verify-ca` (verify CA), `verify-full` (verify CA + hostname)

**Server Certificates (verify-ca/verify-full):**
```bash
npx wrangler cert upload certificate-authority --ca-cert root-ca.pem --name my-ca-cert
npx wrangler hyperdrive create my-db --connection-string="postgres://..." --ca-certificate-id <ID> --sslmode verify-full
```

**Client Certificates (mTLS):**
```bash
npx wrangler cert upload mtls-certificate --cert client-cert.pem --key client-key.pem --name my-cert
npx wrangler hyperdrive create my-db --connection-string="postgres://..." --mtls-certificate-id <ID>
```

---

## Private Database Access (Cloudflare Tunnel)

Connect to databases in private networks (VPCs, on-premises):

```bash
# 1. Install cloudflared (macOS: brew install cloudflare/cloudflare/cloudflared)
# 2. Create tunnel
cloudflared tunnel create my-db-tunnel

# 3. Configure config.yml
# tunnel: <TUNNEL_ID>
# ingress:
#   - hostname: db.example.com
#     service: tcp://localhost:5432

# 4. Run tunnel
cloudflared tunnel run my-db-tunnel

# 5. Create Hyperdrive
npx wrangler hyperdrive create my-private-db --connection-string="postgres://user:password@db.example.com:5432/database"
```

---

## Critical Rules

### Always Do

✅ Include `nodejs_compat` in `compatibility_flags`
✅ Use `ctx.waitUntil(client.end())` for connection cleanup
✅ Set `max: 5` for connection pools (Workers limit: 6)
✅ Enable TLS/SSL on your database (Hyperdrive requires it)
✅ Use prepared statements for caching (postgres.js: `prepare: true`)
✅ Set `disableEval: true` for mysql2 driver
✅ Handle errors gracefully with try/catch
✅ Use environment variables for local development connection strings
✅ Test locally with `wrangler dev` before deploying

### Never Do

❌ Skip `nodejs_compat` flag (causes "No such module" errors)
❌ Use private IP addresses directly (use Cloudflare Tunnel instead)
❌ Use `await client.end()` (blocks response, use `ctx.waitUntil()`)
❌ Set connection pool max > 5 (exceeds Workers' 6 connection limit)
❌ Wrap all queries in transactions (limits connection multiplexing)
❌ Use SQL-level PREPARE/EXECUTE/DEALLOCATE (unsupported)
❌ Use advisory locks, LISTEN/NOTIFY (PostgreSQL unsupported features)
❌ Use multi-statement queries in MySQL (unsupported)
❌ Commit database credentials to version control

---

## Wrangler Commands Reference

```bash
# Create Hyperdrive configuration
wrangler hyperdrive create <name> --connection-string="postgres://..."

# List all Hyperdrive configurations
wrangler hyperdrive list

# Get details of a configuration
wrangler hyperdrive get <hyperdrive-id>

# Update connection string
wrangler hyperdrive update <hyperdrive-id> --connection-string="postgres://..."

# Delete configuration
wrangler hyperdrive delete <hyperdrive-id>

# Upload CA certificate
wrangler cert upload certificate-authority --ca-cert <file>.pem --name <name>

# Upload client certificate pair
wrangler cert upload mtls-certificate --cert <cert>.pem --key <key>.pem --name <name>
```

---

## Supported Databases

**PostgreSQL (v9.0-17.x)**: AWS RDS/Aurora, Google Cloud SQL, Azure, Neon, Supabase, PlanetScale, Timescale, CockroachDB, Materialize, Fly.io, pgEdge, Prisma Postgres

**MySQL (v5.7-8.x)**: AWS RDS/Aurora, Google Cloud SQL, Azure, PlanetScale, MariaDB (April 2025 GA)

**NOT Supported**: SQL Server, MongoDB, Oracle

---

## Unsupported Features

### PostgreSQL
- SQL-level prepared statements (`PREPARE`, `EXECUTE`, `DEALLOCATE`)
- Advisory locks
- `LISTEN` and `NOTIFY`
- Per-session state modifications

### MySQL
- Non-UTF8 characters in queries
- `USE` statements
- Multi-statement queries
- Protocol-level prepared statements (`COM_STMT_PREPARE`)
- `COM_INIT_DB` messages
- Auth plugins other than `caching_sha2_password` or `mysql_native_password`

**Workaround**: For unsupported features, create a second direct client connection (without Hyperdrive), as sketched below.
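
A minimal sketch of that workaround, assuming the raw (non-Hyperdrive) connection string is stored in a separate secret - the `DIRECT_DATABASE_URL` name and the channel are illustrative, and a long-lived listener is still bounded by the Worker's request lifecycle:

```typescript
import { Client } from "pg";

// Direct client for a feature Hyperdrive doesn't proxy (e.g. LISTEN/NOTIFY).
// env.DIRECT_DATABASE_URL is an assumed secret holding the raw database URL.
const direct = new Client({ connectionString: env.DIRECT_DATABASE_URL });
await direct.connect();
await direct.query("LISTEN my_channel");
direct.on("notification", (msg) => console.log(msg.channel, msg.payload));
```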

---

## Performance Best Practices

1. **Avoid long-running transactions** - Limits connection multiplexing
2. **Use prepared statements** - Enables query caching (postgres.js: `prepare: true`)
3. **Set max: 5 for pools** - Stays within Workers' 6 connection limit
4. **Disable fetch_types if not needed** - Reduces latency (postgres.js; see the sketch below)
5. **Use ctx.waitUntil() for cleanup** - Non-blocking connection close
6. **Cache-friendly queries** - Prefer SELECT over complex joins
7. **Index frequently queried columns** - Improves query performance
8. **Monitor with Hyperdrive analytics** - Track cache hit ratios and latency
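
Items 2 and 4 combined in one postgres.js sketch (both are documented postgres.js options; values shown are the ones this guide recommends):

```typescript
import postgres from "postgres";

const sql = postgres(env.HYPERDRIVE.connectionString, {
  max: 5,             // stay under the Workers connection limit
  prepare: true,      // prepared statements enable Hyperdrive's query cache
  fetch_types: false  // skip the extra type-fetch round trip if you don't need custom/array types
});
```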

---

## Troubleshooting

See `references/troubleshooting.md` for complete error reference with solutions.

**Quick fixes:**

| Error | Solution |
|-------|----------|
| "No such module 'node:*'" | Add `nodejs_compat` to compatibility_flags |
| "TLS not supported by database" | Enable SSL/TLS on your database |
| "Connection refused" | Check firewall rules, allow public internet or use Tunnel |
| "Failed to acquire connection" | Use `ctx.waitUntil()` for cleanup, avoid long transactions |
| "Code generation from strings disallowed" | Set `disableEval: true` in mysql2 config |
| "Bad hostname" | Verify DNS resolves, check for typos |
| "Invalid database credentials" | Check username/password (case-sensitive) |

---

## Metrics and Analytics

[Hyperdrive Dashboard](https://dash.cloudflare.com/?to=/:account/workers/hyperdrive) → Select config → Metrics tab

**Available**: Query count, cache hit ratio, query latency (p50/p95/p99), connection latency, query/result bytes, error rate

---

## Credential Rotation

```bash
# Option 1: Create new config (zero downtime)
wrangler hyperdrive create my-db-v2 --connection-string="postgres://new-creds..."
# Update wrangler.jsonc, deploy, delete old config

# Option 2: Update existing
wrangler hyperdrive update <id> --connection-string="postgres://new-creds..."
```

**Best practice**: Separate configs for staging/production.

---

## References

- [Official Documentation](https://developers.cloudflare.com/hyperdrive/)
- [Get Started Guide](https://developers.cloudflare.com/hyperdrive/get-started/)
- [How Hyperdrive Works](https://developers.cloudflare.com/hyperdrive/configuration/how-hyperdrive-works/)
- [Query Caching](https://developers.cloudflare.com/hyperdrive/configuration/query-caching/)
- [Local Development](https://developers.cloudflare.com/hyperdrive/configuration/local-development/)
- [TLS/SSL Certificates](https://developers.cloudflare.com/hyperdrive/configuration/tls-ssl-certificates-for-hyperdrive/)
- [Troubleshooting Guide](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)
- [Wrangler Commands](https://developers.cloudflare.com/hyperdrive/reference/wrangler-commands/)
- [Supported Databases](https://developers.cloudflare.com/hyperdrive/reference/supported-databases-and-features/)

---

**Last Updated**: 2025-11-23
**Package Versions**: wrangler@4.50.0, pg@8.16.3+ (minimum), postgres@3.4.7, mysql2@3.15.3
**Production Tested**: Based on official Cloudflare documentation and community examples

plugin.lock.json (new file, 117 lines)
@@ -0,0 +1,117 @@
{
  "$schema": "internal://schemas/plugin.lock.v1.json",
  "pluginId": "gh:jezweb/claude-skills:skills/cloudflare-hyperdrive",
  "normalized": {
    "repo": null,
    "ref": "refs/tags/v20251128.0",
    "commit": "efb49861e6c6035ec3c370aadbfef2f3cabaf1cb",
    "treeHash": "e4823604cd08cb0d32104e2325e1abc6cf7f0aba8fd81a3ca7bfffa16f17302e",
    "generatedAt": "2025-11-28T10:18:58.126193Z",
    "toolVersion": "publish_plugins.py@0.2.0"
  },
  "origin": {
    "remote": "git@github.com:zhongweili/42plugin-data.git",
    "branch": "master",
    "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
    "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
  },
  "manifest": {
    "name": "cloudflare-hyperdrive",
    "description": "Connect Workers to PostgreSQL/MySQL with Hyperdrive's global pooling and caching. Use when: connecting to existing databases, setting up connection pools, using node-postgres/mysql2, integrating Drizzle/Prisma, or troubleshooting pool acquisition failures, TLS errors, nodejs_compat missing, or eval() disallowed.",
    "version": "1.0.0"
  },
  "content": {
    "files": [
      { "path": "README.md", "sha256": "f4cb9169b51151602038b19bfb6960dc18be096b4b43c283e441b332218a8700" },
      { "path": "SKILL.md", "sha256": "3ad52b7ae84ab58bbacf6297f469a8897a5ca6aebb44c47440b551c0ca092847" },
      { "path": "references/supported-databases.md", "sha256": "da1e442b841c78c872956321ad6dae2169d42ee96d93fa616483ea6d99f4b2f6" },
      { "path": "references/troubleshooting.md", "sha256": "95035a78b47f910bf373c476f6411bae72bce6b8d21b8d93a39894e2655d0061" },
      { "path": "references/prisma-integration.md", "sha256": "4873a3757b6b154a297d020396ecba8f20cf5cd8df2171ea3cccaa44df511949" },
      { "path": "references/connection-pooling.md", "sha256": "96b0d771621a835c42fb130d2ae84dd743b41d52b4e35e902c2e725f940ec9fb" },
      { "path": "references/query-caching.md", "sha256": "592506fc94117bb5fd49242ac1bbdcf177ef12fe4cfc672dfd25a8775adca32d" },
      { "path": "references/wrangler-commands.md", "sha256": "a541b2a1ff3b784ec6b99784085f8b6cc3aacb49cb21cebf4e260eeee17ee719" },
      { "path": "references/drizzle-integration.md", "sha256": "66982aa7d1d9d2dc435fff5ce8a97e9d84b93cfc6dcf7ed6e668bec696188f8e" },
      { "path": "references/tls-ssl-setup.md", "sha256": "fc3f814f0402feb0e17145023e9c4d5e4ba43fab6ddee331fbb012a826d74da0" },
      { "path": "scripts/check-versions.sh", "sha256": "22cfbd325fa2049de9e7af424b3158ae96bacce61b9e007f7882746711ce97ce" },
      { "path": ".claude-plugin/plugin.json", "sha256": "285397079c5383f144bee85133c7ae4c9b712c818643ce1e5f1a73ac97d7c24e" },
      { "path": "templates/wrangler-hyperdrive-config.jsonc", "sha256": "be40d9b5abe5ca431be344a42fd74680bd07d9ae230139407a36579a5beab3be" },
      { "path": "templates/drizzle-mysql.ts", "sha256": "f7e7c88b851073095ee2aa82c52e5c60bd175507aed8a373d0ce338c4f86ef6b" },
      { "path": "templates/postgres-js.ts", "sha256": "fd96acf57020f5a7dbfa3c2bc4cb40e6649a8ffc9d666e8730b025029199f9ac" },
      { "path": "templates/postgres-basic.ts", "sha256": "1759d311514b8cf4e50d99a26968202d4b57d9622b357a6f4a6fc0fd88ad0dd5" },
      { "path": "templates/postgres-pool.ts", "sha256": "e62c5594d38be643c7bfa98d3a6e260a3a6be4d7397a405d3cfe942b4eb2e821" },
      { "path": "templates/drizzle-postgres.ts", "sha256": "0e604001a9d6f4f1fbeaede6e1d2e9095d9be5f2a8292b2a7d662b25f4baf3ac" },
      { "path": "templates/prisma-postgres.ts", "sha256": "2f78d1c52b64f4f90cf8f2cb5adc2cd66ba58388395b0e64effffc689aedd70a" },
      { "path": "templates/local-dev-setup.sh", "sha256": "331b72f4127219485ff195b06dc916764b01541aa308c2d4bdbdbaeaecb88212" },
      { "path": "templates/mysql2-basic.ts", "sha256": "550aac67cb5d24c0614b62d640b242564d2c050897dd67e6a8ace09b4e46e347" }
    ],
    "dirSha256": "e4823604cd08cb0d32104e2325e1abc6cf7f0aba8fd81a3ca7bfffa16f17302e"
  },
  "security": {
    "scannedAt": null,
    "scannerVersion": null,
    "flags": []
  }
}

references/connection-pooling.md (new file, 521 lines)
@@ -0,0 +1,521 @@
# Connection Pooling Guide

Complete guide to connection pooling patterns with Cloudflare Hyperdrive.

---

## Overview

**Why Pooling Matters**:
- Workers have a limit of **6 concurrent external connections**
- Database drivers create new connections for each operation
- Connection pooling reuses connections across queries
- Hyperdrive provides global connection pooling near your database

---

## Workers Connection Limit

### The 6 Connection Rule

Cloudflare Workers can maintain a **maximum of 6 concurrent external TCP connections** (this includes database connections).

**What counts toward the limit**:
- ✅ Database connections (pg.Client, pg.Pool, mysql2)
- ✅ HTTP requests to external APIs
- ✅ WebSocket connections
- ✅ Any TCP socket connection

**What doesn't count**:
- ❌ Requests to Cloudflare services (D1, KV, R2, Workers AI)
- ❌ Requests within the Cloudflare network
- ❌ HTTP requests to other Workers

**Source**: [Workers Platform Limits](https://developers.cloudflare.com/workers/platform/limits)

---

## Connection Pool Sizing

### Rule of Thumb

**Set `max: 5` for connection pools** to stay within Workers' 6 connection limit.

```typescript
// PostgreSQL (node-postgres)
const pool = new Pool({
  connectionString: env.HYPERDRIVE.connectionString,
  max: 5 // Leave 1 connection for other operations
});

// PostgreSQL (postgres.js)
const sql = postgres(env.HYPERDRIVE.connectionString, {
  max: 5
});
```

### Why Not 6?

- Reserve 1 connection for other operations
- Prevents hitting connection limit errors
- Allows concurrent database + API requests
- Safety margin for connection acquisition

---

## Connection Patterns

### Pattern 1: Single Connection (pg.Client)

**Use Case**: Simple queries, one query per request

```typescript
import { Client } from "pg";

export default {
  async fetch(request, env, ctx) {
    const client = new Client({
      connectionString: env.HYPERDRIVE.connectionString
    });

    await client.connect();

    try {
      const result = await client.query('SELECT * FROM users');
      return Response.json(result.rows);
    } finally {
      ctx.waitUntil(client.end());
    }
  }
};
```

**Pros**:
- Simple, straightforward
- One connection per request
- Easy to reason about

**Cons**:
- Can't run parallel queries
- Creates a new connection for each request (mitigated by Hyperdrive)

**When to use**:
- Single query per request
- No parallel operations needed
- Simple CRUD operations

---

### Pattern 2: Connection Pool (pg.Pool)

**Use Case**: Multiple parallel queries in a single request

```typescript
import { Pool } from "pg";

export default {
  async fetch(request, env, ctx) {
    const pool = new Pool({
      connectionString: env.HYPERDRIVE.connectionString,
      max: 5,
      idleTimeoutMillis: 30000,
      connectionTimeoutMillis: 10000
    });

    try {
      // Run parallel queries
      const [users, posts, stats] = await Promise.all([
        pool.query('SELECT * FROM users'),
        pool.query('SELECT * FROM posts'),
        pool.query('SELECT COUNT(*) FROM comments')
      ]);

      return Response.json({ users: users.rows, posts: posts.rows, stats: stats.rows });
    } finally {
      ctx.waitUntil(pool.end());
    }
  }
};
```

**Pros**:
- Parallel queries possible
- Better performance for multiple operations
- Efficient connection reuse

**Cons**:
- More complex
- Must manage pool lifecycle
- Must set max correctly

**When to use**:
- Multiple queries per request
- Parallel operations needed
- Complex data fetching

---

### Pattern 3: Reusable Pool (Advanced)

**⚠️ Advanced Pattern**: Create the pool once, reuse it across requests.

```typescript
import { Pool } from "pg";

// Create pool outside handler (reused across requests)
let pool: Pool | null = null;

function getPool(connectionString: string): Pool {
  if (!pool) {
    pool = new Pool({
      connectionString,
      max: 5,
      idleTimeoutMillis: 60000
    });
  }
  return pool;
}

export default {
  async fetch(request, env, ctx) {
    const pool = getPool(env.HYPERDRIVE.connectionString);

    const result = await pool.query('SELECT * FROM users');
    return Response.json(result.rows);

    // NOTE: Don't call pool.end() here - the pool is reused
  }
};
```

**Pros**:
- Maximum connection reuse
- Best performance
- No connection overhead

**Cons**:
- Global state (not recommended for Workers)
- Harder to debug
- Complex connection lifecycle management

**When to use**:
- High-traffic applications
- Performance-critical paths
- When you understand the Workers execution model well

**⚠️ Caution**: Workers may be restarted, and the pool will be recreated. This pattern works but requires careful testing.

---

## Connection Pool Configuration

### pg.Pool Options

```typescript
import { Pool } from "pg";

const pool = new Pool({
  // Connection string (Hyperdrive)
  connectionString: env.HYPERDRIVE.connectionString,

  // Maximum connections in pool (CRITICAL: set to 5)
  max: 5,

  // Close idle connections after 30 seconds
  idleTimeoutMillis: 30000,

  // Timeout when acquiring connection (10 seconds)
  connectionTimeoutMillis: 10000,

  // Allow exiting process if no connections active
  allowExitOnIdle: false
});
```

### postgres.js Options

```typescript
import postgres from "postgres";

const sql = postgres(env.HYPERDRIVE.connectionString, {
  // Maximum connections
  max: 5,

  // Idle connection timeout (seconds)
  idle_timeout: 30,

  // Connection timeout (seconds)
  connect_timeout: 10,

  // Prepared statements (CRITICAL for caching)
  prepare: true
});
```

### mysql2 Options

```typescript
import mysql from "mysql2/promise";

const pool = mysql.createPool({
  host: env.HYPERDRIVE.host,
  user: env.HYPERDRIVE.user,
  password: env.HYPERDRIVE.password,
  database: env.HYPERDRIVE.database,
  port: env.HYPERDRIVE.port,

  // Maximum connections
  connectionLimit: 5,

  // Queue if pool exhausted
  queueLimit: 0,

  // Required for Workers
  disableEval: true
});
```

---

## Connection Cleanup

### The ctx.waitUntil() Pattern

**CRITICAL**: Always use `ctx.waitUntil()` to clean up connections AFTER the response is sent.

```typescript
export default {
  async fetch(request, env, ctx) {
    const client = new Client({ connectionString: env.HYPERDRIVE.connectionString });
    await client.connect();

    try {
      const result = await client.query('SELECT ...');
      return Response.json(result.rows); // Response sent here
    } finally {
      // Connection closed AFTER response sent (non-blocking)
      ctx.waitUntil(client.end());
    }
  }
};
```

**Why**:
- Allows the Worker to return the response immediately
- Connection cleanup happens in the background
- Prevents adding latency to response time
- Prevents connection leaks

**DON'T do this**:
```typescript
await client.end(); // ❌ Blocks response, adds latency
```

---

## Transaction Management

### Transactions and Connection Pooling

**Problem**: Transactions hold connections for their duration, limiting pool availability.

**Impact on Pooling**:
```typescript
// ❌ Bad: Long transaction holds connection
const client = await pool.connect();
await client.query('BEGIN');
// ... many queries (connection held) ...
await client.query('COMMIT');
client.release();
```

**Better Approach**:
```typescript
// ✅ Good: Short transactions
const client = await pool.connect();
try {
  await client.query('BEGIN');
  await client.query('INSERT ...');
  await client.query('UPDATE ...');
  await client.query('COMMIT');
} catch (error) {
  await client.query('ROLLBACK');
  throw error;
} finally {
  client.release(); // Return connection to pool quickly
}
```

**Best Practice**:
- Keep transactions as short as possible
- Avoid holding connections during I/O operations
- Release connections back to the pool immediately after the transaction

---

## Monitoring Connection Usage

### Check Active Connections

```typescript
// pg.Pool
console.log('Total clients:', pool.totalCount);
console.log('Idle clients:', pool.idleCount);
console.log('Waiting clients:', pool.waitingCount);

// postgres.js
// No built-in monitoring (check database side)
```

### Database-Side Monitoring

**PostgreSQL**:
```sql
SELECT count(*) FROM pg_stat_activity WHERE datname = 'mydb';
```

**MySQL**:
```sql
SHOW STATUS LIKE 'Threads_connected';
```

---

## Connection Pool Exhaustion

### Symptoms

**Error**: `Failed to acquire a connection from the pool`

**Causes**:
1. Too many concurrent requests
2. Long-running queries holding connections
3. Transactions not releasing connections
4. Connection leaks (not calling `client.end()`)

### Solutions

**1. Use ctx.waitUntil() for cleanup**:
```typescript
ctx.waitUntil(client.end());
```

**2. Set connection timeouts**:
```typescript
const pool = new Pool({
  max: 5,
  connectionTimeoutMillis: 10000 // Fail fast if can't acquire
});
```

**3. Keep transactions short**:
```typescript
// Minimize time between BEGIN and COMMIT
```

**4. Monitor pool metrics**:
```typescript
console.log('Pool stats:', {
  total: pool.totalCount,
  idle: pool.idleCount,
  waiting: pool.waitingCount
});
```

---

## Hyperdrive's Global Pooling

### How Hyperdrive Helps

**Without Hyperdrive**:
- Each Worker request creates a new connection
- 7 round trips for each connection (TCP + TLS + auth)
- High latency, many connections to the database

**With Hyperdrive**:
- Global connection pool near your database
- Connections reused across Workers globally
- Only 1 round trip (Worker to Hyperdrive edge)
- Fewer connections to the database

**Result**:
- Lower latency
- Fewer connections to the database
- Better scalability

### Hyperdrive + Local Pooling

**Best of both worlds**:
```typescript
// Local pool (max: 5) for parallel queries in a single request
const pool = new Pool({ max: 5, connectionString: env.HYPERDRIVE.connectionString });

// Hyperdrive's global pool handles reuse across Workers globally
```

**Benefits**:
- Parallel queries within a request (local pool)
- Connection reuse across Workers (Hyperdrive)
- Optimal performance

---

## Best Practices

1. **Always set max: 5** for connection pools
2. **Use ctx.waitUntil()** for connection cleanup
3. **Keep transactions short** to free connections quickly
4. **Use connection pools** for parallel queries, single connections otherwise
5. **Monitor connection usage** in development
6. **Set connection timeouts** to fail fast
7. **Release connections immediately** after use
8. **Avoid holding connections** during I/O operations

---

## Common Mistakes

❌ **Mistake 1**: Setting max > 5
```typescript
const pool = new Pool({ max: 10 }); // Exceeds Workers' limit
```

❌ **Mistake 2**: Not cleaning up connections
```typescript
const client = new Client(...);
await client.connect();
const result = await client.query('SELECT ...');
return Response.json(result.rows);
// Connection leak! No client.end()
```

❌ **Mistake 3**: Blocking the response with cleanup
```typescript
await client.end(); // Adds latency to response
```

❌ **Mistake 4**: Long transactions
```typescript
await client.query('BEGIN');
await fetch('https://api.example.com'); // Holding connection during HTTP request!
await client.query('COMMIT');
```

✅ **Correct Pattern**:
```typescript
const pool = new Pool({ max: 5, connectionString: env.HYPERDRIVE.connectionString });

try {
  const [users, posts] = await Promise.all([
    pool.query('SELECT * FROM users'),
    pool.query('SELECT * FROM posts')
  ]);
  return Response.json({ users: users.rows, posts: posts.rows });
} finally {
  ctx.waitUntil(pool.end());
}
```

---

## References

- [Workers Platform Limits](https://developers.cloudflare.com/workers/platform/limits)
- [node-postgres Pooling](https://node-postgres.com/apis/pool)
- [postgres.js Connection Options](https://github.com/porsager/postgres)
- [How Hyperdrive Works](https://developers.cloudflare.com/hyperdrive/configuration/how-hyperdrive-works/)

references/drizzle-integration.md (new file, 577 lines)
@@ -0,0 +1,577 @@
# Drizzle ORM Integration Guide
|
||||||
|
|
||||||
|
Complete guide to using Drizzle ORM with Cloudflare Hyperdrive.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
**Drizzle ORM** is a lightweight, TypeScript-first ORM with excellent type safety and performance.
|
||||||
|
|
||||||
|
**Why Drizzle + Hyperdrive?**
|
||||||
|
- ✅ Type-safe queries with full TypeScript support
|
||||||
|
- ✅ Zero runtime overhead (SQL is generated at build time)
|
||||||
|
- ✅ Works with both PostgreSQL and MySQL via Hyperdrive
|
||||||
|
- ✅ Simpler than Prisma (no code generation step in Worker)
|
||||||
|
- ✅ Better performance than traditional ORMs
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
### PostgreSQL
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Drizzle ORM + postgres.js driver
|
||||||
|
npm install drizzle-orm postgres
|
||||||
|
|
||||||
|
# Dev dependencies
|
||||||
|
npm install -D drizzle-kit @types/node
|
||||||
|
```
|
||||||
|
|
||||||
|
### MySQL
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Drizzle ORM + mysql2 driver
|
||||||
|
npm install drizzle-orm mysql2
|
||||||
|
|
||||||
|
# Dev dependencies
|
||||||
|
npm install -D drizzle-kit @types/node
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## PostgreSQL Setup
|
||||||
|
|
||||||
|
### 1. Define Schema
|
||||||
|
|
||||||
|
Create `src/db/schema.ts`:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { pgTable, serial, varchar, text, timestamp, boolean } from "drizzle-orm/pg-core";
|
||||||
|
|
||||||
|
export const users = pgTable("users", {
|
||||||
|
id: serial("id").primaryKey(),
|
||||||
|
name: varchar("name", { length: 255 }).notNull(),
|
||||||
|
email: varchar("email", { length: 255 }).notNull().unique(),
|
||||||
|
createdAt: timestamp("created_at").defaultNow(),
|
||||||
|
updatedAt: timestamp("updated_at").defaultNow(),
|
||||||
|
});
|
||||||
|
|
||||||
|
export const posts = pgTable("posts", {
|
||||||
|
id: serial("id").primaryKey(),
|
||||||
|
title: varchar("title", { length: 255 }).notNull(),
|
||||||
|
content: text("content"),
|
||||||
|
published: boolean("published").default(false),
|
||||||
|
authorId: serial("author_id").references(() => users.id),
|
||||||
|
createdAt: timestamp("created_at").defaultNow(),
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Use in Worker
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { drizzle } from "drizzle-orm/postgres-js";
|
||||||
|
import postgres from "postgres";
|
||||||
|
import { users, posts } from "./db/schema";
|
||||||
|
import { eq } from "drizzle-orm";
|
||||||
|
|
||||||
|
type Bindings = {
|
||||||
|
HYPERDRIVE: Hyperdrive;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default {
|
||||||
|
async fetch(request: Request, env: Bindings, ctx: ExecutionContext) {
|
||||||
|
// Create postgres.js connection
|
||||||
|
const sql = postgres(env.HYPERDRIVE.connectionString, {
|
||||||
|
max: 5,
|
||||||
|
prepare: true, // CRITICAL for caching
|
||||||
|
fetch_types: false // Disable if not using array types
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create Drizzle client
|
||||||
|
const db = drizzle(sql);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// INSERT
|
||||||
|
const [newUser] = await db.insert(users).values({
|
||||||
|
name: "John Doe",
|
||||||
|
email: `john.${Date.now()}@example.com`
|
||||||
|
}).returning();
|
||||||
|
|
||||||
|
// SELECT
|
||||||
|
const allUsers = await db.select().from(users);
|
||||||
|
|
||||||
|
// WHERE
|
||||||
|
const user = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.where(eq(users.email, "john@example.com"));
|
||||||
|
|
||||||
|
// JOIN
|
||||||
|
const usersWithPosts = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.leftJoin(posts, eq(users.id, posts.authorId));
|
||||||
|
|
||||||
|
// UPDATE
|
||||||
|
await db.update(users)
|
||||||
|
.set({ name: "Jane Doe" })
|
||||||
|
.where(eq(users.id, newUser.id));
|
||||||
|
|
||||||
|
// DELETE
|
||||||
|
// await db.delete(users).where(eq(users.id, 123));
|
||||||
|
|
||||||
|
return Response.json({
|
||||||
|
newUser,
|
||||||
|
allUsers,
|
||||||
|
user,
|
||||||
|
usersWithPosts
|
||||||
|
});
|
||||||
|
} finally {
|
||||||
|
ctx.waitUntil(sql.end());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Configure Drizzle Kit (Migrations)
|
||||||
|
|
||||||
|
Create `drizzle.config.ts` in project root:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import 'dotenv/config';
|
||||||
|
import { defineConfig } from 'drizzle-kit';
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
out: './drizzle',
|
||||||
|
schema: './src/db/schema.ts',
|
||||||
|
dialect: 'postgresql',
|
||||||
|
dbCredentials: {
|
||||||
|
url: process.env.DATABASE_URL!,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
Create `.env` file (for migrations only, NOT used in Worker):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Direct connection to database (for migrations)
|
||||||
|
DATABASE_URL="postgres://user:password@host:5432/database"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. Run Migrations
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate migration files from schema
|
||||||
|
npx drizzle-kit generate
|
||||||
|
|
||||||
|
# Apply migrations to database
|
||||||
|
npx drizzle-kit migrate
|
||||||
|
|
||||||
|
# Push schema directly (no migration files)
|
||||||
|
npx drizzle-kit push
|
||||||
|
```
|
||||||
|
|
||||||
|
**Generated SQL** (in `drizzle/` folder):
|
||||||
|
```sql
|
||||||
|
-- drizzle/0000_initial.sql
|
||||||
|
CREATE TABLE IF NOT EXISTS "users" (
|
||||||
|
"id" serial PRIMARY KEY NOT NULL,
|
||||||
|
"name" varchar(255) NOT NULL,
|
||||||
|
"email" varchar(255) NOT NULL,
|
||||||
|
"created_at" timestamp DEFAULT now(),
|
||||||
|
"updated_at" timestamp DEFAULT now(),
|
||||||
|
CONSTRAINT "users_email_unique" UNIQUE("email")
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS "posts" (
|
||||||
|
"id" serial PRIMARY KEY NOT NULL,
|
||||||
|
"title" varchar(255) NOT NULL,
|
||||||
|
"content" text,
|
||||||
|
"published" boolean DEFAULT false,
|
||||||
|
"author_id" serial NOT NULL,
|
||||||
|
"created_at" timestamp DEFAULT now()
|
||||||
|
);
|
||||||
|
|
||||||
|
ALTER TABLE "posts" ADD CONSTRAINT "posts_author_id_users_id_fk"
|
||||||
|
FOREIGN KEY ("author_id") REFERENCES "users"("id");
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MySQL Setup
|
||||||
|
|
||||||
|
### 1. Define Schema
|
||||||
|
|
||||||
|
Create `src/db/schema.ts`:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { mysqlTable, int, varchar, text, timestamp, boolean } from "drizzle-orm/mysql-core";
|
||||||
|
|
||||||
|
export const users = mysqlTable("users", {
|
||||||
|
id: int("id").primaryKey().autoincrement(),
|
||||||
|
name: varchar("name", { length: 255 }).notNull(),
|
||||||
|
email: varchar("email", { length: 255 }).notNull(),
|
||||||
|
createdAt: timestamp("created_at").defaultNow(),
|
||||||
|
updatedAt: timestamp("updated_at").defaultNow(),
|
||||||
|
});
|
||||||
|
|
||||||
|
export const posts = mysqlTable("posts", {
|
||||||
|
id: int("id").primaryKey().autoincrement(),
|
||||||
|
title: varchar("title", { length: 255 }).notNull(),
|
||||||
|
content: text("content"),
|
||||||
|
published: boolean("published").default(false),
|
||||||
|
authorId: int("author_id").references(() => users.id),
|
||||||
|
createdAt: timestamp("created_at").defaultNow(),
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Use in Worker
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { drizzle } from "drizzle-orm/mysql2";
|
||||||
|
import { createConnection } from "mysql2";
|
||||||
|
import { users, posts } from "./db/schema";
|
||||||
|
import { eq } from "drizzle-orm";
|
||||||
|
|
||||||
|
type Bindings = {
|
||||||
|
HYPERDRIVE: Hyperdrive;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default {
|
||||||
|
async fetch(request: Request, env: Bindings, ctx: ExecutionContext) {
|
||||||
|
// Create mysql2 connection
|
||||||
|
const connection = createConnection({
|
||||||
|
host: env.HYPERDRIVE.host,
|
||||||
|
user: env.HYPERDRIVE.user,
|
||||||
|
password: env.HYPERDRIVE.password,
|
||||||
|
database: env.HYPERDRIVE.database,
|
||||||
|
port: env.HYPERDRIVE.port,
|
||||||
|
disableEval: true // REQUIRED for Workers
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create Drizzle client
|
||||||
|
const db = drizzle(connection);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// INSERT
|
||||||
|
await db.insert(users).values({
|
||||||
|
name: "John Doe",
|
||||||
|
email: `john.${Date.now()}@example.com`
|
||||||
|
});
|
||||||
|
|
||||||
|
// SELECT
|
||||||
|
const allUsers = await db.select().from(users);
|
||||||
|
|
||||||
|
// WHERE
|
||||||
|
const user = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.where(eq(users.id, 1));
|
||||||
|
|
||||||
|
return Response.json({ allUsers, user });
|
||||||
|
} finally {
|
||||||
|
ctx.waitUntil(
|
||||||
|
new Promise<void>((resolve) => {
|
||||||
|
connection.end(() => resolve());
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Configure Drizzle Kit (MySQL)
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import 'dotenv/config';
|
||||||
|
import { defineConfig } from 'drizzle-kit';
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
out: './drizzle',
|
||||||
|
schema: './src/db/schema.ts',
|
||||||
|
dialect: 'mysql',
|
||||||
|
dbCredentials: {
|
||||||
|
url: process.env.DATABASE_URL!,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Common Query Patterns
|
||||||
|
|
||||||
|
### Select Queries
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// All rows
|
||||||
|
const users = await db.select().from(users);
|
||||||
|
|
||||||
|
// With WHERE
|
||||||
|
const user = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.where(eq(users.id, 1));
|
||||||
|
|
||||||
|
// Multiple conditions (AND)
|
||||||
|
import { and } from "drizzle-orm";
|
||||||
|
const activeUsers = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.where(and(
|
||||||
|
eq(users.active, true),
|
||||||
|
gt(users.createdAt, new Date('2024-01-01'))
|
||||||
|
));
|
||||||
|
|
||||||
|
// Multiple conditions (OR)
|
||||||
|
import { or } from "drizzle-orm";
|
||||||
|
const result = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.where(or(
|
||||||
|
eq(users.role, 'admin'),
|
||||||
|
eq(users.role, 'moderator')
|
||||||
|
));
|
||||||
|
|
||||||
|
// Limit & Offset
|
||||||
|
const recentUsers = await db.select()
|
||||||
|
.from(users)
|
||||||
|
.orderBy(users.createdAt)
|
||||||
|
.limit(10)
|
||||||
|
.offset(20);
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Insert Queries
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Single insert
|
||||||
|
await db.insert(users).values({
|
||||||
|
name: "John",
|
||||||
|
email: "john@example.com"
|
||||||
|
});
|
||||||
|
|
||||||
|
// Multiple inserts
|
||||||
|
await db.insert(users).values([
|
||||||
|
{ name: "Alice", email: "alice@example.com" },
|
||||||
|
{ name: "Bob", email: "bob@example.com" }
|
||||||
|
]);
|
||||||
|
|
||||||
|
// Insert with RETURNING (PostgreSQL only)
|
||||||
|
const [newUser] = await db.insert(users).values({
|
||||||
|
name: "John",
|
||||||
|
email: "john@example.com"
|
||||||
|
}).returning();
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Update Queries
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Update
|
||||||
|
await db.update(users)
|
||||||
|
.set({ name: "Jane Doe" })
|
||||||
|
.where(eq(users.id, 1));
|
||||||
|
|
||||||
|
// Update with RETURNING (PostgreSQL)
|
||||||
|
const [updatedUser] = await db.update(users)
|
||||||
|
.set({ name: "Jane Doe" })
|
||||||
|
.where(eq(users.id, 1))
|
||||||
|
.returning();
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Delete Queries
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Delete
|
||||||
|
await db.delete(users)
|
||||||
|
.where(eq(users.id, 1));
|
||||||
|
|
||||||
|
// Delete with RETURNING (PostgreSQL)
|
||||||
|
const [deletedUser] = await db.delete(users)
|
||||||
|
.where(eq(users.id, 1))
|
||||||
|
.returning();
|
||||||
|
```

---

### Joins

```typescript
import { eq } from "drizzle-orm";

// Left join
const usersWithPosts = await db.select()
  .from(users)
  .leftJoin(posts, eq(users.id, posts.authorId));

// Inner join
const usersWithPublishedPosts = await db.select()
  .from(users)
  .innerJoin(posts, eq(users.id, posts.authorId))
  .where(eq(posts.published, true));

// Select specific columns
const result = await db.select({
  userName: users.name,
  postTitle: posts.title
})
  .from(users)
  .leftJoin(posts, eq(users.id, posts.authorId));
```

---

### Aggregations

```typescript
import { count, sum, avg } from "drizzle-orm";

// Count
const [{ total }] = await db.select({
  total: count()
}).from(users);

// Count with GROUP BY
const postCounts = await db.select({
  authorId: posts.authorId,
  count: count()
})
  .from(posts)
  .groupBy(posts.authorId);
```

---

## Relations

### Define Relations

```typescript
import { relations } from "drizzle-orm";

export const usersRelations = relations(users, ({ many }) => ({
  posts: many(posts),
}));

export const postsRelations = relations(posts, ({ one }) => ({
  author: one(users, {
    fields: [posts.authorId],
    references: [users.id],
  }),
}));
```

### Query Relations

```typescript
// Include related data
const usersWithPosts = await db.query.users.findMany({
  with: {
    posts: true
  }
});

// Nested relations
const usersWithPublishedPosts = await db.query.users.findMany({
  with: {
    posts: {
      where: eq(posts.published, true)
    }
  }
});
```

---

## TypeScript Types

### Infer Types from Schema

```typescript
import { users, posts } from "./db/schema";
import type { InferSelectModel, InferInsertModel } from "drizzle-orm";

// Select types (what you get from SELECT queries)
type User = InferSelectModel<typeof users>;
type Post = InferSelectModel<typeof posts>;

// Insert types (what you need for INSERT queries)
type NewUser = InferInsertModel<typeof users>;
type NewPost = InferInsertModel<typeof posts>;

// Usage (the matching row may not exist, so the type is User | undefined)
const user: User | undefined = await db.select()
  .from(users)
  .where(eq(users.id, 1))
  .then(rows => rows[0]);

const newUser: NewUser = {
  name: "John",
  email: "john@example.com"
};
await db.insert(users).values(newUser);
```

---

## Best Practices

1. **Use prepared statements** (postgres.js: `prepare: true`)
2. **Set `max: 5`** for connection pools (Workers allow 6 concurrent connections)
3. **Use `ctx.waitUntil()`** for cleanup (see the sketch below)
4. **Define types** from schema with `InferSelectModel`
5. **Use relations** for complex queries instead of manual joins
6. **Run migrations** outside of the Worker (use the drizzle-kit CLI)
7. **Use `.env` for migrations** (`DATABASE_URL`), not in the Worker
8. **Version control migrations** in the `drizzle/` folder
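
To make items 2 and 3 concrete, here is a minimal sketch of Drizzle wired to Hyperdrive inside a Worker's `fetch` handler. It assumes the `HYPERDRIVE` binding from the quick start and a `users` table exported from `./db/schema`; the route logic is illustrative only, not a prescribed pattern.

```typescript
import { drizzle } from "drizzle-orm/node-postgres";
import { Pool } from "pg";
import { users } from "./db/schema";

export default {
  async fetch(request: Request, env: { HYPERDRIVE: Hyperdrive }, ctx: ExecutionContext): Promise<Response> {
    // Pool sized below the Workers limit of 6 concurrent connections
    const pool = new Pool({
      connectionString: env.HYPERDRIVE.connectionString,
      max: 5,
    });
    const db = drizzle(pool);

    try {
      const rows = await db.select().from(users).limit(10);
      return Response.json(rows);
    } finally {
      // Clean up connections without blocking the response
      ctx.waitUntil(pool.end());
    }
  },
};
```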

---

## Drizzle vs Raw SQL

### Drizzle ORM

**Pros**:
- ✅ Type-safe queries
- ✅ Auto-completion in IDE
- ✅ Compile-time error checking
- ✅ Easy migrations with drizzle-kit
- ✅ Relational queries with `db.query`

**Cons**:
- ❌ Slight learning curve
- ❌ More setup than raw SQL

### Raw SQL

**Pros**:
- ✅ Full SQL control
- ✅ Simpler for simple queries
- ✅ No ORM overhead

**Cons**:
- ❌ No type safety
- ❌ Manual type definitions
- ❌ More error-prone

**Recommendation**: Use Drizzle for type safety and developer experience.
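
A short side-by-side makes the trade-off visible. A sketch, reusing `pool`, `db`, `users`, and `eq` from the earlier examples (the row type and query are illustrative):

```typescript
// Raw SQL: the row type must be written (and kept in sync) by hand
type UserRow = { id: number; name: string; email: string };
const raw = await pool.query<UserRow>(
  'SELECT id, name, email FROM users WHERE id = $1',
  [1]
);
const rawUser = raw.rows[0]; // UserRow | undefined, trusted blindly

// Drizzle: the row type is inferred from the schema and checked at compile time
const [typedUser] = await db.select()
  .from(users)
  .where(eq(users.id, 1)); // a typo'd column name fails to compile
```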

---

## References

- [Drizzle ORM Docs](https://orm.drizzle.team/docs/overview)
- [Drizzle PostgreSQL Guide](https://orm.drizzle.team/docs/get-started-postgresql)
- [Drizzle MySQL Guide](https://orm.drizzle.team/docs/get-started-mysql)
- [Drizzle Kit Docs](https://orm.drizzle.team/kit-docs/overview)
- [Hyperdrive Drizzle Example](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-drivers-and-libraries/drizzle-orm/)
642
references/prisma-integration.md
Normal file
@@ -0,0 +1,642 @@
# Prisma ORM Integration Guide

Complete guide to using Prisma ORM with Cloudflare Hyperdrive.

---

## Overview

**Prisma ORM** is a popular Node.js and TypeScript ORM focused on type safety and developer experience.

**Why Prisma + Hyperdrive?**
- ✅ Excellent TypeScript support and auto-completion
- ✅ Powerful migrations and schema management
- ✅ Intuitive API with `.findMany()`, `.create()`, etc.
- ✅ Works with Hyperdrive via driver adapters

**CRITICAL**: Prisma requires **driver adapters** (`@prisma/adapter-pg`) to work with Hyperdrive.
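
In practice the adapter is a thin layer between a standard `pg` pool and `PrismaClient`. A minimal sketch of the wiring (the complete Worker example appears in "Use in Worker" below; the helper name is illustrative):

```typescript
import { PrismaPg } from "@prisma/adapter-pg";
import { PrismaClient } from "@prisma/client";
import { Pool } from "pg";

// pg connects through Hyperdrive; Prisma talks to the adapter
// instead of opening its own database connection.
function createPrisma(env: { HYPERDRIVE: Hyperdrive }) {
  const pool = new Pool({ connectionString: env.HYPERDRIVE.connectionString, max: 5 });
  return new PrismaClient({ adapter: new PrismaPg(pool) });
}
```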

---

## Installation

```bash
# Prisma CLI and client
npm install prisma @prisma/client

# PostgreSQL driver and adapter
npm install pg @prisma/adapter-pg

# TypeScript types for pg
npm install -D @types/pg
```

---

## Setup

### 1. Initialize Prisma

```bash
npx prisma init
```

This creates:
- `prisma/` directory
- `prisma/schema.prisma` file
- `.env` file
---

### 2. Configure Schema

Edit `prisma/schema.prisma`:

```prisma
generator client {
  provider        = "prisma-client-js"
  previewFeatures = ["driverAdapters"] // REQUIRED for Hyperdrive
}

datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

model User {
  id        Int      @id @default(autoincrement())
  name      String
  email     String   @unique
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt
  posts     Post[]
}

model Post {
  id        Int      @id @default(autoincrement())
  title     String
  content   String?
  published Boolean  @default(false)
  authorId  Int
  author    User     @relation(fields: [authorId], references: [id])
  createdAt DateTime @default(now())
}
```

---

### 3. Set Database URL

Edit `.env` (for migrations only, NOT used in the Worker):

```bash
# Direct connection to database (for migrations)
DATABASE_URL="postgres://user:password@host:5432/database"
```

**Important**: This `.env` file is only for running migrations locally. The Worker gets its connection string from the Hyperdrive binding.
---

### 4. Generate Prisma Client

```bash
npx prisma generate --no-engine
```

**CRITICAL**: Use the `--no-engine` flag for Workers compatibility.

This generates the Prisma Client in `node_modules/@prisma/client`.
---

### 5. Run Migrations

```bash
# Create and apply migration
npx prisma migrate dev --name init

# Or apply existing migrations
npx prisma migrate deploy
```

**Generated SQL** (in `prisma/migrations/` folder):
```sql
-- CreateTable
CREATE TABLE "User" (
    "id" SERIAL NOT NULL,
    "name" TEXT NOT NULL,
    "email" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "Post" (
    "id" SERIAL NOT NULL,
    "title" TEXT NOT NULL,
    "content" TEXT,
    "published" BOOLEAN NOT NULL DEFAULT false,
    "authorId" INTEGER NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "User_email_key" ON "User"("email");

-- AddForeignKey
ALTER TABLE "Post" ADD CONSTRAINT "Post_authorId_fkey"
    FOREIGN KEY ("authorId") REFERENCES "User"("id")
    ON DELETE RESTRICT ON UPDATE CASCADE;
```
---

## Use in Worker

```typescript
import { PrismaPg } from "@prisma/adapter-pg";
import { PrismaClient } from "@prisma/client";
import { Pool } from "pg";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(request: Request, env: Bindings, ctx: ExecutionContext): Promise<Response> {
    // Create pg.Pool for driver adapter
    const pool = new Pool({
      connectionString: env.HYPERDRIVE.connectionString,
      max: 5 // CRITICAL: Workers limit is 6 concurrent connections
    });

    // Create Prisma driver adapter
    const adapter = new PrismaPg(pool);

    // Create Prisma client with adapter
    const prisma = new PrismaClient({ adapter });

    try {
      // Create user
      const newUser = await prisma.user.create({
        data: {
          name: "John Doe",
          email: `john.${Date.now()}@example.com`
        }
      });

      // Find all users
      const allUsers = await prisma.user.findMany();

      // Find user by email
      const user = await prisma.user.findUnique({
        where: { email: "john@example.com" }
      });

      // Update user
      await prisma.user.update({
        where: { id: newUser.id },
        data: { name: "Jane Doe" }
      });

      // Create post with relation
      await prisma.post.create({
        data: {
          title: "My First Post",
          content: "Hello World!",
          published: true,
          authorId: newUser.id
        }
      });

      // Find users with posts (include relation)
      const usersWithPosts = await prisma.user.findMany({
        include: {
          posts: true
        }
      });

      return Response.json({
        newUser,
        allUsers,
        user,
        usersWithPosts
      });
    } catch (error: any) {
      return Response.json({
        error: error.message
      }, { status: 500 });
    } finally {
      // Clean up pool connections
      ctx.waitUntil(pool.end());
    }
  }
};
```

---

## Common Query Patterns

### Create

```typescript
// Create single record
const user = await prisma.user.create({
  data: {
    name: "John",
    email: "john@example.com"
  }
});

// Create with relation
const post = await prisma.post.create({
  data: {
    title: "Hello",
    content: "World",
    author: {
      connect: { id: userId }
    }
  }
});

// Create with nested relation
const userWithPost = await prisma.user.create({
  data: {
    name: "John",
    email: "john@example.com",
    posts: {
      create: [
        { title: "First Post", content: "Hello" }
      ]
    }
  }
});
```

---

### Read

```typescript
// Find all
const users = await prisma.user.findMany();

// Find with filter
const activeUsers = await prisma.user.findMany({
  where: { active: true }
});

// Find unique
const user = await prisma.user.findUnique({
  where: { email: "john@example.com" }
});

// Find first
const firstUser = await prisma.user.findFirst({
  where: { name: "John" }
});

// Find with relations
const usersWithPosts = await prisma.user.findMany({
  include: {
    posts: true
  }
});

// Pagination
const pagedUsers = await prisma.user.findMany({
  skip: 20,
  take: 10
});

// Sorting
const sortedUsers = await prisma.user.findMany({
  orderBy: {
    createdAt: 'desc'
  }
});
```

---

### Update

```typescript
// Update one
const user = await prisma.user.update({
  where: { id: 1 },
  data: { name: "Jane" }
});

// Update many
const result = await prisma.user.updateMany({
  where: { active: false },
  data: { deleted: true }
});

// Upsert (update or insert)
const upsertedUser = await prisma.user.upsert({
  where: { email: "john@example.com" },
  update: { name: "John Updated" },
  create: { name: "John", email: "john@example.com" }
});
```

---

### Delete

```typescript
// Delete one
const user = await prisma.user.delete({
  where: { id: 1 }
});

// Delete many
const result = await prisma.user.deleteMany({
  where: { active: false }
});
```

---

### Aggregations

```typescript
// Count
const count = await prisma.user.count();

// Count with filter
const activeCount = await prisma.user.count({
  where: { active: true }
});

// Aggregate
const stats = await prisma.post.aggregate({
  _count: { id: true },
  _avg: { views: true },
  _sum: { views: true },
  _min: { createdAt: true },
  _max: { createdAt: true }
});

// Group by
const grouped = await prisma.post.groupBy({
  by: ['authorId'],
  _count: { id: true }
});
```

---

### Complex Filters

```typescript
// AND
const activeAdmins = await prisma.user.findMany({
  where: {
    AND: [
      { active: true },
      { role: 'admin' }
    ]
  }
});

// OR
const adminsOrModerators = await prisma.user.findMany({
  where: {
    OR: [
      { role: 'admin' },
      { role: 'moderator' }
    ]
  }
});

// NOT
const externalUsers = await prisma.user.findMany({
  where: {
    NOT: {
      email: {
        endsWith: '@example.com'
      }
    }
  }
});

// Comparison operators
const usersFrom2024 = await prisma.user.findMany({
  where: {
    createdAt: {
      gte: new Date('2024-01-01'),
      lt: new Date('2024-12-31')
    }
  }
});

// String filters
const gmailUsers = await prisma.user.findMany({
  where: {
    email: {
      contains: '@gmail.com' // or startsWith, endsWith
    }
  }
});
```

---

## Transactions

```typescript
// Sequential operations (default)
const batchResult = await prisma.$transaction([
  prisma.user.create({ data: { name: "John", email: "john@example.com" } }),
  prisma.post.create({ data: { title: "Hello", authorId: 1 } })
]);

// Interactive transactions
const interactiveResult = await prisma.$transaction(async (tx) => {
  const user = await tx.user.create({
    data: { name: "John", email: "john@example.com" }
  });

  const post = await tx.post.create({
    data: { title: "Hello", authorId: user.id }
  });

  return { user, post };
});

// Transaction with error handling
try {
  const result = await prisma.$transaction(async (tx) => {
    const user = await tx.user.create({ data: { ... } });

    if (someCondition) {
      throw new Error("Rollback transaction");
    }

    return user;
  });
} catch (error) {
  console.error("Transaction failed:", error);
}
```

---

## TypeScript Types

Prisma automatically generates TypeScript types:

```typescript
import { User, Post, Prisma } from "@prisma/client";

// Model types (findUnique returns null if no row matches)
const user: User | null = await prisma.user.findUnique({ where: { id: 1 } });

// Input types
const createUserData: Prisma.UserCreateInput = {
  name: "John",
  email: "john@example.com"
};

// Return types with relations
type UserWithPosts = Prisma.UserGetPayload<{
  include: { posts: true }
}>;

const userWithPosts: UserWithPosts | null = await prisma.user.findUnique({
  where: { id: 1 },
  include: { posts: true }
});
```

---

## Prisma Studio

View and edit the database with Prisma Studio:

```bash
npx prisma studio
```

**Note**: Runs locally and connects to the database directly (not via Hyperdrive).

---

## Best Practices

1. **Use driver adapters** (`@prisma/adapter-pg`) - REQUIRED for Hyperdrive
2. **Generate with --no-engine** - Required for Workers compatibility
3. **Set max: 5 for pg.Pool** - Stay within Workers' 6 connection limit
4. **Use ctx.waitUntil()** for cleanup
5. **Run migrations outside Worker** - Use `prisma migrate` locally
6. **Version control migrations** in `prisma/migrations/` folder
7. **Use `.env` for migrations only** - Not used in Worker runtime
8. **Re-generate client** after schema changes: `npx prisma generate --no-engine`

---

## Prisma vs Drizzle

| Feature | Prisma | Drizzle |
|---------|--------|---------|
| **Type Safety** | ✅ Excellent | ✅ Excellent |
| **Migrations** | ✅ Prisma Migrate (powerful) | ✅ Drizzle Kit (simpler) |
| **API Style** | `.findMany()`, `.create()` | `.select().from()` (SQL-like) |
| **Bundle Size** | ⚠️ Larger | ✅ Smaller |
| **Workers Setup** | ⚠️ Needs adapters + --no-engine | ✅ Simpler setup |
| **Learning Curve** | ⚠️ Steeper | ✅ Easier (if you know SQL) |
| **Performance** | ✅ Good | ✅ Excellent |

**Recommendation**:
- **Use Prisma** if you want powerful migrations and an intuitive API
- **Use Drizzle** if you want a lighter bundle and SQL-like queries
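
The API-style difference in the table is easiest to see with the same query written both ways. A sketch, assuming the Prisma models above and an equivalent Drizzle schema with relations registered as in the Drizzle guide:

```typescript
// Prisma: object-style query builder
const prismaUsers = await prisma.user.findMany({
  where: { active: true },
  include: { posts: true },
  take: 10,
});

// Drizzle: relational query over the same data, filters built from SQL operators
const drizzleUsers = await db.query.users.findMany({
  where: eq(users.active, true),
  with: { posts: true },
  limit: 10,
});
```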

---

## Common Issues

### Error: "PrismaClient is unable to run in this browser environment"

**Cause**: Prisma Client not generated with the `--no-engine` flag.

**Solution**:
```bash
npx prisma generate --no-engine
```

---

### Error: "Cannot find module '@prisma/client'"

**Cause**: Prisma Client not generated.

**Solution**:
```bash
npm install @prisma/client
npx prisma generate --no-engine
```

---

### Error: "Database xxx does not exist"

**Cause**: DATABASE_URL in `.env` points to a non-existent database.

**Solution**:
1. Create the database: `CREATE DATABASE xxx;`
2. Verify DATABASE_URL in `.env`

---

### Error: "No such module 'node:*'"

**Cause**: `nodejs_compat` flag not enabled.

**Solution**: Add to `wrangler.jsonc`:
```jsonc
{
  "compatibility_flags": ["nodejs_compat"]
}
```

---

## Package Scripts

Add to `package.json`:

```json
{
  "scripts": {
    "db:generate": "prisma generate --no-engine",
    "db:migrate": "prisma migrate dev",
    "db:deploy": "prisma migrate deploy",
    "db:studio": "prisma studio",
    "db:push": "prisma db push"
  }
}
```

Usage:
```bash
npm run db:generate  # Generate Prisma Client
npm run db:migrate   # Create and apply migration
npm run db:studio    # Open Prisma Studio
```

---

## References

- [Prisma Documentation](https://www.prisma.io/docs)
- [Prisma Driver Adapters](https://www.prisma.io/docs/orm/overview/databases/database-drivers)
- [Hyperdrive Prisma Example](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-drivers-and-libraries/prisma-orm/)
- [Prisma with Workers](https://www.prisma.io/docs/orm/more/under-the-hood/engines#using-custom-engine-binaries)
496
references/query-caching.md
Normal file
@@ -0,0 +1,496 @@
# Query Caching Guide

Complete guide to Hyperdrive's automatic query caching.

---

## Overview

Hyperdrive automatically caches **read-only queries** at Cloudflare's edge, reducing database load and improving response times.

**Key Features**:
- ✅ Automatic caching (enabled by default)
- ✅ Wire protocol parsing (differentiates reads from writes)
- ✅ Edge-based caching (near users)
- ✅ Zero configuration required

---
## What Gets Cached

### Cacheable Queries (✅ Cached)

**PostgreSQL**:
```sql
-- Simple SELECT
SELECT * FROM articles WHERE published = true ORDER BY date DESC LIMIT 50;

-- JOIN queries
SELECT u.name, p.title
FROM users u
JOIN posts p ON u.id = p.author_id
WHERE p.published = true;

-- Aggregates
SELECT COUNT(*) FROM users WHERE created_at > '2024-01-01';

-- Subqueries
SELECT * FROM products
WHERE category IN (SELECT id FROM categories WHERE active = true);
```

**MySQL**:
```sql
-- Simple SELECT
SELECT * FROM articles WHERE DATE(published_time) = CURDATE() ORDER BY published_time DESC LIMIT 50;

-- Aggregates
SELECT COUNT(*) as total_users FROM users;

-- JOIN queries
SELECT orders.id, customers.name
FROM orders
JOIN customers ON orders.customer_id = customers.id;
```

---
### Non-Cacheable Queries (❌ NOT Cached)

**Mutating Queries** (writes to database):
```sql
-- INSERT
INSERT INTO users (name, email) VALUES ('John', 'john@example.com');

-- UPDATE
UPDATE posts SET published = true WHERE id = 123;

-- DELETE
DELETE FROM sessions WHERE expired = true;

-- UPSERT
INSERT INTO users (id, name) VALUES (1, 'John')
ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name;

-- CREATE/ALTER/DROP
CREATE TABLE new_table (...);
ALTER TABLE users ADD COLUMN age INT;
DROP TABLE old_table;
```

**Volatile Functions** (PostgreSQL):
```sql
-- LASTVAL() - returns last sequence value
SELECT LASTVAL(), * FROM articles LIMIT 50;

-- CURRVAL() - current sequence value
SELECT CURRVAL('users_id_seq');

-- NEXTVAL() - advance sequence
SELECT NEXTVAL('users_id_seq');

-- RANDOM() - random values
SELECT * FROM products ORDER BY RANDOM() LIMIT 10;

-- NOW(), CURRENT_TIMESTAMP with modifications
SELECT * FROM logs WHERE created_at > NOW() - INTERVAL '1 hour';

-- PG_SLEEP() - delays
SELECT PG_SLEEP(1);
```

**Volatile Functions** (MySQL):
```sql
-- LAST_INSERT_ID()
SELECT LAST_INSERT_ID(), * FROM articles LIMIT 50;

-- UUID() - generates random UUIDs
SELECT UUID();

-- RAND() - random values
SELECT * FROM products ORDER BY RAND() LIMIT 10;

-- NOW() with modifications
SELECT * FROM logs WHERE created_at > NOW() - INTERVAL 1 HOUR;
```

**Transactions**:
```sql
-- Any query within an explicit transaction
BEGIN;
SELECT * FROM users;  -- Not cached (within transaction)
COMMIT;
```

---
## How Caching Works

### 1. Wire Protocol Parsing

Hyperdrive parses the **database wire protocol** (not SQL text) to determine cacheability:

```
Client → Query → Hyperdrive → Parse Protocol → Cacheable?
                                                   ↓
                                         Yes → Check Cache
                                                   ↓
                                         Hit? → Return Cached
                                                   ↓
                                         Miss → Query Database → Cache Result
```

**Why wire protocol, not SQL parsing?**
- More accurate (understands the database's interpretation)
- Handles different SQL dialects
- Detects function volatility correctly

---
### 2. Cache Key Generation

The cache key includes:
- Query text
- Parameter values
- Database connection details

**Example**:
```typescript
// These are DIFFERENT cache keys (different parameters)
await pool.query('SELECT * FROM users WHERE id = $1', [1]);
await pool.query('SELECT * FROM users WHERE id = $1', [2]);

// These are the SAME cache key (same query, same parameter)
await pool.query('SELECT * FROM users WHERE id = $1', [1]);
await pool.query('SELECT * FROM users WHERE id = $1', [1]); // Cache HIT
```

---
### 3. Cache Invalidation

**TTL-based expiry**:
- Cached results are served until their TTL expires
- Hyperdrive manages cache lifetimes automatically
- Writes are not guaranteed to immediately invalidate previously cached reads

**Example**:
```typescript
// 1. Query cached
await pool.query('SELECT * FROM users WHERE id = 1'); // Cache MISS, then cached

// 2. Read from cache
await pool.query('SELECT * FROM users WHERE id = 1'); // Cache HIT

// 3. Write goes to the database (writes are never cached)
await pool.query('UPDATE users SET name = $1 WHERE id = 1', ['Jane']);

// 4. A read issued immediately afterwards may still be served the older
//    cached result; the fresh row is returned once the cached entry expires
await pool.query('SELECT * FROM users WHERE id = 1');
```

---
## Cache Configuration

### Default Settings

**Enabled by default** - no configuration needed.

```typescript
// Caching is automatically enabled
const client = new Client({
  connectionString: env.HYPERDRIVE.connectionString
});
```

---
### Disable Caching (Per Hyperdrive Config)

```bash
# Disable caching for specific Hyperdrive config
npx wrangler hyperdrive update <hyperdrive-id> --caching-disabled

# Re-enable caching
npx wrangler hyperdrive update <hyperdrive-id> --caching-disabled=false
```

**When to disable**:
- Debugging cache-related issues
- Testing without cache
- Workloads that never benefit from caching (all writes)

---
## Optimizing for Caching

### 1. Use Prepared Statements (postgres.js)

**Critical**: postgres.js requires `prepare: true` for caching.

```typescript
// ✅ Cacheable (prepared statements enabled)
const sql = postgres(env.HYPERDRIVE.connectionString, {
  prepare: true // CRITICAL for caching
});

// ❌ NOT cacheable (prepare disabled)
const sql = postgres(env.HYPERDRIVE.connectionString, {
  prepare: false // Queries won't be cached!
});
```

**Why**: Hyperdrive caches prepared statements, not simple queries from postgres.js.

---
### 2. Avoid Volatile Functions

**Instead of**:
```sql
-- ❌ Not cached (RANDOM)
SELECT * FROM products ORDER BY RANDOM() LIMIT 10;
```

**Use**:
```typescript
// ✅ Cached (random offset generated in the Worker)
const offset = Math.floor(Math.random() * 1000);
await pool.query('SELECT * FROM products OFFSET $1 LIMIT 10', [offset]);
```

---
### 3. Parameterize Queries

**Instead of**:
```typescript
// ❌ Interpolation: every user ID produces a distinct query text,
// and string concatenation invites SQL injection
await pool.query(`SELECT * FROM users WHERE id = ${userId}`);
```

**Use**:
```typescript
// ✅ One parameterized query shape; values are passed separately
await pool.query('SELECT * FROM users WHERE id = $1', [userId]);
```

---
### 4. Keep Transactions Short

**Avoid**:
```typescript
// ❌ Queries within a transaction are not cached
await client.query('BEGIN');
await client.query('SELECT * FROM users'); // Not cached
await client.query('SELECT * FROM posts'); // Not cached
await client.query('COMMIT');
```

**Use**:
```typescript
// ✅ Queries outside a transaction are cached
await client.query('SELECT * FROM users'); // Cached
await client.query('SELECT * FROM posts'); // Cached
```

---
### 5. Separate Read and Write Patterns

**Organize code**:
```typescript
// Read-heavy endpoints (benefit from caching)
app.get('/users', async (c) => {
  const users = await pool.query('SELECT * FROM users LIMIT 100');
  return c.json(users.rows);
});

// Write-heavy endpoints (no caching)
app.post('/users', async (c) => {
  const { name, email } = await c.req.json();
  await pool.query('INSERT INTO users (name, email) VALUES ($1, $2)', [name, email]);
  return c.json({ success: true });
});
```

---
## Monitoring Cache Performance

### Cache Status

The `cf-cache-status` header on an HTTP response reflects Cloudflare's HTTP cache for the Worker's response, not Hyperdrive's query cache; query-level cache metrics are surfaced through the dashboard, described below.

```typescript
const response = await fetch('https://your-worker.dev/api/users');
const cacheStatus = response.headers.get('cf-cache-status');

console.log(cacheStatus);
// Values: HIT, MISS, BYPASS, EXPIRED, etc.
```

**Cache Status Values**:
- `HIT` - Result served from cache
- `MISS` - Not in cache, fetched from origin
- `BYPASS` - Caching disabled or non-cacheable request
- `EXPIRED` - Cached result expired, re-fetched
- `UNCACHEABLE` - Request cannot be cached (e.g. a write operation)

---
### Hyperdrive Analytics

View cache metrics in the Cloudflare dashboard:

1. Go to the [Hyperdrive Dashboard](https://dash.cloudflare.com/?to=/:account/workers/hyperdrive)
2. Select your Hyperdrive configuration
3. Click the **Metrics** tab

**Available Metrics**:
- **Cache hit ratio** - % of queries served from cache
- **Query count** - Total queries
- **Cache status breakdown** - HIT vs MISS vs BYPASS
- **Query latency** - p50, p95, p99
- **Result size** - Bytes cached

**Ideal Cache Hit Ratio**:
- **Read-heavy workload**: 60-90% cache hit ratio
- **Mixed workload**: 30-60% cache hit ratio
- **Write-heavy workload**: 10-30% cache hit ratio

---
## Cache Behavior by Query Type

| Query Type | PostgreSQL | MySQL | Cached? |
|------------|------------|-------|---------|
| **SELECT** | `SELECT * FROM users` | `SELECT * FROM users` | ✅ Yes |
| **INSERT** | `INSERT INTO users VALUES (...)` | `INSERT INTO users VALUES (...)` | ❌ No |
| **UPDATE** | `UPDATE users SET name = ...` | `UPDATE users SET name = ...` | ❌ No |
| **DELETE** | `DELETE FROM users WHERE ...` | `DELETE FROM users WHERE ...` | ❌ No |
| **CREATE TABLE** | `CREATE TABLE new_table (...)` | `CREATE TABLE new_table (...)` | ❌ No |
| **LASTVAL()** | `SELECT LASTVAL()` | `SELECT LAST_INSERT_ID()` | ❌ No |
| **RANDOM()** | `SELECT ... ORDER BY RANDOM()` | `SELECT ... ORDER BY RAND()` | ❌ No |
| **Transaction** | `BEGIN; SELECT ...; COMMIT;` | `START TRANSACTION; SELECT ...; COMMIT;` | ❌ No |
| **JOIN** | `SELECT * FROM a JOIN b` | `SELECT * FROM a JOIN b` | ✅ Yes |
| **Aggregate** | `SELECT COUNT(*) FROM users` | `SELECT COUNT(*) FROM users` | ✅ Yes |
| **Subquery** | `SELECT * WHERE id IN (SELECT ...)` | `SELECT * WHERE id IN (SELECT ...)` | ✅ Yes |

---
## Cache TTL

**Default TTL**: Hyperdrive manages cache lifetimes automatically; results are served for a short window before being re-fetched from the database.

**Expiry behavior**:
- Cached entries age out via TTL rather than being invalidated by writes
- TTL is managed by Hyperdrive; no manual configuration is needed
- A read issued immediately after a write may briefly return the older cached result

---
## Best Practices

1. **Enable prepared statements** (postgres.js: `prepare: true`) - see the sketch below
2. **Avoid volatile functions** (RANDOM, LASTVAL, etc.)
3. **Use parameterized queries** for consistent cache keys
4. **Keep transactions short** (queries in transactions are not cached)
5. **Separate read and write workloads** for better cache efficiency
6. **Monitor the cache hit ratio** in Hyperdrive analytics
7. **Don't disable caching** unless debugging (it's automatic and beneficial)
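
Pulling these together, here is a minimal sketch of a cache-friendly read path with postgres.js, assuming the `HYPERDRIVE` binding from the quick start; the table and columns are illustrative:

```typescript
import postgres from "postgres";

export default {
  async fetch(request: Request, env: { HYPERDRIVE: Hyperdrive }, ctx: ExecutionContext): Promise<Response> {
    const sql = postgres(env.HYPERDRIVE.connectionString, {
      max: 5,        // stay under the Workers connection limit
      prepare: true, // prepared statements so results are cacheable
    });

    try {
      // Parameterized, read-only, outside a transaction: cache-friendly
      const products = await sql`SELECT id, name FROM products WHERE active = ${true} LIMIT 20`;
      return Response.json(products);
    } finally {
      ctx.waitUntil(sql.end());
    }
  },
};
```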

---

## Troubleshooting

### Low Cache Hit Ratio

**Symptom**: Most queries showing cache MISS.

**Causes & Solutions**:

**1. Prepared statements disabled (postgres.js)**:
```typescript
// ❌ Problem
const sql = postgres(url, { prepare: false });

// ✅ Solution
const sql = postgres(url, { prepare: true });
```

**2. Using volatile functions**:
```sql
-- ❌ Problem
SELECT LASTVAL(), * FROM articles;

-- ✅ Solution
SELECT * FROM articles;
```

**3. Mostly write queries**:
```sql
-- ❌ Problem (writes not cached)
INSERT INTO logs (...) VALUES (...);

-- ✅ Expected behavior (writes aren't cached, this is normal)
```

**4. Transactions wrapping reads**:
```typescript
// ❌ Problem
await client.query('BEGIN');
await client.query('SELECT ...');
await client.query('COMMIT');

// ✅ Solution
await client.query('SELECT ...'); // No transaction
```

---
### Queries Not Cached

**Symptom**: Expected cacheable queries showing BYPASS or UNCACHEABLE.

**Debug Steps**:

**1. Check query is read-only**:
```sql
-- ✅ Should be cached
SELECT * FROM users;

-- ❌ Not cached (write)
UPDATE users SET name = 'John';
```

**2. Check for volatile functions**:
```sql
-- ❌ Not cached
SELECT RANDOM();

-- ✅ Cached
SELECT * FROM products;
```

**3. Check prepared statements enabled** (postgres.js):
```typescript
const sql = postgres(url, { prepare: true });
```

**4. Check caching not disabled**:
```bash
npx wrangler hyperdrive get <hyperdrive-id>
# Check "caching": { "disabled": false }
```

---
## References

- [Query Caching Docs](https://developers.cloudflare.com/hyperdrive/configuration/query-caching/)
- [PostgreSQL Volatile Functions](https://www.postgresql.org/docs/current/xfunc-volatility.html)
- [How Hyperdrive Works](https://developers.cloudflare.com/hyperdrive/configuration/how-hyperdrive-works/)
- [Hyperdrive Metrics](https://developers.cloudflare.com/hyperdrive/observability/metrics/)
421
references/supported-databases.md
Normal file
@@ -0,0 +1,421 @@
# Supported Databases and Providers

Complete list of databases and providers compatible with Cloudflare Hyperdrive.

---

## Database Engine Support

| Engine | Supported | Versions | Notes |
|--------|-----------|----------|-------|
| **PostgreSQL** | ✅ Yes | 9.0 - 17.x | Both self-hosted and managed |
| **MySQL** | ✅ Yes | 5.7 - 8.x | Both self-hosted and managed, MariaDB included |
| **SQL Server** | ❌ No | - | Not currently supported |
| **MongoDB** | ❌ No | - | NoSQL not supported (use Atlas Data API) |
| **Oracle** | ❌ No | - | Not currently supported |

---
## PostgreSQL-Compatible Databases

### AWS RDS / Aurora PostgreSQL

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create aws-postgres \
  --connection-string="postgres://admin:password@mydb.abc123.us-east-1.rds.amazonaws.com:5432/postgres"
```

**SSL**: Enabled by default on RDS

**Guide**: [AWS RDS Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/aws-rds-aurora/)

---

### Google Cloud SQL (PostgreSQL)

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create gcp-postgres \
  --connection-string="postgres://postgres:password@34.123.45.67:5432/mydb"
```

**Public IP**: Must enable public IP in Cloud SQL settings

**Guide**: [Google Cloud SQL Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/google-cloud-sql/)

---

### Azure Database for PostgreSQL

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create azure-postgres \
  --connection-string="postgres://myuser@myserver:password@myserver.postgres.database.azure.com:5432/mydb"
```

**Firewall**: Add Cloudflare IP ranges or allow all public IPs

**Guide**: [Azure PostgreSQL Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/azure/)

---
### Neon

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create neon-db \
  --connection-string="postgres://user:password@ep-cool-darkness-123456.us-east-2.aws.neon.tech:5432/neondb"
```

**Driver**: Use `pg` or `postgres.js` (NOT @neondatabase/serverless driver)

**Guide**: [Neon Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/neon/)

---

### Supabase

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create supabase-db \
  --connection-string="postgres://postgres:password@db.abc123xyz.supabase.co:5432/postgres"
```

**Connection String**: Get from Supabase Dashboard → Settings → Database → Connection string (Direct connection)

**Guide**: [Supabase Guide](https://developers.cloudflare.com/workers/databases/third-party-integrations/supabase/)

---

### PlanetScale (PostgreSQL)

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create planetscale-postgres \
  --connection-string="postgres://user:password@aws.connect.psdb.cloud:3306/mydb"
```

**Driver**: Use `pg` or `postgres.js` (NOT PlanetScale serverless driver)

**Guide**: [PlanetScale Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/planetscale-postgres/)

---

### Timescale

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create timescale-db \
  --connection-string="postgres://tsdbadmin:password@abc123xyz.tsdb.cloud.timescale.com:5432/tsdb"
```

**Guide**: [Timescale Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/timescale/)

---

### CockroachDB

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create cockroach-db \
  --connection-string="postgres://user:password@free-tier.gcp-us-central1.cockroachlabs.cloud:26257/defaultdb"
```

**Notes**: PostgreSQL-compatible, uses port 26257

**Guide**: [CockroachDB Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/cockroachdb/)

---

### Materialize

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create materialize-db \
  --connection-string="postgres://user@materialize.example.com:6875/materialize"
```

**Guide**: [Materialize Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/materialize/)

---

### Fly.io PostgreSQL

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create fly-postgres \
  --connection-string="postgres://postgres:password@my-app-db.fly.dev:5432/postgres"
```

**Guide**: [Fly.io Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/fly/)

---

### pgEdge Cloud

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create pgedge-db \
  --connection-string="postgres://user:password@db.pgedge.io:5432/mydb"
```

**Guide**: [pgEdge Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/pgedge/)

---

### Prisma Postgres

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create prisma-postgres \
  --connection-string="postgres://user:password@db.prisma.io:5432/mydb"
```

**Guide**: [Prisma Postgres Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/postgres-database-providers/prisma-postgres/)

---
## MySQL-Compatible Databases

### AWS RDS / Aurora MySQL

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create aws-mysql \
  --connection-string="mysql://admin:password@mydb.abc123.us-east-1.rds.amazonaws.com:3306/mydb"
```

**Guide**: [AWS RDS MySQL Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-mysql/mysql-database-providers/aws-rds-aurora/)

---

### Google Cloud SQL (MySQL)

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create gcp-mysql \
  --connection-string="mysql://root:password@34.123.45.67:3306/mydb"
```

**Guide**: [Google Cloud SQL MySQL Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-mysql/)

---

### Azure Database for MySQL

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create azure-mysql \
  --connection-string="mysql://myuser@myserver:password@myserver.mysql.database.azure.com:3306/mydb"
```

**Guide**: [Azure MySQL Guide](https://developers.cloudflare.com/hyperdrive/examples/connect-to-mysql/mysql-database-providers/azure/)

---

### PlanetScale (MySQL)

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create planetscale-mysql \
  --connection-string="mysql://user:password@aws.connect.psdb.cloud:3306/mydb"
```

**Driver**: Use `mysql2` (NOT PlanetScale serverless driver)

---

### MariaDB

**Status**: ✅ Fully Supported

**Connection Example**:
```bash
npx wrangler hyperdrive create mariadb \
  --connection-string="mysql://user:password@mariadb-host:3306/mydb"
```

**Notes**: MariaDB uses the MySQL protocol, fully compatible

---
## Self-Hosted Databases

### Self-Hosted PostgreSQL

**Status**: ✅ Fully Supported

**Requirements**:
- PostgreSQL 9.0+
- SSL/TLS enabled
- Public IP address (or use Cloudflare Tunnel for private)
- Firewall configured to allow public connections

**postgresql.conf**:
```conf
listen_addresses = '*'
ssl = on
ssl_cert_file = 'server.crt'
ssl_key_file = 'server.key'
```

**pg_hba.conf**:
```conf
# Allow connections from anywhere with SSL
hostssl all all 0.0.0.0/0 md5
```

---

### Self-Hosted MySQL

**Status**: ✅ Fully Supported

**Requirements**:
- MySQL 5.7+
- SSL/TLS enabled
- Public IP address (or use Cloudflare Tunnel)
- Firewall configured

**my.cnf**:
```conf
[mysqld]
bind-address = 0.0.0.0
require_secure_transport = ON
ssl-ca = ca-cert.pem
ssl-cert = server-cert.pem
ssl-key = server-key.pem
```

---
## Private Database Access

### Cloudflare Tunnel

For databases in private networks (VPCs, on-premises):

**Supported**:
- Private AWS VPCs
- Google Cloud private networks
- Azure VNets
- On-premises databases
- Any database behind a firewall

**Setup**:
1. Install cloudflared
2. Create a tunnel to the database
3. Configure Hyperdrive with the tunnel hostname

**Guide**: [Cloudflare Tunnel Guide](https://developers.cloudflare.com/hyperdrive/configuration/connect-to-private-database/)

---
## Connection Requirements

### All Databases MUST Have

✅ **TLS/SSL enabled** - Hyperdrive requires encryption
✅ **Accessible endpoint** - Public IP or via Cloudflare Tunnel
✅ **User permissions** - User must have read/write permissions
✅ **Correct credentials** - Valid username/password
✅ **Firewall configured** - Allow connections from the internet or tunnel

### All Databases CANNOT Have

❌ **Private IP only** (10.x.x.x, 192.168.x.x) - Use Tunnel instead
❌ **SSL disabled** - Hyperdrive requires TLS
❌ **Restrictive firewall** - Must allow Cloudflare connections
❌ **Invalid credentials** - Connection will fail during setup
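
One way to confirm a database meets these requirements end to end is to run a trivial query from a Worker; TLS, credential, and firewall problems all surface as connection errors here. A minimal diagnostic sketch using node-postgres, assuming the `HYPERDRIVE` binding from the quick start:

```typescript
import { Client } from "pg";

export default {
  async fetch(request: Request, env: { HYPERDRIVE: Hyperdrive }, ctx: ExecutionContext): Promise<Response> {
    const client = new Client({ connectionString: env.HYPERDRIVE.connectionString });

    try {
      await client.connect();
      const { rows } = await client.query("SELECT 1 AS ok");
      return Response.json({ connected: true, rows });
    } catch (error: any) {
      // TLS, credential, and firewall failures are reported here
      return Response.json({ connected: false, error: error.message }, { status: 500 });
    } finally {
      ctx.waitUntil(client.end());
    }
  },
};
```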

---

## Unsupported Databases

| Database | Status | Alternative |
|----------|--------|-------------|
| **MongoDB** | ❌ Not Supported | Use MongoDB Atlas Data API or Realm |
| **SQL Server** | ❌ Not Supported | Use Azure SQL Edge (Linux) or wait for support |
| **Oracle Database** | ❌ Not Supported | No current alternative |
| **SQLite** | ❌ Not Needed | Use Cloudflare D1 (serverless SQLite) |
| **Redis** | ❌ Not Supported | Use Upstash Redis or Cloudflare KV |
| **DynamoDB** | ❌ Not Supported | Use AWS SDK directly or Cloudflare KV |
| **Cassandra** | ❌ Not Supported | Use DataStax Astra DB API |

---
## Choosing a Database Provider

### For New Projects

**Recommended**:
1. **Neon** - Serverless PostgreSQL, generous free tier
2. **Supabase** - PostgreSQL + Auth + Storage
3. **PlanetScale** - MySQL, branching workflow
4. **Cloudflare D1** - If you don't need PostgreSQL/MySQL features

### For Existing Apps

Use Hyperdrive with your current provider:
- AWS RDS/Aurora
- Google Cloud SQL
- Azure Database

### For Self-Hosted

Use Cloudflare Tunnel for secure private access.

---
## Performance Comparison

| Provider | Connection Latency | Query Latency | Caching |
|----------|-------------------|---------------|---------|
| **Neon** (US-East) | ~5ms | ~10-20ms | ✅ Yes |
| **Supabase** (US-East) | ~5ms | ~10-20ms | ✅ Yes |
| **AWS RDS** (US-East) | ~5ms | ~15-25ms | ✅ Yes |
| **Self-Hosted** (EU) | ~50ms | ~60-80ms | ✅ Yes |

**Note**: Hyperdrive adds minimal latency (~5ms) but eliminates up to 7 round trips of connection setup, a net performance gain for most workloads.

---
## References

- [Supported Databases Docs](https://developers.cloudflare.com/hyperdrive/reference/supported-databases-and-features/)
- [PostgreSQL Examples](https://developers.cloudflare.com/hyperdrive/examples/connect-to-postgres/)
- [MySQL Examples](https://developers.cloudflare.com/hyperdrive/examples/connect-to-mysql/)
- [Private Database Guide](https://developers.cloudflare.com/hyperdrive/configuration/connect-to-private-database/)
||||||
438
references/tls-ssl-setup.md
Normal file
438
references/tls-ssl-setup.md
Normal file
@@ -0,0 +1,438 @@
|
|||||||
|
# TLS/SSL Setup Guide

Complete guide to configuring SSL/TLS certificates with Hyperdrive.

---

## Overview

**Hyperdrive requires TLS/SSL** for all database connections.

**Supported Configurations**:

1. **Basic TLS** (`require` mode) - Default; validates certificates via WebPKI
2. **Server Certificates** (`verify-ca`, `verify-full`) - Verify the server's CA certificate
3. **Client Certificates** (mTLS) - Authenticate Hyperdrive to the database

---

## SSL Modes

### 1. require (Default)

**What it does**:
- Enforces TLS encryption
- Validates the server certificate using WebPKI (standard browser certificate authorities)
- No additional certificate configuration needed

**When to use**:
- Most cloud databases (AWS RDS, Google Cloud SQL, Azure, Neon, Supabase)
- Standard SSL/TLS setup
- Default choice for most applications

**Example**:
```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://user:password@host:5432/database"

# SSL mode is "require" by default
```

**No configuration needed** - this is automatic.
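
Since Hyperdrive terminates TLS to the origin itself, the Worker code looks the same in every SSL mode. A minimal sketch with postgres.js, assuming a `HYPERDRIVE` binding and the `nodejs_compat` flag are configured:

```typescript
// Sketch: no certificate handling appears in Worker code - Hyperdrive
// negotiates TLS with the origin database on the Worker's behalf.
import postgres from "postgres";

export default {
  async fetch(request: Request, env: { HYPERDRIVE: Hyperdrive }, ctx: ExecutionContext): Promise<Response> {
    const sql = postgres(env.HYPERDRIVE.connectionString, { max: 5, prepare: true });
    try {
      const rows = await sql`SELECT 1 AS ok`;
      return Response.json(rows);
    } finally {
      ctx.waitUntil(sql.end()); // close without blocking the response
    }
  },
};
```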

---

### 2. verify-ca

**What it does**:
- Verifies the server certificate is signed by the expected Certificate Authority (CA)
- Prevents man-in-the-middle attacks
- Requires uploading the CA certificate to Hyperdrive

**When to use**:
- Enhanced security requirements
- Self-signed certificates
- Private/internal certificate authorities

**Setup**:

**Step 1: Upload CA certificate**:
```bash
npx wrangler cert upload certificate-authority \
  --ca-cert /path/to/root-ca.pem \
  --name my-ca-cert
```

**Output**:
```
✅ Uploaded CA Certificate my-ca-cert
ID: ca-12345678-1234-1234-1234-123456789012
```

**Step 2: Create Hyperdrive with CA**:
```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://user:password@host:5432/database" \
  --ca-certificate-id ca-12345678-1234-1234-1234-123456789012 \
  --sslmode verify-ca
```

---

### 3. verify-full

**What it does**:
- Everything from `verify-ca`, PLUS
- Verifies the database hostname matches the Subject Alternative Name (SAN) in the certificate

**When to use**:
- Maximum security requirements
- Preventing hostname spoofing
- Compliance requirements (PCI-DSS, HIPAA)

**Setup** (same as verify-ca, but use `--sslmode verify-full`):
```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://user:password@host:5432/database" \
  --ca-certificate-id ca-12345678-1234-1234-1234-123456789012 \
  --sslmode verify-full
```

---

## Certificate Requirements

### CA Certificate Format

**Must be**:
- PEM format (`.pem` file)
- Root CA or intermediate CA certificate
- **Region-specific** (not a global bundle with multiple CAs)

**Example CA Certificate** (root-ca.pem):
```
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
...
-----END CERTIFICATE-----
```

**Get CA Certificate**:

**AWS RDS**:
```bash
wget https://truststore.pki.rds.amazonaws.com/us-east-1/us-east-1-bundle.pem
# Use the region-specific bundle (NOT the global bundle)
```

**Google Cloud SQL**:
```bash
# Download from the Cloud SQL instance details page:
# Instance → Connections → Server CA certificate
```

**Azure Database**:
```bash
wget https://www.digicert.com/CACerts/BaltimoreCyberTrustRoot.crt.pem
```

---

## Client Certificates (mTLS)

### Overview

**What is mTLS?**
- Mutual TLS: both client and server authenticate each other
- Hyperdrive presents a client certificate to the database
- The database verifies the certificate before allowing the connection

**When to use**:
- Database requires client certificate authentication
- Enhanced security beyond username/password
- Compliance requirements

---

### Setup

**Step 1: Generate Client Certificate** (if needed):
```bash
# Generate private key
openssl genrsa -out client-key.pem 2048

# Generate certificate signing request (CSR)
openssl req -new -key client-key.pem -out client.csr

# Get certificate from your CA (or self-sign for testing)
openssl x509 -req -in client.csr -CA root-ca.pem -CAkey root-ca-key.pem \
  -CAcreateserial -out client-cert.pem -days 365
```

**Step 2: Upload Client Certificate to Hyperdrive**:
```bash
npx wrangler cert upload mtls-certificate \
  --cert /path/to/client-cert.pem \
  --key /path/to/client-key.pem \
  --name my-client-cert
```

**Output**:
```
✅ Uploaded client certificate my-client-cert
ID: mtls-87654321-4321-4321-4321-210987654321
```

**Step 3: Create Hyperdrive with Client Certificate**:
```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://user:password@host:5432/database" \
  --mtls-certificate-id mtls-87654321-4321-4321-4321-210987654321
```

**Optionally combine with server certificates**:
```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://..." \
  --ca-certificate-id ca-12345678-1234-1234-1234-123456789012 \
  --mtls-certificate-id mtls-87654321-4321-4321-4321-210987654321 \
  --sslmode verify-full
```

---

## Complete Setup Examples

### Example 1: AWS RDS with verify-full

```bash
# 1. Download the AWS RDS CA certificate (region-specific)
wget https://truststore.pki.rds.amazonaws.com/us-east-1/us-east-1-bundle.pem

# 2. Upload the CA certificate
npx wrangler cert upload certificate-authority \
  --ca-cert us-east-1-bundle.pem \
  --name aws-rds-us-east-1-ca

# Output: ID = ca-abc123...

# 3. Create Hyperdrive
npx wrangler hyperdrive create aws-rds-db \
  --connection-string="postgres://admin:password@mydb.abc123.us-east-1.rds.amazonaws.com:5432/postgres" \
  --ca-certificate-id ca-abc123... \
  --sslmode verify-full
```

---

### Example 2: Self-Hosted with mTLS

```bash
# 1. Upload the server CA certificate
npx wrangler cert upload certificate-authority \
  --ca-cert /path/to/server-ca.pem \
  --name my-server-ca

# Output: ID = ca-server123...

# 2. Upload the client certificate
npx wrangler cert upload mtls-certificate \
  --cert /path/to/client-cert.pem \
  --key /path/to/client-key.pem \
  --name my-client-cert

# Output: ID = mtls-client456...

# 3. Create Hyperdrive with both
npx wrangler hyperdrive create secure-db \
  --connection-string="postgres://user:password@secure-db.example.com:5432/mydb" \
  --ca-certificate-id ca-server123... \
  --mtls-certificate-id mtls-client456... \
  --sslmode verify-full
```

---

### Example 3: Basic SSL (Default)

```bash
# Most cloud databases (AWS, GCP, Azure, Neon, Supabase)
# No certificate configuration needed
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://user:password@host:5432/database"

# SSL mode "require" is automatic
```

---

## Database Configuration

### PostgreSQL SSL Setup

**postgresql.conf**:
```conf
ssl = on
ssl_cert_file = 'server.crt'
ssl_key_file = 'server.key'
ssl_ca_file = 'root.crt'  # For client certificate verification

# Optional: enforce a minimum TLS version
ssl_min_protocol_version = 'TLSv1.2'
```

**pg_hba.conf** (require SSL):
```conf
# TYPE  DATABASE  USER  ADDRESS    METHOD
hostssl all       all   0.0.0.0/0  md5

# Or require client certificates
hostssl all       all   0.0.0.0/0  cert
```

**Restart PostgreSQL**:
```bash
sudo systemctl restart postgresql
```

---

### MySQL SSL Setup

**my.cnf** or **my.ini**:
```conf
[mysqld]
require_secure_transport = ON
ssl-ca = /path/to/ca-cert.pem
ssl-cert = /path/to/server-cert.pem
ssl-key = /path/to/server-key.pem
```

**Require client certificates** (MySQL user):
```sql
CREATE USER 'hyperdrive'@'%' IDENTIFIED BY 'password' REQUIRE X509;
GRANT ALL PRIVILEGES ON mydb.* TO 'hyperdrive'@'%';
FLUSH PRIVILEGES;
```

**Restart MySQL**:
```bash
sudo systemctl restart mysql
```

---

## Troubleshooting

### Error: "TLS not supported by the database"

**Cause**: The database does not have SSL/TLS enabled.

**Solution**:
1. Enable SSL in the database configuration
2. Restart the database
3. Verify SSL is enabled: `SHOW VARIABLES LIKE 'have_ssl';` (MySQL) or `SHOW ssl;` (PostgreSQL)

**PostgreSQL verification**:
```sql
SHOW ssl;
-- Should return "on"
```

**MySQL verification**:
```sql
SHOW VARIABLES LIKE 'have_ssl';
-- Should return "YES"
```

---

### Error: "TLS handshake failed: cert validation failed"

**Cause**: The server certificate is not signed by the expected CA.

**Solutions**:
1. Verify the correct CA certificate was uploaded
2. Check the CA certificate is for the correct region (AWS RDS)
3. Ensure the CA certificate format is PEM
4. Verify the connection string hostname matches the certificate SAN (verify-full mode)

---

### Error: "Server return error and closed connection"

**Cause**: The database requires a client certificate, but none was provided.

**Solution**: Upload a client certificate and configure Hyperdrive with mTLS:
```bash
npx wrangler cert upload mtls-certificate \
  --cert client-cert.pem \
  --key client-key.pem \
  --name my-cert

npx wrangler hyperdrive create my-db \
  --connection-string="..." \
  --mtls-certificate-id <id>
```

---

### Error: "Certificate has expired"

**Cause**: The server certificate or client certificate has expired.

**Solutions**:
1. Renew the certificate from the certificate authority
2. Upload the new certificate to Hyperdrive
3. Update the Hyperdrive configuration

---

## Local Development

### SSL in Local Development

**Option 1: Disable SSL for the local database** (NOT recommended):
```bash
# Local PostgreSQL without SSL
export CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE="postgres://user:password@localhost:5432/db?sslmode=disable"
```

**Option 2: Use a Tunnel for the local database** (Recommended):
```bash
# Use Cloudflare Tunnel to reach the local database (keeps SSL)
cloudflared tunnel create local-db
cloudflared tunnel run local-db

# Hyperdrive connects via the tunnel with SSL
```

**Option 3: Self-signed certificates for local dev**:
```bash
# Generate a self-signed cert
openssl req -new -x509 -days 365 -nodes -out server.crt -keyout server.key

# Configure local PostgreSQL to use it,
# then use verify-ca mode with the local CA
```

---

## Best Practices

1. **Use the default `require` mode** unless you have specific security requirements
2. **Use verify-full in production** if handling sensitive data
3. **Store certificates securely** - don't commit them to git
4. **Rotate certificates regularly** - before expiration
5. **Test certificate setup** in staging before production
6. **Use region-specific CA bundles** (AWS RDS), not global bundles
7. **Document certificate IDs** in the project README
8. **Monitor certificate expiration** (set calendar reminders)

---

## References

- [Hyperdrive TLS/SSL Docs](https://developers.cloudflare.com/hyperdrive/configuration/tls-ssl-certificates-for-hyperdrive/)
- [PostgreSQL SSL Docs](https://www.postgresql.org/docs/current/ssl-tcp.html)
- [MySQL SSL Docs](https://dev.mysql.com/doc/refman/8.0/en/using-encrypted-connections.html)
- [AWS RDS SSL Certificates](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html)

481
references/troubleshooting.md
Normal file
@@ -0,0 +1,481 @@

# Hyperdrive Troubleshooting Guide

Complete error reference with solutions for Cloudflare Hyperdrive.

---

## Configuration Errors

These errors occur when creating or updating a Hyperdrive configuration.

### Error 2008: Bad hostname

**Error Message**: `Bad hostname`

**Cause**: Hyperdrive could not resolve the database hostname via DNS.

**Solutions**:
1. Verify the hostname exists in public DNS: `nslookup db.example.com`
2. Check for typos in the hostname
3. Ensure the hostname is publicly resolvable (not internal-only DNS)
4. For private databases, use Cloudflare Tunnel

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2009: Private IP address not supported

**Error Message**: `The hostname does not resolve to a public IP address, or the IP address is not a public address`

**Cause**: Hyperdrive cannot connect to private IP addresses (10.x.x.x, 192.168.x.x, 172.16.x.x).

**Solutions**:
1. Use Cloudflare Tunnel for private database access
2. Expose the database with a public IP (ensure the firewall is configured)
3. Use the cloud provider's public endpoint (e.g., AWS RDS public endpoint)

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2010: Cannot connect to host:port

**Error Message**: `Cannot connect to the host:port`

**Cause**: Hyperdrive could not route to the hostname/port.

**Solutions**:
1. Verify the hostname has a public DNS record
2. Check the port is correct (5432 for PostgreSQL, 3306 for MySQL)
3. Ensure the hostname resolves to a public IP address
4. Check for typos in the hostname

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2011: Connection refused

**Error Message**: `Connection refused`

**Cause**: A network firewall or access control list (ACL) is rejecting requests from Hyperdrive.

**Solutions**:
1. Allow connections from the public internet in the database firewall
2. Check cloud provider security groups (AWS, GCP, Azure)
3. Verify the database is listening on the correct IP/port
4. For private databases, use Cloudflare Tunnel

**Example - AWS RDS Security Group**:
```
Type: PostgreSQL
Protocol: TCP
Port: 5432
Source: 0.0.0.0/0 (or restrict to Cloudflare IPs)
```

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2012: TLS (SSL) not supported by the database

**Error Message**: `TLS (SSL) not supported by the database`

**Cause**: The database does not have SSL/TLS enabled.

**Solutions**:
1. Enable SSL/TLS on your database
2. For AWS RDS: SSL is enabled by default
3. For self-hosted PostgreSQL: edit postgresql.conf and set `ssl = on`
4. For self-hosted MySQL: edit my.cnf and configure SSL certificates

**Hyperdrive requires TLS/SSL for all connections.**

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2013: Invalid database credentials

**Error Message**: `Invalid database credentials`

**Cause**: The username or password is incorrect.

**Solutions**:
1. Verify the username is correct and exists in the database
2. Check the password is correct (case-sensitive)
3. Test credentials locally: `psql postgres://user:password@host:port/database`
4. Ensure the user has permission to connect remotely
5. Check for special characters in the password (they may need URL encoding)

**Special Characters in Password**:
```bash
# Original password: p@ssw$rd
# URL-encoded: p%40ssw%24rd
postgres://user:p%40ssw%24rd@host:5432/database
```

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2014: Database name does not exist

**Error Message**: `The specified database name does not exist`

**Cause**: The database (not table) name provided does not exist.

**Solutions**:
1. Verify the database name: `SHOW DATABASES;` (MySQL) or `\l` (PostgreSQL)
2. Check for typos in the database name
3. Create the database if needed: `CREATE DATABASE mydb;`
4. Ensure you're providing the database name, not a table name

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2015: Generic error

**Error Message**: `Generic error`

**Cause**: Hyperdrive failed to connect but could not determine a specific reason.

**Solutions**:
1. Check for ongoing Hyperdrive incidents: https://www.cloudflarestatus.com/
2. Contact Cloudflare Support with your Hyperdrive configuration ID
3. Review all previous error codes to eliminate other issues

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Error 2016: Test query failed

**Error Message**: `Test query failed`

**Cause**: The user does not have permission to read/write to the database.

**Solutions**:
1. Grant the necessary permissions to the database user
2. PostgreSQL: `GRANT ALL PRIVILEGES ON DATABASE mydb TO myuser;`
3. MySQL: `GRANT ALL PRIVILEGES ON mydb.* TO 'myuser'@'%';`
4. Verify the user can run basic queries

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

## Runtime Errors

These errors occur when querying the database from your Worker.

### Failed to acquire a connection from the pool

**Error Message**: `Failed to acquire a connection from the pool`

**Cause**: Hyperdrive timed out waiting for a connection, or the connection pool is exhausted.

**Why This Happens**:
- Too many connections held open too long by the Worker
- Long-running queries or transactions
- Connections not cleaned up properly

**Solutions**:

**1. Use ctx.waitUntil() for cleanup**:
```typescript
// ✅ Correct
ctx.waitUntil(client.end());

// ❌ Wrong
await client.end(); // Blocks the response
```

**2. Set the connection pool max to 5**:
```typescript
const pool = new Pool({
  connectionString: env.HYPERDRIVE.connectionString,
  max: 5 // Workers limit: 6 concurrent external connections
});
```

**3. Avoid long-running transactions**:
```typescript
// ❌ Bad: Long transaction holds a connection
await client.query('BEGIN');
// ... many queries ...
await client.query('COMMIT');

// ✅ Good: Short transactions, or no transaction
const result = await client.query('SELECT ...');
```

**4. Check for connection leaks**:
```typescript
// Ensure every connection is closed
try {
  // queries
} finally {
  ctx.waitUntil(client.end()); // Always runs
}
```

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Server connection attempt failed: connection_refused

**Error Message**: `Server connection attempt failed: connection_refused`

**Cause**: Hyperdrive cannot create new connections to the origin database.

**Why This Happens**:
- Firewall or ACL rejecting requests
- Database connection limit reached
- Database stopped or restarting

**Solutions**:
1. Allow public internet connections in the database firewall
2. Check the database connection limit: `SHOW VARIABLES LIKE 'max_connections';` (MySQL)
3. Verify the database is running
4. Check cloud provider connection limits (e.g., AWS RDS max connections)

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Hyperdrive does not currently support MySQL COM_STMT_PREPARE

**Error Message**: `Hyperdrive does not currently support MySQL COM_STMT_PREPARE messages`

**Cause**: Hyperdrive doesn't support prepared statements for MySQL.

**Solution**: Remove prepared statements from MySQL queries or the driver configuration.

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### Internal error

**Error Message**: `Internal error`

**Cause**: Something is broken on Cloudflare's side.

**Solutions**:
1. Check Cloudflare status: https://www.cloudflarestatus.com/
2. Contact Cloudflare Support
3. Retry the query if appropriate for your use case

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

## Node.js Compatibility Errors

### Uncaught Error: No such module "node:\<module\>"

**Error Message**: `Uncaught Error: No such module "node:fs"` (or another node: module)

**Cause**: The Worker or a library is trying to access a Node.js module, but the nodejs_compat flag is not enabled.

**Solution**: Add `nodejs_compat` to compatibility_flags in wrangler.jsonc:

```jsonc
{
  "compatibility_flags": ["nodejs_compat"],
  "compatibility_date": "2024-09-23"
}
```

**CRITICAL**: This flag is **REQUIRED** for all database drivers (pg, postgres, mysql2).

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

## Driver-Specific Errors

### mysql2: Code generation from strings disallowed

**Error Message**: `Code generation from strings disallowed for this context`

**Cause**: The mysql2 driver is trying to use `eval()`, which is not supported in Workers.

**Solution**: Set `disableEval: true` in the mysql2 configuration:

```typescript
const connection = await createConnection({
  host: env.HYPERDRIVE.host,
  user: env.HYPERDRIVE.user,
  password: env.HYPERDRIVE.password,
  database: env.HYPERDRIVE.database,
  port: env.HYPERDRIVE.port,
  disableEval: true // REQUIRED for Workers
});
```

**Source**: [Hyperdrive Troubleshooting Docs](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

### postgres.js: Queries not cached

**Error**: Queries aren't being cached even though caching is enabled.

**Cause**: postgres.js is configured with `prepare: false`.

**Solution**: Enable prepared statements:

```typescript
const sql = postgres(env.HYPERDRIVE.connectionString, {
  prepare: true // CRITICAL for caching
});
```

**Hyperdrive can only cache prepared statements.**

**Source**: [Hyperdrive Configuration Guide](https://developers.cloudflare.com/hyperdrive/observability/troubleshooting/)

---

## TLS/SSL Errors

### Server return error and closed connection

**Error Message**: `Server return error and closed connection`

**Cause**: The database has client certificate verification enabled, but Hyperdrive is not configured with client certificates.

**Solution**: Configure Hyperdrive with client certificates (mTLS):

```bash
# 1. Upload the client certificate
npx wrangler cert upload mtls-certificate \
  --cert client-cert.pem \
  --key client-key.pem \
  --name my-client-cert

# 2. Create Hyperdrive with the client cert
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://..." \
  --mtls-certificate-id <CERT_PAIR_ID>
```

**Source**: [Hyperdrive TLS/SSL Guide](https://developers.cloudflare.com/hyperdrive/configuration/tls-ssl-certificates-for-hyperdrive/)

---

### TLS handshake failed: cert validation failed

**Error Message**: `TLS handshake failed: cert validation failed`

**Cause**: The server certificate is not signed by the expected CA certificate.

**Solutions**:
1. Verify the correct CA certificate was uploaded to Hyperdrive
2. Check the SSL mode is correct (verify-ca or verify-full)
3. Ensure the CA certificate matches the database's certificate authority
4. Verify you are connecting to the correct database hostname

**Source**: [Hyperdrive TLS/SSL Guide](https://developers.cloudflare.com/hyperdrive/configuration/tls-ssl-certificates-for-hyperdrive/)

---

## Performance Issues

### High query latency

**Symptoms**: Queries taking longer than expected.

**Causes & Solutions**:

**1. Missing indexes**:
```sql
-- Check slow queries
EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'user@example.com';

-- Add an index
CREATE INDEX idx_users_email ON users(email);
```

**2. Large result sets**:
```sql
-- ❌ Bad: Fetching all rows
SELECT * FROM products;

-- ✅ Good: Limit results
SELECT * FROM products LIMIT 100;
```

**3. Long-running transactions**:
```typescript
// ❌ Bad: Transaction holds a connection
await sql.begin(async sql => {
  // many queries
});

// ✅ Good: Keep transactions short
await sql`INSERT INTO users VALUES (...)`;
```

**4. Not using connection pooling**:
```typescript
// ❌ Bad: New connection per query
const client = new Client(...);

// ✅ Good: Use a pool for parallel queries
const pool = new Pool({ max: 5 });
```

---

### Low cache hit ratio

**Symptoms**: Most queries showing cache MISS instead of HIT.

**Causes & Solutions**:

**1. Write queries (not cached)**:
- Hyperdrive only caches SELECT queries
- INSERT/UPDATE/DELETE are never cached

**2. Volatile functions**:
```sql
-- ❌ Not cached: Uses a volatile function
SELECT LASTVAL(), * FROM articles;

-- ✅ Cached: No volatile functions
SELECT * FROM articles LIMIT 50;
```

**3. Prepared statements disabled (postgres.js)**:
```typescript
// ❌ Not cached
const sql = postgres(url, { prepare: false });

// ✅ Cached
const sql = postgres(url, { prepare: true });
```

---

## Getting Help

If you're still stuck:

1. **Check Cloudflare Status**: https://www.cloudflarestatus.com/
2. **Review Documentation**: https://developers.cloudflare.com/hyperdrive/
3. **Community Forum**: https://community.cloudflare.com/
4. **Support Ticket**: https://dash.cloudflare.com/?to=/:account/support

**Include in a support request**:
- Hyperdrive configuration ID
- Error message (exact text)
- Database provider (AWS RDS, Neon, etc.)
- Driver and version (pg@8.13.0, etc.)
- Minimal code reproducing the issue

488
references/wrangler-commands.md
Normal file
@@ -0,0 +1,488 @@

# Hyperdrive Wrangler Commands Reference

Complete CLI reference for managing Hyperdrive configurations with Wrangler.

**Minimum Wrangler Version**: 3.11.0+

---

## Create Hyperdrive Configuration

Create a new Hyperdrive configuration that connects to a database.

### PostgreSQL

```bash
npx wrangler hyperdrive create <name> \
  --connection-string="postgres://user:password@host:port/database"
```

**Example**:
```bash
npx wrangler hyperdrive create my-postgres-db \
  --connection-string="postgres://myuser:mypass@db.example.com:5432/mydb"
```

### MySQL

```bash
npx wrangler hyperdrive create <name> \
  --connection-string="mysql://user:password@host:port/database"
```

**Example**:
```bash
npx wrangler hyperdrive create my-mysql-db \
  --connection-string="mysql://myuser:mypass@db.example.com:3306/mydb"
```

### With SSL Mode (PostgreSQL)

```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://..." \
  --sslmode require  # or verify-ca, verify-full
```

### With Server CA Certificate

```bash
# First, upload the CA certificate
npx wrangler cert upload certificate-authority \
  --ca-cert root-ca.pem \
  --name my-ca-cert

# Then create Hyperdrive with the CA
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://..." \
  --ca-certificate-id <CA_CERT_ID> \
  --sslmode verify-full
```

### With Client Certificates (mTLS)

```bash
# First, upload the client certificate + key
npx wrangler cert upload mtls-certificate \
  --cert client-cert.pem \
  --key client-key.pem \
  --name my-client-cert

# Then create Hyperdrive with the client cert
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://..." \
  --mtls-certificate-id <CERT_PAIR_ID>
```

**Output**:
```
✅ Successfully created Hyperdrive configuration

[[hyperdrive]]
binding = "HYPERDRIVE"
id = "a76a99bc-7901-48c9-9c15-c4b11b559606"
```

---

## List Hyperdrive Configurations

List all Hyperdrive configurations in your account.

```bash
npx wrangler hyperdrive list
```

**Output**:
```
┌──────────────────────────────────────┬─────────────────┬────────────────┐
│ id                                   │ name            │ database       │
├──────────────────────────────────────┼─────────────────┼────────────────┤
│ a76a99bc-7901-48c9-9c15-c4b11b559606 │ my-postgres-db  │ PostgreSQL     │
│ b8c12345-6789-12ab-cdef-012345678901 │ my-mysql-db     │ MySQL          │
└──────────────────────────────────────┴─────────────────┴────────────────┘
```

---

## Get Hyperdrive Configuration Details

Get the details of a specific Hyperdrive configuration.

```bash
npx wrangler hyperdrive get <hyperdrive-id>
```

**Example**:
```bash
npx wrangler hyperdrive get a76a99bc-7901-48c9-9c15-c4b11b559606
```

**Output**:
```json
{
  "id": "a76a99bc-7901-48c9-9c15-c4b11b559606",
  "name": "my-postgres-db",
  "origin": {
    "host": "db.example.com",
    "port": 5432,
    "database": "mydb",
    "user": "myuser",
    "scheme": "postgres"
  },
  "caching": {
    "disabled": false
  }
}
```

**Note**: The password is never returned, for security reasons.

---

## Update Hyperdrive Configuration

Update the connection string or other settings of an existing configuration.

### Update Connection String

```bash
npx wrangler hyperdrive update <hyperdrive-id> \
  --connection-string="postgres://newuser:newpass@newhost:5432/newdb"
```

### Update Name

```bash
npx wrangler hyperdrive update <hyperdrive-id> \
  --name="my-renamed-db"
```

### Disable Caching

```bash
npx wrangler hyperdrive update <hyperdrive-id> \
  --caching-disabled
```

### Enable Caching

```bash
npx wrangler hyperdrive update <hyperdrive-id> \
  --caching-disabled=false
```

**Use Case - Credential Rotation**:
```bash
# Update with the new password
npx wrangler hyperdrive update a76a99bc-7901-48c9-9c15-c4b11b559606 \
  --connection-string="postgres://user:new_password@host:5432/db"
```

---

## Delete Hyperdrive Configuration

Delete a Hyperdrive configuration.

```bash
npx wrangler hyperdrive delete <hyperdrive-id>
```

**Example**:
```bash
npx wrangler hyperdrive delete a76a99bc-7901-48c9-9c15-c4b11b559606
```

**Confirmation Prompt**:
```
? Are you sure you want to delete Hyperdrive configuration a76a99bc-7901-48c9-9c15-c4b11b559606? › (y/N)
```

**Warning**: This action cannot be undone. Workers using this configuration will fail until updated.

---

## Certificate Management

### Upload CA Certificate (Server Certificate)

Upload a certificate authority (CA) certificate for the `verify-ca` or `verify-full` SSL modes.

```bash
npx wrangler cert upload certificate-authority \
  --ca-cert <path-to-ca-cert.pem> \
  --name <custom-name>
```

**Example**:
```bash
npx wrangler cert upload certificate-authority \
  --ca-cert /path/to/root-ca.pem \
  --name aws-rds-ca-2024
```

**Output**:
```
✅ Uploaded CA Certificate aws-rds-ca-2024
ID: ca-12345678-1234-1234-1234-123456789012
```

**Important**: You must use region-specific CA certificates, not global bundles containing multiple CAs.

---

### Upload Client Certificate (mTLS)

Upload a client certificate and private key pair for mutual TLS authentication.

```bash
npx wrangler cert upload mtls-certificate \
  --cert <path-to-client-cert.pem> \
  --key <path-to-private-key.pem> \
  --name <custom-name>
```

**Example**:
```bash
npx wrangler cert upload mtls-certificate \
  --cert /path/to/client-cert.pem \
  --key /path/to/client-key.pem \
  --name my-client-cert
```

**Output**:
```
✅ Uploaded client certificate my-client-cert
ID: mtls-87654321-4321-4321-4321-210987654321
```

---

## Connection String Formats

### PostgreSQL

**Basic**:
```
postgres://username:password@hostname:port/database
```

**With SSL Mode**:
```
postgres://username:password@hostname:port/database?sslmode=require
```

**With Special Characters in the Password**:
```bash
# Password: p@ssw$rd
# URL-encoded: p%40ssw%24rd
postgres://user:p%40ssw%24rd@host:5432/database
```

**URL Encoding Reference**:
- `@` → `%40`
- `#` → `%23`
- `$` → `%24`
- `%` → `%25`
- `&` → `%26`
- `+` → `%2B`
- `/` → `%2F`
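
Rather than encoding by hand, the same escaping can be produced with `encodeURIComponent`, which percent-encodes all of the characters listed above. A small sketch (credentials and host are illustrative):

```typescript
// Build a connection string with the credentials safely percent-encoded.
const user = "myuser";
const password = "p@ssw$rd"; // illustrative password with special characters
const host = "db.example.com";
const database = "mydb";

const connectionString =
  `postgres://${encodeURIComponent(user)}:${encodeURIComponent(password)}` +
  `@${host}:5432/${database}`;

console.log(connectionString);
// postgres://myuser:p%40ssw%24rd@db.example.com:5432/mydb
```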

---

### MySQL

**Basic**:
```
mysql://username:password@hostname:port/database
```

**Example**:
```
mysql://admin:mypass123@mysql-prod.example.com:3306/app_db
```

---

## Workflow Examples

### Complete Setup Workflow

```bash
# 1. Create the Hyperdrive configuration
npx wrangler hyperdrive create my-database \
  --connection-string="postgres://user:pass@host:5432/db"

# Output: id = a76a99bc-7901-48c9-9c15-c4b11b559606

# 2. Add the binding to wrangler.jsonc (merge into your existing config)
cat >> wrangler.jsonc <<EOF
{
  "hyperdrive": [
    {
      "binding": "HYPERDRIVE",
      "id": "a76a99bc-7901-48c9-9c15-c4b11b559606"
    }
  ]
}
EOF

# 3. Deploy the Worker
npx wrangler deploy
```
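
Once deployed, the Worker reads the binding from `env`. A minimal sketch with node-postgres, using the `HYPERDRIVE` binding name configured above:

```typescript
import { Client } from "pg";

export interface Env {
  HYPERDRIVE: Hyperdrive;
}

export default {
  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
    // Hyperdrive exposes a pooled connection string for the origin database
    const client = new Client({ connectionString: env.HYPERDRIVE.connectionString });
    await client.connect();
    try {
      const { rows } = await client.query("SELECT now() AS time");
      return Response.json(rows[0]);
    } finally {
      ctx.waitUntil(client.end()); // clean up after the response is sent
    }
  },
};
```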

---

### Credential Rotation Workflow

**Option A: Update the Existing Config**
```bash
# Update with the new credentials
npx wrangler hyperdrive update a76a99bc-7901-48c9-9c15-c4b11b559606 \
  --connection-string="postgres://user:NEW_PASSWORD@host:5432/db"

# Deploy (Workers automatically use the new credentials)
npx wrangler deploy
```

**Option B: Create a New Config (Zero Downtime)**
```bash
# 1. Create a new config with the new credentials
npx wrangler hyperdrive create my-database-v2 \
  --connection-string="postgres://user:NEW_PASSWORD@host:5432/db"

# Output: id = b8c12345-6789-12ab-cdef-012345678901

# 2. Update wrangler.jsonc with the new ID

# 3. Roll out with gradual deployments

# 4. Delete the old config when the migration is complete
npx wrangler hyperdrive delete a76a99bc-7901-48c9-9c15-c4b11b559606
```

---

### Multiple Database Workflow

```bash
# Create multiple Hyperdrive configs
npx wrangler hyperdrive create postgres-db \
  --connection-string="postgres://..."

npx wrangler hyperdrive create mysql-db \
  --connection-string="mysql://..."

# Configure the bindings in wrangler.jsonc (merge into your existing config)
cat >> wrangler.jsonc <<EOF
{
  "hyperdrive": [
    {
      "binding": "POSTGRES_DB",
      "id": "<postgres-id>"
    },
    {
      "binding": "MYSQL_DB",
      "id": "<mysql-id>"
    }
  ]
}
EOF
```
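
In the Worker, each binding then appears as its own `Hyperdrive` object on `env`. A sketch assuming the `POSTGRES_DB` and `MYSQL_DB` binding names above:

```typescript
import { Client } from "pg";
import { createConnection } from "mysql2/promise";

export interface Env {
  POSTGRES_DB: Hyperdrive;
  MYSQL_DB: Hyperdrive;
}

export default {
  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
    // Each binding carries its own pooled connection details
    const pg = new Client({ connectionString: env.POSTGRES_DB.connectionString });
    await pg.connect();

    const mysql = await createConnection({
      host: env.MYSQL_DB.host,
      user: env.MYSQL_DB.user,
      password: env.MYSQL_DB.password,
      database: env.MYSQL_DB.database,
      port: env.MYSQL_DB.port,
      disableEval: true, // required for Workers
    });

    try {
      const { rows } = await pg.query("SELECT 1 AS pg_ok");
      const [mysqlRows] = await mysql.query("SELECT 1 AS mysql_ok");
      return Response.json({ pg: rows, mysql: mysqlRows });
    } finally {
      ctx.waitUntil(Promise.all([pg.end(), mysql.end()]));
    }
  },
};
```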

---

## Troubleshooting Commands

### Test Connection

Hyperdrive tests the connection when creating/updating a configuration. If creation succeeds, the connection works.

```bash
# This command fails fast if the connection doesn't work
npx wrangler hyperdrive create test-connection \
  --connection-string="postgres://user:pass@host:5432/db"
```

### Verify Configuration

```bash
# Check the config exists and view its details
npx wrangler hyperdrive get <hyperdrive-id>

# List all configs
npx wrangler hyperdrive list
```

### Check Wrangler Version

```bash
# Ensure you have wrangler 3.11.0+
npx wrangler --version
```

---

## Common Errors

### "Hyperdrive will attempt to connect to your database..."

**Full message**:
```
Hyperdrive will attempt to connect to your database with the provided
credentials to verify they are correct before creating a configuration.
```

This is an **info message**, not an error. Hyperdrive is testing the connection.

**If the connection fails**, you'll see a specific error (2008-2016). See `troubleshooting.md` for solutions.

---

### "wrangler: command not found"

**Cause**: Wrangler is not installed.

**Solution**:
```bash
npm install -g wrangler
# or use npx
npx wrangler@latest hyperdrive list
```

---

### "Missing required argument: --connection-string"

**Cause**: No connection string was provided when creating the Hyperdrive configuration.

**Solution**:
```bash
npx wrangler hyperdrive create my-db \
  --connection-string="postgres://user:pass@host:5432/db"
```

---

## Best Practices

1. **Use npx** for consistent wrangler versions: `npx wrangler hyperdrive ...`
2. **Store connection strings securely** - never commit them to git
3. **Use environment variables** for sensitive data:
   ```bash
   export DB_PASSWORD="secret"
   npx wrangler hyperdrive create my-db \
     --connection-string="postgres://user:$DB_PASSWORD@host:5432/db"
   ```
4. **Test locally first** with `localConnectionString` before deploying
5. **Use gradual deployments** when rotating credentials
6. **Document Hyperdrive IDs** in your project README
7. **Separate configs** for staging and production

---

## References

- [Official Wrangler Commands Docs](https://developers.cloudflare.com/hyperdrive/reference/wrangler-commands/)
- [Wrangler Configuration](https://developers.cloudflare.com/workers/wrangler/configuration/)
- [Gradual Deployments](https://developers.cloudflare.com/workers/configuration/versions-and-deployments/gradual-deployments/)

125
scripts/check-versions.sh
Executable file
@@ -0,0 +1,125 @@

#!/bin/bash
#
# Hyperdrive Package Version Checker
#
# Checks installed package versions against minimum requirements.
# Run this script to verify your dependencies are up to date.
#
# Usage:
#   ./scripts/check-versions.sh
#

set -e

echo "🔍 Checking Hyperdrive Package Versions"
echo ""

# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Check if package.json exists
if [ ! -f "package.json" ]; then
  echo "❌ package.json not found in current directory"
  echo "   Run this script from your project root"
  exit 1
fi

# Function to check a package version
check_package() {
  local package=$1
  local min_version=$2
  local installed_version

  if npm list "$package" &> /dev/null; then
    installed_version=$(npm list "$package" --depth=0 2>/dev/null | grep "$package@" | sed 's/.*@//' | cut -d' ' -f1)

    echo -n "  $package: "

    if [ -n "$installed_version" ]; then
      # Semver-aware comparison: the minimum version must sort first (or be equal)
      if [ "$(printf '%s\n' "$min_version" "$installed_version" | sort -V | head -n1)" = "$min_version" ]; then
        echo -e "${GREEN}✓${NC} $installed_version (>= $min_version required)"
      else
        echo -e "${RED}✗${NC} $installed_version (>= $min_version required)"
        echo -e "  ${YELLOW}Run: npm install $package@latest${NC}"
      fi
    else
      echo -e "${RED}✗${NC} Not found"
    fi
  else
    echo -e "  $package: ${YELLOW}Not installed${NC}"
  fi
}

# Check Wrangler
echo "Wrangler CLI:"
if command -v wrangler &> /dev/null; then
  wrangler_version=$(wrangler --version 2>&1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
  if [ -n "$wrangler_version" ]; then
    echo -e "  wrangler: ${GREEN}✓${NC} $wrangler_version (>= 3.11.0 required)"
  else
    echo -e "  wrangler: ${YELLOW}Unknown version${NC}"
  fi
else
  echo -e "  wrangler: ${RED}✗${NC} Not installed"
  echo -e "  ${YELLOW}Run: npm install -g wrangler${NC}"
fi

echo ""

# Check PostgreSQL drivers (if applicable)
echo "PostgreSQL Drivers (optional):"
check_package "pg" "8.13.0"
check_package "postgres" "3.4.5"

echo ""

# Check MySQL drivers (if applicable)
echo "MySQL Drivers (optional):"
check_package "mysql2" "3.13.0"

echo ""

# Check ORMs (if applicable)
echo "ORMs (optional):"
check_package "drizzle-orm" "0.0.0"     # Any version (always latest)
check_package "prisma" "0.0.0"          # Any version (always latest)
check_package "@prisma/client" "0.0.0"  # Any version (always latest)

echo ""

# Check required Workers packages
echo "Required Packages:"
check_package "@cloudflare/workers-types" "0.0.0" # Any version

echo ""

# Check if the nodejs_compat flag is set
echo "Configuration:"
if [ -f "wrangler.jsonc" ]; then
  if grep -q "nodejs_compat" wrangler.jsonc; then
    echo -e "  nodejs_compat flag: ${GREEN}✓${NC} Enabled"
  else
    echo -e "  nodejs_compat flag: ${RED}✗${NC} Not found"
    echo -e "  ${YELLOW}Add to wrangler.jsonc: \"compatibility_flags\": [\"nodejs_compat\"]${NC}"
  fi
else
  echo -e "  wrangler.jsonc: ${YELLOW}Not found${NC}"
fi

echo ""

# Summary
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Summary:"
echo ""
echo "✓ = Installed and meets minimum version"
echo "✗ = Needs update or not installed"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "For latest versions, run:"
echo "  npm update"
echo ""
97
templates/drizzle-mysql.ts
Normal file
@@ -0,0 +1,97 @@
/**
|
||||||
|
* Drizzle ORM with MySQL
|
||||||
|
*
|
||||||
|
* Type-safe ORM for MySQL via Hyperdrive.
|
||||||
|
*
|
||||||
|
* Install: npm install drizzle-orm mysql2
|
||||||
|
* Install (dev): npm install -D drizzle-kit
|
||||||
|
*
|
||||||
|
* Minimum version: mysql2@3.13.0
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { drizzle } from "drizzle-orm/mysql2";
|
||||||
|
import { createConnection } from "mysql2";
|
||||||
|
import { mysqlTable, int, varchar, timestamp } from "drizzle-orm/mysql-core";
|
||||||
|
import { eq } from "drizzle-orm";
|
||||||
|
|
||||||
|
// Define schema
|
||||||
|
export const users = mysqlTable("users", {
|
||||||
|
id: int("id").primaryKey().autoincrement(),
|
||||||
|
name: varchar("name", { length: 255 }).notNull(),
|
||||||
|
email: varchar("email", { length: 255 }).notNull(),
|
||||||
|
createdAt: timestamp("created_at").defaultNow(),
|
||||||
|
});
|
||||||
|
|
||||||
|
type Bindings = {
|
||||||
|
HYPERDRIVE: Hyperdrive;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default {
|
||||||
|
async fetch(
|
||||||
|
request: Request,
|
||||||
|
env: Bindings,
|
||||||
|
ctx: ExecutionContext
|
||||||
|
): Promise<Response> {
|
||||||
|
// Create mysql2 connection
|
||||||
|
const connection = createConnection({
|
||||||
|
host: env.HYPERDRIVE.host,
|
||||||
|
user: env.HYPERDRIVE.user,
|
||||||
|
password: env.HYPERDRIVE.password,
|
||||||
|
database: env.HYPERDRIVE.database,
|
||||||
|
port: env.HYPERDRIVE.port,
|
||||||
|
disableEval: true // REQUIRED for Workers
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create Drizzle client
|
||||||
|
const db = drizzle(connection);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Example: Select all users
|
||||||
|
const allUsers = await db.select().from(users);
|
||||||
|
|
||||||
|
// Example: Select with where clause
|
||||||
|
const user = await db
|
||||||
|
.select()
|
||||||
|
.from(users)
|
||||||
|
.where(eq(users.id, 1));
|
||||||
|
|
||||||
|
// Example: Insert
|
||||||
|
await db.insert(users).values({
|
||||||
|
name: "John Doe",
|
||||||
|
email: `john.${Date.now()}@example.com`
|
||||||
|
});
|
||||||
|
|
||||||
|
// Example: Update
|
||||||
|
await db
|
||||||
|
.update(users)
|
||||||
|
.set({ name: "Jane Doe" })
|
||||||
|
.where(eq(users.id, 1));
|
||||||
|
|
||||||
|
return Response.json({
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
allUsers,
|
||||||
|
user
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
} catch (error: any) {
|
||||||
|
console.error("Database error:", error);
|
||||||
|
|
||||||
|
return Response.json({
|
||||||
|
success: false,
|
||||||
|
error: error.message
|
||||||
|
}, {
|
||||||
|
status: 500
|
||||||
|
});
|
||||||
|
|
||||||
|
} finally {
|
||||||
|
// CRITICAL: Clean up connection
|
||||||
|
ctx.waitUntil(
|
||||||
|
new Promise<void>((resolve) => {
|
||||||
|
connection.end(() => resolve());
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
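Both Drizzle templates assume `drizzle-kit` is installed for migrations but ship no config for it. A minimal sketch of what that config might look like follows; the `schema` path, `out` folder, and `DATABASE_URL` variable are assumptions, not part of this repo. Note that drizzle-kit runs on your machine, not inside the Worker, so it connects with a direct database URL rather than through the Hyperdrive binding.

```typescript
// drizzle.config.ts — hypothetical sketch, adjust paths to your project
import { defineConfig } from "drizzle-kit";

export default defineConfig({
  dialect: "mysql",            // use "postgresql" with the Postgres template
  schema: "./src/schema.ts",   // assumed location of your table definitions
  out: "./drizzle",            // assumed output folder for migration files
  dbCredentials: {
    url: process.env.DATABASE_URL!, // direct DB URL, not the Hyperdrive string
  },
});
```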
96
templates/drizzle-postgres.ts
Normal file
@@ -0,0 +1,96 @@
/**
 * Drizzle ORM with PostgreSQL
 *
 * Type-safe ORM for PostgreSQL via Hyperdrive.
 *
 * Install: npm install drizzle-orm postgres
 * Install (dev): npm install -D drizzle-kit
 *
 * Minimum version: postgres@3.4.5
 */

import { drizzle } from "drizzle-orm/postgres-js";
import postgres from "postgres";
import { pgTable, serial, varchar, timestamp } from "drizzle-orm/pg-core";
import { eq, gte } from "drizzle-orm";

// Define schema
export const users = pgTable("users", {
  id: serial("id").primaryKey(),
  name: varchar("name", { length: 255 }).notNull(),
  email: varchar("email", { length: 255 }).notNull().unique(),
  createdAt: timestamp("created_at").defaultNow(),
});

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(
    request: Request,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<Response> {
    // Create postgres.js connection
    const sql = postgres(env.HYPERDRIVE.connectionString, {
      max: 5,
      prepare: true,
      fetch_types: false
    });

    // Create Drizzle client
    const db = drizzle(sql);

    try {
      // Example: Select all users
      const allUsers = await db.select().from(users);

      // Example: Select with where clause
      // (gte, not eq: users created on or after 2024-01-01)
      const recentUsers = await db
        .select()
        .from(users)
        .where(gte(users.createdAt, new Date('2024-01-01')));

      // Example: Insert
      const [newUser] = await db
        .insert(users)
        .values({
          name: "John Doe",
          email: `john.${Date.now()}@example.com`
        })
        .returning();

      // Example: Update
      await db
        .update(users)
        .set({ name: "Jane Doe" })
        .where(eq(users.id, newUser.id));

      // Example: Delete
      // await db.delete(users).where(eq(users.id, 123));

      return Response.json({
        success: true,
        data: {
          allUsers,
          recentUsers,
          newUser
        }
      });

    } catch (error: any) {
      console.error("Database error:", error);

      return Response.json({
        success: false,
        error: error.message
      }, {
        status: 500
      });

    } finally {
      ctx.waitUntil(sql.end());
    }
  }
};
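With a config like the sketch above in place, a plausible migration workflow (using current drizzle-kit command names; verify against your installed version) is:

```bash
npx drizzle-kit generate   # emit SQL migration files from the schema
npx drizzle-kit migrate    # apply pending migrations to the database
npx drizzle-kit push       # or push schema changes directly (dev only)
```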
94
templates/local-dev-setup.sh
Executable file
@@ -0,0 +1,94 @@
#!/bin/bash
#
# Hyperdrive Local Development Setup Helper
#
# This script helps set up environment variables for local development
# with Hyperdrive. Use this to avoid committing credentials to wrangler.jsonc.
#
# Usage:
#   ./local-dev-setup.sh
#   source .env.local   # Load variables into current shell
#   npm run dev         # Start wrangler dev
#

set -e

echo "🚀 Hyperdrive Local Development Setup"
echo ""

# Get binding name from wrangler.jsonc
# (-A 5, not -A 2: in the shipped template, comments sit between
#  "hyperdrive" and "binding", so a 2-line window misses the match)
BINDING_NAME=$(grep -A 5 '"hyperdrive"' wrangler.jsonc | grep '"binding"' | cut -d'"' -f4 | head -1)

if [ -z "$BINDING_NAME" ]; then
  echo "❌ Could not find Hyperdrive binding in wrangler.jsonc"
  echo "   Please ensure you have a hyperdrive configuration with a binding."
  exit 1
fi

echo "Found Hyperdrive binding: $BINDING_NAME"
echo ""

# Ask for database details
echo "Enter your local database connection details:"
echo ""

read -p "Database type (postgres/mysql): " DB_TYPE
read -p "Host (default: localhost): " DB_HOST
DB_HOST=${DB_HOST:-localhost}

if [ "$DB_TYPE" = "postgres" ]; then
  read -p "Port (default: 5432): " DB_PORT
  DB_PORT=${DB_PORT:-5432}
else
  read -p "Port (default: 3306): " DB_PORT
  DB_PORT=${DB_PORT:-3306}
fi

read -p "Database name: " DB_NAME
read -p "Username: " DB_USER
read -sp "Password: " DB_PASSWORD
echo ""

# Build connection string
if [ "$DB_TYPE" = "postgres" ]; then
  CONNECTION_STRING="postgres://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}"
else
  CONNECTION_STRING="mysql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}"
fi

# Create .env.local file
ENV_VAR_NAME="CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_${BINDING_NAME}"

cat > .env.local <<EOF
# Hyperdrive Local Development Environment Variables
# Generated on: $(date)
#
# Load these variables before running wrangler dev:
#   source .env.local
#   npm run dev
#
# DO NOT commit this file to version control!

export ${ENV_VAR_NAME}="${CONNECTION_STRING}"
EOF

# Add to .gitignore
if ! grep -q ".env.local" .gitignore 2>/dev/null; then
  echo ".env.local" >> .gitignore
  echo "✅ Added .env.local to .gitignore"
fi

echo ""
echo "✅ Created .env.local with environment variable:"
echo "   ${ENV_VAR_NAME}"
echo ""
echo "Next steps:"
echo "1. Load the environment variable:"
echo "   source .env.local"
echo ""
echo "2. Start local development server:"
echo "   npx wrangler dev"
echo ""
echo "3. Your Worker will connect to:"
echo "   ${DB_TYPE}://${DB_HOST}:${DB_PORT}/${DB_NAME}"
echo ""
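A typical session with this script might look like the following; the paths and the binding name are illustrative:

```bash
chmod +x templates/local-dev-setup.sh   # once, if the file isn't executable
./templates/local-dev-setup.sh          # answer the interactive prompts
source .env.local                       # exports CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_<BINDING>
npx wrangler dev                        # wrangler dev picks up the local connection string
```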
80
templates/mysql2-basic.ts
Normal file
@@ -0,0 +1,80 @@
/**
 * MySQL with mysql2
 *
 * MySQL driver for Cloudflare Workers via Hyperdrive.
 *
 * CRITICAL: Must set disableEval: true (eval() not supported in Workers)
 * Minimum version: mysql2@3.13.0
 */

import { createConnection } from "mysql2/promise";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(
    request: Request,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<Response> {
    // Create MySQL connection via Hyperdrive
    const connection = await createConnection({
      host: env.HYPERDRIVE.host,
      user: env.HYPERDRIVE.user,
      password: env.HYPERDRIVE.password,
      database: env.HYPERDRIVE.database,
      port: env.HYPERDRIVE.port,

      // CRITICAL: Required for Workers (eval() not supported)
      disableEval: true
    });

    try {
      // Example: Simple query
      // (aliased as server_time: CURRENT_TIME is a reserved word in MySQL,
      //  so an unquoted current_time alias would be a syntax error)
      const [rows] = await connection.query('SELECT NOW() AS server_time');
      console.log("Query executed successfully");

      // Example: Parameterized query (prevents SQL injection)
      const [users] = await connection.query(
        'SELECT id, name, email FROM users WHERE created_at > ? LIMIT ?',
        ['2024-01-01', 10]
      );

      // Example: Aggregate query
      const [results] = await connection.query(
        'SELECT COUNT(*) as total FROM users'
      );

      return Response.json({
        success: true,
        data: {
          currentTime: (rows as any[])[0].server_time,
          users: users,
          totalUsers: (results as any[])[0].total
        },
        // Hyperdrive metadata
        hyperdriveInfo: {
          host: env.HYPERDRIVE.host,
          database: env.HYPERDRIVE.database,
          port: env.HYPERDRIVE.port
        }
      });

    } catch (error: any) {
      console.error("Database error:", error.message);

      return Response.json({
        success: false,
        error: error.message
      }, {
        status: 500
      });

    } finally {
      // CRITICAL: Clean up connection AFTER response is sent
      ctx.waitUntil(connection.end());
    }
  }
};
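The repo ships a pooled variant only for node-postgres. For parity, a pooled mysql2 sketch under the same assumptions (a `HYPERDRIVE` binding, tables `users` and `posts`) might look like this; `createPool` accepts the same connection options, including `disableEval`:

```typescript
import { createPool } from "mysql2/promise";

type Bindings = { HYPERDRIVE: Hyperdrive };

export default {
  async fetch(request: Request, env: Bindings, ctx: ExecutionContext): Promise<Response> {
    // Request-scoped pool; create per request, close in finally
    const pool = createPool({
      host: env.HYPERDRIVE.host,
      user: env.HYPERDRIVE.user,
      password: env.HYPERDRIVE.password,
      database: env.HYPERDRIVE.database,
      port: env.HYPERDRIVE.port,
      connectionLimit: 5, // stay under the Workers limit of 6 external connections
      disableEval: true   // still REQUIRED for Workers
    });

    try {
      // Run independent queries in parallel over pooled connections
      const [[userRows], [postRows]] = await Promise.all([
        pool.query('SELECT COUNT(*) AS total FROM users'),
        pool.query('SELECT COUNT(*) AS total FROM posts')
      ]);

      return Response.json({
        success: true,
        totals: {
          users: (userRows as any[])[0].total,
          posts: (postRows as any[])[0].total
        }
      });
    } catch (error: any) {
      return Response.json({ success: false, error: error.message }, { status: 500 });
    } finally {
      // CRITICAL: close the pool after the response is sent
      ctx.waitUntil(pool.end());
    }
  }
};
```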
68
templates/postgres-basic.ts
Normal file
@@ -0,0 +1,68 @@
/**
 * PostgreSQL with node-postgres (pg) - Basic Usage
 *
 * Simple pattern using pg.Client for straightforward queries.
 * Good for: Single query per request, simple operations
 */

import { Client } from "pg";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(
    request: Request,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<Response> {
    // Create a new client for this request
    const client = new Client({
      connectionString: env.HYPERDRIVE.connectionString
    });

    try {
      // Connect to the database
      await client.connect();
      console.log("Connected to PostgreSQL via Hyperdrive");

      // Example: Simple query
      const result = await client.query('SELECT NOW() as current_time');
      console.log("Query executed successfully");

      // Example: Parameterized query (prevents SQL injection)
      const users = await client.query(
        'SELECT id, name, email FROM users WHERE created_at > $1 LIMIT $2',
        ['2024-01-01', 10]
      );

      return Response.json({
        success: true,
        currentTime: result.rows[0].current_time,
        users: users.rows,
        // Hyperdrive metadata
        hyperdriveInfo: {
          host: env.HYPERDRIVE.host,
          database: env.HYPERDRIVE.database,
          port: env.HYPERDRIVE.port
        }
      });

    } catch (error: any) {
      console.error("Database error:", error.message);

      return Response.json({
        success: false,
        error: error.message
      }, {
        status: 500
      });

    } finally {
      // CRITICAL: Clean up connection AFTER response is sent
      // ctx.waitUntil() runs in background (non-blocking)
      ctx.waitUntil(client.end());
    }
  }
};
111
templates/postgres-js.ts
Normal file
@@ -0,0 +1,111 @@
/**
 * PostgreSQL with postgres.js
 *
 * Modern PostgreSQL driver with better performance and tagged template literals.
 * Good for: Fast queries, streaming, modern API
 *
 * Minimum version: postgres@3.4.5
 */

import postgres from "postgres";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(
    request: Request,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<Response> {
    // Create postgres.js connection
    const sql = postgres(env.HYPERDRIVE.connectionString, {
      // CRITICAL: max 5 connections (Workers limit: 6)
      max: 5,

      // CRITICAL for caching: Enable prepared statements
      prepare: true,

      // Disable fetch_types if not using array types (reduces latency)
      fetch_types: false,

      // Connection timeout
      connect_timeout: 10,

      // Idle connection timeout
      idle_timeout: 30
    });

    try {
      // Example: Simple query with tagged template literal
      const currentTime = await sql`SELECT NOW() as current_time`;

      // Example: Parameterized query (auto-escaped)
      const users = await sql`
        SELECT id, name, email
        FROM users
        WHERE created_at > ${new Date('2024-01-01')}
        LIMIT 10
      `;

      // Example: Dynamic columns (use sql() for identifiers)
      const orderBy = 'created_at';
      const sortedUsers = await sql`
        SELECT * FROM users
        ORDER BY ${sql(orderBy)} DESC
        LIMIT 5
      `;

      // Example: Bulk insert
      const newUsers = [
        { name: 'Alice', email: 'alice@example.com' },
        { name: 'Bob', email: 'bob@example.com' }
      ];

      await sql`
        INSERT INTO users ${sql(newUsers, 'name', 'email')}
      `;

      // Example: Transaction
      const result = await sql.begin(async sql => {
        const [user] = await sql`
          INSERT INTO users (name, email)
          VALUES ('Charlie', 'charlie@example.com')
          RETURNING *
        `;

        await sql`
          INSERT INTO audit_log (action, user_id)
          VALUES ('User created', ${user.id})
        `;

        return user;
      });

      return Response.json({
        success: true,
        data: {
          currentTime: currentTime[0].current_time,
          users: users,
          sortedUsers: sortedUsers,
          newUser: result
        }
      });

    } catch (error: any) {
      console.error("Database error:", error);

      return Response.json({
        success: false,
        error: error.message
      }, {
        status: 500
      });

    } finally {
      // CRITICAL: Close all connections
      ctx.waitUntil(sql.end({ timeout: 5 }));
    }
  }
};
80
templates/postgres-pool.ts
Normal file
@@ -0,0 +1,80 @@
/**
 * PostgreSQL with node-postgres (pg) - Connection Pool
 *
 * Advanced pattern using pg.Pool for parallel queries.
 * Good for: Multiple queries per request, better performance
 */

import { Pool } from "pg";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

export default {
  async fetch(
    request: Request,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<Response> {
    // Create a connection pool
    // CRITICAL: max: 5 (Workers limit is 6 concurrent external connections)
    const pool = new Pool({
      connectionString: env.HYPERDRIVE.connectionString,
      max: 5, // Max connections in pool (stay within Workers' limit)
      idleTimeoutMillis: 30000, // Close idle connections after 30 seconds
      connectionTimeoutMillis: 10000 // Timeout after 10 seconds if can't acquire connection
    });

    try {
      // Example: Run multiple queries in parallel
      const [usersResult, postsResult, statsResult] = await Promise.all([
        pool.query('SELECT id, name, email FROM users ORDER BY created_at DESC LIMIT 10'),
        pool.query('SELECT id, title, author_id FROM posts ORDER BY published_at DESC LIMIT 10'),
        pool.query(`
          SELECT
            (SELECT COUNT(*) FROM users) as total_users,
            (SELECT COUNT(*) FROM posts) as total_posts,
            (SELECT COUNT(*) FROM comments) as total_comments
        `)
      ]);

      // Example: Transaction
      const client = await pool.connect();
      try {
        await client.query('BEGIN');
        await client.query('INSERT INTO users (name, email) VALUES ($1, $2)', ['John Doe', 'john@example.com']);
        await client.query('INSERT INTO audit_log (action) VALUES ($1)', ['User created']);
        await client.query('COMMIT');
      } catch (error) {
        await client.query('ROLLBACK');
        throw error;
      } finally {
        client.release(); // Return connection to pool
      }

      return Response.json({
        success: true,
        data: {
          users: usersResult.rows,
          posts: postsResult.rows,
          stats: statsResult.rows[0]
        }
      });

    } catch (error: any) {
      console.error("Database error:", error.message);

      return Response.json({
        success: false,
        error: error.message
      }, {
        status: 500
      });

    } finally {
      // CRITICAL: Clean up all pool connections
      ctx.waitUntil(pool.end());
    }
  }
};
151
templates/prisma-postgres.ts
Normal file
@@ -0,0 +1,151 @@
/**
 * Prisma ORM with PostgreSQL
 *
 * Type-safe ORM for PostgreSQL via Hyperdrive using driver adapters.
 *
 * Install: npm install prisma @prisma/client pg @prisma/adapter-pg
 *
 * Setup:
 * 1. npx prisma init
 * 2. Define schema in prisma/schema.prisma
 * 3. npx prisma generate --no-engine
 * 4. npx prisma migrate dev (for migrations)
 *
 * CRITICAL: Must use driver adapters (@prisma/adapter-pg) for Hyperdrive
 */

import { PrismaPg } from "@prisma/adapter-pg";
import { PrismaClient } from "@prisma/client";
import { Pool } from "pg";

type Bindings = {
  HYPERDRIVE: Hyperdrive;
};

/**
 * Example Prisma schema (prisma/schema.prisma):
 *
 * generator client {
 *   provider        = "prisma-client-js"
 *   previewFeatures = ["driverAdapters"]
 * }
 *
 * datasource db {
 *   provider = "postgresql"
 *   url      = env("DATABASE_URL")
 * }
 *
 * model User {
 *   id        Int      @id @default(autoincrement())
 *   name      String
 *   email     String   @unique
 *   createdAt DateTime @default(now())
 *   posts     Post[]
 * }
 *
 * model Post {
 *   id        Int     @id @default(autoincrement())
 *   title     String
 *   content   String?
 *   published Boolean @default(false)
 *   authorId  Int
 *   author    User    @relation(fields: [authorId], references: [id])
 * }
 */

export default {
  async fetch(
    request: Request,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<Response> {
    // Create pg.Pool for driver adapter
    const pool = new Pool({
      connectionString: env.HYPERDRIVE.connectionString,
      max: 5
    });

    // Create Prisma driver adapter
    const adapter = new PrismaPg(pool);

    // Create Prisma client with adapter
    const prisma = new PrismaClient({ adapter });

    try {
      // Example: Create user
      const newUser = await prisma.user.create({
        data: {
          name: "John Doe",
          email: `john.${Date.now()}@example.com`
        }
      });

      // Example: Find all users
      const allUsers = await prisma.user.findMany({
        include: {
          posts: true // Include related posts
        }
      });

      // Example: Find user by email
      const user = await prisma.user.findUnique({
        where: {
          email: "john@example.com"
        }
      });

      // Example: Update user
      await prisma.user.update({
        where: { id: newUser.id },
        data: { name: "Jane Doe" }
      });

      // Example: Create post with relation
      await prisma.post.create({
        data: {
          title: "My First Post",
          content: "Hello World!",
          published: true,
          authorId: newUser.id
        }
      });

      // Example: Complex query with filters
      const recentUsers = await prisma.user.findMany({
        where: {
          createdAt: {
            gte: new Date('2024-01-01')
          }
        },
        orderBy: {
          createdAt: 'desc'
        },
        take: 10
      });

      return Response.json({
        success: true,
        data: {
          newUser,
          allUsers,
          user,
          recentUsers
        }
      });

    } catch (error: any) {
      console.error("Database error:", error);

      return Response.json({
        success: false,
        error: error.message
      }, {
        status: 500
      });

    } finally {
      // Clean up connections
      ctx.waitUntil(pool.end());
    }
  }
};
38
templates/wrangler-hyperdrive-config.jsonc
Normal file
@@ -0,0 +1,38 @@
{
  "name": "my-worker-with-hyperdrive",
  "main": "src/index.ts",
  "compatibility_date": "2024-09-23",
  "compatibility_flags": [
    "nodejs_compat" // REQUIRED for database drivers (pg, postgres, mysql2)
  ],
  "hyperdrive": [
    {
      // Binding name - access as env.HYPERDRIVE in your Worker
      "binding": "HYPERDRIVE",

      // Hyperdrive configuration ID from: wrangler hyperdrive create
      "id": "<your-hyperdrive-id-here>",

      // (Optional) Local development connection string
      // Alternative: Use CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE env var
      "localConnectionString": "postgres://user:password@localhost:5432/local_db"
    }
  ],

  // Example: Multiple Hyperdrive configurations
  // "hyperdrive": [
  //   {
  //     "binding": "POSTGRES_DB",
  //     "id": "postgres-hyperdrive-id"
  //   },
  //   {
  //     "binding": "MYSQL_DB",
  //     "id": "mysql-hyperdrive-id"
  //   }
  // ],

  // Optional: Enable observability
  "observability": {
    "enabled": true
  }
}
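The env-var alternative mentioned in the comments above is exactly what `local-dev-setup.sh` generates; to set it by hand instead, the suffix after the last underscore must match your binding name (`HYPERDRIVE` assumed here):

```bash
export CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE="postgres://user:password@localhost:5432/local_db"
npx wrangler dev   # connects to the local database instead of the remote Hyperdrive config
```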