Initial commit

Zhongwei Li
2025-11-30 08:24:29 +08:00
commit 571bc8c17c
12 changed files with 2689 additions and 0 deletions

@@ -0,0 +1,133 @@
{
"_comment": "R2 CORS Policy Examples - Apply via Cloudflare Dashboard",
"_instructions": [
"1. Go to Cloudflare Dashboard → R2",
"2. Select your bucket",
"3. Go to Settings tab",
"4. Under CORS Policy → Add CORS policy",
"5. Paste one of the configurations below",
"6. Save"
],
"public_assets_all_origins": {
"CORSRules": [
{
"AllowedOrigins": ["*"],
"AllowedMethods": ["GET", "HEAD"],
"AllowedHeaders": ["Range"],
"MaxAgeSeconds": 3600
}
]
},
"public_assets_specific_origin": {
"CORSRules": [
{
"AllowedOrigins": ["https://example.com", "https://www.example.com"],
"AllowedMethods": ["GET", "HEAD"],
"AllowedHeaders": ["Range"],
"MaxAgeSeconds": 3600
}
]
},
"file_uploads": {
"CORSRules": [
{
"AllowedOrigins": ["https://app.example.com"],
"AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
"AllowedHeaders": [
"Content-Type",
"Content-MD5",
"Content-Disposition",
"x-amz-meta-*"
],
"ExposeHeaders": ["ETag", "x-amz-version-id"],
"MaxAgeSeconds": 3600
}
]
},
"presigned_urls": {
"_comment": "For presigned URL uploads from browser",
"CORSRules": [
{
"AllowedOrigins": ["https://app.example.com"],
"AllowedMethods": ["PUT", "POST"],
"AllowedHeaders": [
"Content-Type",
"Content-MD5",
"x-amz-*"
],
"ExposeHeaders": ["ETag"],
"MaxAgeSeconds": 3600
}
]
},
"multiple_domains": {
"CORSRules": [
{
"AllowedOrigins": [
"https://app.example.com",
"https://admin.example.com",
"https://staging.example.com"
],
"AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
"AllowedHeaders": ["*"],
"ExposeHeaders": ["ETag", "Content-Length"],
"MaxAgeSeconds": 86400
}
]
},
"development_localhost": {
"_comment": "For local development only - DO NOT USE IN PRODUCTION",
"CORSRules": [
{
"AllowedOrigins": ["http://localhost:3000", "http://localhost:5173"],
"AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
"AllowedHeaders": ["*"],
"ExposeHeaders": ["ETag"],
"MaxAgeSeconds": 3600
}
]
},
"strict_security": {
"_comment": "Minimal CORS for maximum security",
"CORSRules": [
{
"AllowedOrigins": ["https://app.example.com"],
"AllowedMethods": ["GET"],
"AllowedHeaders": ["Range"],
"MaxAgeSeconds": 3600
}
]
},
"cdn_and_api": {
"_comment": "Separate rules for CDN assets and API uploads",
"CORSRules": [
{
"_comment": "Rule for CDN/static assets",
"AllowedOrigins": ["*"],
"AllowedMethods": ["GET", "HEAD"],
"AllowedHeaders": ["Range"],
"MaxAgeSeconds": 86400
},
{
"_comment": "Rule for authenticated API uploads",
"AllowedOrigins": ["https://app.example.com"],
"AllowedMethods": ["PUT", "POST", "DELETE"],
"AllowedHeaders": [
"Content-Type",
"Authorization",
"x-amz-meta-*"
],
"ExposeHeaders": ["ETag"],
"MaxAgeSeconds": 3600
}
]
}
}
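If you prefer the CLI to the dashboard steps above, a policy can also be applied through R2's S3-compatible API. This is a sketch, assuming the AWS CLI is configured with an R2 API token (access key, secret key, region "auto") and that one of the configurations above has been saved on its own (just the object containing "CORSRules") as cors.json:

# Apply the policy (YOUR_ACCOUNT_ID and my-bucket are placeholders)
aws s3api put-bucket-cors \
  --bucket my-bucket \
  --cors-configuration file://cors.json \
  --endpoint-url https://YOUR_ACCOUNT_ID.r2.cloudflarestorage.com

# Verify the applied policy
aws s3api get-bucket-cors \
  --bucket my-bucket \
  --endpoint-url https://YOUR_ACCOUNT_ID.r2.cloudflarestorage.com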

@@ -0,0 +1,238 @@
/**
* R2 Multipart Upload Worker
*
* Enables large file uploads (>100MB) with:
* - Resumable uploads
* - Parallel part uploads
* - Progress tracking
* - Abort capability
*
* Flow:
* 1. POST /mpu/create - Create multipart upload
* 2. PUT /mpu/upload-part - Upload individual parts
* 3. POST /mpu/complete - Complete the upload
* 4. DELETE /mpu/abort - Abort the upload (optional)
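 *
 * Note: R2 requires all parts except the last to be the same size and allows at
 * most 10,000 parts per upload, so clients should use a fixed part size.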
*/
import { Hono } from 'hono';
type Bindings = {
MY_BUCKET: R2Bucket;
};
const app = new Hono<{ Bindings: Bindings }>();
// Create multipart upload
app.post('/mpu/create', async (c) => {
const { key, contentType } = await c.req.json<{
key: string;
contentType?: string;
}>();
if (!key) {
return c.json({
success: false,
error: 'Missing required field: key',
}, 400);
}
try {
const multipart = await c.env.MY_BUCKET.createMultipartUpload(key, {
httpMetadata: {
contentType: contentType || 'application/octet-stream',
},
});
return c.json({
success: true,
key: multipart.key,
uploadId: multipart.uploadId,
});
} catch (error: any) {
console.error('Create multipart error:', error.message);
return c.json({
success: false,
error: 'Failed to create multipart upload',
}, 500);
}
});
// Upload a part
app.put('/mpu/upload-part', async (c) => {
const key = c.req.query('key');
const uploadId = c.req.query('uploadId');
const partNumber = parseInt(c.req.query('partNumber') || '0');
if (!key || !uploadId || !partNumber) {
return c.json({
success: false,
error: 'Missing required parameters: key, uploadId, partNumber',
}, 400);
}
if (partNumber < 1 || partNumber > 10000) {
return c.json({
success: false,
error: 'Part number must be between 1 and 10000',
}, 400);
}
try {
const body = await c.req.arrayBuffer();
// Resume the multipart upload
const multipart = c.env.MY_BUCKET.resumeMultipartUpload(key, uploadId);
// Upload the part
const uploadedPart = await multipart.uploadPart(partNumber, body);
return c.json({
success: true,
partNumber: uploadedPart.partNumber,
etag: uploadedPart.etag,
});
} catch (error: any) {
console.error('Upload part error:', error.message);
return c.json({
success: false,
error: 'Failed to upload part',
details: error.message,
}, 500);
}
});
// Complete multipart upload
app.post('/mpu/complete', async (c) => {
const { key, uploadId, parts } = await c.req.json<{
key: string;
uploadId: string;
parts: Array<{ partNumber: number; etag: string }>;
}>();
if (!key || !uploadId || !parts || !Array.isArray(parts)) {
return c.json({
success: false,
error: 'Missing required fields: key, uploadId, parts',
}, 400);
}
try {
const multipart = c.env.MY_BUCKET.resumeMultipartUpload(key, uploadId);
// Complete the upload
const object = await multipart.complete(parts);
return c.json({
success: true,
key: object.key,
size: object.size,
etag: object.etag,
uploaded: object.uploaded,
});
} catch (error: any) {
console.error('Complete multipart error:', error.message);
return c.json({
success: false,
error: 'Failed to complete multipart upload',
details: error.message,
}, 500);
}
});
// Abort multipart upload
app.delete('/mpu/abort', async (c) => {
const key = c.req.query('key');
const uploadId = c.req.query('uploadId');
if (!key || !uploadId) {
return c.json({
success: false,
error: 'Missing required parameters: key, uploadId',
}, 400);
}
try {
const multipart = c.env.MY_BUCKET.resumeMultipartUpload(key, uploadId);
await multipart.abort();
return c.json({
success: true,
message: 'Multipart upload aborted',
key,
uploadId,
});
} catch (error: any) {
console.error('Abort multipart error:', error.message);
return c.json({
success: false,
error: 'Failed to abort multipart upload',
}, 500);
}
});
// Health check
app.get('/health', (c) => {
return c.json({
status: 'healthy',
service: 'r2-multipart-worker',
timestamp: new Date().toISOString(),
});
});
export default app;
/**
* Example Python client for multipart upload:
*
* import requests
* from concurrent.futures import ThreadPoolExecutor
*
* WORKER_URL = "https://my-worker.workers.dev"
* FILE_PATH = "large-file.mp4"
* PART_SIZE = 10 * 1024 * 1024 # 10MB parts
*
* # 1. Create multipart upload
* response = requests.post(f"{WORKER_URL}/mpu/create", json={
* "key": "uploads/large-file.mp4",
* "contentType": "video/mp4"
* })
* data = response.json()
* upload_id = data["uploadId"]
* key = data["key"]
*
 * # 2. Upload parts in parallel
 * def upload_part(part_number, data):
 *     response = requests.put(
 *         f"{WORKER_URL}/mpu/upload-part",
 *         params={
 *             "key": key,
 *             "uploadId": upload_id,
 *             "partNumber": part_number
 *         },
 *         data=data
 *     )
 *     result = response.json()
 *     # /mpu/complete only needs partNumber and etag for each part
 *     return {"partNumber": result["partNumber"], "etag": result["etag"]}
 *
 * futures = []
 * with open(FILE_PATH, 'rb') as f:
 *     part_number = 1
 *     with ThreadPoolExecutor(max_workers=4) as executor:
 *         while True:
 *             chunk = f.read(PART_SIZE)
 *             if not chunk:
 *                 break
 *             # Queue the part without blocking, so parts actually upload in parallel
 *             futures.append(executor.submit(upload_part, part_number, chunk))
 *             part_number += 1
 *
 * uploaded_parts = [future.result() for future in futures]
*
* # 3. Complete upload
* response = requests.post(f"{WORKER_URL}/mpu/complete", json={
* "key": key,
* "uploadId": upload_id,
* "parts": uploaded_parts
* })
*
* print(response.json())
*/
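/**
 * Example TypeScript (browser) client for the same flow. This is a sketch, not a
 * drop-in implementation: WORKER_URL is a placeholder, and each part must stay
 * within the Workers request body size limit for your plan.
 *
 * const WORKER_URL = 'https://my-worker.workers.dev';
 * const PART_SIZE = 10 * 1024 * 1024; // 10 MB parts (all parts except the last must match)
 *
 * async function uploadFile(file: File, key: string) {
 *   // 1. Create the multipart upload
 *   const create = await fetch(`${WORKER_URL}/mpu/create`, {
 *     method: 'POST',
 *     headers: { 'Content-Type': 'application/json' },
 *     body: JSON.stringify({ key, contentType: file.type }),
 *   }).then((r) => r.json());
 *
 *   // 2. Upload each slice of the file as a numbered part
 *   const parts: { partNumber: number; etag: string }[] = [];
 *   for (let offset = 0, partNumber = 1; offset < file.size; offset += PART_SIZE, partNumber++) {
 *     const query = new URLSearchParams({
 *       key: create.key,
 *       uploadId: create.uploadId,
 *       partNumber: String(partNumber),
 *     });
 *     const part = await fetch(`${WORKER_URL}/mpu/upload-part?${query}`, {
 *       method: 'PUT',
 *       body: file.slice(offset, offset + PART_SIZE),
 *     }).then((r) => r.json());
 *     parts.push({ partNumber: part.partNumber, etag: part.etag });
 *   }
 *
 *   // 3. Complete the upload
 *   return fetch(`${WORKER_URL}/mpu/complete`, {
 *     method: 'POST',
 *     headers: { 'Content-Type': 'application/json' },
 *     body: JSON.stringify({ key: create.key, uploadId: create.uploadId, parts }),
 *   }).then((r) => r.json());
 * }
 */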

@@ -0,0 +1,289 @@
/**
* R2 Presigned URL Generator Worker
*
* Generates presigned URLs for:
 * - Direct client uploads to R2 (bypassing the Worker)
* - Temporary download links with expiry
*
* IMPORTANT:
* - Never expose R2 access keys in client code
* - Always generate presigned URLs server-side
* - Set appropriate expiry times (1-24 hours)
* - Add authentication before generating URLs
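 * - Browser uploads that go straight to R2 also need a CORS policy on the
 *   bucket allowing PUT from your origin (see the presigned_urls example
 *   in the CORS policy file)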
*
* Setup:
* 1. Create R2 API token in Cloudflare dashboard
* 2. Add secrets to wrangler:
* wrangler secret put R2_ACCESS_KEY_ID
* wrangler secret put R2_SECRET_ACCESS_KEY
* wrangler secret put ACCOUNT_ID
*/
import { Hono } from 'hono';
import { AwsClient } from 'aws4fetch';
type Bindings = {
R2_ACCESS_KEY_ID: string;
R2_SECRET_ACCESS_KEY: string;
ACCOUNT_ID: string;
MY_BUCKET: R2Bucket;
};
const app = new Hono<{ Bindings: Bindings }>();
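// Optional auth middleware: a minimal sketch, assuming callers send a bearer token
// that matches a secret named API_TOKEN (hypothetical; add it to Bindings and set it
// with `wrangler secret put API_TOKEN`). Uncomment to protect every /presigned/* route.
//
// app.use('/presigned/*', async (c, next) => {
//   const auth = c.req.header('Authorization');
//   if (auth !== `Bearer ${c.env.API_TOKEN}`) {
//     return c.json({ error: 'Unauthorized' }, 401);
//   }
//   await next();
// });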
// Generate presigned upload URL
app.post('/presigned/upload', async (c) => {
// TODO: Add authentication here
// const authHeader = c.req.header('Authorization');
// if (!authHeader) {
// return c.json({ error: 'Unauthorized' }, 401);
// }
const { filename, expiresIn = 3600 } = await c.req.json<{
filename: string;
expiresIn?: number;
}>();
if (!filename) {
return c.json({
success: false,
error: 'Missing required field: filename',
}, 400);
}
// Validate expiry (max 7 days)
const maxExpiry = 7 * 24 * 60 * 60; // 7 days
const validExpiry = Math.min(expiresIn, maxExpiry);
try {
const r2Client = new AwsClient({
accessKeyId: c.env.R2_ACCESS_KEY_ID,
secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
});
const bucketName = 'my-bucket'; // Replace with your bucket name
const accountId = c.env.ACCOUNT_ID;
const url = new URL(
`https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
);
// Set expiry
url.searchParams.set('X-Amz-Expires', validExpiry.toString());
// Sign the URL for PUT
const signed = await r2Client.sign(
new Request(url, { method: 'PUT' }),
{ aws: { signQuery: true } }
);
return c.json({
success: true,
uploadUrl: signed.url,
filename,
expiresIn: validExpiry,
expiresAt: new Date(Date.now() + validExpiry * 1000).toISOString(),
});
} catch (error: any) {
console.error('Presigned upload URL error:', error.message);
return c.json({
success: false,
error: 'Failed to generate presigned upload URL',
}, 500);
}
});
// Generate presigned download URL
app.post('/presigned/download', async (c) => {
// TODO: Add authentication here
// const authHeader = c.req.header('Authorization');
// if (!authHeader) {
// return c.json({ error: 'Unauthorized' }, 401);
// }
const { filename, expiresIn = 3600 } = await c.req.json<{
filename: string;
expiresIn?: number;
}>();
if (!filename) {
return c.json({
success: false,
error: 'Missing required field: filename',
}, 400);
}
// Validate expiry (max 7 days)
const maxExpiry = 7 * 24 * 60 * 60;
const validExpiry = Math.min(expiresIn, maxExpiry);
try {
// Check if file exists first
const exists = await c.env.MY_BUCKET.head(filename);
if (!exists) {
return c.json({
success: false,
error: 'File not found',
}, 404);
}
const r2Client = new AwsClient({
accessKeyId: c.env.R2_ACCESS_KEY_ID,
secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
});
const bucketName = 'my-bucket'; // Replace with your bucket name
const accountId = c.env.ACCOUNT_ID;
const url = new URL(
`https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
);
url.searchParams.set('X-Amz-Expires', validExpiry.toString());
// Sign the URL for GET
const signed = await r2Client.sign(
new Request(url, { method: 'GET' }),
{ aws: { signQuery: true } }
);
return c.json({
success: true,
downloadUrl: signed.url,
filename,
size: exists.size,
expiresIn: validExpiry,
expiresAt: new Date(Date.now() + validExpiry * 1000).toISOString(),
});
} catch (error: any) {
console.error('Presigned download URL error:', error.message);
return c.json({
success: false,
error: 'Failed to generate presigned download URL',
}, 500);
}
});
// Generate batch presigned URLs (upload)
app.post('/presigned/upload/batch', async (c) => {
const { filenames, expiresIn = 3600 } = await c.req.json<{
filenames: string[];
expiresIn?: number;
}>();
if (!filenames || !Array.isArray(filenames)) {
return c.json({
success: false,
error: 'Invalid request: filenames must be an array',
}, 400);
}
const maxExpiry = 7 * 24 * 60 * 60;
const validExpiry = Math.min(expiresIn, maxExpiry);
try {
const r2Client = new AwsClient({
accessKeyId: c.env.R2_ACCESS_KEY_ID,
secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
});
const bucketName = 'my-bucket';
const accountId = c.env.ACCOUNT_ID;
const urls = await Promise.all(
filenames.map(async (filename) => {
const url = new URL(
`https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
);
url.searchParams.set('X-Amz-Expires', validExpiry.toString());
const signed = await r2Client.sign(
new Request(url, { method: 'PUT' }),
{ aws: { signQuery: true } }
);
return {
filename,
uploadUrl: signed.url,
};
})
);
return c.json({
success: true,
urls,
expiresIn: validExpiry,
expiresAt: new Date(Date.now() + validExpiry * 1000).toISOString(),
});
} catch (error: any) {
console.error('Batch presigned URLs error:', error.message);
return c.json({
success: false,
error: 'Failed to generate presigned URLs',
}, 500);
}
});
// Health check
app.get('/health', (c) => {
return c.json({
status: 'healthy',
service: 'r2-presigned-urls',
timestamp: new Date().toISOString(),
});
});
export default app;
/**
* Example client-side upload with presigned URL:
*
* // 1. Get presigned URL from your Worker
* const response = await fetch('https://my-worker.workers.dev/presigned/upload', {
* method: 'POST',
* headers: {
* 'Content-Type': 'application/json',
* 'Authorization': 'Bearer YOUR_TOKEN'
* },
* body: JSON.stringify({
* filename: 'uploads/photo.jpg',
* expiresIn: 3600
* })
* });
*
* const { uploadUrl } = await response.json();
*
* // 2. Upload file directly to R2
* const file = document.querySelector('input[type="file"]').files[0];
*
* await fetch(uploadUrl, {
* method: 'PUT',
* body: file,
* headers: {
* 'Content-Type': file.type
* }
* });
*
* console.log('Upload complete!');
*/
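/**
 * Example client-side download with a presigned URL, a sketch assuming the same
 * Worker URL and bearer token as the upload example above:
 *
 * const response = await fetch('https://my-worker.workers.dev/presigned/download', {
 *   method: 'POST',
 *   headers: {
 *     'Content-Type': 'application/json',
 *     'Authorization': 'Bearer YOUR_TOKEN'
 *   },
 *   body: JSON.stringify({
 *     filename: 'uploads/photo.jpg',
 *     expiresIn: 600
 *   })
 * });
 *
 * const { downloadUrl } = await response.json();
 *
 * // The signed URL can be fetched directly, or used as an <img>/<video> src
 * const blob = await fetch(downloadUrl).then((r) => r.blob());
 */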
/**
* Wrangler setup for secrets:
*
* # Add R2 access key ID
* wrangler secret put R2_ACCESS_KEY_ID
*
* # Add R2 secret access key
* wrangler secret put R2_SECRET_ACCESS_KEY
*
* # Add account ID
* wrangler secret put ACCOUNT_ID
*
* # Create R2 API token:
* 1. Go to Cloudflare Dashboard → R2
* 2. Click "Manage R2 API Tokens"
* 3. Create API Token with:
* - Permissions: Object Read & Write
* - Buckets: Specific bucket or all buckets
* 4. Save the Access Key ID and Secret Access Key
*/

@@ -0,0 +1,226 @@
/**
* Simple R2 Upload/Download Worker
*
* Features:
* - Upload files with PUT requests
* - Download files with GET requests
* - Delete files with DELETE requests
* - List all files
* - Proper content-type handling
* - Error handling
*/
import { Hono } from 'hono';
type Bindings = {
MY_BUCKET: R2Bucket;
};
const app = new Hono<{ Bindings: Bindings }>();
// Upload a file
app.put('/files/:filename', async (c) => {
const filename = c.req.param('filename');
const body = await c.req.arrayBuffer();
const contentType = c.req.header('content-type') || 'application/octet-stream';
try {
const object = await c.env.MY_BUCKET.put(filename, body, {
httpMetadata: {
contentType: contentType,
cacheControl: 'public, max-age=3600',
},
customMetadata: {
uploadedAt: new Date().toISOString(),
uploadedBy: 'api',
},
});
return c.json({
success: true,
key: object.key,
size: object.size,
etag: object.etag,
uploaded: object.uploaded,
});
} catch (error: any) {
console.error('Upload error:', error.message);
return c.json({
success: false,
error: 'Failed to upload file',
}, 500);
}
});
// Download a file
app.get('/files/:filename', async (c) => {
const filename = c.req.param('filename');
try {
const object = await c.env.MY_BUCKET.get(filename);
if (!object) {
return c.json({
success: false,
error: 'File not found',
}, 404);
}
// Apply http metadata from R2
const headers = new Headers();
object.writeHttpMetadata(headers);
headers.set('etag', object.httpEtag);
return new Response(object.body, { headers });
} catch (error: any) {
console.error('Download error:', error.message);
return c.json({
success: false,
error: 'Failed to download file',
}, 500);
}
});
// Get file metadata (without downloading body)
app.head('/files/:filename', async (c) => {
const filename = c.req.param('filename');
try {
const object = await c.env.MY_BUCKET.head(filename);
if (!object) {
return c.json({
success: false,
error: 'File not found',
}, 404);
}
return c.json({
success: true,
key: object.key,
size: object.size,
etag: object.etag,
uploaded: object.uploaded,
contentType: object.httpMetadata?.contentType,
customMetadata: object.customMetadata,
});
} catch (error: any) {
console.error('Head error:', error.message);
return c.json({
success: false,
error: 'Failed to get file metadata',
}, 500);
}
});
// Delete a file
app.delete('/files/:filename', async (c) => {
const filename = c.req.param('filename');
try {
// Check if file exists first
const exists = await c.env.MY_BUCKET.head(filename);
if (!exists) {
return c.json({
success: false,
error: 'File not found',
}, 404);
}
await c.env.MY_BUCKET.delete(filename);
return c.json({
success: true,
message: 'File deleted successfully',
key: filename,
});
} catch (error: any) {
console.error('Delete error:', error.message);
return c.json({
success: false,
error: 'Failed to delete file',
}, 500);
}
});
// List all files (with pagination)
app.get('/files', async (c) => {
const cursor = c.req.query('cursor');
const limit = parseInt(c.req.query('limit') || '100');
const prefix = c.req.query('prefix') || '';
try {
const listed = await c.env.MY_BUCKET.list({
limit: Math.min(limit, 1000), // R2 returns at most 1000 objects per list call
cursor: cursor || undefined,
prefix: prefix || undefined,
include: ['httpMetadata'], // Required so contentType is populated on listed objects
});
return c.json({
success: true,
files: listed.objects.map(obj => ({
key: obj.key,
size: obj.size,
etag: obj.etag,
uploaded: obj.uploaded,
contentType: obj.httpMetadata?.contentType,
})),
truncated: listed.truncated,
cursor: listed.cursor,
count: listed.objects.length,
});
} catch (error: any) {
console.error('List error:', error.message);
return c.json({
success: false,
error: 'Failed to list files',
}, 500);
}
});
// Bulk delete (up to 1000 files)
app.post('/files/bulk-delete', async (c) => {
const { keys } = await c.req.json<{ keys: string[] }>();
if (!keys || !Array.isArray(keys)) {
return c.json({
success: false,
error: 'Invalid request: keys must be an array',
}, 400);
}
if (keys.length > 1000) {
return c.json({
success: false,
error: 'Cannot delete more than 1000 keys at once',
}, 400);
}
try {
await c.env.MY_BUCKET.delete(keys);
return c.json({
success: true,
message: `Deleted ${keys.length} files`,
count: keys.length,
});
} catch (error: any) {
console.error('Bulk delete error:', error.message);
return c.json({
success: false,
error: 'Failed to delete files',
}, 500);
}
});
// Health check
app.get('/health', (c) => {
return c.json({
status: 'healthy',
service: 'r2-worker',
timestamp: new Date().toISOString(),
});
});
export default app;
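/**
 * Example usage with curl (hypothetical Worker URL and object keys):
 *
 * # Upload a file
 * curl -X PUT https://my-worker.workers.dev/files/photo.jpg \
 *   -H "Content-Type: image/jpeg" \
 *   --data-binary @photo.jpg
 *
 * # Download it
 * curl -O https://my-worker.workers.dev/files/photo.jpg
 *
 * # List objects (first page, then follow the returned cursor)
 * curl "https://my-worker.workers.dev/files?limit=50"
 * curl "https://my-worker.workers.dev/files?limit=50&cursor=CURSOR_FROM_PREVIOUS_RESPONSE"
 *
 * # Delete one object
 * curl -X DELETE https://my-worker.workers.dev/files/photo.jpg
 *
 * # Delete several objects at once
 * curl -X POST https://my-worker.workers.dev/files/bulk-delete \
 *   -H "Content-Type: application/json" \
 *   -d '{"keys": ["old-1.txt", "old-2.txt"]}'
 */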

@@ -0,0 +1,49 @@
{
"$schema": "node_modules/wrangler/config-schema.json",
"name": "my-r2-worker",
"main": "src/index.ts",
"account_id": "YOUR_ACCOUNT_ID",
"compatibility_date": "2025-10-11",
// R2 Bucket Bindings
"r2_buckets": [
{
// The binding name - accessible as env.MY_BUCKET in your Worker
"binding": "MY_BUCKET",
// The actual bucket name in R2 (must exist)
"bucket_name": "my-bucket",
// Optional: Use a different bucket for local development
// This prevents dev/test data from polluting the production bucket
"preview_bucket_name": "my-bucket-preview"
}
],
// Multiple buckets example
// "r2_buckets": [
// {
// "binding": "UPLOADS",
// "bucket_name": "user-uploads"
// },
// {
// "binding": "ASSETS",
// "bucket_name": "static-assets"
// },
// {
// "binding": "BACKUPS",
// "bucket_name": "database-backups"
// }
// ],
// Optional: Enable observability
"observability": {
"enabled": true
},
// Optional: Workers Static Assets (if serving frontend)
"assets": {
"directory": "./public/",
"binding": "ASSETS"
}
}
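The buckets referenced by the bindings above must exist before the Worker is deployed. Assuming Wrangler is authenticated against the right account, they can be created from the CLI:

# Create the production and preview buckets named in r2_buckets
wrangler r2 bucket create my-bucket
wrangler r2 bucket create my-bucket-preview

# Then deploy the Worker
wrangler deploy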