Initial commit
.claude-plugin/plugin.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
  "name": "cloudflare-r2",
  "description": "Store objects with R2's S3-compatible storage on Cloudflare's edge. Use when: uploading/downloading files, configuring CORS, generating presigned URLs, multipart uploads, managing metadata, or troubleshooting R2_ERROR, CORS failures, presigned URL issues, or quota errors.",
  "version": "1.0.0",
  "author": {
    "name": "Jeremy Dawes",
    "email": "jeremy@jezweb.net"
  },
  "skills": [
    "./"
  ]
}
README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# cloudflare-r2

Store objects with R2's S3-compatible storage on Cloudflare's edge. Use when: uploading/downloading files, configuring CORS, generating presigned URLs, multipart uploads, managing metadata, or troubleshooting R2_ERROR, CORS failures, presigned URL issues, or quota errors.
SKILL.md (new file, 385 lines)
@@ -0,0 +1,385 @@
---
name: cloudflare-r2
description: |
  Store objects with R2's S3-compatible storage on Cloudflare's edge. Use when: uploading/downloading files, configuring CORS, generating presigned URLs, multipart uploads, managing metadata, or troubleshooting R2_ERROR, CORS failures, presigned URL issues, or quota errors.
license: MIT
---

# Cloudflare R2 Object Storage

**Status**: Production Ready ✅
**Last Updated**: 2025-11-24
**Dependencies**: cloudflare-worker-base (for Worker setup)
**Latest Versions**: wrangler@4.50.0, @cloudflare/workers-types@4.20251121.0, aws4fetch@1.0.20

**Recent Updates (2025)**:
- **September 2025**: R2 SQL open beta (serverless query engine for Apache Iceberg), Pipelines GA (real-time stream ingestion), Remote bindings GA (local dev connects to deployed R2)
- **May 2025**: Dashboard redesign (deeplink support, bucket settings centralization), Super Slurper 5x faster (rebuilt with Workers/Queues/Durable Objects)
- **April 2025**: R2 Data Catalog open beta (managed Apache Iceberg catalog), Event Notifications open beta (5,000 msg/s per Queue)
- **2025**: Bucket limits increased (1 million max), CRC-64/NVME checksums, server-side encryption with customer keys, Infrequent Access storage class (beta), Oceania region, S3 API enhancements (sha256/sha1 checksums, ListParts, conditional CopyObject)

---
## Quick Start (5 Minutes)

```bash
# 1. Create bucket
npx wrangler r2 bucket create my-bucket
```

```jsonc
// 2. Add binding to wrangler.jsonc
{
  "r2_buckets": [{
    "binding": "MY_BUCKET",
    "bucket_name": "my-bucket",
    "preview_bucket_name": "my-bucket-preview" // Optional: separate dev/prod
  }]
}
```

```typescript
// 3. Upload/download from a Worker
type Bindings = { MY_BUCKET: R2Bucket };

// Upload
await env.MY_BUCKET.put('file.txt', data, {
  httpMetadata: { contentType: 'text/plain' }
});

// Download
const object = await env.MY_BUCKET.get('file.txt');
if (!object) return c.json({ error: 'Not found' }, 404);

return new Response(object.body, {
  headers: {
    'Content-Type': object.httpMetadata?.contentType || 'application/octet-stream',
    'ETag': object.httpEtag,
  },
});
```

```bash
# 4. Deploy
npx wrangler deploy
```

---
## R2 Workers API

### Core Methods

```typescript
// put() - Upload objects
await env.MY_BUCKET.put('file.txt', data, {
  httpMetadata: {
    contentType: 'text/plain',
    cacheControl: 'public, max-age=3600',
  },
  customMetadata: { userId: '123' },
  md5: await crypto.subtle.digest('MD5', data), // Checksum verification (Workers supports MD5 here)
});

// Conditional upload (prevent overwrites)
const uploaded = await env.MY_BUCKET.put('file.txt', data, {
  onlyIf: { uploadedBefore: new Date('2020-01-01') } // Date predates R2, so this succeeds only if the key doesn't exist
});
if (!uploaded) return c.json({ error: 'File already exists' }, 409);

// get() - Download objects
const object = await env.MY_BUCKET.get('file.txt');
if (!object) return c.json({ error: 'Not found' }, 404);

const text = await object.text();          // As string
const json = await object.json();          // As JSON
const buffer = await object.arrayBuffer(); // As ArrayBuffer (pick one: the body can only be consumed once)

// Range requests (partial downloads)
const partial = await env.MY_BUCKET.get('video.mp4', {
  range: { offset: 0, length: 1024 * 1024 } // First 1MB
});

// head() - Get metadata only (no body download)
const meta = await env.MY_BUCKET.head('file.txt');
console.log(meta?.size, meta?.etag, meta?.customMetadata);

// delete() - Delete objects
await env.MY_BUCKET.delete('file.txt');                 // Single delete (idempotent)
await env.MY_BUCKET.delete(['file1.txt', 'file2.txt']); // Bulk delete (max 1000)

// list() - List objects
const listed = await env.MY_BUCKET.list({
  prefix: 'images/', // Filter by prefix
  limit: 100,
  cursor: cursor,    // Pagination
  delimiter: '/',    // Folder-like listing
});

for (const obj of listed.objects) {
  console.log(`${obj.key}: ${obj.size} bytes`);
}
```

---
## Multipart Uploads

For files >100MB or resumable uploads. Use when: large files, browser uploads, or parallel part uploads are needed.

```typescript
// 1. Create multipart upload
const multipart = await env.MY_BUCKET.createMultipartUpload('large-file.zip', {
  httpMetadata: { contentType: 'application/zip' }
});

// 2. Upload parts (5MB-100MB each, max 10,000 parts)
// In a later request, resume with: env.MY_BUCKET.resumeMultipartUpload(key, uploadId)
const part1 = await multipart.uploadPart(1, chunk1);
const part2 = await multipart.uploadPart(2, chunk2);

// 3. Complete upload
const object = await multipart.complete([
  { partNumber: 1, etag: part1.etag },
  { partNumber: 2, etag: part2.etag },
]);

// 4. Or abort instead of completing
await multipart.abort();
```

**Limits**: Parts 5MB-100MB, max 10,000 parts per upload. Don't use multipart for files <5MB (the overhead isn't worth it).

---
## Presigned URLs

Presigned URLs let clients upload/download directly to/from R2, bypassing the Worker. Use the aws4fetch library.

```typescript
import { AwsClient } from 'aws4fetch';

const r2Client = new AwsClient({
  accessKeyId: env.R2_ACCESS_KEY_ID,
  secretAccessKey: env.R2_SECRET_ACCESS_KEY,
});

const url = new URL(
  `https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
);
url.searchParams.set('X-Amz-Expires', '3600'); // 1 hour expiry

const signed = await r2Client.sign(
  new Request(url, { method: 'PUT' }), // or 'GET' for downloads
  { aws: { signQuery: true } }
);

// Client uploads directly to R2
await fetch(signed.url, { method: 'PUT', body: file });
```

**CRITICAL Security:**
- ❌ **NEVER** expose R2 access keys in client-side code
- ✅ **ALWAYS** generate presigned URLs server-side
- ✅ **ALWAYS** set expiry times (1-24 hours is typical)
- ✅ **ALWAYS** authenticate the user before generating URLs
- ✅ **CONSIDER** scoping keys to user folders: `users/${userId}/${filename}`

---
## CORS Configuration

Configure CORS in the bucket settings (Dashboard → R2 → Bucket → Settings → CORS Policy) before any browser access.

```json
{
  "CORSRules": [{
    "AllowedOrigins": ["https://app.example.com"],
    "AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
    "AllowedHeaders": ["Content-Type", "Content-MD5", "x-amz-meta-*"],
    "ExposeHeaders": ["ETag"],
    "MaxAgeSeconds": 3600
  }]
}
```

**For presigned URLs**: CORS is handled by R2 directly (configure it on the bucket, not in the Worker).
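To sanity-check the policy, you can simulate the browser's preflight from the command line. A rough sketch, assuming the policy above was saved and the bucket, account ID, and object key (all hypothetical here) are filled in:

```bash
# Simulate a CORS preflight against the bucket's S3 endpoint (placeholder names)
curl -i -X OPTIONS \
  -H "Origin: https://app.example.com" \
  -H "Access-Control-Request-Method: PUT" \
  "https://my-bucket.<ACCOUNT_ID>.r2.cloudflarestorage.com/test.txt"
# A matching policy should include Access-Control-Allow-Origin in the response headers
```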
---

## HTTP Metadata & Custom Metadata

```typescript
// HTTP metadata (standard headers)
await env.MY_BUCKET.put('file.pdf', data, {
  httpMetadata: {
    contentType: 'application/pdf',
    cacheControl: 'public, max-age=31536000, immutable',
    contentDisposition: 'attachment; filename="report.pdf"',
    contentEncoding: 'gzip',
  },
  customMetadata: {
    userId: '12345',
    version: '1.0',
  } // Max 2KB total; keys and values must be strings
});

// Read metadata
const object = await env.MY_BUCKET.head('file.pdf');
console.log(object?.httpMetadata, object?.customMetadata);
```

---
## Error Handling

### Common R2 Errors

```typescript
try {
  await env.MY_BUCKET.put(key, data);
} catch (error: any) {
  const message = error.message;

  if (message.includes('R2_ERROR')) {
    // Generic R2 error
  } else if (message.includes('exceeded')) {
    // Quota exceeded
  } else if (message.includes('precondition')) {
    // Conditional operation failed
  } else if (message.includes('multipart')) {
    // Multipart upload error
  }

  console.error('R2 Error:', message);
  return c.json({ error: 'Storage operation failed' }, 500);
}
```

### Retry Logic

```typescript
async function r2WithRetry<T>(
  operation: () => Promise<T>,
  maxRetries = 3
): Promise<T> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await operation();
    } catch (error: any) {
      const message = error.message;

      // Retry on transient errors
      const isRetryable =
        message.includes('network') ||
        message.includes('timeout') ||
        message.includes('temporarily unavailable');

      if (!isRetryable || attempt === maxRetries - 1) {
        throw error;
      }

      // Exponential backoff
      const delay = Math.min(1000 * Math.pow(2, attempt), 5000);
      await new Promise(resolve => setTimeout(resolve, delay));
    }
  }

  throw new Error('Retry logic failed');
}

// Usage
const object = await r2WithRetry(() =>
  env.MY_BUCKET.get('important-file.txt')
);
```

---
## Performance Optimization

```typescript
// Batch delete (up to 1000 keys)
await env.MY_BUCKET.delete(['file1.txt', 'file2.txt', 'file3.txt']);

// Range requests for large files
const partial = await env.MY_BUCKET.get('video.mp4', {
  range: { offset: 0, length: 10 * 1024 * 1024 } // First 10MB
});

// Cache headers for immutable assets
await env.MY_BUCKET.put('static/app.abc123.js', jsData, {
  httpMetadata: { cacheControl: 'public, max-age=31536000, immutable' }
});

// Checksums for data integrity
const md5Hash = await crypto.subtle.digest('MD5', fileData);
await env.MY_BUCKET.put('important.dat', fileData, { md5: md5Hash });
```

---
## Best Practices Summary

**Always Do:**
- Set `contentType` for all uploads
- Use batch delete for multiple objects (up to 1000)
- Set cache headers for static assets
- Use presigned URLs for large client uploads
- Use multipart for files >100MB
- Set CORS before browser uploads
- Set expiry times on presigned URLs (1-24 hours)
- Use `head()` when you only need metadata
- Use conditional operations to prevent overwrites

**Never Do:**
- Never expose R2 access keys in client-side code
- Never skip `contentType` (files download as binary)
- Never delete in loops (use batch delete)
- Never skip CORS for browser uploads
- Never use multipart for small files (<5MB)
- Never delete >1000 keys in a single call (chunk them; see the sketch below)
- Never skip presigned URL expiry (security risk)
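Since `delete()` caps at 1000 keys per call, a small chunking helper keeps large cleanups inside the limit. A minimal sketch (the helper name and binding are illustrative):

```typescript
// Sketch: delete any number of keys in batches of 1000 (R2's bulk-delete cap)
async function deleteAll(bucket: R2Bucket, keys: string[]): Promise<void> {
  for (let i = 0; i < keys.length; i += 1000) {
    await bucket.delete(keys.slice(i, i + 1000));
  }
}

// Usage: await deleteAll(env.MY_BUCKET, staleKeys);
```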
---

## Known Issues Prevented

| Issue | Description | How to Avoid |
|-------|-------------|--------------|
| **CORS errors in browser** | Browser can't upload/download due to a missing CORS policy | Configure CORS in bucket settings before browser access |
| **Files download as binary** | A missing content type makes browsers download files instead of displaying them | Always set `httpMetadata.contentType` on upload |
| **Presigned URLs that never expire** | URLs without an expiry are a standing security risk | Always set `X-Amz-Expires` (1-24 hours typical) |
| **Multipart upload limits** | Parts exceed 100MB or >10,000 parts | Keep parts 5MB-100MB, max 10,000 parts per upload |
| **Bulk delete limits** | Deleting >1000 keys in one call fails | Chunk deletes into batches of 1000 |
| **Custom metadata overflow** | Metadata exceeds the 2KB limit | Keep custom metadata under 2KB total (see the sketch below) |
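A pre-upload guard can catch the 2KB overflow early. A rough sketch; the exact byte accounting R2 applies isn't specified here, so this approximates by measuring keys plus values as UTF-8:

```typescript
// Sketch: approximate size check for customMetadata before put()
function assertMetadataFits(meta: Record<string, string>, limit = 2048): void {
  const bytes = new TextEncoder().encode(
    Object.entries(meta).flat().join('')
  ).length;
  if (bytes > limit) {
    throw new Error(`customMetadata is ~${bytes} bytes; limit is ${limit}`);
  }
}
```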
---

## Wrangler Commands Reference

```bash
# Bucket management
wrangler r2 bucket create <BUCKET_NAME>
wrangler r2 bucket list
wrangler r2 bucket delete <BUCKET_NAME>

# Object management
wrangler r2 object put <BUCKET_NAME>/<KEY> --file=<FILE_PATH>
wrangler r2 object get <BUCKET_NAME>/<KEY> --file=<OUTPUT_PATH>
wrangler r2 object delete <BUCKET_NAME>/<KEY>

# List objects
wrangler r2 object list <BUCKET_NAME>
wrangler r2 object list <BUCKET_NAME> --prefix="folder/"
```

---
## Official Documentation

- **R2 Overview**: https://developers.cloudflare.com/r2/
- **Get Started**: https://developers.cloudflare.com/r2/get-started/
- **Workers API**: https://developers.cloudflare.com/r2/api/workers/workers-api-reference/
- **Multipart Upload**: https://developers.cloudflare.com/r2/api/workers/workers-multipart-usage/
- **Presigned URLs**: https://developers.cloudflare.com/r2/api/s3/presigned-urls/
- **CORS Configuration**: https://developers.cloudflare.com/r2/buckets/cors/
- **Public Buckets**: https://developers.cloudflare.com/r2/buckets/public-buckets/

---

**Ready to store with R2!** 🚀
plugin.lock.json (new file, 77 lines)
@@ -0,0 +1,77 @@
{
  "$schema": "internal://schemas/plugin.lock.v1.json",
  "pluginId": "gh:jezweb/claude-skills:skills/cloudflare-r2",
  "normalized": {
    "repo": null,
    "ref": "refs/tags/v20251128.0",
    "commit": "be2a7e22a7360db75b5abca56d1205fc69d236cd",
    "treeHash": "761ddf23acde8eaec331b15b7c3aa3956d8ecfa2dcc41b10750ca7624adc45b3",
    "generatedAt": "2025-11-28T10:18:56.163002Z",
    "toolVersion": "publish_plugins.py@0.2.0"
  },
  "origin": {
    "remote": "git@github.com:zhongweili/42plugin-data.git",
    "branch": "master",
    "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
    "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
  },
  "manifest": {
    "name": "cloudflare-r2",
    "description": "Store objects with R2's S3-compatible storage on Cloudflare's edge. Use when: uploading/downloading files, configuring CORS, generating presigned URLs, multipart uploads, managing metadata, or troubleshooting R2_ERROR, CORS failures, presigned URL issues, or quota errors.",
    "version": "1.0.0"
  },
  "content": {
    "files": [
      { "path": "README.md", "sha256": "b4207461ca05f93fa1eeeebe79b75069fd5c9fb6ba82cab57bb9857ed86c0ff6" },
      { "path": "SKILL.md", "sha256": "abc8b8ba5a549b58859dd7cdd4c5815badb81ccc849c60f6c0a51031cf8ef52e" },
      { "path": "references/s3-compatibility.md", "sha256": "555b9a7164b23e69f1be92a72158bc386051784f05ca7ef2596c5c22ac76e25c" },
      { "path": "references/common-patterns.md", "sha256": "cce79b3888e0b4337e34d387d2a3bfc3e0f042091f23138a63f8e23279cb6d88" },
      { "path": "references/workers-api.md", "sha256": "9525c983661590ff04bafc0ea0bc09f147e9aba4c10820545079de2e36070884" },
      { "path": ".claude-plugin/plugin.json", "sha256": "7c9c5f8a4390fbabc3999079abbb2684fc87df820352678282f8c5f3315bd1f6" },
      { "path": "templates/r2-simple-upload.ts", "sha256": "81eabe6a55ecf5908eac02db6b07523ae20c5cd20326c7575f29e2b53f6098d9" },
      { "path": "templates/r2-presigned-urls.ts", "sha256": "9477be62fa75bc5a6aafa0fc7d240ea221e2ab97662e39cef375ac8c94720716" },
      { "path": "templates/r2-cors-config.json", "sha256": "503d55e97fe6319b15159fcfb59e0bf2a466fef1082df346e65135cd99470194" },
      { "path": "templates/r2-multipart-upload.ts", "sha256": "c1cb00916c20fbbf026e4abd428e430a4d44efcf03136cb5241d9e638a42e3a5" },
      { "path": "templates/wrangler-r2-config.jsonc", "sha256": "5339f8b4bd0a5f7b991c2029666270cc15ab0a8638509ac6ed077e7931e46134" }
    ],
    "dirSha256": "761ddf23acde8eaec331b15b7c3aa3956d8ecfa2dcc41b10750ca7624adc45b3"
  },
  "security": {
    "scannedAt": null,
    "scannerVersion": null,
    "flags": []
  }
}
references/common-patterns.md (new file, 469 lines)
@@ -0,0 +1,469 @@
# R2 Common Patterns

**Last Updated**: 2025-10-21

---
## Image Upload & Serving

### Upload with Automatic Content-Type Detection

```typescript
import { Hono } from 'hono';

type Bindings = {
  IMAGES: R2Bucket;
};

const app = new Hono<{ Bindings: Bindings }>();

app.post('/upload/image', async (c) => {
  const formData = await c.req.formData();
  const file = formData.get('image') as File;

  if (!file) {
    return c.json({ error: 'No file provided' }, 400);
  }

  // Validate file type
  const allowedTypes = ['image/jpeg', 'image/png', 'image/webp', 'image/gif'];
  if (!allowedTypes.includes(file.type)) {
    return c.json({ error: 'Invalid file type' }, 400);
  }

  // Generate unique filename
  const extension = file.name.split('.').pop();
  const filename = `${crypto.randomUUID()}.${extension}`;
  const key = `images/${filename}`;

  // Upload to R2
  const arrayBuffer = await file.arrayBuffer();
  const object = await c.env.IMAGES.put(key, arrayBuffer, {
    httpMetadata: {
      contentType: file.type,
      cacheControl: 'public, max-age=31536000, immutable',
    },
    customMetadata: {
      originalFilename: file.name,
      uploadedAt: new Date().toISOString(),
    },
  });

  return c.json({
    success: true,
    url: `/images/${filename}`,
    key: object.key,
    size: object.size,
  });
});

// Serve image
app.get('/images/:filename', async (c) => {
  const filename = c.req.param('filename');
  const key = `images/${filename}`;

  const object = await c.env.IMAGES.get(key);

  if (!object) {
    return c.json({ error: 'Image not found' }, 404);
  }

  return new Response(object.body, {
    headers: {
      'Content-Type': object.httpMetadata?.contentType || 'image/jpeg',
      'Cache-Control': 'public, max-age=31536000, immutable',
      'ETag': object.httpEtag,
    },
  });
});

export default app;
```

---
## User File Storage with Folder Organization

```typescript
app.post('/users/:userId/files', async (c) => {
  const userId = c.req.param('userId');
  const formData = await c.req.formData();
  const file = formData.get('file') as File;

  if (!file) {
    return c.json({ error: 'No file provided' }, 400);
  }

  // Organize by user ID and date
  const date = new Date().toISOString().split('T')[0]; // YYYY-MM-DD
  const filename = file.name;
  const key = `users/${userId}/${date}/${filename}`;

  const arrayBuffer = await file.arrayBuffer();
  const object = await c.env.MY_BUCKET.put(key, arrayBuffer, {
    httpMetadata: {
      contentType: file.type,
      contentDisposition: `attachment; filename="${filename}"`,
    },
    customMetadata: {
      userId,
      uploadDate: date,
      originalSize: file.size.toString(),
    },
  });

  return c.json({
    success: true,
    fileId: object.key,
    size: object.size,
  });
});

// List a user's files
app.get('/users/:userId/files', async (c) => {
  const userId = c.req.param('userId');
  const cursor = c.req.query('cursor');

  const listed = await c.env.MY_BUCKET.list({
    prefix: `users/${userId}/`,
    limit: 100,
    cursor: cursor || undefined,
  });

  return c.json({
    files: listed.objects.map(obj => ({
      key: obj.key,
      filename: obj.key.split('/').pop(),
      size: obj.size,
      uploaded: obj.uploaded,
      metadata: obj.customMetadata,
    })),
    hasMore: listed.truncated,
    cursor: listed.cursor,
  });
});
```

---
## Thumbnail Generation & Caching

```typescript
app.get('/thumbnails/:filename', async (c) => {
  const filename = c.req.param('filename');
  const width = parseInt(c.req.query('w') || '200');
  const height = parseInt(c.req.query('h') || '200');

  const thumbnailKey = `thumbnails/${width}x${height}/${filename}`;

  // Check if the thumbnail already exists
  let thumbnail = await c.env.IMAGES.get(thumbnailKey);

  if (!thumbnail) {
    // Get the original image
    const original = await c.env.IMAGES.get(`images/${filename}`);

    if (!original) {
      return c.json({ error: 'Image not found' }, 404);
    }

    // Generate thumbnail (using Cloudflare Images or an external service)
    // This is a placeholder - substitute real image processing
    const thumbnailData = await generateThumbnail(
      await original.arrayBuffer(),
      width,
      height
    );

    // Store the thumbnail for future requests
    await c.env.IMAGES.put(thumbnailKey, thumbnailData, {
      httpMetadata: {
        contentType: 'image/jpeg',
        cacheControl: 'public, max-age=31536000, immutable',
      },
    });

    thumbnail = await c.env.IMAGES.get(thumbnailKey);
  }

  return new Response(thumbnail!.body, {
    headers: {
      'Content-Type': 'image/jpeg',
      'Cache-Control': 'public, max-age=31536000, immutable',
    },
  });
});

async function generateThumbnail(
  imageData: ArrayBuffer,
  width: number,
  height: number
): Promise<ArrayBuffer> {
  // Use the Cloudflare Images API or another image-processing service
  // This is a placeholder
  return imageData;
}
```

---
## Versioned File Storage

```typescript
app.put('/files/:filename', async (c) => {
  const filename = c.req.param('filename');
  const body = await c.req.arrayBuffer();

  // Get the current version number
  const versionKey = `versions/${filename}/latest`;
  const currentVersion = await c.env.MY_BUCKET.head(versionKey);

  let version = 1;
  if (currentVersion?.customMetadata?.version) {
    version = parseInt(currentVersion.customMetadata.version) + 1;
  }

  // Store the new version
  const versionedKey = `versions/${filename}/v${version}`;
  await c.env.MY_BUCKET.put(versionedKey, body, {
    httpMetadata: {
      contentType: c.req.header('content-type') || 'application/octet-stream',
    },
    customMetadata: {
      version: version.toString(),
      createdAt: new Date().toISOString(),
    },
  });

  // Update the "latest" pointer
  await c.env.MY_BUCKET.put(versionKey, body, {
    httpMetadata: {
      contentType: c.req.header('content-type') || 'application/octet-stream',
    },
    customMetadata: {
      version: version.toString(),
      latestVersion: 'true',
    },
  });

  return c.json({
    success: true,
    version,
    key: versionedKey,
  });
});

// Get a specific version
app.get('/files/:filename/v/:version', async (c) => {
  const filename = c.req.param('filename');
  const version = c.req.param('version');

  const key = `versions/${filename}/v${version}`;
  const object = await c.env.MY_BUCKET.get(key);

  if (!object) {
    return c.json({ error: 'Version not found' }, 404);
  }

  return new Response(object.body, {
    headers: {
      'Content-Type': object.httpMetadata?.contentType || 'application/octet-stream',
    },
  });
});
```

---
## Backup & Archive Pattern

```typescript
// Daily database backup to R2
async function backupDatabase(env: Bindings) {
  const date = new Date().toISOString().split('T')[0];
  const key = `backups/database/${date}/dump.sql.gz`;

  // Generate backup (placeholder)
  const backupData = await generateDatabaseDump();

  await env.BACKUPS.put(key, backupData, {
    httpMetadata: {
      contentType: 'application/gzip',
      contentEncoding: 'gzip',
    },
    customMetadata: {
      backupDate: date,
      backupType: 'full',
      database: 'production',
    },
  });

  // Delete backups older than 30 days
  await cleanupOldBackups(env, 30);
}

async function cleanupOldBackups(env: Bindings, retentionDays: number) {
  const cutoffDate = new Date();
  cutoffDate.setDate(cutoffDate.getDate() - retentionDays);

  const listed = await env.BACKUPS.list({
    prefix: 'backups/database/',
  });

  const oldBackups = listed.objects.filter(
    obj => obj.uploaded < cutoffDate
  );

  if (oldBackups.length > 0) {
    const keysToDelete = oldBackups.map(obj => obj.key);
    await env.BACKUPS.delete(keysToDelete);
  }
}
```
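One caveat with the cleanup above: a single `list()` call returns at most 1000 objects, so a bucket with more backups needs the pagination cursor. A minimal paginated sketch under the same hypothetical `BACKUPS` binding:

```typescript
// Sketch: walk every page of list() results, deleting at most 1000 keys per batch
async function cleanupOldBackupsPaginated(env: Bindings, cutoffDate: Date) {
  let cursor: string | undefined;
  do {
    const page = await env.BACKUPS.list({ prefix: 'backups/database/', cursor });
    const stale = page.objects
      .filter(obj => obj.uploaded < cutoffDate)
      .map(obj => obj.key);
    if (stale.length > 0) {
      await env.BACKUPS.delete(stale); // each page is <=1000 keys, within the bulk-delete cap
    }
    cursor = page.truncated ? page.cursor : undefined;
  } while (cursor);
}
```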
---

## Static Site Hosting with SPA Fallback

```typescript
app.get('/*', async (c) => {
  const url = new URL(c.req.url);
  let key = url.pathname.slice(1); // Remove leading slash

  if (key === '' || key.endsWith('/')) {
    key += 'index.html';
  }

  let object = await c.env.STATIC.get(key);

  // SPA fallback: if the file isn't found, try index.html
  if (!object && !key.includes('.')) {
    object = await c.env.STATIC.get('index.html');
  }

  if (!object) {
    return c.json({ error: 'Not found' }, 404);
  }

  const headers = new Headers();
  object.writeHttpMetadata(headers);

  // Set appropriate cache headers
  if (key.match(/\.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2)$/)) {
    headers.set('Cache-Control', 'public, max-age=31536000, immutable');
  } else {
    headers.set('Cache-Control', 'public, max-age=3600, must-revalidate');
  }

  return new Response(object.body, { headers });
});
```

---
## CDN with Origin Fallback

```typescript
// Use R2 as a CDN cache with an external origin fallback
app.get('/cdn/*', async (c) => {
  const url = new URL(c.req.url);
  const key = url.pathname.replace('/cdn/', '');

  // Check the R2 cache first
  let object = await c.env.CDN_CACHE.get(key);
  const cacheHit = object !== null; // Remember before the fallback overwrites it

  if (!object) {
    // Fetch from origin
    const originUrl = `https://origin.example.com/${key}`;
    const response = await fetch(originUrl);

    if (!response.ok) {
      return c.json({ error: 'Not found on origin' }, 404);
    }

    const data = await response.arrayBuffer();
    const contentType = response.headers.get('content-type') || 'application/octet-stream';

    // Cache in R2
    await c.env.CDN_CACHE.put(key, data, {
      httpMetadata: {
        contentType,
        cacheControl: 'public, max-age=31536000',
      },
    });

    object = await c.env.CDN_CACHE.get(key);
  }

  return new Response(object!.body, {
    headers: {
      'Content-Type': object!.httpMetadata?.contentType || 'application/octet-stream',
      'Cache-Control': 'public, max-age=31536000',
      'X-Cache': cacheHit ? 'HIT' : 'MISS', // `object` is always set by now, so test the flag, not the object
    },
  });
});
```

---
## Signed Upload with Quota Limits

```typescript
import { AwsClient } from 'aws4fetch';

app.post('/request-upload', async (c) => {
  const { userId, filename, fileSize } = await c.req.json();

  // Check the user's quota
  const quota = await getUserQuota(userId);

  if (quota.used + fileSize > quota.total) {
    return c.json({ error: 'Quota exceeded' }, 403);
  }

  // Generate a presigned URL
  const r2Client = new AwsClient({
    accessKeyId: c.env.R2_ACCESS_KEY_ID,
    secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
  });

  const key = `users/${userId}/${filename}`;
  const url = new URL(
    `https://my-bucket.${c.env.ACCOUNT_ID}.r2.cloudflarestorage.com/${key}`
  );

  url.searchParams.set('X-Amz-Expires', '3600');

  const signed = await r2Client.sign(
    new Request(url, { method: 'PUT' }),
    { aws: { signQuery: true } }
  );

  return c.json({
    uploadUrl: signed.url,
    expiresIn: 3600,
  });
});

async function getUserQuota(userId: string) {
  // Query the database for the user's quota (placeholder values below)
  return {
    used: 1024 * 1024 * 100,   // 100MB used
    total: 1024 * 1024 * 1024, // 1GB total
  };
}
```

---
## Best Practices Summary

1. **Use meaningful key prefixes** for organization (`users/{id}/`, `images/`, `backups/`)
2. **Set appropriate cache headers** for static assets
3. **Store metadata** for tracking and filtering
4. **Use bulk delete** instead of loops
5. **Implement cleanup** for old/temporary files
6. **Add authentication** before presigned URL generation
7. **Validate file types** before uploading
8. **Use UUIDs** for unique filenames
9. **Set expiry times** on presigned URLs
10. **Monitor quota** to prevent overages
references/s3-compatibility.md (new file, 343 lines)
@@ -0,0 +1,343 @@
# R2 S3 API Compatibility

**Last Updated**: 2025-10-21
**Official Docs**: https://developers.cloudflare.com/r2/api/s3/api/

---

## Overview

R2 implements a large portion of the Amazon S3 API, allowing you to use existing S3 SDKs and tools.

**S3 Endpoint Format:**
```
https://<account_id>.r2.cloudflarestorage.com
```
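For instance, the stock AWS CLI can point at this endpoint. A quick sketch, assuming R2 credentials are already configured in your AWS profile (bucket name and account ID are placeholders):

```bash
# List a bucket through R2's S3-compatible endpoint with the standard AWS CLI
aws s3 ls s3://my-bucket \
  --endpoint-url "https://<ACCOUNT_ID>.r2.cloudflarestorage.com" \
  --region auto
```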
---

## Supported S3 Operations

### Bucket Operations
- ✅ ListBuckets
- ❌ CreateBucket (use the Cloudflare Dashboard or Wrangler)
- ❌ DeleteBucket (use the Cloudflare Dashboard or Wrangler)

### Object Operations
- ✅ GetObject
- ✅ PutObject
- ✅ DeleteObject
- ✅ DeleteObjects (bulk delete, max 1000)
- ✅ HeadObject
- ✅ ListObjectsV2
- ✅ CopyObject
- ✅ UploadPart
- ✅ CreateMultipartUpload
- ✅ CompleteMultipartUpload
- ✅ AbortMultipartUpload
- ✅ ListMultipartUploads
- ✅ ListParts

### Presigned URLs
- ✅ GetObject (download)
- ✅ PutObject (upload)
- ✅ UploadPart (multipart)

### Not Supported
- ❌ Versioning
- ❌ Object Lock
- ❌ ACLs (use CORS instead)
- ❌ Bucket policies
- ❌ Object tagging (use custom metadata)
- ❌ Server-side encryption config (use SSE-C instead)

---
## Using AWS SDK for JavaScript

### Installation

```bash
npm install @aws-sdk/client-s3
npm install @aws-sdk/s3-request-presigner
```

### Basic Usage

```typescript
import { S3Client, PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';

// Create an S3 client for R2
const s3Client = new S3Client({
  region: 'auto',
  endpoint: `https://<ACCOUNT_ID>.r2.cloudflarestorage.com`,
  credentials: {
    accessKeyId: '<R2_ACCESS_KEY_ID>',
    secretAccessKey: '<R2_SECRET_ACCESS_KEY>',
  },
});

// Upload object
const uploadParams = {
  Bucket: 'my-bucket',
  Key: 'path/to/file.txt',
  Body: 'Hello, R2!',
  ContentType: 'text/plain',
};

await s3Client.send(new PutObjectCommand(uploadParams));

// Download object
const downloadParams = {
  Bucket: 'my-bucket',
  Key: 'path/to/file.txt',
};

const response = await s3Client.send(new GetObjectCommand(downloadParams));
const text = await response.Body.transformToString();
```

### Presigned URLs with AWS SDK

```typescript
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
import { PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';

// Generate a presigned upload URL
const uploadCommand = new PutObjectCommand({
  Bucket: 'my-bucket',
  Key: 'uploads/file.jpg',
});

const uploadUrl = await getSignedUrl(s3Client, uploadCommand, {
  expiresIn: 3600, // 1 hour
});

// Generate a presigned download URL
const downloadCommand = new GetObjectCommand({
  Bucket: 'my-bucket',
  Key: 'uploads/file.jpg',
});

const downloadUrl = await getSignedUrl(s3Client, downloadCommand, {
  expiresIn: 3600,
});
```

---
## Using aws4fetch (Lightweight Alternative)

### Installation

```bash
npm install aws4fetch
```

### Usage

```typescript
import { AwsClient } from 'aws4fetch';

const r2Client = new AwsClient({
  accessKeyId: '<R2_ACCESS_KEY_ID>',
  secretAccessKey: '<R2_SECRET_ACCESS_KEY>',
});

const endpoint = `https://<ACCOUNT_ID>.r2.cloudflarestorage.com`;

// Upload object
await r2Client.fetch(`${endpoint}/my-bucket/file.txt`, {
  method: 'PUT',
  body: 'Hello, R2!',
  headers: {
    'Content-Type': 'text/plain',
  },
});

// Download object
const response = await r2Client.fetch(`${endpoint}/my-bucket/file.txt`);
const text = await response.text();

// Delete object
await r2Client.fetch(`${endpoint}/my-bucket/file.txt`, {
  method: 'DELETE',
});

// List objects
const listResponse = await r2Client.fetch(
  `${endpoint}/my-bucket?list-type=2&max-keys=100`
);
const xml = await listResponse.text();
```

### Presigned URLs with aws4fetch

```typescript
import { AwsClient } from 'aws4fetch';

const r2Client = new AwsClient({
  accessKeyId: '<R2_ACCESS_KEY_ID>',
  secretAccessKey: '<R2_SECRET_ACCESS_KEY>',
});

const url = new URL(
  `https://<ACCOUNT_ID>.r2.cloudflarestorage.com/my-bucket/file.txt`
);

// Set expiry (in seconds)
url.searchParams.set('X-Amz-Expires', '3600');

// Sign for PUT (upload)
const signedUpload = await r2Client.sign(
  new Request(url, { method: 'PUT' }),
  { aws: { signQuery: true } }
);

console.log(signedUpload.url);

// Sign for GET (download)
const signedDownload = await r2Client.sign(
  new Request(url, { method: 'GET' }),
  { aws: { signQuery: true } }
);

console.log(signedDownload.url);
```

---
## S3 vs R2 Workers API Comparison

| Feature | S3 API | R2 Workers API |
|---------|--------|----------------|
| **Performance** | External network call | Native binding (faster) |
| **Authentication** | Access keys required | Automatic via binding |
| **Presigned URLs** | Supported | Not available (requires the S3 API + access keys) |
| **Multipart Upload** | Full S3 API | Simplified Workers API |
| **Custom Metadata** | `x-amz-meta-*` headers | `customMetadata` object |
| **Conditional Ops** | S3 headers | `onlyIf` object |
| **Size Limits** | 5GB per PUT | 100MB per PUT (200MB Business, 500MB Enterprise) |

---

## When to Use S3 API vs Workers API

### Use the S3 API when:
- ✅ Migrating from AWS S3
- ✅ Using existing S3 tools (aws-cli, s3cmd)
- ✅ Generating presigned URLs
- ✅ You need S3 compatibility for external systems

### Use the Workers API when:
- ✅ Building new applications on Cloudflare
- ✅ You need better performance (native binding)
- ✅ You don't want to manage access keys
- ✅ Accessing R2 from Workers

---
## R2-Specific Extensions

R2 adds some extensions to the S3 API:

### Conditional Operations

```typescript
// Only upload if the file doesn't exist
await s3Client.send(new PutObjectCommand({
  Bucket: 'my-bucket',
  Key: 'file.txt',
  Body: data,
  IfUnmodifiedSince: new Date('2020-01-01'), // Before R2 existed, so this succeeds only for a new key
}));
```

### Storage Class

R2 primarily supports the 'Standard' storage class (an Infrequent Access class is in beta, per the 2025 updates).

```typescript
await s3Client.send(new PutObjectCommand({
  Bucket: 'my-bucket',
  Key: 'file.txt',
  Body: data,
  StorageClass: 'STANDARD',
}));
```

---
## Migration from S3

### 1. Update Endpoint

```diff
const s3Client = new S3Client({
  region: 'auto',
- endpoint: 'https://s3.amazonaws.com',
+ endpoint: 'https://<ACCOUNT_ID>.r2.cloudflarestorage.com',
  credentials: {
-   accessKeyId: process.env.AWS_ACCESS_KEY_ID,
-   secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
+   accessKeyId: process.env.R2_ACCESS_KEY_ID,
+   secretAccessKey: process.env.R2_SECRET_ACCESS_KEY,
  },
});
```

### 2. Remove Unsupported Features

```diff
await s3Client.send(new PutObjectCommand({
  Bucket: 'my-bucket',
  Key: 'file.txt',
  Body: data,
- ACL: 'public-read', // ❌ Not supported
- Tagging: 'key=value', // ❌ Not supported (use custom metadata)
+ Metadata: { // ✅ Use custom metadata instead
+   visibility: 'public',
+ },
}));
```

### 3. Use CORS Instead of ACLs

R2 doesn't support S3 ACLs. Use CORS policies instead for browser access.

---
## Common Issues

### Issue: SignatureDoesNotMatch

**Cause:** Incorrect access keys or endpoint URL

**Fix:**
- Verify the access key ID and secret
- Ensure the endpoint includes your account ID
- Check that the region is set to 'auto'

### Issue: Presigned URLs Don't Work with Custom Domains

**Cause:** Presigned URLs only work against the R2 S3 endpoint

**Fix:**
- Use the `<ACCOUNT_ID>.r2.cloudflarestorage.com` endpoint
- Or serve custom domains through a Worker with an R2 binding

### Issue: Upload Size Exceeds Limit

**Cause:** The S3 API allows a 5GB single PUT, but Worker request bodies are capped at 100-500MB depending on plan

**Fix:**
- Use multipart upload for large files
- Or use the S3 API directly (not through a Worker)

---

## Official Resources

- **S3 API Compatibility**: https://developers.cloudflare.com/r2/api/s3/api/
- **AWS SDK Examples**: https://developers.cloudflare.com/r2/examples/aws/
- **Presigned URLs**: https://developers.cloudflare.com/r2/api/s3/presigned-urls/
references/workers-api.md (new file, 465 lines)
@@ -0,0 +1,465 @@
# R2 Workers API Complete Reference

**Last Updated**: 2025-10-21
**Official Docs**: https://developers.cloudflare.com/r2/api/workers/workers-api-reference/

---

## R2Bucket Methods

### put()

Upload an object to R2.

```typescript
put(
  key: string,
  value: ReadableStream | ArrayBuffer | ArrayBufferView | string | Blob,
  options?: R2PutOptions
): Promise<R2Object | null>
```

**Parameters:**
- `key` - Object key (path) in the bucket
- `value` - Object data
- `options` - Optional upload options

**Returns:**
- `R2Object` - Metadata of the uploaded object
- `null` - If a precondition failed (`onlyIf` clause)

**Options (R2PutOptions):**
```typescript
interface R2PutOptions {
  httpMetadata?: R2HTTPMetadata;
  customMetadata?: Record<string, string>;
  md5?: ArrayBuffer;
  sha1?: ArrayBuffer;
  sha256?: ArrayBuffer;
  sha384?: ArrayBuffer;
  sha512?: ArrayBuffer;
  onlyIf?: R2Conditional;
  storageClass?: 'Standard';
}
```

---
### get()

Download an object from R2.

```typescript
get(
  key: string,
  options?: R2GetOptions
): Promise<R2ObjectBody | null>
```

**Parameters:**
- `key` - Object key (path) in the bucket
- `options` - Optional download options

**Returns:**
- `R2ObjectBody` - Object with metadata and body stream
- `null` - If the object doesn't exist or a precondition failed

**Options (R2GetOptions):**
```typescript
interface R2GetOptions {
  onlyIf?: R2Conditional | Headers;
  range?: R2Range;
}
```

---
### head()

Get object metadata without downloading the body.

```typescript
head(key: string): Promise<R2Object | null>
```

**Parameters:**
- `key` - Object key (path) in the bucket

**Returns:**
- `R2Object` - Object metadata only
- `null` - If the object doesn't exist

**Use Cases:**
- Check if a file exists (see the sketch below)
- Get the file size
- Get the last-modified date
- Validate an ETag
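A minimal existence check along those lines (the bucket binding name is illustrative):

```typescript
// Sketch: head() returns null for a missing key, so existence is just a null check
const exists = (await env.MY_BUCKET.head('file.txt')) !== null;
```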
---

### delete()

Delete one or more objects.

```typescript
delete(key: string | string[]): Promise<void>
```

**Parameters:**
- `key` - A single key or an array of keys (max 1000)

**Returns:**
- `void` - Always succeeds (idempotent)

**Notes:**
- No error if the object doesn't exist
- Can delete up to 1000 objects at once
- Deletes are strongly consistent

---
### list()

List objects in the bucket.

```typescript
list(options?: R2ListOptions): Promise<R2Objects>
```

**Parameters:**
- `options` - Optional listing options

**Returns:**
- `R2Objects` - List of objects and metadata

**Options (R2ListOptions):**
```typescript
interface R2ListOptions {
  limit?: number;      // Max 1000, default 1000
  prefix?: string;     // Filter by prefix
  cursor?: string;     // Pagination cursor
  delimiter?: string;  // Folder delimiter (usually '/')
  include?: ('httpMetadata' | 'customMetadata')[];
}
```

**Response (R2Objects):**
```typescript
interface R2Objects {
  objects: R2Object[];         // Array of objects
  truncated: boolean;          // true if more results exist
  cursor?: string;             // Cursor for the next page
  delimitedPrefixes: string[]; // "Folder" names (if a delimiter was used)
}
```

---
### createMultipartUpload()

Create a new multipart upload.

```typescript
createMultipartUpload(
  key: string,
  options?: R2MultipartOptions
): Promise<R2MultipartUpload>
```

**Parameters:**
- `key` - Object key for the upload
- `options` - Optional metadata

**Returns:**
- `R2MultipartUpload` - Object for managing the upload

**Options (R2MultipartOptions):**
```typescript
interface R2MultipartOptions {
  httpMetadata?: R2HTTPMetadata;
  customMetadata?: Record<string, string>;
}
```

---

### resumeMultipartUpload()

Resume an existing multipart upload.

```typescript
resumeMultipartUpload(
  key: string,
  uploadId: string
): R2MultipartUpload
```

**Parameters:**
- `key` - Object key for the upload
- `uploadId` - Upload ID from createMultipartUpload()

**Returns:**
- `R2MultipartUpload` - Object for managing the upload

**Notes:**
- Does NOT validate the uploadId or key
- No network request is made
- Use it to continue an upload after a Worker restart

---
## R2Object Interface

Metadata for an R2 object.

```typescript
interface R2Object {
  key: string;              // Object key
  version: string;          // Version ID
  size: number;             // Size in bytes
  etag: string;             // ETag (without quotes)
  httpEtag: string;         // ETag with quotes (RFC 9110)
  uploaded: Date;           // Upload timestamp
  httpMetadata?: R2HTTPMetadata;           // HTTP metadata
  customMetadata?: Record<string, string>; // Custom metadata
  range?: R2Range;          // Range (if partial)
  checksums?: R2Checksums;  // Checksums
  storageClass: 'Standard'; // Storage class
  ssecKeyMd5?: string;      // SSE-C key hash

  writeHttpMetadata(headers: Headers): void; // Apply metadata to headers
}
```

---

## R2ObjectBody Interface

Extends R2Object with a body stream and read methods.

```typescript
interface R2ObjectBody extends R2Object {
  body: ReadableStream; // Object body stream
  bodyUsed: boolean;    // Whether the body has been consumed

  arrayBuffer(): Promise<ArrayBuffer>; // Read as ArrayBuffer
  text(): Promise<string>;             // Read as text
  json<T>(): Promise<T>;               // Read as JSON
  blob(): Promise<Blob>;               // Read as Blob
}
```

---

## R2MultipartUpload Interface

Manage a multipart upload.

```typescript
interface R2MultipartUpload {
  key: string;      // Object key
  uploadId: string; // Upload ID

  uploadPart(
    partNumber: number,
    value: ReadableStream | ArrayBuffer | ArrayBufferView | string | Blob,
    options?: R2MultipartOptions
  ): Promise<R2UploadedPart>;

  abort(): Promise<void>;
  complete(uploadedParts: R2UploadedPart[]): Promise<R2Object>;
}
```

**Methods:**

- **uploadPart()** - Upload a single part (1-10,000)
- **abort()** - Cancel the multipart upload
- **complete()** - Finish the upload with the list of parts

---

## R2UploadedPart Interface

Metadata for an uploaded part.

```typescript
interface R2UploadedPart {
  partNumber: number; // Part number (1-10,000)
  etag: string;       // Part ETag
}
```

---

## R2HTTPMetadata Interface

HTTP headers for an object.

```typescript
interface R2HTTPMetadata {
  contentType?: string;        // Content-Type header
  contentLanguage?: string;    // Content-Language header
  contentDisposition?: string; // Content-Disposition header
  contentEncoding?: string;    // Content-Encoding header
  cacheControl?: string;       // Cache-Control header
  cacheExpiry?: Date;          // Expires header
}
```

---
## R2Conditional Interface

Conditional operations (the `onlyIf` clause).

```typescript
interface R2Conditional {
  etagMatches?: string;      // If-Match
  etagDoesNotMatch?: string; // If-None-Match
  uploadedBefore?: Date;     // If-Unmodified-Since
  uploadedAfter?: Date;      // If-Modified-Since
}
```

**Alternatively, pass a Headers object with:**
- `If-Match`
- `If-None-Match`
- `If-Modified-Since`
- `If-Unmodified-Since`
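As a usage sketch (assuming a Hono context `c`, as in the Complete Example below): per the Workers docs, when the precondition fails `get()` resolves to an `R2Object` without a body rather than `null`, which a `'body' in object` test distinguishes.

```typescript
// Sketch: conditional read for cache revalidation; 304 if the client's ETag still matches
const object = await c.env.MY_BUCKET.get('file.txt', {
  onlyIf: new Headers({ 'If-None-Match': c.req.header('if-none-match') ?? '' }),
});

if (object && !('body' in object)) {
  return c.body(null, 304); // Precondition failed: metadata only, no body returned
}
```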
---

## R2Range Interface

Byte range for partial downloads.

```typescript
interface R2Range {
  offset?: number; // Start byte
  length?: number; // Number of bytes
  suffix?: number; // Last N bytes
}
```

**Examples:**
```typescript
// First 1000 bytes
{ offset: 0, length: 1000 }

// Bytes 100-199
{ offset: 100, length: 100 }

// From byte 1000 to end
{ offset: 1000 }

// Last 500 bytes
{ suffix: 500 }
```

---
## R2Checksums Interface

Stored checksums for an object.

```typescript
interface R2Checksums {
  md5?: ArrayBuffer;
  sha1?: ArrayBuffer;
  sha256?: ArrayBuffer;
  sha384?: ArrayBuffer;
  sha512?: ArrayBuffer;
}
```

---

## Complete Example

```typescript
import { Hono } from 'hono';

type Bindings = {
  MY_BUCKET: R2Bucket;
};

const app = new Hono<{ Bindings: Bindings }>();

// Upload with all metadata
app.put('/files/:key', async (c) => {
  const key = c.req.param('key');
  const body = await c.req.arrayBuffer();

  const object = await c.env.MY_BUCKET.put(key, body, {
    httpMetadata: {
      contentType: c.req.header('content-type') || 'application/octet-stream',
      cacheControl: 'public, max-age=3600',
      contentDisposition: `attachment; filename="${key}"`,
    },
    customMetadata: {
      uploadedBy: 'api',
      uploadedAt: new Date().toISOString(),
    },
    onlyIf: {
      // Only upload if the file doesn't already exist
      uploadedBefore: new Date('2020-01-01'),
    },
  });

  if (!object) {
    return c.json({ error: 'File already exists' }, 409);
  }

  return c.json({
    key: object.key,
    size: object.size,
    etag: object.etag,
  });
});

// Download with range support
app.get('/files/:key', async (c) => {
  const key = c.req.param('key');
  const rangeHeader = c.req.header('range');

  let options: R2GetOptions | undefined;

  if (rangeHeader) {
    // Parse range header: bytes=0-1000
    const match = rangeHeader.match(/bytes=(\d+)-(\d*)/);
    if (match) {
      const start = parseInt(match[1]);
      const end = match[2] ? parseInt(match[2]) : undefined;
      options = {
        range: {
          offset: start,
          length: end ? end - start + 1 : undefined,
        },
      };
    }
  }

  const object = await c.env.MY_BUCKET.get(key, options);

  if (!object) {
    return c.json({ error: 'Not found' }, 404);
  }

  const headers = new Headers();
  object.writeHttpMetadata(headers);
  headers.set('etag', object.httpEtag);

  if (object.range) {
    headers.set('content-range', `bytes ${object.range.offset}-${object.range.offset + object.range.length - 1}/${object.size}`);
    return new Response(object.body, {
      status: 206,
      headers,
    });
  }

  return new Response(object.body, { headers });
});

export default app;
```
templates/r2-cors-config.json (new file, 133 lines)
@@ -0,0 +1,133 @@
{
|
||||
"_comment": "R2 CORS Policy Examples - Apply via Cloudflare Dashboard",
|
||||
"_instructions": [
|
||||
"1. Go to Cloudflare Dashboard → R2",
|
||||
"2. Select your bucket",
|
||||
"3. Go to Settings tab",
|
||||
"4. Under CORS Policy → Add CORS policy",
|
||||
"5. Paste one of the configurations below",
|
||||
"6. Save"
|
||||
],
|
||||
|
||||
"public_assets_all_origins": {
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": ["*"],
|
||||
"AllowedMethods": ["GET", "HEAD"],
|
||||
"AllowedHeaders": ["Range"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"public_assets_specific_origin": {
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": ["https://example.com", "https://www.example.com"],
|
||||
"AllowedMethods": ["GET", "HEAD"],
|
||||
"AllowedHeaders": ["Range"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"file_uploads": {
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": ["https://app.example.com"],
|
||||
"AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
|
||||
"AllowedHeaders": [
|
||||
"Content-Type",
|
||||
"Content-MD5",
|
||||
"Content-Disposition",
|
||||
"x-amz-meta-*"
|
||||
],
|
||||
"ExposeHeaders": ["ETag", "x-amz-version-id"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"presigned_urls": {
|
||||
"_comment": "For presigned URL uploads from browser",
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": ["https://app.example.com"],
|
||||
"AllowedMethods": ["PUT", "POST"],
|
||||
"AllowedHeaders": [
|
||||
"Content-Type",
|
||||
"Content-MD5",
|
||||
"x-amz-*"
|
||||
],
|
||||
"ExposeHeaders": ["ETag"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"multiple_domains": {
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": [
|
||||
"https://app.example.com",
|
||||
"https://admin.example.com",
|
||||
"https://staging.example.com"
|
||||
],
|
||||
"AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
|
||||
"AllowedHeaders": ["*"],
|
||||
"ExposeHeaders": ["ETag", "Content-Length"],
|
||||
"MaxAgeSeconds": 86400
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"development_localhost": {
|
||||
"_comment": "For local development only - DO NOT USE IN PRODUCTION",
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": ["http://localhost:3000", "http://localhost:5173"],
|
||||
"AllowedMethods": ["GET", "PUT", "POST", "DELETE", "HEAD"],
|
||||
"AllowedHeaders": ["*"],
|
||||
"ExposeHeaders": ["ETag"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"strict_security": {
|
||||
"_comment": "Minimal CORS for maximum security",
|
||||
"CORSRules": [
|
||||
{
|
||||
"AllowedOrigins": ["https://app.example.com"],
|
||||
"AllowedMethods": ["GET"],
|
||||
"AllowedHeaders": ["Range"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"cdn_and_api": {
|
||||
"_comment": "Separate rules for CDN assets and API uploads",
|
||||
"CORSRules": [
|
||||
{
|
||||
"_comment": "Rule for CDN/static assets",
|
||||
"AllowedOrigins": ["*"],
|
||||
"AllowedMethods": ["GET", "HEAD"],
|
||||
"AllowedHeaders": ["Range"],
|
||||
"MaxAgeSeconds": 86400
|
||||
},
|
||||
{
|
||||
"_comment": "Rule for authenticated API uploads",
|
||||
"AllowedOrigins": ["https://app.example.com"],
|
||||
"AllowedMethods": ["PUT", "POST", "DELETE"],
|
||||
"AllowedHeaders": [
|
||||
"Content-Type",
|
||||
"Authorization",
|
||||
"x-amz-meta-*"
|
||||
],
|
||||
"ExposeHeaders": ["ETag"],
|
||||
"MaxAgeSeconds": 3600
|
||||
}
|
||||
]
|
||||
}
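  "_testing": {
    "_comment": "Sketch: a preflight request to replay after applying a policy. The URL and origin are placeholders, not real endpoints.",
    "preflight_request": {
      "method": "OPTIONS",
      "url": "https://<bucket-public-url>/file.txt",
      "headers": {
        "Origin": "https://app.example.com",
        "Access-Control-Request-Method": "PUT"
      }
    },
    "expected_response_headers": [
      "Access-Control-Allow-Origin",
      "Access-Control-Allow-Methods",
      "Access-Control-Max-Age"
    ]
  }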
}
238
templates/r2-multipart-upload.ts
Normal file
@@ -0,0 +1,238 @@
/**
 * R2 Multipart Upload Worker
 *
 * Enables large file uploads (>100MB) with:
 * - Resumable uploads
 * - Parallel part uploads
 * - Progress tracking
 * - Abort capability
 *
 * Flow:
 * 1. POST /mpu/create - Create multipart upload
 * 2. PUT /mpu/upload-part - Upload individual parts
 * 3. POST /mpu/complete - Complete the upload
 * 4. DELETE /mpu/abort - Abort the upload (optional)
 */
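
// Note on part sizing: R2 multipart uploads require every part except the
// last to be the same size, with a 5 MiB minimum for all but the final part.
// A common client-side choice (an assumption to tune for your workload, not
// something this Worker enforces):
//   const PART_SIZE = 10 * 1024 * 1024; // 10 MiB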

import { Hono } from 'hono';

type Bindings = {
  MY_BUCKET: R2Bucket;
};

const app = new Hono<{ Bindings: Bindings }>();

// Create multipart upload
app.post('/mpu/create', async (c) => {
  const { key, contentType } = await c.req.json<{
    key: string;
    contentType?: string;
  }>();

  if (!key) {
    return c.json({
      success: false,
      error: 'Missing required field: key',
    }, 400);
  }

  try {
    const multipart = await c.env.MY_BUCKET.createMultipartUpload(key, {
      httpMetadata: {
        contentType: contentType || 'application/octet-stream',
      },
    });

    return c.json({
      success: true,
      key: multipart.key,
      uploadId: multipart.uploadId,
    });
  } catch (error: any) {
    console.error('Create multipart error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to create multipart upload',
    }, 500);
  }
});

// Upload a part
app.put('/mpu/upload-part', async (c) => {
  const key = c.req.query('key');
  const uploadId = c.req.query('uploadId');
  const partNumber = parseInt(c.req.query('partNumber') || '0', 10);

  if (!key || !uploadId || !partNumber) {
    return c.json({
      success: false,
      error: 'Missing required parameters: key, uploadId, partNumber',
    }, 400);
  }

  if (partNumber < 1 || partNumber > 10000) {
    return c.json({
      success: false,
      error: 'Part number must be between 1 and 10000',
    }, 400);
  }

  try {
    const body = await c.req.arrayBuffer();

    // Resume the multipart upload
    const multipart = c.env.MY_BUCKET.resumeMultipartUpload(key, uploadId);

    // Upload the part
    const uploadedPart = await multipart.uploadPart(partNumber, body);

    return c.json({
      success: true,
      partNumber: uploadedPart.partNumber,
      etag: uploadedPart.etag,
    });
  } catch (error: any) {
    console.error('Upload part error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to upload part',
      details: error.message,
    }, 500);
  }
});

// Complete multipart upload
app.post('/mpu/complete', async (c) => {
  const { key, uploadId, parts } = await c.req.json<{
    key: string;
    uploadId: string;
    parts: Array<{ partNumber: number; etag: string }>;
  }>();

  if (!key || !uploadId || !parts || !Array.isArray(parts)) {
    return c.json({
      success: false,
      error: 'Missing required fields: key, uploadId, parts',
    }, 400);
  }

  try {
    const multipart = c.env.MY_BUCKET.resumeMultipartUpload(key, uploadId);

    // Complete the upload
    const object = await multipart.complete(parts);

    return c.json({
      success: true,
      key: object.key,
      size: object.size,
      etag: object.etag,
      uploaded: object.uploaded,
    });
  } catch (error: any) {
    console.error('Complete multipart error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to complete multipart upload',
      details: error.message,
    }, 500);
  }
});

// Abort multipart upload
app.delete('/mpu/abort', async (c) => {
  const key = c.req.query('key');
  const uploadId = c.req.query('uploadId');

  if (!key || !uploadId) {
    return c.json({
      success: false,
      error: 'Missing required parameters: key, uploadId',
    }, 400);
  }

  try {
    const multipart = c.env.MY_BUCKET.resumeMultipartUpload(key, uploadId);
    await multipart.abort();

    return c.json({
      success: true,
      message: 'Multipart upload aborted',
      key,
      uploadId,
    });
  } catch (error: any) {
    console.error('Abort multipart error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to abort multipart upload',
    }, 500);
  }
});

// Health check
app.get('/health', (c) => {
  return c.json({
    status: 'healthy',
    service: 'r2-multipart-worker',
    timestamp: new Date().toISOString(),
  });
});

export default app;

/**
 * Example Python client for multipart upload:
 *
 * import requests
 * from concurrent.futures import ThreadPoolExecutor
 *
 * WORKER_URL = "https://my-worker.workers.dev"
 * FILE_PATH = "large-file.mp4"
 * PART_SIZE = 10 * 1024 * 1024  # 10MB parts
 *
 * # 1. Create multipart upload
 * response = requests.post(f"{WORKER_URL}/mpu/create", json={
 *     "key": "uploads/large-file.mp4",
 *     "contentType": "video/mp4"
 * })
 * data = response.json()
 * upload_id = data["uploadId"]
 * key = data["key"]
 *
 * # 2. Upload parts in parallel
 * def upload_part(part_number, data):
 *     response = requests.put(
 *         f"{WORKER_URL}/mpu/upload-part",
 *         params={
 *             "key": key,
 *             "uploadId": upload_id,
 *             "partNumber": part_number
 *         },
 *         data=data
 *     )
 *     result = response.json()
 *     # /mpu/complete expects only partNumber + etag for each part
 *     return {"partNumber": result["partNumber"], "etag": result["etag"]}
 *
 * # Collect the chunks first, then submit them all before waiting on any
 * # result - submitting and immediately calling .result() would serialize
 * # the uploads.
 * chunks = []
 * with open(FILE_PATH, 'rb') as f:
 *     part_number = 1
 *     while True:
 *         chunk = f.read(PART_SIZE)
 *         if not chunk:
 *             break
 *         chunks.append((part_number, chunk))
 *         part_number += 1
 *
 * with ThreadPoolExecutor(max_workers=4) as executor:
 *     futures = [executor.submit(upload_part, n, data) for n, data in chunks]
 *     uploaded_parts = [future.result() for future in futures]
 *
 * # 3. Complete upload
 * response = requests.post(f"{WORKER_URL}/mpu/complete", json={
 *     "key": key,
 *     "uploadId": upload_id,
 *     "parts": uploaded_parts
 * })
 *
 * print(response.json())
 */
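
/**
 * Equivalent TypeScript/browser client (a sketch; WORKER_URL is a
 * placeholder and error handling is omitted for brevity):
 *
 * const WORKER_URL = 'https://my-worker.workers.dev';
 * const PART_SIZE = 10 * 1024 * 1024; // 10 MiB
 *
 * async function uploadFile(file: File, key: string) {
 *   // 1. Create the multipart upload
 *   const { uploadId } = await (await fetch(`${WORKER_URL}/mpu/create`, {
 *     method: 'POST',
 *     headers: { 'Content-Type': 'application/json' },
 *     body: JSON.stringify({ key, contentType: file.type }),
 *   })).json();
 *
 *   // 2. Upload equally sized parts in parallel
 *   const partCount = Math.ceil(file.size / PART_SIZE);
 *   const parts = await Promise.all(
 *     Array.from({ length: partCount }, (_, i) => {
 *       const blob = file.slice(i * PART_SIZE, (i + 1) * PART_SIZE);
 *       const params = new URLSearchParams({
 *         key, uploadId, partNumber: String(i + 1),
 *       });
 *       return fetch(`${WORKER_URL}/mpu/upload-part?${params}`, {
 *         method: 'PUT',
 *         body: blob,
 *       })
 *         .then((r) => r.json())
 *         .then(({ partNumber, etag }) => ({ partNumber, etag }));
 *     })
 *   );
 *
 *   // 3. Complete the upload
 *   return (await fetch(`${WORKER_URL}/mpu/complete`, {
 *     method: 'POST',
 *     headers: { 'Content-Type': 'application/json' },
 *     body: JSON.stringify({ key, uploadId, parts }),
 *   })).json();
 * }
 */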
289
templates/r2-presigned-urls.ts
Normal file
@@ -0,0 +1,289 @@
/**
 * R2 Presigned URL Generator Worker
 *
 * Generates presigned URLs for:
 * - Direct client uploads to R2 (bypasses Worker)
 * - Temporary download links with expiry
 *
 * IMPORTANT:
 * - Never expose R2 access keys in client code
 * - Always generate presigned URLs server-side
 * - Set appropriate expiry times (1-24 hours)
 * - Add authentication before generating URLs
 *
 * Setup:
 * 1. Create R2 API token in Cloudflare dashboard
 * 2. Add secrets to wrangler:
 *    wrangler secret put R2_ACCESS_KEY_ID
 *    wrangler secret put R2_SECRET_ACCESS_KEY
 *    wrangler secret put ACCOUNT_ID
 */
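
// One way to satisfy "add authentication before generating URLs", using
// Hono's built-in bearer-auth middleware (a sketch; API_TOKEN would be an
// extra secret you define yourself - it is not part of this template):
//
//   import { bearerAuth } from 'hono/bearer-auth';
//   app.use('/presigned/*', (c, next) =>
//     bearerAuth({ token: c.env.API_TOKEN })(c, next));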

import { Hono } from 'hono';
import { AwsClient } from 'aws4fetch';

type Bindings = {
  R2_ACCESS_KEY_ID: string;
  R2_SECRET_ACCESS_KEY: string;
  ACCOUNT_ID: string;
  MY_BUCKET: R2Bucket;
};

const app = new Hono<{ Bindings: Bindings }>();

// Generate presigned upload URL
app.post('/presigned/upload', async (c) => {
  // TODO: Add authentication here
  // const authHeader = c.req.header('Authorization');
  // if (!authHeader) {
  //   return c.json({ error: 'Unauthorized' }, 401);
  // }

  const { filename, expiresIn = 3600 } = await c.req.json<{
    filename: string;
    expiresIn?: number;
  }>();

  if (!filename) {
    return c.json({
      success: false,
      error: 'Missing required field: filename',
    }, 400);
  }

  // Validate expiry (max 7 days)
  const maxExpiry = 7 * 24 * 60 * 60; // 7 days
  const validExpiry = Math.min(expiresIn, maxExpiry);

  try {
    const r2Client = new AwsClient({
      accessKeyId: c.env.R2_ACCESS_KEY_ID,
      secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
    });

    const bucketName = 'my-bucket'; // Replace with your bucket name
    const accountId = c.env.ACCOUNT_ID;

    const url = new URL(
      `https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
    );

    // Set expiry
    url.searchParams.set('X-Amz-Expires', validExpiry.toString());

    // Sign the URL for PUT
    const signed = await r2Client.sign(
      new Request(url, { method: 'PUT' }),
      { aws: { signQuery: true } }
    );

    return c.json({
      success: true,
      uploadUrl: signed.url,
      filename,
      expiresIn: validExpiry,
      expiresAt: new Date(Date.now() + validExpiry * 1000).toISOString(),
    });
  } catch (error: any) {
    console.error('Presigned upload URL error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to generate presigned upload URL',
    }, 500);
  }
});

// Generate presigned download URL
app.post('/presigned/download', async (c) => {
  // TODO: Add authentication here
  // const authHeader = c.req.header('Authorization');
  // if (!authHeader) {
  //   return c.json({ error: 'Unauthorized' }, 401);
  // }

  const { filename, expiresIn = 3600 } = await c.req.json<{
    filename: string;
    expiresIn?: number;
  }>();

  if (!filename) {
    return c.json({
      success: false,
      error: 'Missing required field: filename',
    }, 400);
  }

  // Validate expiry (max 7 days)
  const maxExpiry = 7 * 24 * 60 * 60;
  const validExpiry = Math.min(expiresIn, maxExpiry);

  try {
    // Check if file exists first
    const exists = await c.env.MY_BUCKET.head(filename);
    if (!exists) {
      return c.json({
        success: false,
        error: 'File not found',
      }, 404);
    }

    const r2Client = new AwsClient({
      accessKeyId: c.env.R2_ACCESS_KEY_ID,
      secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
    });

    const bucketName = 'my-bucket'; // Replace with your bucket name
    const accountId = c.env.ACCOUNT_ID;

    const url = new URL(
      `https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
    );

    url.searchParams.set('X-Amz-Expires', validExpiry.toString());

    // Sign the URL for GET
    const signed = await r2Client.sign(
      new Request(url, { method: 'GET' }),
      { aws: { signQuery: true } }
    );

    return c.json({
      success: true,
      downloadUrl: signed.url,
      filename,
      size: exists.size,
      expiresIn: validExpiry,
      expiresAt: new Date(Date.now() + validExpiry * 1000).toISOString(),
    });
  } catch (error: any) {
    console.error('Presigned download URL error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to generate presigned download URL',
    }, 500);
  }
});

// Generate batch presigned URLs (upload)
app.post('/presigned/upload/batch', async (c) => {
  // TODO: Add authentication here (same as the single-URL endpoints)

  const { filenames, expiresIn = 3600 } = await c.req.json<{
    filenames: string[];
    expiresIn?: number;
  }>();

  if (!filenames || !Array.isArray(filenames)) {
    return c.json({
      success: false,
      error: 'Invalid request: filenames must be an array',
    }, 400);
  }

  const maxExpiry = 7 * 24 * 60 * 60;
  const validExpiry = Math.min(expiresIn, maxExpiry);

  try {
    const r2Client = new AwsClient({
      accessKeyId: c.env.R2_ACCESS_KEY_ID,
      secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
    });

    const bucketName = 'my-bucket'; // Replace with your bucket name
    const accountId = c.env.ACCOUNT_ID;

    const urls = await Promise.all(
      filenames.map(async (filename) => {
        const url = new URL(
          `https://${bucketName}.${accountId}.r2.cloudflarestorage.com/${filename}`
        );
        url.searchParams.set('X-Amz-Expires', validExpiry.toString());

        const signed = await r2Client.sign(
          new Request(url, { method: 'PUT' }),
          { aws: { signQuery: true } }
        );

        return {
          filename,
          uploadUrl: signed.url,
        };
      })
    );

    return c.json({
      success: true,
      urls,
      expiresIn: validExpiry,
      expiresAt: new Date(Date.now() + validExpiry * 1000).toISOString(),
    });
  } catch (error: any) {
    console.error('Batch presigned URLs error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to generate presigned URLs',
    }, 500);
  }
});

// Health check
app.get('/health', (c) => {
  return c.json({
    status: 'healthy',
    service: 'r2-presigned-urls',
    timestamp: new Date().toISOString(),
  });
});

export default app;

/**
 * Example client-side upload with presigned URL:
 *
 * // 1. Get presigned URL from your Worker
 * const response = await fetch('https://my-worker.workers.dev/presigned/upload', {
 *   method: 'POST',
 *   headers: {
 *     'Content-Type': 'application/json',
 *     'Authorization': 'Bearer YOUR_TOKEN'
 *   },
 *   body: JSON.stringify({
 *     filename: 'uploads/photo.jpg',
 *     expiresIn: 3600
 *   })
 * });
 *
 * const { uploadUrl } = await response.json();
 *
 * // 2. Upload file directly to R2
 * const file = document.querySelector('input[type="file"]').files[0];
 *
 * await fetch(uploadUrl, {
 *   method: 'PUT',
 *   body: file,
 *   headers: {
 *     'Content-Type': file.type
 *   }
 * });
 *
 * console.log('Upload complete!');
 */

/**
 * Wrangler setup for secrets:
 *
 * # Add R2 access key ID
 * wrangler secret put R2_ACCESS_KEY_ID
 *
 * # Add R2 secret access key
 * wrangler secret put R2_SECRET_ACCESS_KEY
 *
 * # Add account ID
 * wrangler secret put ACCOUNT_ID
 *
 * # Create R2 API token:
 * 1. Go to Cloudflare Dashboard → R2
 * 2. Click "Manage R2 API Tokens"
 * 3. Create API Token with:
 *    - Permissions: Object Read & Write
 *    - Buckets: Specific bucket or all buckets
 * 4. Save the Access Key ID and Secret Access Key
 */
226
templates/r2-simple-upload.ts
Normal file
@@ -0,0 +1,226 @@
/**
 * Simple R2 Upload/Download Worker
 *
 * Features:
 * - Upload files with PUT requests
 * - Download files with GET requests
 * - Delete files with DELETE requests
 * - List all files
 * - Proper content-type handling
 * - Error handling
 */

import { Hono } from 'hono';

type Bindings = {
  MY_BUCKET: R2Bucket;
};

const app = new Hono<{ Bindings: Bindings }>();

// Upload a file
// Note: ':filename' matches a single path segment. For nested keys such as
// 'uploads/photo.jpg', use Hono's regex path syntax (e.g. ':filename{.+}').
app.put('/files/:filename', async (c) => {
  const filename = c.req.param('filename');
  const body = await c.req.arrayBuffer();
  const contentType = c.req.header('content-type') || 'application/octet-stream';

  try {
    const object = await c.env.MY_BUCKET.put(filename, body, {
      httpMetadata: {
        contentType: contentType,
        cacheControl: 'public, max-age=3600',
      },
      customMetadata: {
        uploadedAt: new Date().toISOString(),
        uploadedBy: 'api',
      },
    });

    return c.json({
      success: true,
      key: object.key,
      size: object.size,
      etag: object.etag,
      uploaded: object.uploaded,
    });
  } catch (error: any) {
    console.error('Upload error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to upload file',
    }, 500);
  }
});

// Download a file
app.get('/files/:filename', async (c) => {
  const filename = c.req.param('filename');

  try {
    const object = await c.env.MY_BUCKET.get(filename);

    if (!object) {
      return c.json({
        success: false,
        error: 'File not found',
      }, 404);
    }

    // Apply http metadata from R2
    const headers = new Headers();
    object.writeHttpMetadata(headers);
    headers.set('etag', object.httpEtag);

    return new Response(object.body, { headers });
  } catch (error: any) {
    console.error('Download error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to download file',
    }, 500);
  }
});

// Get file metadata (without downloading the body)
// Note: Hono has no app.head() helper, and a HEAD response cannot carry a
// JSON body, so metadata is exposed as a regular GET route instead.
app.get('/files/:filename/metadata', async (c) => {
  const filename = c.req.param('filename');

  try {
    const object = await c.env.MY_BUCKET.head(filename);

    if (!object) {
      return c.json({
        success: false,
        error: 'File not found',
      }, 404);
    }

    return c.json({
      success: true,
      key: object.key,
      size: object.size,
      etag: object.etag,
      uploaded: object.uploaded,
      contentType: object.httpMetadata?.contentType,
      customMetadata: object.customMetadata,
    });
  } catch (error: any) {
    console.error('Head error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to get file metadata',
    }, 500);
  }
});

// Delete a file
app.delete('/files/:filename', async (c) => {
  const filename = c.req.param('filename');

  try {
    // Check if file exists first
    const exists = await c.env.MY_BUCKET.head(filename);

    if (!exists) {
      return c.json({
        success: false,
        error: 'File not found',
      }, 404);
    }

    await c.env.MY_BUCKET.delete(filename);

    return c.json({
      success: true,
      message: 'File deleted successfully',
      key: filename,
    });
  } catch (error: any) {
    console.error('Delete error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to delete file',
    }, 500);
  }
});

// List all files (with pagination)
app.get('/files', async (c) => {
  const cursor = c.req.query('cursor');
  const limit = parseInt(c.req.query('limit') || '100', 10);
  const prefix = c.req.query('prefix') || '';

  try {
    const listed = await c.env.MY_BUCKET.list({
      limit: Math.min(limit, 1000), // Max 1000
      cursor: cursor || undefined,
      prefix: prefix || undefined,
      include: ['httpMetadata'], // Required so obj.httpMetadata is populated below
    });

    return c.json({
      success: true,
      files: listed.objects.map(obj => ({
        key: obj.key,
        size: obj.size,
        etag: obj.etag,
        uploaded: obj.uploaded,
        contentType: obj.httpMetadata?.contentType,
      })),
      truncated: listed.truncated,
      cursor: listed.truncated ? listed.cursor : undefined,
      count: listed.objects.length,
    });
  } catch (error: any) {
    console.error('List error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to list files',
    }, 500);
  }
});

// Bulk delete (up to 1000 files)
app.post('/files/bulk-delete', async (c) => {
  const { keys } = await c.req.json<{ keys: string[] }>();

  if (!keys || !Array.isArray(keys)) {
    return c.json({
      success: false,
      error: 'Invalid request: keys must be an array',
    }, 400);
  }

  if (keys.length > 1000) {
    return c.json({
      success: false,
      error: 'Cannot delete more than 1000 keys at once',
    }, 400);
  }

  try {
    await c.env.MY_BUCKET.delete(keys);

    return c.json({
      success: true,
      message: `Deleted ${keys.length} files`,
      count: keys.length,
    });
  } catch (error: any) {
    console.error('Bulk delete error:', error.message);
    return c.json({
      success: false,
      error: 'Failed to delete files',
    }, 500);
  }
});

// Health check
app.get('/health', (c) => {
  return c.json({
    status: 'healthy',
    service: 'r2-worker',
    timestamp: new Date().toISOString(),
  });
});

export default app;
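
/**
 * Example client usage (a sketch - the Worker URL is a placeholder for your
 * own deployment):
 *
 *   const base = 'https://my-worker.workers.dev';
 *
 *   // Upload
 *   await fetch(`${base}/files/photo.jpg`, {
 *     method: 'PUT',
 *     headers: { 'Content-Type': 'image/jpeg' },
 *     body: file, // a File or Blob
 *   });
 *
 *   // Metadata only
 *   const meta = await (await fetch(`${base}/files/photo.jpg/metadata`)).json();
 *
 *   // List (paginated)
 *   const page = await (await fetch(`${base}/files?limit=10`)).json();
 *
 *   // Delete
 *   await fetch(`${base}/files/photo.jpg`, { method: 'DELETE' });
 */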
49
templates/wrangler-r2-config.jsonc
Normal file
@@ -0,0 +1,49 @@
{
  "$schema": "node_modules/wrangler/config-schema.json",
  "name": "my-r2-worker",
  "main": "src/index.ts",
  "account_id": "YOUR_ACCOUNT_ID",
  "compatibility_date": "2025-10-11",

  // R2 Bucket Bindings
  "r2_buckets": [
    {
      // The binding name - accessible as env.MY_BUCKET in your Worker
      "binding": "MY_BUCKET",

      // The actual bucket name in R2 (must exist)
      "bucket_name": "my-bucket",

      // Optional: Use a different bucket for local development
      // This prevents dev/test data from polluting the production bucket
      "preview_bucket_name": "my-bucket-preview"
    }
  ],
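
  // The binding above surfaces in Worker code as env.MY_BUCKET, typed as
  // R2Bucket from @cloudflare/workers-types, e.g.:
  //   type Bindings = { MY_BUCKET: R2Bucket };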

  // Multiple buckets example
  // "r2_buckets": [
  //   {
  //     "binding": "UPLOADS",
  //     "bucket_name": "user-uploads"
  //   },
  //   {
  //     "binding": "ASSETS",
  //     "bucket_name": "static-assets"
  //   },
  //   {
  //     "binding": "BACKUPS",
  //     "bucket_name": "database-backups"
  //   }
  // ],

  // Optional: Enable observability
  "observability": {
    "enabled": true
  },

  // Optional: Workers Static Assets (if serving frontend)
  "assets": {
    "directory": "./public/",
    "binding": "ASSETS"
  }
}