Initial commit

Zhongwei Li
2025-11-30 08:24:29 +08:00
commit 571bc8c17c
12 changed files with 2689 additions and 0 deletions

@@ -0,0 +1,469 @@
# R2 Common Patterns
**Last Updated**: 2025-10-21
---
## Image Upload & Serving
### Upload with Automatic Content-Type Detection
```typescript
import { Hono } from 'hono';
type Bindings = {
IMAGES: R2Bucket;
};
const app = new Hono<{ Bindings: Bindings }>();
app.post('/upload/image', async (c) => {
const formData = await c.req.formData();
const file = formData.get('image') as File;
if (!file) {
return c.json({ error: 'No file provided' }, 400);
}
// Validate file type
const allowedTypes = ['image/jpeg', 'image/png', 'image/webp', 'image/gif'];
if (!allowedTypes.includes(file.type)) {
return c.json({ error: 'Invalid file type' }, 400);
}
// Generate unique filename
const extension = file.name.split('.').pop();
const filename = `${crypto.randomUUID()}.${extension}`;
const key = `images/${filename}`;
// Upload to R2
const arrayBuffer = await file.arrayBuffer();
const object = await c.env.IMAGES.put(key, arrayBuffer, {
httpMetadata: {
contentType: file.type,
cacheControl: 'public, max-age=31536000, immutable',
},
customMetadata: {
originalFilename: file.name,
uploadedAt: new Date().toISOString(),
},
});
return c.json({
success: true,
url: `/images/${filename}`,
key: object.key,
size: object.size,
});
});
// Serve image
app.get('/images/:filename', async (c) => {
const filename = c.req.param('filename');
const key = `images/${filename}`;
const object = await c.env.IMAGES.get(key);
if (!object) {
return c.json({ error: 'Image not found' }, 404);
}
return new Response(object.body, {
headers: {
'Content-Type': object.httpMetadata?.contentType || 'image/jpeg',
'Cache-Control': 'public, max-age=31536000, immutable',
'ETag': object.httpEtag,
},
});
});
export default app;
```
---
## User File Storage with Folder Organization
```typescript
app.post('/users/:userId/files', async (c) => {
const userId = c.req.param('userId');
const formData = await c.req.formData();
const file = formData.get('file') as File;
if (!file) {
return c.json({ error: 'No file provided' }, 400);
}
// Organize by user ID and date
const date = new Date().toISOString().split('T')[0]; // YYYY-MM-DD
const filename = file.name;
const key = `users/${userId}/${date}/${filename}`;
const arrayBuffer = await file.arrayBuffer();
const object = await c.env.MY_BUCKET.put(key, arrayBuffer, {
httpMetadata: {
contentType: file.type,
contentDisposition: `attachment; filename="${filename}"`,
},
customMetadata: {
userId,
uploadDate: date,
originalSize: file.size.toString(),
},
});
return c.json({
success: true,
fileId: object.key,
size: object.size,
});
});
// List user's files
app.get('/users/:userId/files', async (c) => {
const userId = c.req.param('userId');
const cursor = c.req.query('cursor');
const listed = await c.env.MY_BUCKET.list({
prefix: `users/${userId}/`,
limit: 100,
cursor: cursor || undefined,
});
return c.json({
files: listed.objects.map(obj => ({
key: obj.key,
filename: obj.key.split('/').pop(),
size: obj.size,
uploaded: obj.uploaded,
metadata: obj.customMetadata,
})),
hasMore: listed.truncated,
cursor: listed.cursor,
});
});
```
---
## Thumbnail Generation & Caching
```typescript
app.get('/thumbnails/:filename', async (c) => {
const filename = c.req.param('filename');
const width = parseInt(c.req.query('w') || '200');
const height = parseInt(c.req.query('h') || '200');
const thumbnailKey = `thumbnails/${width}x${height}/${filename}`;
// Check if thumbnail already exists
let thumbnail = await c.env.IMAGES.get(thumbnailKey);
if (!thumbnail) {
// Get original image
const original = await c.env.IMAGES.get(`images/${filename}`);
if (!original) {
return c.json({ error: 'Image not found' }, 404);
}
// Generate thumbnail (using Cloudflare Images or external service)
// This is a placeholder - use actual image processing
const thumbnailData = await generateThumbnail(
await original.arrayBuffer(),
width,
height
);
// Store thumbnail for future requests
await c.env.IMAGES.put(thumbnailKey, thumbnailData, {
httpMetadata: {
contentType: 'image/jpeg',
cacheControl: 'public, max-age=31536000, immutable',
},
});
thumbnail = await c.env.IMAGES.get(thumbnailKey);
}
return new Response(thumbnail!.body, {
headers: {
'Content-Type': 'image/jpeg',
'Cache-Control': 'public, max-age=31536000, immutable',
},
});
});
async function generateThumbnail(
imageData: ArrayBuffer,
width: number,
height: number
): Promise<ArrayBuffer> {
// Use Cloudflare Images API, sharp, or other image processing library
// This is a placeholder
return imageData;
}
```
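If the zone in front of the Worker has Cloudflare Image Resizing enabled, one possible `generateThumbnail` replacement is to fetch the original back through the resizing pipeline. This is only a sketch, not part of the pattern above: it assumes the original is reachable over HTTP (for example via the `/images/:filename` route) and takes a URL rather than an `ArrayBuffer`.
```typescript
// Sketch only: resize via Cloudflare Image Resizing (requires the feature to be
// enabled on the zone). `originalUrl` is assumed to point at the stored original.
async function generateThumbnailViaResizing(
  originalUrl: string,
  width: number,
  height: number
): Promise<ArrayBuffer> {
  const response = await fetch(originalUrl, {
    cf: {
      image: {
        width,
        height,
        fit: 'cover', // crop to fill the requested dimensions
      },
    },
  });
  if (!response.ok) {
    throw new Error(`Resize failed with status ${response.status}`);
  }
  return response.arrayBuffer();
}
```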
---
## Versioned File Storage
```typescript
app.put('/files/:filename', async (c) => {
const filename = c.req.param('filename');
const body = await c.req.arrayBuffer();
// Get current version number
const versionKey = `versions/${filename}/latest`;
const currentVersion = await c.env.MY_BUCKET.head(versionKey);
let version = 1;
if (currentVersion?.customMetadata?.version) {
version = parseInt(currentVersion.customMetadata.version) + 1;
}
// Store new version
const versionedKey = `versions/${filename}/v${version}`;
await c.env.MY_BUCKET.put(versionedKey, body, {
httpMetadata: {
contentType: c.req.header('content-type') || 'application/octet-stream',
},
customMetadata: {
version: version.toString(),
createdAt: new Date().toISOString(),
},
});
// Update "latest" pointer
await c.env.MY_BUCKET.put(versionKey, body, {
httpMetadata: {
contentType: c.req.header('content-type') || 'application/octet-stream',
},
customMetadata: {
version: version.toString(),
latestVersion: 'true',
},
});
return c.json({
success: true,
version,
key: versionedKey,
});
});
// Get specific version
app.get('/files/:filename/v/:version', async (c) => {
const filename = c.req.param('filename');
const version = c.req.param('version');
const key = `versions/${filename}/v${version}`;
const object = await c.env.MY_BUCKET.get(key);
if (!object) {
return c.json({ error: 'Version not found' }, 404);
}
return new Response(object.body, {
headers: {
'Content-Type': object.httpMetadata?.contentType || 'application/octet-stream',
},
});
});
```
---
## Backup & Archive Pattern
```typescript
// Daily database backup to R2
async function backupDatabase(env: Bindings) {
const date = new Date().toISOString().split('T')[0];
const key = `backups/database/${date}/dump.sql.gz`;
// Generate backup (placeholder)
const backupData = await generateDatabaseDump();
await env.BACKUPS.put(key, backupData, {
httpMetadata: {
contentType: 'application/gzip',
contentEncoding: 'gzip',
},
customMetadata: {
backupDate: date,
backupType: 'full',
database: 'production',
},
});
// Delete backups older than 30 days
await cleanupOldBackups(env, 30);
}
async function cleanupOldBackups(env: Bindings, retentionDays: number) {
const cutoffDate = new Date();
cutoffDate.setDate(cutoffDate.getDate() - retentionDays);
const listed = await env.BACKUPS.list({
prefix: 'backups/database/',
});
const oldBackups = listed.objects.filter(
obj => obj.uploaded < cutoffDate
);
if (oldBackups.length > 0) {
const keysToDelete = oldBackups.map(obj => obj.key);
await env.BACKUPS.delete(keysToDelete);
}
}
```
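To actually run this daily, the backup function can be attached to a Cron Trigger. A minimal sketch, assuming a schedule such as `crons = ["0 3 * * *"]` is configured in `wrangler.toml` and that the `Bindings` type includes the `BACKUPS` bucket:
```typescript
// Minimal scheduled handler that kicks off the daily backup.
export default {
  async scheduled(
    controller: ScheduledController,
    env: Bindings,
    ctx: ExecutionContext
  ): Promise<void> {
    // waitUntil keeps the invocation alive until the backup finishes
    ctx.waitUntil(backupDatabase(env));
  },
};
```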
---
## Static Site Hosting with SPA Fallback
```typescript
app.get('/*', async (c) => {
const url = new URL(c.req.url);
let key = url.pathname.slice(1); // Remove leading slash
if (key === '' || key.endsWith('/')) {
key += 'index.html';
}
let object = await c.env.STATIC.get(key);
// SPA fallback: if file not found, try index.html
if (!object && !key.includes('.')) {
object = await c.env.STATIC.get('index.html');
}
if (!object) {
return c.json({ error: 'Not found' }, 404);
}
const headers = new Headers();
object.writeHttpMetadata(headers);
// Set appropriate cache headers
if (key.match(/\.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2)$/)) {
headers.set('Cache-Control', 'public, max-age=31536000, immutable');
} else {
headers.set('Cache-Control', 'public, max-age=3600, must-revalidate');
}
return new Response(object.body, { headers });
});
```
---
## CDN with Origin Fallback
```typescript
// Use R2 as a CDN cache with an external origin fallback
app.get('/cdn/*', async (c) => {
  const url = new URL(c.req.url);
  const key = url.pathname.replace('/cdn/', '');

  // Check the R2 cache first
  let object = await c.env.CDN_CACHE.get(key);
  const cacheHit = object !== null;

  if (!object) {
    // Fetch from the origin
    const originUrl = `https://origin.example.com/${key}`;
    const response = await fetch(originUrl);

    if (!response.ok) {
      return c.json({ error: 'Not found on origin' }, 404);
    }

    const data = await response.arrayBuffer();
    const contentType = response.headers.get('content-type') || 'application/octet-stream';

    // Cache in R2 for future requests
    await c.env.CDN_CACHE.put(key, data, {
      httpMetadata: {
        contentType,
        cacheControl: 'public, max-age=31536000',
      },
    });

    object = await c.env.CDN_CACHE.get(key);
  }

  return new Response(object!.body, {
    headers: {
      'Content-Type': object!.httpMetadata?.contentType || 'application/octet-stream',
      'Cache-Control': 'public, max-age=31536000',
      'X-Cache': cacheHit ? 'HIT' : 'MISS',
    },
  });
});
```
---
## Signed Upload with Quota Limits
```typescript
import { AwsClient } from 'aws4fetch';

app.post('/request-upload', async (c) => {
  const { userId, filename, fileSize } = await c.req.json();

  // Check the user's quota before issuing an upload URL
  const quota = await getUserQuota(userId);
  if (quota.used + fileSize > quota.total) {
    return c.json({ error: 'Quota exceeded' }, 403);
  }

  // Generate a presigned URL via the S3 API
  const r2Client = new AwsClient({
    accessKeyId: c.env.R2_ACCESS_KEY_ID,
    secretAccessKey: c.env.R2_SECRET_ACCESS_KEY,
  });

  const key = `users/${userId}/${filename}`;
  const url = new URL(
    `https://my-bucket.${c.env.ACCOUNT_ID}.r2.cloudflarestorage.com/${key}`
  );
  url.searchParams.set('X-Amz-Expires', '3600');

  const signed = await r2Client.sign(
    new Request(url, { method: 'PUT' }),
    { aws: { signQuery: true } }
  );

  return c.json({
    uploadUrl: signed.url,
    expiresIn: 3600,
  });
});

async function getUserQuota(userId: string) {
  // Query your database for the user's quota (placeholder values)
  return {
    used: 1024 * 1024 * 100, // 100MB used
    total: 1024 * 1024 * 1024, // 1GB total
  };
}
```
---
## Best Practices Summary
1. **Use meaningful key prefixes** for organization (`users/{id}/`, `images/`, `backups/`)
2. **Set appropriate cache headers** for static assets
3. **Store metadata** for tracking and filtering
4. **Use bulk delete** instead of loops
5. **Implement cleanup** for old/temporary files
6. **Add authentication** before presigned URL generation
7. **Validate file types** before uploading
8. **Use UUIDs** for unique filenames
9. **Set expiry times** on presigned URLs
10. **Monitor quota** to prevent overages

@@ -0,0 +1,343 @@
# R2 S3 API Compatibility
**Last Updated**: 2025-10-21
**Official Docs**: https://developers.cloudflare.com/r2/api/s3/api/
---
## Overview
R2 implements a large portion of the Amazon S3 API, allowing you to use existing S3 SDKs and tools.
**S3 Endpoint Format:**
```
https://<account_id>.r2.cloudflarestorage.com
```
---
## Supported S3 Operations
### Bucket Operations
- ✅ ListBuckets
- ❌ CreateBucket (use Cloudflare Dashboard or Wrangler)
- ❌ DeleteBucket (use Cloudflare Dashboard or Wrangler)
### Object Operations
- ✅ GetObject
- ✅ PutObject
- ✅ DeleteObject
- ✅ DeleteObjects (bulk delete, max 1000)
- ✅ HeadObject
- ✅ ListObjectsV2
- ✅ CopyObject
- ✅ UploadPart
- ✅ CreateMultipartUpload
- ✅ CompleteMultipartUpload
- ✅ AbortMultipartUpload
- ✅ ListMultipartUploads
- ✅ ListParts
### Presigned URLs
- ✅ GetObject (download)
- ✅ PutObject (upload)
- ✅ UploadPart (multipart)
### Not Supported
- ❌ Versioning
- ❌ Object Lock
- ❌ ACLs (use CORS instead)
- ❌ Bucket policies
- ❌ Object tagging (use custom metadata)
- ❌ Server-side encryption config (use SSE-C instead)
---
## Using AWS SDK for JavaScript
### Installation
```bash
npm install @aws-sdk/client-s3
npm install @aws-sdk/s3-request-presigner
```
### Basic Usage
```typescript
import { S3Client, PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';
// Create S3 client for R2
const s3Client = new S3Client({
region: 'auto',
endpoint: `https://<ACCOUNT_ID>.r2.cloudflarestorage.com`,
credentials: {
accessKeyId: '<R2_ACCESS_KEY_ID>',
secretAccessKey: '<R2_SECRET_ACCESS_KEY>',
},
});
// Upload object
const uploadParams = {
Bucket: 'my-bucket',
Key: 'path/to/file.txt',
Body: 'Hello, R2!',
ContentType: 'text/plain',
};
await s3Client.send(new PutObjectCommand(uploadParams));
// Download object
const downloadParams = {
Bucket: 'my-bucket',
Key: 'path/to/file.txt',
};
const response = await s3Client.send(new GetObjectCommand(downloadParams));
const text = await response.Body.transformToString();
```
### Presigned URLs with AWS SDK
```typescript
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
import { PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';
// Generate presigned upload URL
const uploadCommand = new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'uploads/file.jpg',
});
const uploadUrl = await getSignedUrl(s3Client, uploadCommand, {
expiresIn: 3600, // 1 hour
});
// Generate presigned download URL
const downloadCommand = new GetObjectCommand({
Bucket: 'my-bucket',
Key: 'uploads/file.jpg',
});
const downloadUrl = await getSignedUrl(s3Client, downloadCommand, {
expiresIn: 3600,
});
```
---
## Using aws4fetch (Lightweight Alternative)
### Installation
```bash
npm install aws4fetch
```
### Usage
```typescript
import { AwsClient } from 'aws4fetch';
const r2Client = new AwsClient({
accessKeyId: '<R2_ACCESS_KEY_ID>',
secretAccessKey: '<R2_SECRET_ACCESS_KEY>',
});
const endpoint = `https://<ACCOUNT_ID>.r2.cloudflarestorage.com`;
// Upload object
await r2Client.fetch(`${endpoint}/my-bucket/file.txt`, {
method: 'PUT',
body: 'Hello, R2!',
headers: {
'Content-Type': 'text/plain',
},
});
// Download object
const response = await r2Client.fetch(`${endpoint}/my-bucket/file.txt`);
const text = await response.text();
// Delete object
await r2Client.fetch(`${endpoint}/my-bucket/file.txt`, {
method: 'DELETE',
});
// List objects
const listResponse = await r2Client.fetch(
`${endpoint}/my-bucket?list-type=2&max-keys=100`
);
const xml = await listResponse.text();
```
### Presigned URLs with aws4fetch
```typescript
import { AwsClient } from 'aws4fetch';
const r2Client = new AwsClient({
accessKeyId: '<R2_ACCESS_KEY_ID>',
secretAccessKey: '<R2_SECRET_ACCESS_KEY>',
});
const url = new URL(
`https://<ACCOUNT_ID>.r2.cloudflarestorage.com/my-bucket/file.txt`
);
// Set expiry (in seconds)
url.searchParams.set('X-Amz-Expires', '3600');
// Sign for PUT (upload)
const signedUpload = await r2Client.sign(
new Request(url, { method: 'PUT' }),
{ aws: { signQuery: true } }
);
console.log(signedUpload.url);
// Sign for GET (download)
const signedDownload = await r2Client.sign(
new Request(url, { method: 'GET' }),
{ aws: { signQuery: true } }
);
console.log(signedDownload.url);
```
---
## S3 vs R2 Workers API Comparison
| Feature | S3 API | R2 Workers API |
|---------|--------|----------------|
| **Performance** | External network call | Native binding (faster) |
| **Authentication** | Access keys required | Automatic via binding |
| **Presigned URLs** | Supported | Requires S3 API + access keys |
| **Multipart Upload** | Full S3 API | Simplified Workers API |
| **Custom Metadata** | `x-amz-meta-*` headers | `customMetadata` object |
| **Conditional Ops** | S3 headers | `onlyIf` object |
| **Size Limits** | 5GB per PUT | 100MB per PUT (200MB Business, 500MB Enterprise) |
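To make the custom-metadata row concrete, here is the same illustrative upload through both APIs (the bucket, key, `data`, and `env` names are placeholders):
```typescript
// S3 API: custom metadata travels as x-amz-meta-* headers (the SDK's Metadata field)
await s3Client.send(new PutObjectCommand({
  Bucket: 'my-bucket',
  Key: 'reports/2025-01.pdf',
  Body: data,
  ContentType: 'application/pdf',
  Metadata: { department: 'finance' }, // sent as x-amz-meta-department
}));

// Workers API: custom metadata is a plain object on the binding call
await env.MY_BUCKET.put('reports/2025-01.pdf', data, {
  httpMetadata: { contentType: 'application/pdf' },
  customMetadata: { department: 'finance' },
});
```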
---
## When to Use S3 API vs Workers API
### Use S3 API when:
- ✅ Migrating from AWS S3
- ✅ Using existing S3 tools (aws-cli, s3cmd)
- ✅ Generating presigned URLs
- ✅ Need S3 compatibility for external systems
### Use Workers API when:
- ✅ Building new applications on Cloudflare
- ✅ Need better performance (native binding)
- ✅ Don't want to manage access keys
- ✅ Using R2 from Workers
---
## R2-Specific Extensions
R2 adds some extensions to the S3 API:
### Conditional Operations
```typescript
// Only upload if file doesn't exist
await s3Client.send(new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'file.txt',
Body: data,
IfUnmodifiedSince: new Date('2020-01-01'), // Before R2 existed
}));
```
### Storage Class
R2 currently only supports 'Standard' storage class.
```typescript
await s3Client.send(new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'file.txt',
Body: data,
StorageClass: 'STANDARD',
}));
```
---
## Migration from S3
### 1. Update Endpoint
```diff
const s3Client = new S3Client({
region: 'auto',
- endpoint: 'https://s3.amazonaws.com',
+ endpoint: 'https://<ACCOUNT_ID>.r2.cloudflarestorage.com',
credentials: {
- accessKeyId: process.env.AWS_ACCESS_KEY_ID,
- secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
+ accessKeyId: process.env.R2_ACCESS_KEY_ID,
+ secretAccessKey: process.env.R2_SECRET_ACCESS_KEY,
},
});
```
### 2. Remove Unsupported Features
```diff
await s3Client.send(new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'file.txt',
Body: data,
- ACL: 'public-read', // ❌ Not supported
- Tagging: 'key=value', // ❌ Not supported (use custom metadata)
+ Metadata: { // ✅ Use custom metadata instead
+ visibility: 'public',
+ },
}));
```
### 3. Use CORS Instead of ACLs
R2 doesn't support S3 ACLs. Use CORS policies instead for browser access.
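CORS rules can be configured in the Cloudflare dashboard or via the S3 API's `PutBucketCors`. A sketch using the AWS SDK client configured earlier (the origin, methods, and bucket name are example values):
```typescript
import { PutBucketCorsCommand } from '@aws-sdk/client-s3';

// Example rule: allow browser GET/PUT requests from a single origin
await s3Client.send(new PutBucketCorsCommand({
  Bucket: 'my-bucket',
  CORSConfiguration: {
    CORSRules: [
      {
        AllowedOrigins: ['https://app.example.com'],
        AllowedMethods: ['GET', 'PUT'],
        AllowedHeaders: ['Content-Type'],
        MaxAgeSeconds: 3600,
      },
    ],
  },
}));
```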
---
## Common Issues
### Issue: SignatureDoesNotMatch
**Cause:** Incorrect access keys or endpoint URL
**Fix:**
- Verify access key ID and secret
- Ensure endpoint includes your account ID
- Check region is set to 'auto'
### Issue: Presigned URLs Don't Work with Custom Domains
**Cause:** Presigned URLs only work with R2 S3 endpoint
**Fix:**
- Use `<ACCOUNT_ID>.r2.cloudflarestorage.com` endpoint
- Or use Worker with R2 binding for custom domains
### Issue: Upload Size Exceeds Limit
**Cause:** A single S3 API PUT allows objects up to 5GB, but uploads routed through a Worker are capped by the Workers request body limit (100MB-500MB depending on plan)
**Fix:**
- Use multipart upload for large files (see the sketch below)
- Or upload via the S3 API directly, bypassing the Worker
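One way to handle large files over the S3 API is the `Upload` helper from `@aws-sdk/lib-storage` (`npm install @aws-sdk/lib-storage`), which manages the individual parts for you. A sketch, with `largeBody` standing in for whatever stream or buffer you are uploading:
```typescript
import { Upload } from '@aws-sdk/lib-storage';

// Streams a large object to R2 as a multipart upload; parts are managed by the helper
const upload = new Upload({
  client: s3Client, // the R2-configured S3Client from earlier
  params: {
    Bucket: 'my-bucket',
    Key: 'videos/large-file.mp4',
    Body: largeBody, // ReadableStream, Buffer, etc. (placeholder)
    ContentType: 'video/mp4',
  },
  partSize: 10 * 1024 * 1024, // 10 MiB parts (non-final parts must be at least 5 MiB)
  queueSize: 4, // upload up to 4 parts concurrently
});

await upload.done();
```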
---
## Official Resources
- **S3 API Compatibility**: https://developers.cloudflare.com/r2/api/s3/api/
- **AWS SDK Examples**: https://developers.cloudflare.com/r2/examples/aws/
- **Presigned URLs**: https://developers.cloudflare.com/r2/api/s3/presigned-urls/

references/workers-api.md

@@ -0,0 +1,465 @@
# R2 Workers API Complete Reference
**Last Updated**: 2025-10-21
**Official Docs**: https://developers.cloudflare.com/r2/api/workers/workers-api-reference/
---
## R2Bucket Methods
### put()
Upload an object to R2.
```typescript
put(
key: string,
value: ReadableStream | ArrayBuffer | ArrayBufferView | string | Blob,
options?: R2PutOptions
): Promise<R2Object | null>
```
**Parameters:**
- `key` - Object key (path) in the bucket
- `value` - Object data
- `options` - Optional upload options
**Returns:**
- `R2Object` - Metadata of uploaded object
- `null` - If precondition failed (onlyIf clause)
**Options (R2PutOptions):**
```typescript
interface R2PutOptions {
httpMetadata?: R2HTTPMetadata;
customMetadata?: Record<string, string>;
md5?: ArrayBuffer;
sha1?: ArrayBuffer;
sha256?: ArrayBuffer;
sha384?: ArrayBuffer;
sha512?: ArrayBuffer;
onlyIf?: R2Conditional;
storageClass?: 'Standard';
}
```
---
### get()
Download an object from R2.
```typescript
get(
key: string,
options?: R2GetOptions
): Promise<R2ObjectBody | null>
```
**Parameters:**
- `key` - Object key (path) in the bucket
- `options` - Optional download options
**Returns:**
- `R2ObjectBody` - Object with metadata and body stream
- `null` - If object doesn't exist or precondition failed
**Options (R2GetOptions):**
```typescript
interface R2GetOptions {
onlyIf?: R2Conditional | Headers;
range?: R2Range;
}
```
---
### head()
Get object metadata without downloading body.
```typescript
head(key: string): Promise<R2Object | null>
```
**Parameters:**
- `key` - Object key (path) in the bucket
**Returns:**
- `R2Object` - Object metadata only
- `null` - If object doesn't exist
**Use Cases:**
- Check if file exists
- Get file size
- Get last modified date
- Validate etag
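A small usage sketch, assuming the Hono app and `MY_BUCKET` binding from the Complete Example at the end of this reference:
```typescript
// Return object metadata without streaming the body
app.get('/files/:key/info', async (c) => {
  const meta = await c.env.MY_BUCKET.head(c.req.param('key'));
  if (!meta) {
    return c.json({ error: 'Not found' }, 404);
  }
  return c.json({
    size: meta.size,
    uploaded: meta.uploaded.toISOString(),
    etag: meta.etag,
  });
});
```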
---
### delete()
Delete one or more objects.
```typescript
delete(key: string | string[]): Promise<void>
```
**Parameters:**
- `key` - Single key or array of keys (max 1000)
**Returns:**
- `void` - Always succeeds (idempotent)
**Notes:**
- No error if object doesn't exist
- Can delete up to 1000 objects at once
- Deletes are strongly consistent
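For example, with `env.MY_BUCKET` as the bound bucket:
```typescript
// Single key
await env.MY_BUCKET.delete('tmp/session-123.json');

// Bulk delete: one call for up to 1000 keys
const stale = await env.MY_BUCKET.list({ prefix: 'tmp/' });
if (stale.objects.length > 0) {
  await env.MY_BUCKET.delete(stale.objects.map(obj => obj.key));
}
```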
---
### list()
List objects in the bucket.
```typescript
list(options?: R2ListOptions): Promise<R2Objects>
```
**Parameters:**
- `options` - Optional listing options
**Returns:**
- `R2Objects` - List of objects and metadata
**Options (R2ListOptions):**
```typescript
interface R2ListOptions {
limit?: number; // Max 1000, default 1000
prefix?: string; // Filter by prefix
cursor?: string; // Pagination cursor
delimiter?: string; // Folder delimiter (usually '/')
include?: ('httpMetadata' | 'customMetadata')[];
}
```
**Response (R2Objects):**
```typescript
interface R2Objects {
objects: R2Object[]; // Array of objects
truncated: boolean; // true if more results exist
cursor?: string; // Cursor for next page
delimitedPrefixes: string[]; // "Folder" names (if delimiter used)
}
```
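A pagination and delimiter sketch (the binding name is illustrative):
```typescript
// Walk every object under a prefix, page by page
let cursor: string | undefined;
do {
  const page = await env.MY_BUCKET.list({ prefix: 'logs/', cursor });
  for (const obj of page.objects) {
    console.log(obj.key, obj.size);
  }
  cursor = page.truncated ? page.cursor : undefined;
} while (cursor);

// List only top-level "folders"
const topLevel = await env.MY_BUCKET.list({ delimiter: '/' });
console.log(topLevel.delimitedPrefixes); // e.g. ['backups/', 'images/', 'users/']
```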
---
### createMultipartUpload()
Create a new multipart upload.
```typescript
createMultipartUpload(
key: string,
options?: R2MultipartOptions
): Promise<R2MultipartUpload>
```
**Parameters:**
- `key` - Object key for the upload
- `options` - Optional metadata
**Returns:**
- `R2MultipartUpload` - Object for managing the upload
**Options (R2MultipartOptions):**
```typescript
interface R2MultipartOptions {
httpMetadata?: R2HTTPMetadata;
customMetadata?: Record<string, string>;
}
```
---
### resumeMultipartUpload()
Resume an existing multipart upload.
```typescript
resumeMultipartUpload(
key: string,
uploadId: string
): R2MultipartUpload
```
**Parameters:**
- `key` - Object key for the upload
- `uploadId` - Upload ID from createMultipartUpload()
**Returns:**
- `R2MultipartUpload` - Object for managing the upload
**Notes:**
- Does NOT validate uploadId or key
- No network request made
- Use it to continue an upload after a Worker restart (see the sketch below)
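A sketch of resuming in a later invocation, where `key`, `uploadId`, `partNumber`, and `chunk` are assumed to have been persisted or supplied by the client:
```typescript
// No network call happens here; the upload handle is reconstructed locally
const upload = env.MY_BUCKET.resumeMultipartUpload(key, uploadId);

// Upload one more part and keep its ETag for complete()
const part = await upload.uploadPart(partNumber, chunk);
// Persist { partNumber, etag: part.etag } (e.g. in KV or D1) until all parts are done
```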
---
## R2Object Interface
Metadata for an R2 object.
```typescript
interface R2Object {
key: string; // Object key
version: string; // Version ID
size: number; // Size in bytes
etag: string; // ETag (without quotes)
httpEtag: string; // ETag with quotes (RFC 9110)
uploaded: Date; // Upload timestamp
httpMetadata?: R2HTTPMetadata; // HTTP metadata
customMetadata?: Record<string, string>; // Custom metadata
range?: R2Range; // Range (if partial)
checksums?: R2Checksums; // Checksums
storageClass: 'Standard'; // Storage class
ssecKeyMd5?: string; // SSE-C key hash
writeHttpMetadata(headers: Headers): void; // Apply metadata to headers
}
```
---
## R2ObjectBody Interface
Extends R2Object with body stream and read methods.
```typescript
interface R2ObjectBody extends R2Object {
body: ReadableStream; // Object body stream
bodyUsed: boolean; // Whether body consumed
arrayBuffer(): Promise<ArrayBuffer>; // Read as ArrayBuffer
text(): Promise<string>; // Read as text
json<T>(): Promise<T>; // Read as JSON
blob(): Promise<Blob>; // Read as Blob
}
```
---
## R2MultipartUpload Interface
Manage a multipart upload.
```typescript
interface R2MultipartUpload {
key: string; // Object key
uploadId: string; // Upload ID
uploadPart(
partNumber: number,
value: ReadableStream | ArrayBuffer | ArrayBufferView | string | Blob,
options?: R2MultipartOptions
): Promise<R2UploadedPart>;
abort(): Promise<void>;
complete(uploadedParts: R2UploadedPart[]): Promise<R2Object>;
}
```
**Methods:**
- **uploadPart()** - Upload a single part (1-10,000)
- **abort()** - Cancel the multipart upload
- **complete()** - Finish upload with list of parts
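An end-to-end sketch that splits an `ArrayBuffer` into fixed-size parts; every part except the last should be at least 5 MiB, and R2 expects non-final parts to be the same size:
```typescript
// Upload a large buffer as a multipart upload, aborting on failure
async function uploadLargeObject(bucket: R2Bucket, key: string, data: ArrayBuffer) {
  const PART_SIZE = 10 * 1024 * 1024; // 10 MiB per part
  const upload = await bucket.createMultipartUpload(key);
  const parts: R2UploadedPart[] = [];
  try {
    let partNumber = 1;
    for (let offset = 0; offset < data.byteLength; offset += PART_SIZE) {
      const chunk = data.slice(offset, offset + PART_SIZE);
      parts.push(await upload.uploadPart(partNumber++, chunk));
    }
    return await upload.complete(parts);
  } catch (err) {
    await upload.abort(); // clean up orphaned parts on failure
    throw err;
  }
}
```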
---
## R2UploadedPart Interface
Metadata for an uploaded part.
```typescript
interface R2UploadedPart {
partNumber: number; // Part number (1-10,000)
etag: string; // Part ETag
}
```
---
## R2HTTPMetadata Interface
HTTP headers for object.
```typescript
interface R2HTTPMetadata {
contentType?: string; // Content-Type header
contentLanguage?: string; // Content-Language header
contentDisposition?: string; // Content-Disposition header
contentEncoding?: string; // Content-Encoding header
cacheControl?: string; // Cache-Control header
cacheExpiry?: Date; // Expires header
}
```
---
## R2Conditional Interface
Conditional operations (onlyIf clause).
```typescript
interface R2Conditional {
etagMatches?: string; // If-Match
etagDoesNotMatch?: string; // If-None-Match
uploadedBefore?: Date; // If-Unmodified-Since
uploadedAfter?: Date; // If-Modified-Since
}
```
**Alternatively, pass a Headers object with:**
- `If-Match`
- `If-None-Match`
- `If-Modified-Since`
- `If-Unmodified-Since`
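For example, `etagMatches` enables optimistic concurrency on writes; `put()` returns `null` when the condition fails (the bucket and key are illustrative):
```typescript
// Read-modify-write config.json, but only commit if nobody else wrote in between
const current = await env.MY_BUCKET.get('config.json');
if (!current) throw new Error('config.json is missing');

const config = await current.json<Record<string, unknown>>();
config.updatedAt = new Date().toISOString();

const result = await env.MY_BUCKET.put('config.json', JSON.stringify(config), {
  httpMetadata: { contentType: 'application/json' },
  onlyIf: { etagMatches: current.etag },
});

if (result === null) {
  // Precondition failed: the object changed since we read it; retry or report a conflict
}
```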
---
## R2Range Interface
Byte range for partial downloads.
```typescript
interface R2Range {
offset?: number; // Start byte
length?: number; // Number of bytes
suffix?: number; // Last N bytes
}
```
**Examples:**
```typescript
// First 1000 bytes
{ offset: 0, length: 1000 }
// Bytes 100-200
{ offset: 100, length: 100 }
// From byte 1000 to end
{ offset: 1000 }
// Last 500 bytes
{ suffix: 500 }
```
---
## R2Checksums Interface
Stored checksums for object.
```typescript
interface R2Checksums {
md5?: ArrayBuffer;
sha1?: ArrayBuffer;
sha256?: ArrayBuffer;
sha384?: ArrayBuffer;
sha512?: ArrayBuffer;
}
```
---
## Complete Example
```typescript
import { Hono } from 'hono';
type Bindings = {
MY_BUCKET: R2Bucket;
};
const app = new Hono<{ Bindings: Bindings }>();
// Upload with all metadata
app.put('/files/:key', async (c) => {
const key = c.req.param('key');
const body = await c.req.arrayBuffer();
const object = await c.env.MY_BUCKET.put(key, body, {
httpMetadata: {
contentType: c.req.header('content-type') || 'application/octet-stream',
cacheControl: 'public, max-age=3600',
contentDisposition: `attachment; filename="${key}"`,
},
customMetadata: {
uploadedBy: 'api',
uploadedAt: new Date().toISOString(),
},
onlyIf: {
// Only upload if file doesn't exist
uploadedBefore: new Date('2020-01-01'),
},
});
if (!object) {
return c.json({ error: 'File already exists' }, 409);
}
return c.json({
key: object.key,
size: object.size,
etag: object.etag,
});
});
// Download with range support
app.get('/files/:key', async (c) => {
const key = c.req.param('key');
const rangeHeader = c.req.header('range');
let options: R2GetOptions | undefined;
if (rangeHeader) {
// Parse range header: bytes=0-1000
const match = rangeHeader.match(/bytes=(\d+)-(\d*)/);
if (match) {
const start = parseInt(match[1]);
const end = match[2] ? parseInt(match[2]) : undefined;
options = {
range: {
offset: start,
length: end ? end - start + 1 : undefined,
},
};
}
}
const object = await c.env.MY_BUCKET.get(key, options);
if (!object) {
return c.json({ error: 'Not found' }, 404);
}
const headers = new Headers();
object.writeHttpMetadata(headers);
headers.set('etag', object.httpEtag);
  if (object.range) {
    const start = object.range.offset ?? 0;
    // length can be absent for open-ended ranges; fall back to the rest of the object
    const length = object.range.length ?? object.size - start;
    headers.set('content-range', `bytes ${start}-${start + length - 1}/${object.size}`);
    return new Response(object.body, {
      status: 206,
      headers,
    });
  }
return new Response(object.body, { headers });
});
export default app;
```