---
name: aws-sdk-java-v2-s3
description: Amazon S3 patterns and examples using AWS SDK for Java 2.x. Use when working with S3 buckets, uploading/downloading objects, multipart uploads, presigned URLs, S3 Transfer Manager, object operations, or S3-specific configurations.
category: aws
tags: [aws, s3, java, sdk, storage, objects, transfer-manager, presigned-urls]
version: 1.1.0
allowed-tools: Read, Write, Bash
---
# AWS SDK for Java 2.x - Amazon S3
## When to Use
Use this skill when:
- Creating, listing, or deleting S3 buckets with proper configuration
- Uploading or downloading objects from S3 with metadata and encryption
- Working with multipart uploads for large files (>100MB) with error handling
- Generating presigned URLs for temporary access to S3 objects
- Copying or moving objects between S3 buckets with metadata preservation
- Setting object metadata, storage classes, and access controls
- Implementing S3 Transfer Manager for optimized file transfers
- Integrating S3 with Spring Boot applications for cloud storage
- Setting up S3 event notifications for object lifecycle management
- Managing bucket policies, CORS configuration, and access controls
- Implementing retry mechanisms and error handling for S3 operations
- Testing S3 integrations with LocalStack for development environments
## Dependencies
```xml
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>s3</artifactId>
<version>2.20.0</version> <!-- use the latest stable version -->
</dependency>
<!-- For S3 Transfer Manager -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>s3-transfer-manager</artifactId>
<version>2.20.0</version> <!-- use the latest stable version -->
</dependency>
<!-- For async operations -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>netty-nio-client</artifactId>
<version>2.20.0</version> <!-- use the latest stable version -->
</dependency>
```
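To keep the three modules at the same version, you can import the AWS SDK BOM instead of pinning each artifact individually; a minimal sketch (substitute the latest stable version):
```xml
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>software.amazon.awssdk</groupId>
      <artifactId>bom</artifactId>
      <version>2.20.0</version> <!-- pick the latest stable version -->
      <type>pom</type>
      <scope>import</scope>
    </dependency>
  </dependencies>
</dependencyManagement>
```
With the BOM imported, the `<version>` elements on the individual `s3`, `s3-transfer-manager`, and `netty-nio-client` dependencies can be dropped.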
## Client Setup
### Basic Synchronous Client
```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
S3Client s3Client = S3Client.builder()
.region(Region.US_EAST_1)
.build();
```
### Basic Asynchronous Client
```java
import software.amazon.awssdk.services.s3.S3AsyncClient;
S3AsyncClient s3AsyncClient = S3AsyncClient.builder()
.region(Region.US_EAST_1)
.build();
```
### Configured Client with Retry Logic
```java
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.core.retry.backoff.FullJitterBackoffStrategy;
import java.time.Duration;
S3Client s3Client = S3Client.builder()
    .region(Region.US_EAST_1)
    .httpClientBuilder(ApacheHttpClient.builder()
        .maxConnections(200)
        .connectionTimeout(Duration.ofSeconds(5)))
    .overrideConfiguration(b -> b
        .apiCallTimeout(Duration.ofSeconds(60))
        .apiCallAttemptTimeout(Duration.ofSeconds(30))
        .retryPolicy(RetryPolicy.builder()
            .numRetries(3)
            .backoffStrategy(FullJitterBackoffStrategy.builder()
                .baseDelay(Duration.ofSeconds(1))
                .maxBackoffTime(Duration.ofSeconds(30))
                .build())
            .build()))
    .build();
```
## Basic Bucket Operations
### Create Bucket
```java
import software.amazon.awssdk.services.s3.model.*;
public void createBucket(S3Client s3Client, String bucketName) {
try {
CreateBucketRequest request = CreateBucketRequest.builder()
.bucket(bucketName)
.build();
s3Client.createBucket(request);
// Wait until bucket is ready
HeadBucketRequest waitRequest = HeadBucketRequest.builder()
.bucket(bucketName)
.build();
s3Client.waiter().waitUntilBucketExists(waitRequest);
System.out.println("Bucket created successfully: " + bucketName);
} catch (S3Exception e) {
System.err.println("Error creating bucket: " + e.awsErrorDetails().errorMessage());
throw e;
}
}
```
### List All Buckets
```java
import java.util.List;
import java.util.stream.Collectors;
public List<String> listAllBuckets(S3Client s3Client) {
    ListBucketsResponse response = s3Client.listBuckets();
    return response.buckets().stream()
        .map(Bucket::name)
        .collect(Collectors.toList());
}
```
### Check if Bucket Exists
```java
public boolean bucketExists(S3Client s3Client, String bucketName) {
try {
HeadBucketRequest request = HeadBucketRequest.builder()
.bucket(bucketName)
.build();
s3Client.headBucket(request);
return true;
} catch (NoSuchBucketException e) {
return false;
}
}
```
## Basic Object Operations
### Upload File to S3
```java
import software.amazon.awssdk.core.sync.RequestBody;
import java.nio.file.Paths;
public void uploadFile(S3Client s3Client, String bucketName, String key, String filePath) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
System.out.println("File uploaded: " + key);
}
```
### Download File from S3
```java
import java.nio.file.Paths;
public void downloadFile(S3Client s3Client, String bucketName, String key, String destPath) {
GetObjectRequest request = GetObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
s3Client.getObject(request, Paths.get(destPath));
System.out.println("File downloaded: " + destPath);
}
```
### Get Object Metadata
```java
public Map<String, String> getObjectMetadata(S3Client s3Client, String bucketName, String key) {
HeadObjectRequest request = HeadObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
HeadObjectResponse response = s3Client.headObject(request);
return response.metadata();
}
```
## Advanced Object Operations
### Upload with Metadata and Encryption
```java
public void uploadWithMetadata(S3Client s3Client, String bucketName, String key,
String filePath, Map<String, String> metadata) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.metadata(metadata)
.contentType("application/pdf")
.serverSideEncryption(ServerSideEncryption.AES256)
.storageClass(StorageClass.STANDARD_IA)
.build();
PutObjectResponse response = s3Client.putObject(request,
RequestBody.fromFile(Paths.get(filePath)));
System.out.println("Upload completed. ETag: " + response.eTag());
}
```
### Copy Object Between Buckets
```java
public void copyObject(S3Client s3Client, String sourceBucket, String sourceKey,
String destBucket, String destKey) {
CopyObjectRequest request = CopyObjectRequest.builder()
.sourceBucket(sourceBucket)
.sourceKey(sourceKey)
.destinationBucket(destBucket)
.destinationKey(destKey)
.build();
s3Client.copyObject(request);
System.out.println("Object copied: " + sourceKey + " -> " + destKey);
}
```
### Delete Multiple Objects
```java
public void deleteMultipleObjects(S3Client s3Client, String bucketName, List<String> keys) {
List<ObjectIdentifier> objectIds = keys.stream()
.map(key -> ObjectIdentifier.builder().key(key).build())
.collect(Collectors.toList());
Delete delete = Delete.builder()
.objects(objectIds)
.build();
DeleteObjectsRequest request = DeleteObjectsRequest.builder()
.bucket(bucketName)
.delete(delete)
.build();
DeleteObjectsResponse response = s3Client.deleteObjects(request);
response.deleted().forEach(deleted ->
System.out.println("Deleted: " + deleted.key()));
response.errors().forEach(error ->
System.err.println("Failed to delete " + error.key() + ": " + error.message()));
}
```
## Presigned URLs
### Generate Download URL
```java
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.*;
import java.time.Duration;
public String generateDownloadUrl(String bucketName, String key) {
try (S3Presigner presigner = S3Presigner.builder()
.region(Region.US_EAST_1)
.build()) {
GetObjectRequest getObjectRequest = GetObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder()
.signatureDuration(Duration.ofMinutes(10))
.getObjectRequest(getObjectRequest)
.build();
PresignedGetObjectRequest presignedRequest = presigner.presignGetObject(presignRequest);
return presignedRequest.url().toString();
}
}
```
### Generate Upload URL
```java
public String generateUploadUrl(String bucketName, String key) {
try (S3Presigner presigner = S3Presigner.create()) {
PutObjectRequest putObjectRequest = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
PutObjectPresignRequest presignRequest = PutObjectPresignRequest.builder()
.signatureDuration(Duration.ofMinutes(5))
.putObjectRequest(putObjectRequest)
.build();
PresignedPutObjectRequest presignedRequest = presigner.presignPutObject(presignRequest);
return presignedRequest.url().toString();
}
}
```
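A client consumes the upload URL with a plain HTTP `PUT`; a sketch using the JDK's built-in `java.net.http.HttpClient` (the file path is illustrative):
```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Paths;

// PUT the file body directly to the presigned URL; no AWS credentials needed
String presignedUrl = generateUploadUrl("my-example-bucket", "uploads/document.pdf");
HttpResponse<String> response = HttpClient.newHttpClient().send(
    HttpRequest.newBuilder()
        .uri(URI.create(presignedUrl))
        .PUT(HttpRequest.BodyPublishers.ofFile(Paths.get("document.pdf")))
        .build(),
    HttpResponse.BodyHandlers.ofString());
System.out.println("Upload status: " + response.statusCode());
```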
## S3 Transfer Manager
### Upload with Transfer Manager
```java
import software.amazon.awssdk.transfer.s3.*;
import software.amazon.awssdk.transfer.s3.model.*;
import software.amazon.awssdk.transfer.s3.progress.LoggingTransferListener;
public void uploadWithTransferManager(String bucketName, String key, String filePath) {
    try (S3TransferManager transferManager = S3TransferManager.create()) {
        UploadFileRequest uploadRequest = UploadFileRequest.builder()
            .putObjectRequest(req -> req
                .bucket(bucketName)
                .key(key))
            .source(Paths.get(filePath))
            // The v2 Transfer Manager reports progress through TransferListener
            // callbacks; LoggingTransferListener logs progress out of the box
            .addTransferListener(LoggingTransferListener.create())
            .build();
        FileUpload upload = transferManager.uploadFile(uploadRequest);
        CompletedFileUpload result = upload.completionFuture().join();
        System.out.println("Upload complete. ETag: " + result.response().eTag());
    }
}
```
### Download with Transfer Manager
```java
public void downloadWithTransferManager(String bucketName, String key, String destPath) {
try (S3TransferManager transferManager = S3TransferManager.create()) {
DownloadFileRequest downloadRequest = DownloadFileRequest.builder()
.getObjectRequest(req -> req
.bucket(bucketName)
.key(key))
.destination(Paths.get(destPath))
.build();
FileDownload download = transferManager.downloadFile(downloadRequest);
CompletedFileDownload result = download.completionFuture().join();
System.out.println("Download complete. Size: " + result.response().contentLength());
}
}
```
## Spring Boot Integration
### Configuration Properties
```java
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties(prefix = "aws.s3")
public class S3Properties {
private String accessKey;
private String secretKey;
private String region = "us-east-1";
private String endpoint;
private String defaultBucket;
private boolean asyncEnabled = false;
private boolean transferManagerEnabled = true;
// Getters and setters
public String getAccessKey() { return accessKey; }
public void setAccessKey(String accessKey) { this.accessKey = accessKey; }
// ... other getters and setters
}
```
### S3 Configuration Class
```java
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import java.net.URI;
@Configuration
public class S3Configuration {
private final S3Properties properties;
public S3Configuration(S3Properties properties) {
this.properties = properties;
}
@Bean
public S3Client s3Client() {
S3Client.Builder builder = S3Client.builder()
.region(Region.of(properties.getRegion()));
if (properties.getAccessKey() != null && properties.getSecretKey() != null) {
builder.credentialsProvider(StaticCredentialsProvider.create(
AwsBasicCredentials.create(
properties.getAccessKey(),
properties.getSecretKey())));
}
if (properties.getEndpoint() != null) {
builder.endpointOverride(URI.create(properties.getEndpoint()));
}
return builder.build();
}
@Bean
public S3AsyncClient s3AsyncClient() {
S3AsyncClient.Builder builder = S3AsyncClient.builder()
.region(Region.of(properties.getRegion()));
if (properties.getAccessKey() != null && properties.getSecretKey() != null) {
builder.credentialsProvider(StaticCredentialsProvider.create(
AwsBasicCredentials.create(
properties.getAccessKey(),
properties.getSecretKey())));
}
if (properties.getEndpoint() != null) {
builder.endpointOverride(URI.create(properties.getEndpoint()));
}
return builder.build();
}
@Bean
public S3TransferManager s3TransferManager() {
// The Transfer Manager is built on the asynchronous client
return S3TransferManager.builder()
    .s3Client(s3AsyncClient())
    .build();
}
}
```
### S3 Service
```java
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import software.amazon.awssdk.core.ResponseInputStream;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import java.io.IOException;
import java.nio.file.*;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.CompletableFuture;
@Service
@RequiredArgsConstructor
public class S3Service {
private final S3Client s3Client;
private final S3AsyncClient s3AsyncClient;
private final S3TransferManager transferManager;
private final S3Properties properties;
public CompletableFuture<Void> uploadFileAsync(String key, Path file) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(properties.getDefaultBucket())
.key(key)
.build();
return CompletableFuture.runAsync(() -> {
s3Client.putObject(request, RequestBody.fromFile(file));
});
}
public CompletableFuture<byte[]> downloadFileAsync(String key) {
GetObjectRequest request = GetObjectRequest.builder()
.bucket(properties.getDefaultBucket())
.key(key)
.build();
return CompletableFuture.supplyAsync(() -> {
try (ResponseInputStream<GetObjectResponse> response = s3Client.getObject(request)) {
return response.readAllBytes();
} catch (IOException e) {
throw new RuntimeException("Failed to read S3 object", e);
}
});
}
public CompletableFuture<String> generatePresignedUrl(String key, Duration duration) {
return CompletableFuture.supplyAsync(() -> {
try (S3Presigner presigner = S3Presigner.builder()
.region(Region.of(properties.getRegion()))
.build()) {
GetObjectRequest getRequest = GetObjectRequest.builder()
.bucket(properties.getDefaultBucket())
.key(key)
.build();
GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder()
.signatureDuration(duration)
.getObjectRequest(getRequest)
.build();
return presigner.presignGetObject(presignRequest).url().toString();
}
});
}
public Flux<S3Object> listObjects(String prefix) {
ListObjectsV2Request request = ListObjectsV2Request.builder()
.bucket(properties.getDefaultBucket())
.prefix(prefix)
.build();
return Flux.create(sink -> {
s3Client.listObjectsV2Paginator(request)
.contents()
.forEach(sink::next);
sink.complete();
});
}
}
```
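The `listObjects` bridge above drains the synchronous paginator inside `Flux.create`. The async client's paginator already implements the Reactive Streams `Publisher` contract, so it can be bridged without a custom sink; a sketch, assuming the same injected `s3AsyncClient` and `properties`:
```java
public Flux<S3Object> listObjectsReactive(String prefix) {
    ListObjectsV2Request request = ListObjectsV2Request.builder()
        .bucket(properties.getDefaultBucket())
        .prefix(prefix)
        .build();
    // ListObjectsV2Publisher implements org.reactivestreams.Publisher,
    // so Flux.from() adapts it directly with backpressure support
    return Flux.from(s3AsyncClient.listObjectsV2Paginator(request).contents());
}
```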
## Examples
### Basic File Upload Example
```java
import java.util.Map;
// Assumes the helper methods defined earlier in this skill are in scope:
// bucketExists, createBucket, uploadWithMetadata, generateDownloadUrl.
public class S3UploadExample {
public static void main(String[] args) {
// Initialize client
S3Client s3Client = S3Client.builder()
.region(Region.US_EAST_1)
.build();
String bucketName = "my-example-bucket";
String filePath = "document.pdf";
String key = "uploads/document.pdf";
// Create bucket if it doesn't exist
if (!bucketExists(s3Client, bucketName)) {
createBucket(s3Client, bucketName);
}
// Upload file
Map<String, String> metadata = Map.of(
"author", "John Doe",
"content-type", "application/pdf",
"upload-date", java.time.LocalDate.now().toString()
);
uploadWithMetadata(s3Client, bucketName, key, filePath, metadata);
// Generate presigned URL
String downloadUrl = generateDownloadUrl(bucketName, key);
System.out.println("Download URL: " + downloadUrl);
// Close client
s3Client.close();
}
}
```
### Batch File Processing Example
```java
import java.io.IOException;
import java.nio.file.*;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.*;
public class S3BatchProcessing {
public void processDirectoryUpload(S3Client s3Client, String bucketName, String directoryPath) {
try (Stream<Path> paths = Files.walk(Paths.get(directoryPath))) {
List<CompletableFuture<Void>> futures = paths
.filter(Files::isRegularFile)
.map(path -> {
// Use the path relative to the source directory as the object key
// (the key must not include the bucket name)
String key = Paths.get(directoryPath).relativize(path).toString();
return CompletableFuture.runAsync(() -> {
uploadFile(s3Client, bucketName, key, path.toString());
});
})
.collect(Collectors.toList());
// Wait for all uploads to complete
CompletableFuture.allOf(
futures.toArray(new CompletableFuture[0])
).join();
System.out.println("All files uploaded successfully");
} catch (IOException e) {
throw new RuntimeException("Failed to process directory", e);
}
}
}
```
## Best Practices
### Performance Optimization
1. **Use S3 Transfer Manager**: Automatically handles multipart uploads, parallel transfers, and progress tracking for files >100MB
2. **Reuse S3 Client**: Clients are thread-safe and should be reused throughout the application lifecycle (see the sketch after this list)
3. **Enable async operations**: Use S3AsyncClient for I/O-bound operations to improve throughput
4. **Configure proper timeouts**: Set appropriate timeouts for large file operations
5. **Use connection pooling**: Configure HTTP client for optimal connection management
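A minimal sketch of the client-reuse pattern from item 2, assuming a plain singleton holder (in Spring, the `@Bean` definitions shown earlier serve the same purpose):
```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

// One client instance shared across the application; S3Client is thread-safe.
public final class S3Clients {
    private static final S3Client INSTANCE = S3Client.builder()
        .region(Region.US_EAST_1)
        .build();

    private S3Clients() {}

    public static S3Client s3() {
        return INSTANCE;
    }
}
```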
### Security Considerations
1. **Use temporary credentials**: Always use IAM roles or AWS STS for short-lived access tokens (see the sketch after this list)
2. **Enable server-side encryption**: Use AES-256 or AWS KMS for sensitive data
3. **Implement access controls**: Use bucket policies and IAM roles instead of access keys in production
4. **Validate object metadata**: Sanitize user-provided metadata to prevent header injection
5. **Use presigned URLs**: Avoid exposing credentials by using temporary access URLs
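A sketch of item 1: rely on the SDK's default credential chain (environment variables, profile files, container credentials, or an attached IAM role) instead of embedding access keys:
```java
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

// Resolves credentials from the environment, JVM properties, profile files,
// container credentials, or an attached IAM role -- no hardcoded keys.
S3Client s3Client = S3Client.builder()
    .region(Region.US_EAST_1)
    .credentialsProvider(DefaultCredentialsProvider.create())
    .build();
```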
### Error Handling
1. **Implement retry logic**: Network operations should have exponential backoff retry strategies
2. **Handle throttling**: Handle S3's `503 Slow Down` throttling responses with backoff and retry
3. **Validate object existence**: Check if objects exist before operations that require them
4. **Clean up failed operations**: Abort multipart uploads that fail (see the sketch after this list)
5. **Log appropriately**: Log successful operations and errors for monitoring
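A sketch of item 4, aborting all in-progress multipart uploads in a bucket (in production you would typically filter by age or key prefix):
```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

public void abortIncompleteUploads(S3Client s3Client, String bucketName) {
    ListMultipartUploadsResponse uploads = s3Client.listMultipartUploads(
        ListMultipartUploadsRequest.builder().bucket(bucketName).build());
    for (MultipartUpload upload : uploads.uploads()) {
        // Abandoned parts keep accruing storage charges until aborted
        s3Client.abortMultipartUpload(AbortMultipartUploadRequest.builder()
            .bucket(bucketName)
            .key(upload.key())
            .uploadId(upload.uploadId())
            .build());
    }
}
```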
### Cost Optimization
1. **Use appropriate storage classes**: Choose STANDARD, STANDARD_IA, INTELLIGENT_TIERING based on access patterns
2. **Implement lifecycle policies**: Automatically transition or expire objects (see the sketch after this list)
3. **Enable object versioning**: For important data that needs retention
4. **Monitor usage**: Track data transfer and storage costs
5. **Minimize API calls**: Use batch operations when possible
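A sketch of item 2, assuming objects under a hypothetical `logs/` prefix should move to Infrequent Access after 30 days and expire after a year:
```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

public void applyLifecyclePolicy(S3Client s3Client, String bucketName) {
    LifecycleRule rule = LifecycleRule.builder()
        .id("archive-logs")
        .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
        .status(ExpirationStatus.ENABLED)
        .transitions(Transition.builder()
            .days(30)
            .storageClass(TransitionStorageClass.STANDARD_IA)
            .build())
        .expiration(LifecycleExpiration.builder().days(365).build())
        .build();
    s3Client.putBucketLifecycleConfiguration(
        PutBucketLifecycleConfigurationRequest.builder()
            .bucket(bucketName)
            .lifecycleConfiguration(BucketLifecycleConfiguration.builder()
                .rules(rule)
                .build())
            .build());
}
```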
## Constraints and Limitations
- **File size limits**: Single PUT operations limited to 5GB; use multipart uploads for larger files
- **Batch operations**: Maximum 1000 objects per DeleteObjects operation
- **Metadata size**: User-defined metadata limited to 2KB
- **Concurrent transfers**: Transfer Manager handles up to 100 concurrent transfers by default
- **Region consistency**: Cross-region operations may incur additional costs and latency
- **Consistency**: S3 provides strong read-after-write consistency for all requests (since December 2020), so new objects are visible immediately after upload
## References
For more detailed information, see:
- [AWS S3 Object Operations Reference](./references/s3-object-operations.md)
- [S3 Transfer Manager Patterns](./references/s3-transfer-patterns.md)
- [Spring Boot Integration Guide](./references/s3-spring-boot-integration.md)
- [AWS S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/userguide/)
- [AWS SDK for Java 2.x S3 API](https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/s3/package-summary.html)
## Related Skills
- `aws-sdk-java-v2-core` - Core AWS SDK patterns and configuration
- `spring-boot-dependency-injection` - Spring dependency injection patterns
- `unit-test-service-layer` - Testing service layer patterns
- `unit-test-wiremock-rest-api` - Testing external API integrations

# S3 Object Operations Reference
## Detailed Object Operations
### Advanced Upload Patterns
#### Streaming Upload
```java
public void uploadStreaming(S3Client s3Client, String bucketName, String key,
                            String filePath) {
    PutObjectRequest request = PutObjectRequest.builder()
        .bucket(bucketName)
        .key(key)
        .build();
    // RequestBody is not Closeable; fromFile streams the file contents.
    // For progress callbacks, use the Transfer Manager (see the transfer patterns reference).
    s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
}
```
#### Conditional Upload
```java
public void conditionalUpload(S3Client s3Client, String bucketName, String key,
String filePath, String expectedETag) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
// Conditional writes (If-Match / If-None-Match) require a recent SDK version
// and service support for conditional PutObject requests
.ifMatch(expectedETag)
.build();
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
}
```
### Advanced Download Patterns
#### Range Requests for Large Files
```java
public void downloadInChunks(S3Client s3Client, String bucketName, String key,
                             String destPath, int chunkSizeMB) throws IOException {
    long fileSize = getFileSize(s3Client, bucketName, key);
    long chunkSize = (long) chunkSizeMB * 1024 * 1024;
    try (OutputStream os = new FileOutputStream(destPath)) {
        for (long start = 0; start < fileSize; start += chunkSize) {
            long end = Math.min(start + chunkSize - 1, fileSize - 1);
            GetObjectRequest request = GetObjectRequest.builder()
                .bucket(bucketName)
                .key(key)
                .range("bytes=" + start + "-" + end)
                .build();
            try (ResponseInputStream<GetObjectResponse> response =
                    s3Client.getObject(request)) {
                response.transferTo(os);
            }
        }
    }
}

private long getFileSize(S3Client s3Client, String bucketName, String key) {
    // HEAD the object to learn its total size before issuing range requests
    return s3Client.headObject(HeadObjectRequest.builder()
        .bucket(bucketName)
        .key(key)
        .build()).contentLength();
}
```
### Metadata Management
#### Setting and Retrieving Object Metadata
```java
public void setObjectMetadata(S3Client s3Client, String bucketName, String key,
Map<String, String> metadata) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.metadata(metadata)
.build();
s3Client.putObject(request, RequestBody.empty());
}
public Map<String, String> getObjectMetadata(S3Client s3Client,
String bucketName, String key) {
HeadObjectRequest request = HeadObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
HeadObjectResponse response = s3Client.headObject(request);
return response.metadata();
}
```
### Storage Classes and Lifecycle
#### Managing Different Storage Classes
```java
public void uploadWithStorageClass(S3Client s3Client, String bucketName, String key,
String filePath, StorageClass storageClass) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.storageClass(storageClass)
.build();
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
}
// Storage class options:
// STANDARD - Default storage class
// STANDARD_IA - Infrequent Access
// ONEZONE_IA - Single-zone infrequent access
// INTELLIGENT_TIERING - Automatically optimizes storage
// GLACIER - Archive storage
// DEEP_ARCHIVE - Long-term archive storage
```
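Changing the storage class of an existing object is a server-side copy of the object onto itself; a sketch:
```java
public void changeStorageClass(S3Client s3Client, String bucketName, String key,
                               StorageClass newStorageClass) {
    // Copy the object in place with a new storage class
    CopyObjectRequest request = CopyObjectRequest.builder()
        .sourceBucket(bucketName)
        .sourceKey(key)
        .destinationBucket(bucketName)
        .destinationKey(key)
        .storageClass(newStorageClass)
        .build();
    s3Client.copyObject(request);
}
```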
### Object Tagging
#### Adding and Managing Tags
```java
public void addTags(S3Client s3Client, String bucketName, String key,
Map<String, String> tags) {
Tagging tagging = Tagging.builder()
.tagSet(tags.entrySet().stream()
.map(entry -> Tag.builder()
.key(entry.getKey())
.value(entry.getValue())
.build())
.collect(Collectors.toList()))
.build();
PutObjectTaggingRequest request = PutObjectTaggingRequest.builder()
.bucket(bucketName)
.key(key)
.tagging(tagging)
.build();
s3Client.putObjectTagging(request);
}
public Map<String, String> getTags(S3Client s3Client, String bucketName, String key) {
GetObjectTaggingRequest request = GetObjectTaggingRequest.builder()
.bucket(bucketName)
.key(key)
.build();
GetObjectTaggingResponse response = s3Client.getObjectTagging(request);
return response.tagSet().stream()
.collect(Collectors.toMap(Tag::key, Tag::value));
}
```
### Advanced Copy Operations
#### Server-Side Copy with Metadata
```java
public void copyWithMetadata(S3Client s3Client, String sourceBucket, String sourceKey,
String destBucket, String destKey,
Map<String, String> metadata) {
CopyObjectRequest request = CopyObjectRequest.builder()
.sourceBucket(sourceBucket)
.sourceKey(sourceKey)
.destinationBucket(destBucket)
.destinationKey(destKey)
.metadata(metadata)
.metadataDirective(MetadataDirective.REPLACE)
.build();
s3Client.copyObject(request);
}
```
## Error Handling Patterns
### Retry Mechanisms
```java
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.core.retry.backoff.FixedDelayBackoffStrategy;
import software.amazon.awssdk.core.retry.conditions.RetryCondition;
import java.time.Duration;
public S3Client createS3ClientWithRetry() {
    return S3Client.builder()
        .overrideConfiguration(b -> b
            .retryPolicy(RetryPolicy.builder()
                .numRetries(3)
                .backoffStrategy(FixedDelayBackoffStrategy.create(Duration.ofSeconds(1)))
                .retryCondition(RetryCondition.defaultRetryCondition())
                .build()))
        .build();
}
```
### Throttling Handling
```java
public void handleThrottling(S3Client s3Client, String bucketName, String key) {
    try {
        PutObjectRequest request = PutObjectRequest.builder()
            .bucket(bucketName)
            .key(key)
            .build();
        s3Client.putObject(request, RequestBody.fromString("test"));
    } catch (S3Exception e) {
        // S3 signals throttling with 503 Slow Down
        if (e.statusCode() == 503) {
            try {
                Thread.sleep(1000);
                // Retry logic here
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }
        throw e;
    }
}
```
## Performance Optimization
### Batch Operations
#### Batch Delete Objects
```java
public void batchDeleteObjects(S3Client s3Client, String bucketName,
List<String> keys) {
int batchSize = 1000; // S3 limit for batch operations
int totalBatches = (int) Math.ceil((double) keys.size() / batchSize);
for (int i = 0; i < totalBatches; i++) {
List<String> batchKeys = keys.subList(
i * batchSize,
Math.min((i + 1) * batchSize, keys.size()));
List<ObjectIdentifier> objectIdentifiers = batchKeys.stream()
.map(key -> ObjectIdentifier.builder().key(key).build())
.collect(Collectors.toList());
Delete delete = Delete.builder()
.objects(objectIdentifiers)
.build();
DeleteObjectsRequest request = DeleteObjectsRequest.builder()
.bucket(bucketName)
.delete(delete)
.build();
s3Client.deleteObjects(request);
}
}
```
### Parallel Uploads
```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public void parallelUploads(S3Client s3Client, String bucketName,
List<String> keys, ExecutorService executor) {
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (String key : keys) {
CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
s3Client.putObject(request, RequestBody.fromString("data"));
}, executor);
futures.add(future);
}
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
}
```
## Security Considerations
### Access Control
#### Setting Object ACLs
```java
public void setObjectAcl(S3Client s3Client, String bucketName, String key,
                         ObjectCannedACL acl) {
    PutObjectAclRequest request = PutObjectAclRequest.builder()
        .bucket(bucketName)
        .key(key)
        .acl(acl)
        .build();
    s3Client.putObjectAcl(request);
}
// ObjectCannedACL options:
// PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ,
// AWS_EXEC_READ, BUCKET_OWNER_READ, BUCKET_OWNER_FULL_CONTROL
```
#### Encryption
```java
public void encryptedUpload(S3Client s3Client, String bucketName, String key,
String filePath, String kmsKeyId) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.serverSideEncryption(ServerSideEncryption.AWS_KMS)
.ssekmsKeyId(kmsKeyId)
.build();
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
}
```
## Monitoring and Logging
### Upload Completion Events
```java
public void uploadWithMonitoring(S3Client s3Client, String bucketName, String key,
String filePath) {
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
PutObjectResponse response = s3Client.putObject(request,
    RequestBody.fromFile(Paths.get(filePath)));
System.out.println("Upload completed with ETag: " + response.eTag());
}
```
## Integration Patterns
### Event Notifications
```java
public void setupEventNotifications(S3Client s3Client, String bucketName) {
NotificationConfiguration configuration = NotificationConfiguration.builder()
.topicConfigurations(TopicConfiguration.builder()
.topicArn("arn:aws:sns:us-east-1:123456789012:my-topic")
.events(Event.S3_OBJECT_CREATED_PUT, Event.S3_OBJECT_CREATED_POST)
.build())
.build();
PutBucketNotificationConfigurationRequest request =
PutBucketNotificationConfigurationRequest.builder()
.bucket(bucketName)
.notificationConfiguration(configuration)
.build();
s3Client.putBucketNotificationConfiguration(request);
}
```

# S3 Spring Boot Integration Reference
## Advanced Spring Boot Configuration
### Multi-Environment Configuration
```java
import lombok.Data;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import java.net.URI;
@Configuration
@EnableConfigurationProperties(S3Properties.class)
public class S3Configuration {
private final S3Properties properties;
public S3Configuration(S3Properties properties) {
this.properties = properties;
}
@Bean
@ConditionalOnProperty(name = "s3.client.async.enabled", havingValue = "true")
public S3AsyncClient s3AsyncClient() {
    S3AsyncClient.Builder builder = S3AsyncClient.builder()
        .region(Region.of(properties.getRegion()))
        .credentialsProvider(StaticCredentialsProvider.create(
            AwsBasicCredentials.create(
                properties.getAccessKey(),
                properties.getSecretKey())));
    if (properties.getEndpoint() != null) {
        // Endpoint override is only set for local development (e.g. LocalStack)
        builder.endpointOverride(URI.create(properties.getEndpoint()));
    }
    return builder.build();
}
@Bean
@ConditionalOnProperty(name = "s3.sync-enabled", havingValue = "true", matchIfMissing = true)
public S3Client s3Client() {
    S3Client.Builder builder = S3Client.builder()
        .region(Region.of(properties.getRegion()))
        .credentialsProvider(StaticCredentialsProvider.create(
            AwsBasicCredentials.create(
                properties.getAccessKey(),
                properties.getSecretKey())));
    if (properties.getEndpoint() != null) {
        builder.endpointOverride(URI.create(properties.getEndpoint()));
    }
    return builder.build();
}
@Bean
@ConditionalOnProperty(name = "s3.transfer-manager.enabled", havingValue = "true")
public S3TransferManager s3TransferManager() {
// The Transfer Manager requires the asynchronous client
return S3TransferManager.builder()
    .s3Client(s3AsyncClient())
.build();
}
@Bean
@ConditionalOnProperty(name = "s3.presigner.enabled", havingValue = "true")
public S3Presigner s3Presigner() {
return S3Presigner.builder()
.region(Region.of(properties.getRegion()))
.build();
}
}
@ConfigurationProperties(prefix = "s3")
@Data
public class S3Properties {
private String accessKey;
private String secretKey;
private String region = "us-east-1";
private String endpoint = null;
private boolean syncEnabled = true;
private boolean asyncEnabled = false;
private boolean transferManagerEnabled = false;
private boolean presignerEnabled = false;
private int maxConnections = 100;
private int connectionTimeout = 5000;
private int socketTimeout = 30000;
private String defaultBucket;
}
```
### Profile-Specific Configuration
```properties
# application-dev.properties
s3.access-key=${AWS_ACCESS_KEY}
s3.secret-key=${AWS_SECRET_KEY}
s3.region=us-east-1
s3.endpoint=http://localhost:4566
s3.async-enabled=true
s3.transfer-manager-enabled=true
# application-prod.properties
s3.access-key=${AWS_ACCESS_KEY}
s3.secret-key=${AWS_SECRET_KEY}
s3.region=us-east-1
s3.async-enabled=true
s3.presigner-enabled=true
```
## Advanced Service Patterns
### Generic S3 Service Template
```java
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.core.async.AsyncResponseTransformer;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import software.amazon.awssdk.transfer.s3.model.*;
import java.io.IOException;
import java.nio.file.*;
import java.util.*;
import java.util.stream.Collectors;
@Service
@RequiredArgsConstructor
public class S3Service {
private final S3Client s3Client;
private final S3AsyncClient s3AsyncClient;
private final S3TransferManager transferManager;
private final S3Properties s3Properties;
// Basic Operations
public Mono<Void> uploadObjectAsync(String key, byte[] data) {
    PutObjectRequest request = PutObjectRequest.builder()
        .bucket(s3Properties.getDefaultBucket())
        .key(key)
        .build();
    // The async client takes an AsyncRequestBody and returns a CompletableFuture
    return Mono.fromFuture(() -> s3AsyncClient.putObject(request,
            AsyncRequestBody.fromBytes(data)))
        .then();
}
public Mono<byte[]> downloadObjectAsync(String key) {
    GetObjectRequest request = GetObjectRequest.builder()
        .bucket(s3Properties.getDefaultBucket())
        .key(key)
        .build();
    // Buffer the object body in memory via AsyncResponseTransformer.toBytes()
    return Mono.fromFuture(() -> s3AsyncClient.getObject(request,
            AsyncResponseTransformer.toBytes()))
        .map(responseBytes -> responseBytes.asByteArray());
}
// Advanced Operations
public Mono<UploadResult> uploadWithMetadata(String key,
                                             Path file,
                                             Map<String, String> metadata) {
    PutObjectRequest request = PutObjectRequest.builder()
        .bucket(s3Properties.getDefaultBucket())
        .key(key)
        .metadata(metadata)
        .contentType(getContentType(file))
        .build();
    return Mono.fromFuture(() -> s3AsyncClient.putObject(request,
            AsyncRequestBody.fromFile(file)))
        .map(response -> new UploadResult(key, response.eTag()));
}
public Flux<S3Object> listObjectsWithPrefix(String prefix) {
ListObjectsV2Request request = ListObjectsV2Request.builder()
.bucket(s3Properties.getDefaultBucket())
.prefix(prefix)
.build();
return Flux.create(sink -> {
s3Client.listObjectsV2Paginator(request)
.contents()
.forEach(sink::next);
sink.complete();
});
}
public Mono<Void> batchDelete(List<String> keys) {
    List<ObjectIdentifier> objectIdentifiers = keys.stream()
        .map(key -> ObjectIdentifier.builder().key(key).build())
        .collect(Collectors.toList());
    DeleteObjectsRequest request = DeleteObjectsRequest.builder()
        .bucket(s3Properties.getDefaultBucket())
        .delete(Delete.builder().objects(objectIdentifiers).build())
        .build();
    return Mono.fromFuture(() -> s3AsyncClient.deleteObjects(request))
        .then();
}
// Transfer Manager Operations
public Mono<UploadResult> uploadWithTransferManager(String key, Path file) {
return Mono.fromFuture(() -> {
UploadFileRequest request = UploadFileRequest.builder()
.putObjectRequest(req -> req
.bucket(s3Properties.getDefaultBucket())
.key(key))
.source(file)
.build();
return transferManager.uploadFile(request)
.completionFuture()
.thenApply(result -> new UploadResult(key, result.response().eTag()));
});
}
public Mono<DownloadResult> downloadWithTransferManager(String key, Path destination) {
return Mono.fromFuture(() -> {
DownloadFileRequest request = DownloadFileRequest.builder()
.getObjectRequest(req -> req
.bucket(s3Properties.getDefaultBucket())
.key(key))
.destination(destination)
.build();
return transferManager.downloadFile(request)
.completionFuture()
.thenApply(result -> new DownloadResult(destination, result.response().contentLength()));
});
}
// Utility Methods
private String getContentType(Path file) {
try {
return Files.probeContentType(file);
} catch (IOException e) {
return "application/octet-stream";
}
}
// Records for Results
public record UploadResult(String key, String eTag) {}
public record DownloadResult(Path path, long size) {}
}
```
### Event-Driven S3 Operations
```java
import lombok.RequiredArgsConstructor;
import org.springframework.context.ApplicationEvent;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import java.nio.file.Path;
import java.time.Duration;
@Service
@RequiredArgsConstructor
public class S3EventService {
private final S3Service s3Service;
private final S3Properties s3Properties;
private final ApplicationEventPublisher eventPublisher;
public Mono<UploadResult> uploadAndPublishEvent(String key, Path file) {
return s3Service.uploadWithTransferManager(key, file)
.doOnSuccess(result -> {
eventPublisher.publishEvent(new S3UploadEvent(key, result.eTag()));
})
.doOnError(error -> {
eventPublisher.publishEvent(new S3UploadFailedEvent(key, error.getMessage()));
});
}
public Mono<String> generatePresignedUrl(String key) {
    // Presigning is a local signing operation; no download is needed first
    return Mono.fromCallable(() -> {
        try (S3Presigner presigner = S3Presigner.create()) {
            GetObjectRequest request = GetObjectRequest.builder()
                .bucket(s3Properties.getDefaultBucket())
                .key(key)
                .build();
            GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder()
                .signatureDuration(Duration.ofMinutes(10))
                .getObjectRequest(request)
                .build();
            return presigner.presignGetObject(presignRequest).url().toString();
        }
    });
}
}
// Event Classes
public class S3UploadEvent extends ApplicationEvent {
private final String key;
private final String eTag;
public S3UploadEvent(String key, String eTag) {
super(key);
this.key = key;
this.eTag = eTag;
}
public String getKey() { return key; }
public String getETag() { return eTag; }
}
public class S3UploadFailedEvent extends ApplicationEvent {
private final String key;
private final String errorMessage;
public S3UploadFailedEvent(String key, String errorMessage) {
super(key);
this.key = key;
this.errorMessage = errorMessage;
}
public String getKey() { return key; }
public String getErrorMessage() { return errorMessage; }
}
```
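A minimal listener sketch for consuming these events elsewhere in the application, using Spring's standard `@EventListener`:
```java
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

@Component
public class S3UploadEventListener {

    @EventListener
    public void onUploadSucceeded(S3UploadEvent event) {
        // React to a completed upload, e.g. record it or kick off processing
        System.out.println("Uploaded " + event.getKey() + " (ETag " + event.getETag() + ")");
    }

    @EventListener
    public void onUploadFailed(S3UploadFailedEvent event) {
        System.err.println("Upload of " + event.getKey() + " failed: " + event.getErrorMessage());
    }
}
```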
### Retry and Error Handling
```java
import lombok.RequiredArgsConstructor;
import org.springframework.retry.annotation.*;
import org.springframework.stereotype.Service;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;
import java.nio.file.Path;

@Service
@RequiredArgsConstructor
public class ResilientS3Service {
    private final S3Client s3Client;

    // Spring Retry wraps blocking method calls; a method returning Mono would
    // complete before any retry could apply, so these methods use the sync client.
    @Retryable(value = {S3Exception.class, SdkClientException.class},
               maxAttempts = 3,
               backoff = @Backoff(delay = 1000, multiplier = 2))
    public PutObjectResponse uploadWithRetry(String key, Path file) {
        PutObjectRequest request = PutObjectRequest.builder()
            .bucket("my-bucket")
            .key(key)
            .build();
        return s3Client.putObject(request, RequestBody.fromFile(file));
    }

    @Recover
    public PutObjectResponse uploadRecover(S3Exception e, String key, Path file) {
        // Log the failure and potentially send notification
        System.err.println("Upload failed after retries: " + e.getMessage());
        throw new S3UploadException("Upload failed after retries", e);
    }

    @Retryable(value = {S3Exception.class},
               maxAttempts = 5,
               backoff = @Backoff(delay = 2000, multiplier = 2))
    public CopyObjectResponse copyObjectWithRetry(String sourceKey, String destinationKey) {
        CopyObjectRequest request = CopyObjectRequest.builder()
            .sourceBucket("source-bucket")
            .sourceKey(sourceKey)
            .destinationBucket("destination-bucket")
            .destinationKey(destinationKey)
            .build();
        return s3Client.copyObject(request);
    }
}

public class S3UploadException extends RuntimeException {
    public S3UploadException(String message, Throwable cause) {
        super(message, cause);
    }
}
```
## Testing Integration
### Test Configuration with LocalStack
```java
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.ActiveProfiles;
import org.testcontainers.containers.localstack.LocalStackContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Client;
@Testcontainers
@ActiveProfiles("test")
@TestConfiguration
public class S3TestConfig {
@Container
static LocalStackContainer localstack = new LocalStackContainer(
DockerImageName.parse("localstack/localstack:3.0"))
.withServices(LocalStackContainer.Service.S3)
.withEnv("DEFAULT_REGION", "us-east-1");
@Bean
public S3Client testS3Client() {
return S3Client.builder()
.region(Region.US_EAST_1)
.endpointOverride(localstack.getEndpointOverride(LocalStackContainer.Service.S3))
.credentialsProvider(StaticCredentialsProvider.create(
AwsBasicCredentials.create(
localstack.getAccessKey(),
localstack.getSecretKey())))
.build();
}
@Bean
public S3AsyncClient testS3AsyncClient() {
return S3AsyncClient.builder()
.region(Region.US_EAST_1)
.endpointOverride(localstack.getEndpointOverride(LocalStackContainer.Service.S3))
.credentialsProvider(StaticCredentialsProvider.create(
AwsBasicCredentials.create(
localstack.getAccessKey(),
localstack.getSecretKey())))
.build();
}
}
```
### Unit Testing with Mocks
```java
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import reactor.test.StepVerifier;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;
import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import java.util.concurrent.CompletableFuture;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;

@ExtendWith(MockitoExtension.class)
class S3ServiceTest {
    @Mock
    private S3Client s3Client;
    @Mock
    private S3AsyncClient s3AsyncClient;
    @Mock
    private S3TransferManager transferManager;
    @Mock
    private S3Properties s3Properties;
    @InjectMocks
    private S3Service s3Service;

    @Test
    void uploadObjectAsync_ShouldComplete() {
        // Arrange
        when(s3Properties.getDefaultBucket()).thenReturn("test-bucket");
        when(s3AsyncClient.putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class)))
            .thenReturn(CompletableFuture.completedFuture(
                PutObjectResponse.builder().eTag("12345").build()));
        // Act & Assert -- StepVerifier runs the assertion on the test thread
        StepVerifier.create(s3Service.uploadObjectAsync("test-key", "test-content".getBytes()))
            .verifyComplete();
    }

    @Test
    void listObjectsWithPrefix_ShouldReturnObjectList() {
        // Arrange
        String prefix = "documents/";
        ListObjectsV2Response response = ListObjectsV2Response.builder()
            .contents(S3Object.builder().key("documents/file1.txt").build(),
                      S3Object.builder().key("documents/file2.txt").build())
            .build();
        when(s3Properties.getDefaultBucket()).thenReturn("test-bucket");
        when(s3Client.listObjectsV2(any(ListObjectsV2Request.class))).thenReturn(response);
        // The paginator delegates page fetches back to the mocked listObjectsV2 call
        when(s3Client.listObjectsV2Paginator(any(ListObjectsV2Request.class)))
            .thenAnswer(inv -> new ListObjectsV2Iterable(s3Client, inv.getArgument(0)));
        // Act & Assert
        StepVerifier.create(s3Service.listObjectsWithPrefix(prefix).collectList())
            .assertNext(objects -> {
                assertEquals(2, objects.size());
                assertTrue(objects.stream().allMatch(obj -> obj.key().startsWith(prefix)));
            })
            .verifyComplete();
    }
}
```
### Integration Testing
```java
import org.junit.jupiter.api.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import reactor.test.StepVerifier;
import software.amazon.awssdk.services.s3.model.*;
import java.io.IOException;
import java.nio.file.*;
import java.util.Map;
@SpringBootTest
@ActiveProfiles("test")
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
class S3IntegrationTest {
@Autowired
private S3Service s3Service;
private static final String TEST_BUCKET = "test-bucket";
private static final String TEST_FILE = "test-document.txt";
@BeforeAll
static void setup() throws Exception {
// Create test file
Files.write(Paths.get(TEST_FILE), "Test content".getBytes());
}
@Test
@Order(1)
void uploadFile_ShouldSucceed() {
// Act & Assert
s3Service.uploadWithMetadata(TEST_FILE, Paths.get(TEST_FILE),
Map.of("author", "test", "type", "document"))
.as(StepVerifier::create)
.expectNextMatches(result ->
result.key().equals(TEST_FILE) && result.eTag() != null)
.verifyComplete();
}
@Test
@Order(2)
void downloadFile_ShouldReturnContent() {
// Act & Assert
s3Service.downloadObjectAsync(TEST_FILE)
    .as(StepVerifier::create)
    // byte[] equality is reference-based, so compare contents explicitly
    .expectNextMatches(bytes -> new String(bytes).equals("Test content"))
    .verifyComplete();
}
@Test
@Order(3)
void listObjects_ShouldReturnFiles() {
// Act & Assert
s3Service.listObjectsWithPrefix("")
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
}
@AfterAll
static void cleanup() {
try {
Files.deleteIfExists(Paths.get(TEST_FILE));
} catch (IOException e) {
// Ignore
}
}
}
```
## Advanced Configuration Patterns
### Environment-Specific Configuration
```java
import org.springframework.boot.autoconfigure.condition.*;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import software.amazon.awssdk.auth.credentials.*;
import software.amazon.awssdk.regions.Region;
@Configuration
public class EnvironmentAwareS3Config {
@Bean
@ConditionalOnMissingBean
public AwsCredentialsProvider awsCredentialsProvider(S3Properties properties) {
if (properties.getAccessKey() != null && properties.getSecretKey() != null) {
return StaticCredentialsProvider.create(
AwsBasicCredentials.create(
properties.getAccessKey(),
properties.getSecretKey()));
}
return DefaultCredentialsProvider.create();
}
@Bean
@ConditionalOnMissingBean
@ConditionalOnProperty(name = "s3.region")
public Region region(S3Properties properties) {
return Region.of(properties.getRegion());
}
@Bean
@ConditionalOnMissingBean
@ConditionalOnProperty(name = "s3.endpoint")
public String endpoint(S3Properties properties) {
return properties.getEndpoint();
}
}
```
### Multi-Bucket Support
```java
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
import software.amazon.awssdk.services.s3.S3Client;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
@Service
public class MultiBucketS3Service {
    private final Map<String, S3Client> bucketClients = new HashMap<>();
    private final S3Client defaultS3Client;

    public MultiBucketS3Service(S3Client defaultS3Client) {
        this.defaultS3Client = defaultS3Client;
    }

    public S3Client getClientForBucket(String bucketName) {
        // Reuse the default client's region and credentials for per-bucket clients
        return bucketClients.computeIfAbsent(bucketName, name ->
            S3Client.builder()
                .region(defaultS3Client.serviceClientConfiguration().region())
                .credentialsProvider(defaultS3Client.serviceClientConfiguration().credentialsProvider())
                .build());
    }
public Mono<UploadResult> uploadToBucket(String bucketName, String key, Path file) {
S3Client client = getClientForBucket(bucketName);
// Upload implementation using the specific client
return Mono.empty(); // Implementation
}
}
```

# S3 Transfer Patterns Reference
## S3 Transfer Manager Advanced Patterns
### Configuration and Optimization
#### Custom Transfer Manager Configuration
```java
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import java.time.Duration;
public S3TransferManager createOptimizedTransferManager() {
    // The Transfer Manager sits on top of an S3AsyncClient, so HTTP tuning
    // happens on the async client's builder
    S3AsyncClient s3AsyncClient = S3AsyncClient.builder()
        .httpClientBuilder(NettyNioAsyncHttpClient.builder()
            .maxConcurrency(200)
            .connectionTimeout(Duration.ofSeconds(5))
            .readTimeout(Duration.ofSeconds(60)))
        .build();
    return S3TransferManager.builder()
        .s3Client(s3AsyncClient)
        .build();
}
```
#### Parallel Upload Configuration
```java
public void configureParallelUploads() {
    S3TransferManager transferManager = S3TransferManager.create();
    FileUpload upload = transferManager.uploadFile(
        UploadFileRequest.builder()
            .putObjectRequest(req -> req
                .bucket("my-bucket")
                .key("large-file.bin"))
            .source(Paths.get("large-file.bin"))
            // Progress events are delivered through TransferListener callbacks
            .addTransferListener(LoggingTransferListener.create())
            .build());
    // Handle completion
    upload.completionFuture().thenAccept(result -> {
        System.out.println("Upload completed with ETag: " +
            result.response().eTag());
    });
}
```
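For progress reporting beyond logging, you can implement `TransferListener` yourself; a sketch that prints the transferred ratio on each callback (attach it via `addTransferListener(...)` as above):
```java
import software.amazon.awssdk.transfer.s3.progress.TransferListener;

// Prints percentage progress as bytes move; all TransferListener methods
// have default implementations, so only the interesting ones are overridden.
public class PercentageListener implements TransferListener {
    @Override
    public void bytesTransferred(Context.BytesTransferred context) {
        context.progressSnapshot().ratioTransferred().ifPresent(ratio ->
            System.out.printf("Progress: %.1f%%%n", ratio * 100));
    }

    @Override
    public void transferComplete(Context.TransferComplete context) {
        System.out.println("Transfer complete");
    }

    @Override
    public void transferFailed(Context.TransferFailed context) {
        System.err.println("Transfer failed: " + context.exception().getMessage());
    }
}
```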
### Advanced Upload Patterns
#### Multipart Upload with Progress Monitoring
```java
public void multipartUploadWithProgress(S3Client s3Client, String bucketName,
String key, String filePath) {
int partSize = 5 * 1024 * 1024; // 5 MB parts
File file = new File(filePath);
CreateMultipartUploadRequest createRequest = CreateMultipartUploadRequest.builder()
.bucket(bucketName)
.key(key)
.build();
CreateMultipartUploadResponse createResponse = s3Client.createMultipartUpload(createRequest);
String uploadId = createResponse.uploadId();
List<CompletedPart> completedParts = new ArrayList<>();
long uploadedBytes = 0;
long totalBytes = file.length();
try (FileInputStream fis = new FileInputStream(file)) {
byte[] buffer = new byte[partSize];
int partNumber = 1;
while (true) {
int bytesRead = fis.read(buffer);
if (bytesRead == -1) break;
byte[] partData = Arrays.copyOf(buffer, bytesRead);
UploadPartRequest uploadRequest = UploadPartRequest.builder()
.bucket(bucketName)
.key(key)
.uploadId(uploadId)
.partNumber(partNumber)
.build();
UploadPartResponse uploadResponse = s3Client.uploadPart(
uploadRequest, RequestBody.fromBytes(partData));
completedParts.add(CompletedPart.builder()
.partNumber(partNumber)
.eTag(uploadResponse.eTag())
.build());
uploadedBytes += bytesRead;
partNumber++;
// Log progress
double progress = (double) uploadedBytes / totalBytes * 100;
System.out.printf("Upload progress: %.2f%%%n", progress);
}
CompleteMultipartUploadRequest completeRequest =
CompleteMultipartUploadRequest.builder()
.bucket(bucketName)
.key(key)
.uploadId(uploadId)
.multipartUpload(CompletedMultipartUpload.builder()
.parts(completedParts)
.build())
.build();
s3Client.completeMultipartUpload(completeRequest);
} catch (Exception e) {
// Abort on failure
AbortMultipartUploadRequest abortRequest =
AbortMultipartUploadRequest.builder()
.bucket(bucketName)
.key(key)
.uploadId(uploadId)
.build();
s3Client.abortMultipartUpload(abortRequest);
throw new RuntimeException("Multipart upload failed", e);
}
}
```
#### Resume Interrupted Uploads
```java
public void resumeUpload(S3Client s3Client, String bucketName, String key,
String filePath, String existingUploadId) {
ListMultipartUploadsRequest listRequest = ListMultipartUploadsRequest.builder()
.bucket(bucketName)
.prefix(key)
.build();
ListMultipartUploadsResponse listResponse = s3Client.listMultipartUploads(listRequest);
// Check if upload already exists
boolean uploadExists = listResponse.uploads().stream()
.anyMatch(upload -> upload.key().equals(key) &&
upload.uploadId().equals(existingUploadId));
if (uploadExists) {
// Resume existing upload
continueExistingUpload(s3Client, bucketName, key, existingUploadId, filePath);
} else {
// Start new upload
multipartUploadWithProgress(s3Client, bucketName, key, filePath);
}
}
private void continueExistingUpload(S3Client s3Client, String bucketName,
String key, String uploadId, String filePath) {
// List already uploaded parts
ListPartsRequest listPartsRequest = ListPartsRequest.builder()
.bucket(bucketName)
.key(key)
.uploadId(uploadId)
.build();
ListPartsResponse listPartsResponse = s3Client.listParts(listPartsRequest);
List<CompletedPart> completedParts = listPartsResponse.parts().stream()
.map(part -> CompletedPart.builder()
.partNumber(part.partNumber())
.eTag(part.eTag())
.build())
.collect(Collectors.toList());
// Upload remaining parts
// ... implementation of remaining parts upload
}
```
### Advanced Download Patterns
#### Partial File Download
```java
public void downloadPartialFile(S3Client s3Client, String bucketName, String key,
                                String destPath, long startByte, long endByte) throws IOException {
GetObjectRequest request = GetObjectRequest.builder()
.bucket(bucketName)
.key(key)
.range("bytes=" + startByte + "-" + endByte)
.build();
try (ResponseInputStream<GetObjectResponse> response = s3Client.getObject(request);
OutputStream outputStream = new FileOutputStream(destPath)) {
response.transferTo(outputStream);
System.out.println("Partial download completed: " +
(endByte - startByte + 1) + " bytes");
}
}
```
#### Parallel Downloads
```java
import java.io.*;
import java.util.*;
import java.util.concurrent.*;
public void parallelDownloads(S3Client s3Client, String bucketName,
                              String key, String destPath, int chunkCount) throws IOException {
    // getFileSize() HEADs the object, as shown in the object operations reference
    long fileSize = getFileSize(s3Client, bucketName, key);
    long chunkSize = fileSize / chunkCount;
    ExecutorService executor = Executors.newFixedThreadPool(chunkCount);
    List<Future<Void>> futures = new ArrayList<>();
    for (int i = 0; i < chunkCount; i++) {
        final int chunkIndex = i; // lambdas require an effectively final copy
        long start = chunkIndex * chunkSize;
        long end = (chunkIndex == chunkCount - 1) ? fileSize - 1 : start + chunkSize - 1;
        Future<Void> future = executor.submit(() -> {
            downloadPartialFile(s3Client, bucketName, key,
                destPath + ".part" + chunkIndex, start, end);
            return null;
        });
        futures.add(future);
    }
    // Wait for all downloads to complete
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException("Download failed", e);
        }
    }
    // Combine chunks
    combineChunks(destPath, chunkCount);
    executor.shutdown();
}
private void combineChunks(String baseName, int chunkCount) throws IOException {
try (OutputStream outputStream = new FileOutputStream(baseName)) {
for (int i = 0; i < chunkCount; i++) {
String chunkFile = baseName + ".part" + i;
try (InputStream inputStream = new FileInputStream(chunkFile)) {
inputStream.transferTo(outputStream);
}
new File(chunkFile).delete();
}
}
}
```
### Error Handling and Retry
#### Upload with Exponential Backoff
```java
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.core.retry.backoff.FullJitterBackoffStrategy;
import software.amazon.awssdk.core.retry.conditions.RetryCondition;
public void resilientUpload(S3Client s3Client, String bucketName, String key,
                            String filePath) {
    PutObjectRequest request = PutObjectRequest.builder()
        .bucket(bucketName)
        .key(key)
        .build();
    // Configure retry policy; the default retry condition already retries
    // throttling errors and 5xx server errors
    S3Client retryS3Client = S3Client.builder()
        .overrideConfiguration(b -> b
            .retryPolicy(RetryPolicy.builder()
                .numRetries(5)
                .backoffStrategy(FullJitterBackoffStrategy.builder()
                    .baseDelay(Duration.ofSeconds(1))
                    .maxBackoffTime(Duration.ofSeconds(30))
                    .build())
                .retryCondition(RetryCondition.defaultRetryCondition())
                .build()))
        .build();
    retryS3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
}
```
#### Upload with Checkpoint
```java
import java.nio.file.*;
public void uploadWithCheckpoint(S3Client s3Client, String bucketName,
String key, String filePath) {
String checkpointFile = filePath + ".checkpoint";
Path checkpointPath = Paths.get(checkpointFile);
long startPos = 0;
if (Files.exists(checkpointPath)) {
// Read checkpoint
try {
startPos = Long.parseLong(Files.readString(checkpointPath));
} catch (IOException e) {
throw new RuntimeException("Failed to read checkpoint", e);
}
}
if (startPos > 0) {
// Resume upload
continueUploadFromCheckpoint(s3Client, bucketName, key, filePath, startPos);
} else {
// Start new upload
startNewUpload(s3Client, bucketName, key, filePath);
}
// Update checkpoint
long endPos = new File(filePath).length();
try {
Files.writeString(checkpointPath, String.valueOf(endPos));
} catch (IOException e) {
throw new RuntimeException("Failed to write checkpoint", e);
}
}
private void continueUploadFromCheckpoint(S3Client s3Client, String bucketName,
String key, String filePath, long startPos) {
// Implement resume logic
}
private void startNewUpload(S3Client s3Client, String bucketName,
String key, String filePath) {
// Implement initial upload logic
}
```
### Performance Tuning
#### Buffer Configuration
```java
public S3Client configureLargeBuffer() {
return S3Client.builder()
.overrideConfiguration(b -> b
.apiCallAttemptTimeout(Duration.ofMinutes(5))
.apiCallTimeout(Duration.ofMinutes(10)))
.build();
}
public S3TransferManager configureHighThroughput() {
    // Multipart thresholds are configured on the underlying S3AsyncClient
    S3AsyncClient s3AsyncClient = S3AsyncClient.builder()
        .multipartEnabled(true)
        .multipartConfiguration(b -> b
            .thresholdInBytes(8L * 1024 * 1024)         // switch to multipart at 8 MB
            .minimumPartSizeInBytes(10L * 1024 * 1024)) // 10 MB parts
        .build();
    return S3TransferManager.builder()
        .s3Client(s3AsyncClient)
        .build();
}
```
#### Network Optimization
```java
public S3Client createOptimizedS3Client() {
return S3Client.builder()
.httpClientBuilder(ApacheHttpClient.builder()
.maxConnections(200)
.socketTimeout(Duration.ofSeconds(30))
.connectionTimeout(Duration.ofSeconds(5))
.connectionAcquisitionTimeout(Duration.ofSeconds(30))
.build())
.region(Region.US_EAST_1)
.build();
}
```
### Monitoring and Metrics
#### Upload Progress Tracking
```java
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import software.amazon.awssdk.transfer.s3.model.*;
import software.amazon.awssdk.transfer.s3.progress.LoggingTransferListener;
// The synchronous S3Client does not expose progress callbacks; in SDK v2,
// progress events are reported through the Transfer Manager's TransferListener.
public void uploadWithProgressTracking(S3TransferManager transferManager,
                                       String bucketName, String key, String filePath) {
    UploadFileRequest request = UploadFileRequest.builder()
        .putObjectRequest(req -> req
            .bucket(bucketName)
            .key(key))
        .source(Paths.get(filePath))
        .addTransferListener(LoggingTransferListener.create())
        .build();
    CompletedFileUpload result = transferManager.uploadFile(request)
        .completionFuture()
        .join();
    System.out.println("Upload complete. ETag: " + result.response().eTag());
}
```
#### Throughput Measurement
```java
public void measureUploadThroughput(S3Client s3Client, String bucketName,
String key, String filePath) {
long startTime = System.currentTimeMillis();
long fileSize = new File(filePath).length();
PutObjectRequest request = PutObjectRequest.builder()
.bucket(bucketName)
.key(key)
.build();
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
long endTime = System.currentTimeMillis();
long duration = endTime - startTime;
double throughput = (fileSize * 1000.0) / duration / (1024 * 1024); // MB/s
System.out.printf("Upload throughput: %.2f MB/s%n", throughput);
}
```
## Testing and Validation
### Upload Validation
```java
public void validateUpload(S3Client s3Client, String bucketName, String key,
                           String localFilePath) throws IOException {
    // Download the object from S3 into memory
    byte[] s3Content = s3Client.getObjectAsBytes(GetObjectRequest.builder()
            .bucket(bucketName)
            .key(key)
            .build())
        .asByteArray();
    // Read local file
    byte[] localContent = Files.readAllBytes(Paths.get(localFilePath));
    // Validate content matches (Arrays.equals also covers length)
    if (!Arrays.equals(s3Content, localContent)) {
        throw new RuntimeException("Upload validation failed: content mismatch");
    }
    System.out.println("Upload validation successful");
}
```