Initial commit
This commit is contained in:
@@ -0,0 +1,371 @@
|
||||
# S3 Object Operations Reference
|
||||
|
||||
## Detailed Object Operations
|
||||
|
||||
### Advanced Upload Patterns
|
||||
|
||||
#### Streaming Upload with Progress Monitoring
|
||||
|
||||
```java
|
||||
public void uploadWithProgress(S3Client s3Client, String bucketName, String key,
|
||||
String filePath) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
try (RequestBody file = RequestBody.fromFile(Paths.get(filePath))) {
|
||||
s3Client.putObject(request, file);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Conditional Upload
|
||||
|
||||
```java
|
||||
public void conditionalUpload(S3Client s3Client, String bucketName, String key,
|
||||
String filePath, String expectedETag) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.ifMatch(expectedETag)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Download Patterns
|
||||
|
||||
#### Range Requests for Large Files
|
||||
|
||||
```java
|
||||
public void downloadInChunks(S3Client s3Client, String bucketName, String key,
|
||||
String destPath, int chunkSizeMB) {
|
||||
long fileSize = getFileSize(s3Client, bucketName, key);
|
||||
int chunkSize = chunkSizeMB * 1024 * 1024;
|
||||
|
||||
try (OutputStream os = new FileOutputStream(destPath)) {
|
||||
for (long start = 0; start < fileSize; start += chunkSize) {
|
||||
long end = Math.min(start + chunkSize - 1, fileSize - 1);
|
||||
|
||||
GetObjectRequest request = GetObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.range("bytes=" + start + "-" + end)
|
||||
.build();
|
||||
|
||||
try (ResponseInputStream<GetObjectResponse> response =
|
||||
s3Client.getObject(request)) {
|
||||
response.transferTo(os);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Metadata Management
|
||||
|
||||
#### Setting and Retrieving Object Metadata
|
||||
|
||||
```java
|
||||
public void setObjectMetadata(S3Client s3Client, String bucketName, String key,
|
||||
Map<String, String> metadata) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.metadata(metadata)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.empty());
|
||||
}
|
||||
|
||||
public Map<String, String> getObjectMetadata(S3Client s3Client,
|
||||
String bucketName, String key) {
|
||||
HeadObjectRequest request = HeadObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
HeadObjectResponse response = s3Client.headObject(request);
|
||||
return response.metadata();
|
||||
}
|
||||
```
|
||||
|
||||
### Storage Classes and Lifecycle
|
||||
|
||||
#### Managing Different Storage Classes
|
||||
|
||||
```java
|
||||
public void uploadWithStorageClass(S3Client s3Client, String bucketName, String key,
|
||||
String filePath, StorageClass storageClass) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.storageClass(storageClass)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
|
||||
}
|
||||
|
||||
// Storage class options:
|
||||
// STANDARD - Default storage class
|
||||
// STANDARD_IA - Infrequent Access
|
||||
// ONEZONE_IA - Single-zone infrequent access
|
||||
// INTELLIGENT_TIERING - Automatically optimizes storage
|
||||
// GLACIER - Archive storage
|
||||
// DEEP_ARCHIVE - Long-term archive storage
|
||||
```
|
||||
|
||||
### Object Tagging
|
||||
|
||||
#### Adding and Managing Tags
|
||||
|
||||
```java
|
||||
public void addTags(S3Client s3Client, String bucketName, String key,
|
||||
Map<String, String> tags) {
|
||||
Tagging tagging = Tagging.builder()
|
||||
.tagSet(tags.entrySet().stream()
|
||||
.map(entry -> Tag.builder()
|
||||
.key(entry.getKey())
|
||||
.value(entry.getValue())
|
||||
.build())
|
||||
.collect(Collectors.toList()))
|
||||
.build();
|
||||
|
||||
PutObjectTaggingRequest request = PutObjectTaggingRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.tagging(tagging)
|
||||
.build();
|
||||
|
||||
s3Client.putObjectTagging(request);
|
||||
}
|
||||
|
||||
public Map<String, String> getTags(S3Client s3Client, String bucketName, String key) {
|
||||
GetObjectTaggingRequest request = GetObjectTaggingRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
GetObjectTaggingResponse response = s3Client.getObjectTagging(request);
|
||||
|
||||
return response.tagSet().stream()
|
||||
.collect(Collectors.toMap(Tag::key, Tag::value));
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Copy Operations
|
||||
|
||||
#### Server-Side Copy with Metadata
|
||||
|
||||
```java
|
||||
public void copyWithMetadata(S3Client s3Client, String sourceBucket, String sourceKey,
|
||||
String destBucket, String destKey,
|
||||
Map<String, String> metadata) {
|
||||
CopyObjectRequest request = CopyObjectRequest.builder()
|
||||
.sourceBucket(sourceBucket)
|
||||
.sourceKey(sourceKey)
|
||||
.destinationBucket(destBucket)
|
||||
.destinationKey(destKey)
|
||||
.metadata(metadata)
|
||||
.metadataDirective(MetadataDirective.REPLACE)
|
||||
.build();
|
||||
|
||||
s3Client.copyObject(request);
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling Patterns
|
||||
|
||||
### Retry Mechanisms
|
||||
|
||||
```java
|
||||
import software.amazon.awssdk.core.retry.RetryPolicy;
|
||||
import software.amazon.awssdk.core.retry.backoff.FixedRetryBackoff;
|
||||
import software.amazon.awssdk.core.retry.conditions.RetryCondition;
|
||||
|
||||
public S3Client createS3ClientWithRetry() {
|
||||
return S3Client.builder()
|
||||
.overrideConfiguration(b -> b
|
||||
.retryPolicy(RetryPolicy.builder()
|
||||
.numRetries(3)
|
||||
.retryBackoffStrategy(FixedRetryBackoff.create(
|
||||
Duration.ofSeconds(1), 3))
|
||||
.retryCondition(RetryCondition.defaultRetryCondition())
|
||||
.build()))
|
||||
.build();
|
||||
}
|
||||
```
|
||||
|
||||
### Throttling Handling
|
||||
|
||||
```java
|
||||
public void handleThrottling(S3Client s3Client, String bucketName, String key) {
|
||||
try {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.fromString("test"));
|
||||
|
||||
} catch (S3Exception e) {
|
||||
if (e.statusCode() == 429) {
|
||||
// Too Many Requests - implement backoff
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
// Retry logic here
|
||||
} catch (InterruptedException ie) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Batch Operations
|
||||
|
||||
#### Batch Delete Objects
|
||||
|
||||
```java
|
||||
public void batchDeleteObjects(S3Client s3Client, String bucketName,
|
||||
List<String> keys) {
|
||||
int batchSize = 1000; // S3 limit for batch operations
|
||||
int totalBatches = (int) Math.ceil((double) keys.size() / batchSize);
|
||||
|
||||
for (int i = 0; i < totalBatches; i++) {
|
||||
List<String> batchKeys = keys.subList(
|
||||
i * batchSize,
|
||||
Math.min((i + 1) * batchSize, keys.size()));
|
||||
|
||||
List<ObjectIdentifier> objectIdentifiers = batchKeys.stream()
|
||||
.map(key -> ObjectIdentifier.builder().key(key).build())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
Delete delete = Delete.builder()
|
||||
.objects(objectIdentifiers)
|
||||
.build();
|
||||
|
||||
DeleteObjectsRequest request = DeleteObjectsRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.delete(delete)
|
||||
.build();
|
||||
|
||||
s3Client.deleteObjects(request);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Parallel Uploads
|
||||
|
||||
```java
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
||||
public void parallelUploads(S3Client s3Client, String bucketName,
|
||||
List<String> keys, ExecutorService executor) {
|
||||
List<CompletableFuture<Void>> futures = new ArrayList<>();
|
||||
|
||||
for (String key : keys) {
|
||||
CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.fromString("data"));
|
||||
}, executor);
|
||||
|
||||
futures.add(future);
|
||||
}
|
||||
|
||||
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
|
||||
}
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Access Control
|
||||
|
||||
#### Setting Object ACLs
|
||||
|
||||
```java
|
||||
public void setObjectAcl(S3Client s3Client, String bucketName, String key,
|
||||
CannedAccessControlList acl) {
|
||||
PutObjectAclRequest request = PutObjectAclRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.acl(acl)
|
||||
.build();
|
||||
|
||||
s3Client.putObjectAcl(request);
|
||||
}
|
||||
|
||||
// ACL options:
|
||||
// private, public-read, public-read-write, authenticated-read,
|
||||
// aws-exec-read, bucket-owner-read, bucket-owner-full-control
|
||||
```
|
||||
|
||||
### Encryption
|
||||
|
||||
```java
|
||||
public void encryptedUpload(S3Client s3Client, String bucketName, String key,
|
||||
String filePath, String kmsKeyId) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.serverSideEncryption(ServerSideEncryption.AWS_KMS)
|
||||
.ssekmsKeyId(kmsKeyId)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
|
||||
}
|
||||
```
|
||||
|
||||
## Monitoring and Logging
|
||||
|
||||
### Upload Completion Events
|
||||
|
||||
```java
|
||||
public void uploadWithMonitoring(S3Client s3Client, String bucketName, String key,
|
||||
String filePath) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
Response<PutObjectResponse> response = s3Client.putObject(request,
|
||||
RequestBody.fromFile(Paths.get(filePath)));
|
||||
|
||||
System.out.println("Upload completed with ETag: " +
|
||||
response.response().eTag());
|
||||
}
|
||||
```
|
||||
|
||||
## Integration Patterns
|
||||
|
||||
### Event Notifications
|
||||
|
||||
```java
|
||||
public void setupEventNotifications(S3Client s3Client, String bucketName) {
|
||||
NotificationConfiguration configuration = NotificationConfiguration.builder()
|
||||
.topicConfigurations(TopicConfiguration.builder()
|
||||
.topicArn("arn:aws:sns:us-east-1:123456789012:my-topic")
|
||||
.events(Event.OBJECT_CREATED_PUT, Event.OBJECT_CREATED_POST)
|
||||
.build())
|
||||
.build();
|
||||
|
||||
PutBucketNotificationConfigurationRequest request =
|
||||
PutBucketNotificationConfigurationRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.notificationConfiguration(configuration)
|
||||
.build();
|
||||
|
||||
s3Client.putBucketNotificationConfiguration(request);
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,668 @@
|
||||
# S3 Spring Boot Integration Reference
|
||||
|
||||
## Advanced Spring Boot Configuration
|
||||
|
||||
### Multi-Environment Configuration
|
||||
|
||||
```java
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
@Configuration
|
||||
@EnableConfigurationProperties(S3Properties.class)
|
||||
public class S3Configuration {
|
||||
|
||||
private final S3Properties properties;
|
||||
|
||||
public S3Configuration(S3Properties properties) {
|
||||
this.properties = properties;
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "s3.client.async.enabled", havingValue = "true")
|
||||
public S3AsyncClient s3AsyncClient() {
|
||||
return S3AsyncClient.builder()
|
||||
.region(Region.of(properties.getRegion()))
|
||||
.credentialsProvider(StaticCredentialsProvider.create(
|
||||
AwsBasicCredentials.create(
|
||||
properties.getAccessKey(),
|
||||
properties.getSecretKey())))
|
||||
.endpointOverride(URI.create(properties.getEndpoint()))
|
||||
.build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "s3.client.sync.enabled", havingValue = "true", matchIfMissing = true)
|
||||
public S3Client s3Client() {
|
||||
return S3Client.builder()
|
||||
.region(Region.of(properties.getRegion()))
|
||||
.credentialsProvider(StaticCredentialsProvider.create(
|
||||
AwsBasicCredentials.create(
|
||||
properties.getAccessKey(),
|
||||
properties.getSecretKey())))
|
||||
.endpointOverride(URI.create(properties.getEndpoint()))
|
||||
.build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "s3.transfer-manager.enabled", havingValue = "true")
|
||||
public S3TransferManager s3TransferManager() {
|
||||
return S3TransferManager.builder()
|
||||
.s3Client(s3Client())
|
||||
.build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "s3.presigner.enabled", havingValue = "true")
|
||||
public S3Presigner s3Presigner() {
|
||||
return S3Presigner.builder()
|
||||
.region(Region.of(properties.getRegion()))
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
@ConfigurationProperties(prefix = "s3")
|
||||
@Data
|
||||
public class S3Properties {
|
||||
private String accessKey;
|
||||
private String secretKey;
|
||||
private String region = "us-east-1";
|
||||
private String endpoint = null;
|
||||
private boolean syncEnabled = true;
|
||||
private boolean asyncEnabled = false;
|
||||
private boolean transferManagerEnabled = false;
|
||||
private boolean presignerEnabled = false;
|
||||
private int maxConnections = 100;
|
||||
private int connectionTimeout = 5000;
|
||||
private int socketTimeout = 30000;
|
||||
private String defaultBucket;
|
||||
}
|
||||
```
|
||||
|
||||
### Profile-Specific Configuration
|
||||
|
||||
```properties
|
||||
# application-dev.properties
|
||||
s3.access-key=${AWS_ACCESS_KEY}
|
||||
s3.secret-key=${AWS_SECRET_KEY}
|
||||
s3.region=us-east-1
|
||||
s3.endpoint=http://localhost:4566
|
||||
s3.async-enabled=true
|
||||
s3.transfer-manager-enabled=true
|
||||
|
||||
# application-prod.properties
|
||||
s3.access-key=${AWS_ACCESS_KEY}
|
||||
s3.secret-key=${AWS_SECRET_KEY}
|
||||
s3.region=us-east-1
|
||||
s3.async-enabled=true
|
||||
s3.presigner-enabled=true
|
||||
```
|
||||
|
||||
## Advanced Service Patterns
|
||||
|
||||
### Generic S3 Service Template
|
||||
|
||||
```java
|
||||
import software.amazon.awssdk.services.s3.model.*;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StringUtils;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import java.nio.file.*;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
public class S3Service {
|
||||
|
||||
private final S3Client s3Client;
|
||||
private final S3AsyncClient s3AsyncClient;
|
||||
private final S3TransferManager transferManager;
|
||||
private final S3Properties s3Properties;
|
||||
|
||||
// Basic Operations
|
||||
public Mono<Void> uploadObjectAsync(String key, byte[] data) {
|
||||
return Mono.fromFuture(() -> {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
return s3AsyncClient.putObject(request,
|
||||
RequestBody.fromBytes(data)).future();
|
||||
});
|
||||
}
|
||||
|
||||
public Mono<byte[]> downloadObjectAsync(String key) {
|
||||
return Mono.fromFuture(() -> {
|
||||
GetObjectRequest request = GetObjectRequest.builder()
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
return s3AsyncClient.getObject(request)
|
||||
.thenApply(response -> {
|
||||
try {
|
||||
return response.readAllBytes();
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException("Failed to read S3 object", e);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Advanced Operations
|
||||
public Mono<UploadResult> uploadWithMetadata(String key,
|
||||
Path file,
|
||||
Map<String, String> metadata) {
|
||||
return Mono.fromFuture(() -> {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.key(key)
|
||||
.metadata(metadata)
|
||||
.contentType(getContentType(file))
|
||||
.build();
|
||||
|
||||
return s3AsyncClient.putObject(request, RequestBody.fromFile(file))
|
||||
.thenApply(response -> new UploadResult(key, response.eTag()));
|
||||
});
|
||||
}
|
||||
|
||||
public Flux<S3Object> listObjectsWithPrefix(String prefix) {
|
||||
ListObjectsV2Request request = ListObjectsV2Request.builder()
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.prefix(prefix)
|
||||
.build();
|
||||
|
||||
return Flux.create(sink -> {
|
||||
s3Client.listObjectsV2Paginator(request)
|
||||
.contents()
|
||||
.forEach(sink::next);
|
||||
sink.complete();
|
||||
});
|
||||
}
|
||||
|
||||
public Mono<Void> batchDelete(List<String> keys) {
|
||||
return Mono.fromFuture(() -> {
|
||||
List<ObjectIdentifier> objectIdentifiers = keys.stream()
|
||||
.map(key -> ObjectIdentifier.builder().key(key).build())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
Delete delete = Delete.builder()
|
||||
.objects(objectIdentifiers)
|
||||
.build();
|
||||
|
||||
DeleteObjectsRequest request = DeleteObjectsRequest.builder()
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.delete(delete)
|
||||
.build();
|
||||
|
||||
return s3AsyncClient.deleteObjects(request).future();
|
||||
});
|
||||
}
|
||||
|
||||
// Transfer Manager Operations
|
||||
public Mono<UploadResult> uploadWithTransferManager(String key, Path file) {
|
||||
return Mono.fromFuture(() -> {
|
||||
UploadFileRequest request = UploadFileRequest.builder()
|
||||
.putObjectRequest(req -> req
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.key(key))
|
||||
.source(file)
|
||||
.build();
|
||||
|
||||
return transferManager.uploadFile(request)
|
||||
.completionFuture()
|
||||
.thenApply(result -> new UploadResult(key, result.response().eTag()));
|
||||
});
|
||||
}
|
||||
|
||||
public Mono<DownloadResult> downloadWithTransferManager(String key, Path destination) {
|
||||
return Mono.fromFuture(() -> {
|
||||
DownloadFileRequest request = DownloadFileRequest.builder()
|
||||
.getObjectRequest(req -> req
|
||||
.bucket(s3Properties.getDefaultBucket())
|
||||
.key(key))
|
||||
.destination(destination)
|
||||
.build();
|
||||
|
||||
return transferManager.downloadFile(request)
|
||||
.completionFuture()
|
||||
.thenApply(result -> new DownloadResult(destination, result.response().contentLength()));
|
||||
});
|
||||
}
|
||||
|
||||
// Utility Methods
|
||||
private String getContentType(Path file) {
|
||||
try {
|
||||
return Files.probeContentType(file);
|
||||
} catch (IOException e) {
|
||||
return "application/octet-stream";
|
||||
}
|
||||
}
|
||||
|
||||
// Records for Results
|
||||
public record UploadResult(String key, String eTag) {}
|
||||
public record DownloadResult(Path path, long size) {}
|
||||
}
|
||||
```
|
||||
|
||||
### Event-Driven S3 Operations
|
||||
|
||||
```java
|
||||
import org.springframework.context.ApplicationEventPublisher;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
public class S3EventService {
|
||||
|
||||
private final S3Service s3Service;
|
||||
private final ApplicationEventPublisher eventPublisher;
|
||||
|
||||
@Transactional
|
||||
public Mono<UploadResult> uploadAndPublishEvent(String key, Path file) {
|
||||
return s3Service.uploadWithTransferManager(key, file)
|
||||
.doOnSuccess(result -> {
|
||||
eventPublisher.publishEvent(new S3UploadEvent(key, result.eTag()));
|
||||
})
|
||||
.doOnError(error -> {
|
||||
eventPublisher.publishEvent(new S3UploadFailedEvent(key, error.getMessage()));
|
||||
});
|
||||
}
|
||||
|
||||
public Mono<String> generatePresignedUrl(String key) {
|
||||
return s3Service.downloadObjectAsync(key)
|
||||
.flatMap(data -> {
|
||||
return Mono.fromCallable(() -> {
|
||||
S3Presigner presigner = S3Presigner.create();
|
||||
try {
|
||||
GetObjectRequest request = GetObjectRequest.builder()
|
||||
.bucket(s3Service.getDefaultBucket())
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder()
|
||||
.signatureDuration(Duration.ofMinutes(10))
|
||||
.getObjectRequest(request)
|
||||
.build();
|
||||
|
||||
return presigner.presignGetObject(presignRequest)
|
||||
.url()
|
||||
.toString();
|
||||
} finally {
|
||||
presigner.close();
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Event Classes
|
||||
public class S3UploadEvent extends ApplicationEvent {
|
||||
private final String key;
|
||||
private final String eTag;
|
||||
|
||||
public S3UploadEvent(String key, String eTag) {
|
||||
super(key);
|
||||
this.key = key;
|
||||
this.eTag = eTag;
|
||||
}
|
||||
|
||||
public String getKey() { return key; }
|
||||
public String getETag() { return eTag; }
|
||||
}
|
||||
|
||||
public class S3UploadFailedEvent extends ApplicationEvent {
|
||||
private final String key;
|
||||
private final String errorMessage;
|
||||
|
||||
public S3UploadFailedEvent(String key, String errorMessage) {
|
||||
super(key);
|
||||
this.key = key;
|
||||
this.errorMessage = errorMessage;
|
||||
}
|
||||
|
||||
public String getKey() { return key; }
|
||||
public String getErrorMessage() { return errorMessage; }
|
||||
}
|
||||
```
|
||||
|
||||
### Retry and Error Handling
|
||||
|
||||
```java
|
||||
import org.springframework.retry.annotation.*;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.stereotype.Service;
|
||||
import reactor.core.publisher.Mono;
|
||||
import software.amazon.awssdk.services.s3.model.*;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
public class ResilientS3Service {
|
||||
|
||||
private final S3Client s3Client;
|
||||
private final RetryTemplate retryTemplate;
|
||||
|
||||
@Retryable(value = {S3Exception.class, SdkClientException.class},
|
||||
maxAttempts = 3,
|
||||
backoff = @Backoff(delay = 1000, multiplier = 2))
|
||||
public Mono<PutObjectResponse> uploadWithRetry(String key, Path file) {
|
||||
return Mono.fromCallable(() -> {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket("my-bucket")
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
return s3Client.putObject(request, RequestBody.fromFile(file));
|
||||
});
|
||||
}
|
||||
|
||||
@Recover
|
||||
public Mono<PutObjectResponse> uploadRecover(S3Exception e, String key, Path file) {
|
||||
// Log the failure and potentially send notification
|
||||
System.err.println("Upload failed after retries: " + e.getMessage());
|
||||
return Mono.error(new S3UploadException("Upload failed after retries", e));
|
||||
}
|
||||
|
||||
@Retryable(value = {S3Exception.class},
|
||||
maxAttempts = 5,
|
||||
backoff = @Backoff(delay = 2000, multiplier = 2))
|
||||
public Mono<Void> copyObjectWithRetry(String sourceKey, String destinationKey) {
|
||||
return Mono.fromFuture(() -> {
|
||||
CopyObjectRequest request = CopyObjectRequest.builder()
|
||||
.sourceBucket("source-bucket")
|
||||
.sourceKey(sourceKey)
|
||||
.destinationBucket("destination-bucket")
|
||||
.destinationKey(destinationKey)
|
||||
.build();
|
||||
|
||||
return s3AsyncClient.copyObject(request).future();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/** Raised when an S3 upload still fails after all retry attempts. */
public class S3UploadException extends RuntimeException {

    public S3UploadException(String message, Throwable cause) {
        // Keep the original failure as the cause for diagnostics.
        super(message, cause);
    }
}
|
||||
```
|
||||
|
||||
## Testing Integration
|
||||
|
||||
### Test Configuration with LocalStack
|
||||
|
||||
```java
|
||||
import org.testcontainers.containers.localstack.LocalStackContainer;
|
||||
import org.testcontainers.junit.jupiter.Container;
|
||||
import org.testcontainers.junit.jupiter.Testcontainers;
|
||||
import org.springframework.boot.test.context.TestConfiguration;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.testcontainers.utility.DockerImageName;
|
||||
|
||||
@Testcontainers
|
||||
@ActiveProfiles("test")
|
||||
@TestConfiguration
|
||||
public class S3TestConfig {
|
||||
|
||||
@Container
|
||||
static LocalStackContainer localstack = new LocalStackContainer(
|
||||
DockerImageName.parse("localstack/localstack:3.0"))
|
||||
.withServices(LocalStackContainer.Service.S3)
|
||||
.withEnv("DEFAULT_REGION", "us-east-1");
|
||||
|
||||
@Bean
|
||||
public S3Client testS3Client() {
|
||||
return S3Client.builder()
|
||||
.region(Region.US_EAST_1)
|
||||
.endpointOverride(localstack.getEndpointOverride(LocalStackContainer.Service.S3))
|
||||
.credentialsProvider(StaticCredentialsProvider.create(
|
||||
AwsBasicCredentials.create(
|
||||
localstack.getAccessKey(),
|
||||
localstack.getSecretKey())))
|
||||
.build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
public S3AsyncClient testS3AsyncClient() {
|
||||
return S3AsyncClient.builder()
|
||||
.region(Region.US_EAST_1)
|
||||
.endpointOverride(localstack.getEndpointOverride(LocalStackContainer.Service.S3))
|
||||
.credentialsProvider(StaticCredentialsProvider.create(
|
||||
AwsBasicCredentials.create(
|
||||
localstack.getAccessKey(),
|
||||
localstack.getSecretKey())))
|
||||
.build();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Unit Testing with Mocks
|
||||
|
||||
```java
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
import software.amazon.awssdk.services.s3.model.*;
|
||||
import reactor.core.publisher.Mono;
|
||||
import java.util.List;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.*;
|
||||
import static org.mockito.ArgumentMatchers.*;
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
class S3ServiceTest {
|
||||
|
||||
@Mock
|
||||
private S3Client s3Client;
|
||||
|
||||
@InjectMocks
|
||||
private S3Service s3Service;
|
||||
|
||||
@Test
|
||||
void uploadObjectAsync_ShouldReturnUploadResult() {
|
||||
// Arrange
|
||||
String key = "test-key";
|
||||
byte[] data = "test-content".getBytes();
|
||||
String eTag = "12345";
|
||||
|
||||
PutObjectResponse response = PutObjectResponse.builder()
|
||||
.eTag(eTag)
|
||||
.build();
|
||||
|
||||
when(s3Client.putObject(any(PutObjectRequest.class), any()))
|
||||
.thenReturn(response);
|
||||
|
||||
// Act
|
||||
Mono<UploadResult> result = s3Service.uploadObjectAsync(key, data);
|
||||
|
||||
// Assert
|
||||
result.subscribe(uploadResult -> {
|
||||
assertEquals(key, uploadResult.key());
|
||||
assertEquals(eTag, uploadResult.eTag());
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
void listObjectsWithPrefix_ShouldReturnObjectList() {
|
||||
// Arrange
|
||||
String prefix = "documents/";
|
||||
S3Object object1 = S3Object.builder().key("documents/file1.txt").build();
|
||||
S3Object object2 = S3Object.builder().key("documents/file2.txt").build();
|
||||
|
||||
ListObjectsV2Response response = ListObjectsV2Response.builder()
|
||||
.contents(object1, object2)
|
||||
.build();
|
||||
|
||||
when(s3Client.listObjectsV2(any(ListObjectsV2Request.class)))
|
||||
.thenReturn(response);
|
||||
|
||||
// Act
|
||||
Flux<S3Object> result = s3Service.listObjectsWithPrefix(prefix);
|
||||
|
||||
// Assert
|
||||
result.collectList()
|
||||
.subscribe(objects -> {
|
||||
assertEquals(2, objects.size());
|
||||
assertTrue(objects.stream().allMatch(obj -> obj.key().startsWith(prefix)));
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Integration Testing
|
||||
|
||||
```java
|
||||
import org.junit.jupiter.api.*;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import software.amazon.awssdk.services.s3.model.*;
|
||||
import java.nio.file.*;
|
||||
import java.util.Map;
|
||||
|
||||
@SpringBootTest
|
||||
@ActiveProfiles("test")
|
||||
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
|
||||
class S3IntegrationTest {
|
||||
|
||||
@Autowired
|
||||
private S3Service s3Service;
|
||||
|
||||
private static final String TEST_BUCKET = "test-bucket";
|
||||
private static final String TEST_FILE = "test-document.txt";
|
||||
|
||||
@BeforeAll
|
||||
static void setup() throws Exception {
|
||||
// Create test file
|
||||
Files.write(Paths.get(TEST_FILE), "Test content".getBytes());
|
||||
}
|
||||
|
||||
@Test
|
||||
@Order(1)
|
||||
void uploadFile_ShouldSucceed() {
|
||||
// Act & Assert
|
||||
s3Service.uploadWithMetadata(TEST_FILE, Paths.get(TEST_FILE),
|
||||
Map.of("author", "test", "type", "document"))
|
||||
.as(StepVerifier::create)
|
||||
.expectNextMatches(result ->
|
||||
result.key().equals(TEST_FILE) && result.eTag() != null)
|
||||
.verifyComplete();
|
||||
}
|
||||
|
||||
@Test
|
||||
@Order(2)
|
||||
void downloadFile_ShouldReturnContent() {
|
||||
// Act & Assert
|
||||
s3Service.downloadObjectAsync(TEST_FILE)
|
||||
.as(StepVerifier::create)
|
||||
.expectNext("Test content".getBytes())
|
||||
.verifyComplete();
|
||||
}
|
||||
|
||||
@Test
|
||||
@Order(3)
|
||||
void listObjects_ShouldReturnFiles() {
|
||||
// Act & Assert
|
||||
s3Service.listObjectsWithPrefix("")
|
||||
.as(StepVerifier::create)
|
||||
.expectNextCount(1)
|
||||
.verifyComplete();
|
||||
}
|
||||
|
||||
@AfterAll
|
||||
static void cleanup() {
|
||||
try {
|
||||
Files.deleteIfExists(Paths.get(TEST_FILE));
|
||||
} catch (IOException e) {
|
||||
// Ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Advanced Configuration Patterns
|
||||
|
||||
### Environment-Specific Configuration
|
||||
|
||||
```java
|
||||
import org.springframework.boot.autoconfigure.condition.*;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import software.amazon.awssdk.auth.credentials.*;
|
||||
|
||||
@Configuration
|
||||
public class EnvironmentAwareS3Config {
|
||||
|
||||
@Bean
|
||||
@ConditionalOnMissingBean
|
||||
public AwsCredentialsProvider awsCredentialsProvider(S3Properties properties) {
|
||||
if (properties.getAccessKey() != null && properties.getSecretKey() != null) {
|
||||
return StaticCredentialsProvider.create(
|
||||
AwsBasicCredentials.create(
|
||||
properties.getAccessKey(),
|
||||
properties.getSecretKey()));
|
||||
}
|
||||
return DefaultCredentialsProvider.create();
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnMissingBean
|
||||
@ConditionalOnProperty(name = "s3.region")
|
||||
public Region region(S3Properties properties) {
|
||||
return Region.of(properties.getRegion());
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnMissingBean
|
||||
@ConditionalOnProperty(name = "s3.endpoint")
|
||||
public String endpoint(S3Properties properties) {
|
||||
return properties.getEndpoint();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Multi-Bucket Support
|
||||
|
||||
```java
|
||||
import org.springframework.stereotype.Service;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
public class MultiBucketS3Service {
|
||||
|
||||
private final Map<String, S3Client> bucketClients = new HashMap<>();
|
||||
private final S3Client defaultS3Client;
|
||||
|
||||
@Autowired
|
||||
public MultiBucketS3Service(S3Client defaultS3Client) {
|
||||
this.defaultS3Client = defaultS3Client;
|
||||
}
|
||||
|
||||
public S3Client getClientForBucket(String bucketName) {
|
||||
return bucketClients.computeIfAbsent(bucketName, name ->
|
||||
S3Client.builder()
|
||||
.region(defaultS3Client.config().region())
|
||||
.credentialsProvider(defaultS3Client.config().credentialsProvider())
|
||||
.build());
|
||||
}
|
||||
|
||||
public Mono<UploadResult> uploadToBucket(String bucketName, String key, Path file) {
|
||||
S3Client client = getClientForBucket(bucketName);
|
||||
// Upload implementation using the specific client
|
||||
return Mono.empty(); // Implementation
|
||||
}
|
||||
}
|
||||
```
|
||||

---

# S3 Transfer Patterns Reference
|
||||
|
||||
## S3 Transfer Manager Advanced Patterns
|
||||
|
||||
### Configuration and Optimization
|
||||
|
||||
#### Custom Transfer Manager Configuration
|
||||
|
||||
```java
|
||||
import software.amazon.awssdk.transfer.s3.S3TransferManager;
|
||||
import software.amazon.awssdk.transfer.s3.model.UploadFileRequest;
|
||||
import software.amazon.awssdk.core.sync.RequestBody;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
import software.amazon.awssdk.http.apache.ApacheHttpClient;
|
||||
import java.time.Duration;
|
||||
|
||||
public S3TransferManager createOptimizedTransferManager(S3Client s3Client) {
|
||||
return S3TransferManager.builder()
|
||||
.s3Client(s3Client)
|
||||
.storageProvider(ApacheHttpClient.builder()
|
||||
.maxConnections(200)
|
||||
.connectionTimeout(Duration.ofSeconds(5))
|
||||
.socketTimeout(Duration.ofSeconds(60))
|
||||
.build())
|
||||
.build();
|
||||
}
|
||||
```
|
||||
|
||||
#### Parallel Upload Configuration
|
||||
|
||||
```java
|
||||
public void configureParallelUploads() {
|
||||
S3TransferManager transferManager = S3TransferManager.create();
|
||||
|
||||
FileUpload upload = transferManager.uploadFile(
|
||||
UploadFileRequest.builder()
|
||||
.putObjectRequest(req -> req
|
||||
.bucket("my-bucket")
|
||||
.key("large-file.bin"))
|
||||
.source(Paths.get("large-file.bin"))
|
||||
.build());
|
||||
|
||||
// Track upload progress
|
||||
upload.progressFuture().thenAccept(progress -> {
|
||||
System.out.println("Upload progress: " + progress.progressPercent());
|
||||
});
|
||||
|
||||
// Handle completion
|
||||
upload.completionFuture().thenAccept(result -> {
|
||||
System.out.println("Upload completed with ETag: " +
|
||||
result.response().eTag());
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Upload Patterns
|
||||
|
||||
#### Multipart Upload with Progress Monitoring
|
||||
|
||||
```java
|
||||
public void multipartUploadWithProgress(S3Client s3Client, String bucketName,
|
||||
String key, String filePath) {
|
||||
int partSize = 5 * 1024 * 1024; // 5 MB parts
|
||||
File file = new File(filePath);
|
||||
|
||||
CreateMultipartUploadRequest createRequest = CreateMultipartUploadRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
CreateMultipartUploadResponse createResponse = s3Client.createMultipartUpload(createRequest);
|
||||
String uploadId = createResponse.uploadId();
|
||||
|
||||
List<CompletedPart> completedParts = new ArrayList<>();
|
||||
long uploadedBytes = 0;
|
||||
long totalBytes = file.length();
|
||||
|
||||
try (FileInputStream fis = new FileInputStream(file)) {
|
||||
byte[] buffer = new byte[partSize];
|
||||
int partNumber = 1;
|
||||
|
||||
while (true) {
|
||||
int bytesRead = fis.read(buffer);
|
||||
if (bytesRead == -1) break;
|
||||
|
||||
byte[] partData = Arrays.copyOf(buffer, bytesRead);
|
||||
|
||||
UploadPartRequest uploadRequest = UploadPartRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.uploadId(uploadId)
|
||||
.partNumber(partNumber)
|
||||
.build();
|
||||
|
||||
UploadPartResponse uploadResponse = s3Client.uploadPart(
|
||||
uploadRequest, RequestBody.fromBytes(partData));
|
||||
|
||||
completedParts.add(CompletedPart.builder()
|
||||
.partNumber(partNumber)
|
||||
.eTag(uploadResponse.eTag())
|
||||
.build());
|
||||
|
||||
uploadedBytes += bytesRead;
|
||||
partNumber++;
|
||||
|
||||
// Log progress
|
||||
double progress = (double) uploadedBytes / totalBytes * 100;
|
||||
System.out.printf("Upload progress: %.2f%%%n", progress);
|
||||
}
|
||||
|
||||
CompleteMultipartUploadRequest completeRequest =
|
||||
CompleteMultipartUploadRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.uploadId(uploadId)
|
||||
.multipartUpload(CompletedMultipartUpload.builder()
|
||||
.parts(completedParts)
|
||||
.build())
|
||||
.build();
|
||||
|
||||
s3Client.completeMultipartUpload(completeRequest);
|
||||
|
||||
} catch (Exception e) {
|
||||
// Abort on failure
|
||||
AbortMultipartUploadRequest abortRequest =
|
||||
AbortMultipartUploadRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.uploadId(uploadId)
|
||||
.build();
|
||||
|
||||
s3Client.abortMultipartUpload(abortRequest);
|
||||
throw new RuntimeException("Multipart upload failed", e);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Resume Interrupted Uploads
|
||||
|
||||
```java
|
||||
public void resumeUpload(S3Client s3Client, String bucketName, String key,
|
||||
String filePath, String existingUploadId) {
|
||||
ListMultipartUploadsRequest listRequest = ListMultipartUploadsRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.prefix(key)
|
||||
.build();
|
||||
|
||||
ListMultipartUploadsResponse listResponse = s3Client.listMultipartUploads(listRequest);
|
||||
|
||||
// Check if upload already exists
|
||||
boolean uploadExists = listResponse.uploads().stream()
|
||||
.anyMatch(upload -> upload.key().equals(key) &&
|
||||
upload.uploadId().equals(existingUploadId));
|
||||
|
||||
if (uploadExists) {
|
||||
// Resume existing upload
|
||||
continueExistingUpload(s3Client, bucketName, key, existingUploadId, filePath);
|
||||
} else {
|
||||
// Start new upload
|
||||
multipartUploadWithProgress(s3Client, bucketName, key, filePath);
|
||||
}
|
||||
}
|
||||
|
||||
private void continueExistingUpload(S3Client s3Client, String bucketName,
|
||||
String key, String uploadId, String filePath) {
|
||||
// List already uploaded parts
|
||||
ListPartsRequest listPartsRequest = ListPartsRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.uploadId(uploadId)
|
||||
.build();
|
||||
|
||||
ListPartsResponse listPartsResponse = s3Client.listParts(listPartsRequest);
|
||||
|
||||
List<CompletedPart> completedParts = listPartsResponse.parts().stream()
|
||||
.map(part -> CompletedPart.builder()
|
||||
.partNumber(part.partNumber())
|
||||
.eTag(part.eTag())
|
||||
.build())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Upload remaining parts
|
||||
// ... implementation of remaining parts upload
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Download Patterns
|
||||
|
||||
#### Partial File Download
|
||||
|
||||
```java
|
||||
public void downloadPartialFile(S3Client s3Client, String bucketName, String key,
|
||||
String destPath, long startByte, long endByte) {
|
||||
GetObjectRequest request = GetObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.range("bytes=" + startByte + "-" + endByte)
|
||||
.build();
|
||||
|
||||
try (ResponseInputStream<GetObjectResponse> response = s3Client.getObject(request);
|
||||
OutputStream outputStream = new FileOutputStream(destPath)) {
|
||||
|
||||
response.transferTo(outputStream);
|
||||
System.out.println("Partial download completed: " +
|
||||
(endByte - startByte + 1) + " bytes");
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Parallel Downloads
|
||||
|
||||
```java
|
||||
import java.util.concurrent.*;
|
||||
import java.util.stream.*;
|
||||
|
||||
public void parallelDownloads(S3Client s3Client, String bucketName,
|
||||
String key, String destPath, int chunkCount) {
|
||||
long fileSize = getFileSize(s3Client, bucketName, key);
|
||||
long chunkSize = fileSize / chunkCount;
|
||||
|
||||
ExecutorService executor = Executors.newFixedThreadPool(chunkCount);
|
||||
List<Future<Void>> futures = new ArrayList<>();
|
||||
|
||||
for (int i = 0; i < chunkCount; i++) {
|
||||
long start = i * chunkSize;
|
||||
long end = (i == chunkCount - 1) ? fileSize - 1 : start + chunkSize - 1;
|
||||
|
||||
Future<Void> future = executor.submit(() -> {
|
||||
downloadPartialFile(s3Client, bucketName, key,
|
||||
destPath + ".part" + i, start, end);
|
||||
return null;
|
||||
});
|
||||
|
||||
futures.add(future);
|
||||
}
|
||||
|
||||
// Wait for all downloads to complete
|
||||
for (Future<Void> future : futures) {
|
||||
try {
|
||||
future.get();
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
throw new RuntimeException("Download failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Combine chunks
|
||||
combineChunks(destPath, chunkCount);
|
||||
|
||||
executor.shutdown();
|
||||
}
|
||||
|
||||
private void combineChunks(String baseName, int chunkCount) throws IOException {
|
||||
try (OutputStream outputStream = new FileOutputStream(baseName)) {
|
||||
for (int i = 0; i < chunkCount; i++) {
|
||||
String chunkFile = baseName + ".part" + i;
|
||||
try (InputStream inputStream = new FileInputStream(chunkFile)) {
|
||||
inputStream.transferTo(outputStream);
|
||||
}
|
||||
new File(chunkFile).delete();
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Handling and Retry
|
||||
|
||||
#### Upload with Exponential Backoff
|
||||
|
||||
```java
|
||||
import software.amazon.awssdk.core.retry.conditions.*;
|
||||
import software.amazon.awssdk.core.retry.*;
|
||||
import software.amazon.awssdk.core.retry.backoff.*;
|
||||
|
||||
public void resilientUpload(S3Client s3Client, String bucketName, String key,
|
||||
String filePath) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
// Configure retry policy
|
||||
S3Client retryS3Client = S3Client.builder()
|
||||
.overrideConfiguration(b -> b
|
||||
.retryPolicy(RetryPolicy.builder()
|
||||
.numRetries(5)
|
||||
.retryBackoffStrategy(
|
||||
ExponentialRetryBackoff.builder()
|
||||
.baseDelay(Duration.ofSeconds(1))
|
||||
.maxBackoffTime(Duration.ofSeconds(30))
|
||||
.build())
|
||||
.retryCondition(
|
||||
RetryCondition.or(
|
||||
RetryCondition.defaultRetryCondition(),
|
||||
RetryCondition.create(response ->
|
||||
response.httpResponse().is5xxServerError()))
|
||||
)
|
||||
.build()))
|
||||
.build();
|
||||
|
||||
retryS3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
|
||||
}
|
||||
```
|
||||
|
||||
#### Upload with Checkpoint
|
||||
|
||||
```java
|
||||
import java.nio.file.*;
|
||||
|
||||
public void uploadWithCheckpoint(S3Client s3Client, String bucketName,
|
||||
String key, String filePath) {
|
||||
String checkpointFile = filePath + ".checkpoint";
|
||||
Path checkpointPath = Paths.get(checkpointFile);
|
||||
|
||||
long startPos = 0;
|
||||
if (Files.exists(checkpointPath)) {
|
||||
// Read checkpoint
|
||||
try {
|
||||
startPos = Long.parseLong(Files.readString(checkpointPath));
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException("Failed to read checkpoint", e);
|
||||
}
|
||||
}
|
||||
|
||||
if (startPos > 0) {
|
||||
// Resume upload
|
||||
continueUploadFromCheckpoint(s3Client, bucketName, key, filePath, startPos);
|
||||
} else {
|
||||
// Start new upload
|
||||
startNewUpload(s3Client, bucketName, key, filePath);
|
||||
}
|
||||
|
||||
// Update checkpoint
|
||||
long endPos = new File(filePath).length();
|
||||
try {
|
||||
Files.writeString(checkpointPath, String.valueOf(endPos));
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException("Failed to write checkpoint", e);
|
||||
}
|
||||
}
|
||||
|
||||
private void continueUploadFromCheckpoint(S3Client s3Client, String bucketName,
|
||||
String key, String filePath, long startPos) {
|
||||
// Implement resume logic
|
||||
}
|
||||
|
||||
private void startNewUpload(S3Client s3Client, String bucketName,
|
||||
String key, String filePath) {
|
||||
// Implement initial upload logic
|
||||
}
|
||||
```
|
||||
|
||||
### Performance Tuning
|
||||
|
||||
#### Buffer Configuration
|
||||
|
||||
```java
|
||||
public S3Client configureLargeBuffer() {
|
||||
return S3Client.builder()
|
||||
.overrideConfiguration(b -> b
|
||||
.apiCallAttemptTimeout(Duration.ofMinutes(5))
|
||||
.apiCallTimeout(Duration.ofMinutes(10)))
|
||||
.build();
|
||||
}
|
||||
|
||||
public S3TransferManager configureHighThroughput() {
|
||||
return S3TransferManager.builder()
|
||||
.multipartUploadThreshold(8 * 1024 * 1024) // 8 MB
|
||||
.multipartUploadPartSize(10 * 1024 * 1024) // 10 MB
|
||||
.build();
|
||||
}
|
||||
```
|
||||
|
||||
#### Network Optimization
|
||||
|
||||
```java
|
||||
public S3Client createOptimizedS3Client() {
|
||||
return S3Client.builder()
|
||||
.httpClientBuilder(ApacheHttpClient.builder()
|
||||
.maxConnections(200)
|
||||
.connectionPoolStrategy(ConnectionPoolStrategy.defaultStrategy())
|
||||
.socketTimeout(Duration.ofSeconds(30))
|
||||
.connectionTimeout(Duration.ofSeconds(5))
|
||||
.connectionAcquisitionTimeout(Duration.ofSeconds(30))
|
||||
.build())
|
||||
.region(Region.US_EAST_1)
|
||||
.build();
|
||||
}
|
||||
```
|
||||
|
||||
### Monitoring and Metrics
|
||||
|
||||
#### Upload Progress Tracking
|
||||
|
||||
```java
|
||||
public void uploadWithProgressTracking(S3Client s3Client, String bucketName,
|
||||
String key, String filePath) {
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
// Create progress listener
|
||||
software.amazon.awssdk.core.ProgressListener progressListener =
|
||||
progressEvent -> {
|
||||
System.out.println("Transferred: " +
|
||||
progressEvent.transferredBytes() + " bytes");
|
||||
System.out.println("Progress: " +
|
||||
progressEvent.progressPercent() + "%");
|
||||
};
|
||||
|
||||
Response<PutObjectResponse> response = s3Client.putObject(
|
||||
request,
|
||||
RequestBody.fromFile(Paths.get(filePath)),
|
||||
software.amazon.awssdk.core.sync.RequestBody.fromFile(Paths.get(filePath))
|
||||
.contentLength(new File(filePath).length()),
|
||||
progressListener);
|
||||
|
||||
System.out.println("Upload complete. ETag: " +
|
||||
response.response().eTag());
|
||||
}
|
||||
```
|
||||
|
||||
#### Throughput Measurement
|
||||
|
||||
```java
|
||||
public void measureUploadThroughput(S3Client s3Client, String bucketName,
|
||||
String key, String filePath) {
|
||||
long startTime = System.currentTimeMillis();
|
||||
long fileSize = new File(filePath).length();
|
||||
|
||||
PutObjectRequest request = PutObjectRequest.builder()
|
||||
.bucket(bucketName)
|
||||
.key(key)
|
||||
.build();
|
||||
|
||||
s3Client.putObject(request, RequestBody.fromFile(Paths.get(filePath)));
|
||||
|
||||
long endTime = System.currentTimeMillis();
|
||||
long duration = endTime - startTime;
|
||||
double throughput = (fileSize * 1000.0) / duration / (1024 * 1024); // MB/s
|
||||
|
||||
System.out.printf("Upload throughput: %.2f MB/s%n", throughput);
|
||||
}
|
||||
```
|
||||
|
||||
## Testing and Validation
|
||||
|
||||
### Upload Validation
|
||||
|
||||
```java
|
||||
public void validateUpload(S3Client s3Client, String bucketName, String key,
|
||||
String localFilePath) {
|
||||
// Download file from S3
|
||||
byte[] s3Content = downloadObject(s3Client, bucketName, key);
|
||||
|
||||
// Read local file
|
||||
byte[] localContent = Files.readAllBytes(Paths.get(localFilePath));
|
||||
|
||||
// Validate content matches
|
||||
if (!Arrays.equals(s3Content, localContent)) {
|
||||
throw new RuntimeException("Upload validation failed: content mismatch");
|
||||
}
|
||||
|
||||
// Verify file size
|
||||
long s3Size = s3Content.length;
|
||||
long localSize = localContent.length;
|
||||
if (s3Size != localSize) {
|
||||
throw new RuntimeException("Upload validation failed: size mismatch");
|
||||
}
|
||||
|
||||
System.out.println("Upload validation successful");
|
||||
}
|
||||
```
|
||||