Multipart Object Upload
I've already written a post about uploading objects, so why write another one for V2? Because the Amazon S3 Java SDK released V2. Yes, you read that right; that sentence says nothing new. The real reason is that with V1, multipart uploads of large files kept failing with the error below.
// Error message
com.amazonaws.request - Received error response:
com.amazonaws.services.s3.model.AmazonS3Exception:
Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.
(
Service: Amazon S3;
Status Code: 400;
Error Code: RequestTimeout;
Request ID: RequestIdxxxxxx;
S3 Extended Request ID: base64IDxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=;
Proxy: null),
S3 Extended Request ID: base64IDxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
// Amazon config init > set up the V1 client
AmazonS3 s3client = AmazonS3ClientBuilder
.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withClientConfiguration(new ClientConfiguration().withConnectionTimeout(15 * 60 * 1000)) // 15-minute connection timeout
.withRegion(Regions.fromName(area))
.build();
No matter how I tuned the timeout settings, the error wouldn't go away, so I had to find another approach or the project would never ship on time....😂
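For reference, the V1 upload call that kept hitting the timeout looked roughly like this; a sketch only, where bucketName, keyName, and file are hypothetical placeholders (and com.amazonaws.services.s3.transfer.Upload needs importing):
// Sketch of the failing V1 path: TransferManager splits large files into
// parts automatically, but the upload still died with RequestTimeout.
TransferManager tm = TransferManagerBuilder.standard()
        .withS3Client(s3client)
        .build();
Upload upload = tm.upload(bucketName, keyName, file);
upload.waitForCompletion(); // throws InterruptedException; the 400 surfaced here
tm.shutdownNow(false);      // shut the manager down without closing the client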
Enough preamble, on to the code. I'm building with Gradle. The SDK's source code advises against blindly importing the whole SDK; import only the packages your use case actually needs.
// aws s3 java sdk
implementation platform('software.amazon.awssdk:bom:2.18.31')
implementation 'software.amazon.awssdk:s3'
implementation 'software.amazon.awssdk:regions'
implementation 'software.amazon.awssdk:auth'
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
/**
* Author: Minchang Hsu (Caster)
* Date: 2022/12/13
* Comment:
*/
@Configuration
public class AWSS3Config {
@Value("${custom.aws.accessKey}")
private String accessKey;
@Value("${custom.aws.secretKey}")
private String secretKey;
@Bean
public S3Client createAWSS3Client() {
AwsBasicCredentials awsCreds = AwsBasicCredentials.create(accessKey, secretKey);
return S3Client.builder()
.region(Region.AP_EAST_1)
.credentialsProvider(StaticCredentialsProvider.create(awsCreds)).build();
}
}
The client config supports several authentication methods; I authenticate with an access key.
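If you'd rather not hard-code keys, the V2 SDK can also pick up credentials from its default provider chain (environment variables, ~/.aws/credentials, instance or container roles); a minimal sketch, assuming the same region:
// Sketch: same client, credentials resolved by the default provider chain.
// Needs import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider.
S3Client s3Client = S3Client.builder()
        .region(Region.AP_EAST_1)
        .credentialsProvider(DefaultCredentialsProvider.create())
        .build();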
import com.longxiang.utils.DateTimeUtil;
import com.longxiang.utils.constants.DateConstants;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Objects;
/**
* Author: Minchang Hsu (Caster)
* Date: 2022/12/13
* Comment:
*/
@Slf4j
@Service
@RequiredArgsConstructor
public class AWSS3StorageService {
@Value("${custom.aws.bucketName}")
private String bucketName;
@Value("${custom.aws.baseFolder}")
private String baseFolder;
private final S3Client s3;
private String composeKey(String key){
return baseFolder + "/" + (StringUtils.isBlank(key) ? "" : key);
}
public ListObjectsResponse listObjects() {
return s3.listObjects(ListObjectsRequest.builder().bucket(bucketName).prefix(baseFolder).build());
}
public void deleteObject(String key) {
try {
s3.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(composeKey(key)).build());
} catch (Exception e) {
    log.error("Exception > delete object. key:{}, message:{}.", key, e.getMessage(), e);
}
}
public void multipartUpload(String key, InputStream inputStream, Long splitSize) {
String keyName = composeKey(key);
long partSize = Objects.isNull(splitSize) ? 5L * 1024 * 1024 : splitSize; // Default part size: 5 MB, the S3 minimum for non-final parts.
try {
CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder()
.bucket(bucketName)
.key(keyName)
.build();
CreateMultipartUploadResponse response = s3.createMultipartUpload(createMultipartUploadRequest);
String uploadId = response.uploadId();
List<CompletedPart> completedParts = new ArrayList<>();
log.warn("Multiple Part upload. key:{}, id:{}, upload start.", keyName, uploadId);
int index = 1;
byte[] buffer = new byte[(int) partSize];
while (true) {
    // Fill the buffer before uploading: a single read() may return fewer
    // bytes than requested, and S3 rejects parts smaller than 5 MB except
    // for the final part of an upload.
    int len = 0;
    int n;
    while (len < buffer.length && (n = inputStream.read(buffer, len, buffer.length - len)) > 0)
        len += n;
    if (len == 0)
        break;
    // Upload this part of the object.
    InputStream targetStream = new ByteArrayInputStream(buffer, 0, len);
    UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
            .bucket(bucketName)
            .key(keyName)
            .uploadId(uploadId)
            .contentLength((long) len)
            .partNumber(index)
            .build();
    UploadPartResponse uploadPartResponse = s3.uploadPart(uploadPartRequest,
            RequestBody.fromInputStream(targetStream, len));
    completedParts.add(CompletedPart.builder().eTag(uploadPartResponse.eTag()).partNumber(index).build());
    log.warn("Multiple Part upload. key:{}, eTag:{}", keyName, uploadPartResponse.eTag());
    index++;
}
// Finally call completeMultipartUpload operation to tell S3 to merge all uploaded
// parts and finish the multipart operation.
CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder()
.parts(completedParts)
.build();
CompleteMultipartUploadRequest completeMultipartUploadRequest =
CompleteMultipartUploadRequest.builder()
.bucket(bucketName)
.key(keyName)
.uploadId(uploadId)
.multipartUpload(completedMultipartUpload)
.build();
s3.completeMultipartUpload(completeMultipartUploadRequest);
log.warn("Multiple Part upload. key:{}, id:{}, upload end.", keyName, uploadId);
} catch (Exception e) {
    log.error("Exception > Multiple Part upload. key:{}, message:{}.", keyName, e.getMessage(), e);
}
}
public void traceMultipartUploadProgressAPI(Boolean abortUpload, String key) {
try {
ListMultipartUploadsResponse response = s3.listMultipartUploads(ListMultipartUploadsRequest.builder().bucket(bucketName).build());
List<MultipartUpload> uploads = response.uploads();
if (StringUtils.isNotBlank(key) && abortUpload) {
uploads.stream().filter(o -> o.key().equals(composeKey(key))).findFirst().ifPresent(o -> abortMultipartUpload(o.key(), o.uploadId()));
} else {
uploads.forEach(o -> {
    log.warn("Initiated Date: {}, key:{}, uploadId:{}",
            DateTimeUtil.formatDate(Date.from(o.initiated()), DateConstants.YYYY_MM_DD_HH_mm_SS), o.key(), o.uploadId());
if (abortUpload)
abortMultipartUpload(o.key(), o.uploadId());
});
}
} catch (S3Exception s3Exception) {
    log.error(s3Exception.getMessage(), s3Exception);
}
}
/**
 * Aborts an in-flight multipart upload so its parts stop accruing storage.
 * @param key object key of the in-flight upload (already prefixed with the base folder)
 * @param uploadId id of the multipart upload to abort
 */
private void abortMultipartUpload(String key, String uploadId) {
try {
s3.abortMultipartUpload(AbortMultipartUploadRequest.builder()
.bucket(bucketName)
.key(key)
// .expectedBucketOwner("xxxxxx") // Setting this caused an error; it has to be the bucket owner's AWS account ID, otherwise S3 rejects the request.
.uploadId(uploadId)
.build());
} catch (S3Exception s3Exception) {
    log.warn("Unable to abort multipart upload. key:{}, uploadId:{}.", key, uploadId, s3Exception);
}
}
}
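For completeness, here's a hypothetical caller of the service above, streaming a local file up in the default 5 MB parts (the bean name and file path are assumptions):
// Hypothetical usage; awsS3StorageService is assumed to be Spring-injected,
// and java.io.FileInputStream / java.io.IOException need importing.
try (InputStream in = new FileInputStream("/tmp/big-file.zip")) {
    awsS3StorageService.multipartUpload("backups/big-file.zip", in, null); // null -> default 5 MB parts
} catch (IOException e) {
    log.error("Multipart upload failed.", e);
}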
That's about it. The other object operations, plain uploads and the like, work much the same way, so I won't spell them out here 🚬💨