diff --git a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java
index c58607ac9..be8308160 100644
--- a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java
+++ b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java
@@ -1,7 +1,5 @@
 package de.adorsys.datasafe.business.impl.e2e;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
 import dagger.Lazy;
 import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
@@ -47,9 +45,17 @@
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
 import org.testcontainers.shaded.com.google.common.collect.ImmutableMap;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
+import software.amazon.awssdk.services.s3.model.S3Object;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.URI;
 import java.security.UnrecoverableKeyException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -112,12 +118,13 @@ static void initDistributedMinios() {
         log.info("ENDPOINT IS {}", endpoint);
         endpointsByHostNoBucket.put(it, endpoint);
-        AmazonS3 client = S3ClientFactory.getClient(
-                endpoint,
-                REGION,
-                accessKey(it),
-                secretKey(it)
-        );
+        S3Client client = S3Client.builder()
+                .endpointOverride(URI.create(endpoint))
+                .region(Region.of(REGION))
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(accessKey(it), secretKey(it))
+                ))
+                // minio needs path-style addressing, as v1's enablePathStyleAccess() provided
+                .forcePathStyle(true)
+                .build();
         AwsClientRetry.createBucketWithRetry(client, it);
     });
@@ -290,19 +297,26 @@ private void registerUser(UserIDAuth auth) {
 }
 private List<String> listInBucket(String bucket) {
-    return S3ClientFactory.getClient(
-            endpointsByHostNoBucket.get(bucket),
-            REGION,
-            accessKey(bucket),
-            secretKey(bucket)
-    )
-            .listObjects(bucket, "")
-            .getObjectSummaries()
-            .stream()
-            .map(S3ObjectSummary::getKey)
-            .collect(Collectors.toList());
+    S3Client client = S3Client.builder()
+            .endpointOverride(URI.create(endpointsByHostNoBucket.get(bucket)))
+            .region(Region.of(REGION))
+            .credentialsProvider(StaticCredentialsProvider.create(
+                    AwsBasicCredentials.create(accessKey(bucket), secretKey(bucket))
+            ))
+            .forcePathStyle(true)
+            .build();
+
+    ListObjectsV2Request request = ListObjectsV2Request.builder()
+            .bucket(bucket)
+            .build();
+
+    ListObjectsV2Response response = client.listObjectsV2(request);
+    return response.contents()
+            .stream()
+            .map(S3Object::key)
+            .collect(Collectors.toList());
 }
+@SneakyThrows
 private void writeToPrivate(UserIDAuth user, StorageIdentifier id, String path, String data) {
     try (OutputStream os = datasafeServices.privateService().write(WriteRequest.forPrivate(user, id, path))) {
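Reviewer note: listObjectsV2 returns at most 1000 keys per call, so listInBucket above only sees the first page. That is fine for these tests, but the v2 paginator handles continuation tokens transparently if the buckets ever grow. A minimal sketch, reusing the client and bucket variables from the method above:

    ListObjectsV2Request request = ListObjectsV2Request.builder().bucket(bucket).build();
    List<String> keys = client.listObjectsV2Paginator(request)
            .contents()          // SdkIterable over all pages
            .stream()
            .map(S3Object::key)
            .collect(Collectors.toList());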
diff --git a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java
index 5445cbaf3..467c1de04 100644
--- a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java
+++ b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.business.impl.e2e;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import com.google.common.io.ByteStreams;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
 import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth;
@@ -14,6 +13,7 @@
 import lombok.SneakyThrows;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
+import software.amazon.awssdk.services.s3.model.S3Exception;
 import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
@@ -73,7 +73,7 @@ void testVersionedRemoveManually() {
     writeAndGetVersion(jane, FILE, "Hello 3");
     removeByVersion(jane, FILE, new StorageVersion(oldVersion));
-    assertThrows(AmazonS3Exception.class, () -> readByVersion(jane, FILE, new StorageVersion(oldVersion)));
+    assertThrows(S3Exception.class, () -> readByVersion(jane, FILE, new StorageVersion(oldVersion)));
     assertThat(readPrivateUsingPrivateKey(jane, BasePrivateResource.forPrivate(FILE))).isEqualTo("Hello 3");
 }
diff --git a/datasafe-cli/pom.xml b/datasafe-cli/pom.xml
index 13134382c..28315dfda 100644
--- a/datasafe-cli/pom.xml
+++ b/datasafe-cli/pom.xml
@@ -68,6 +68,15 @@
             <artifactId>mockito-core</artifactId>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>auth</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>protocol-core</artifactId>
+            <version>2.26.22</version>
+        </dependency>
diff --git a/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java b/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java
index c59e94ac9..957c2258b 100644
--- a/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java
+++ b/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java
@@ -1,12 +1,5 @@
 package de.adorsys.datasafe.cli.config;
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
 import com.google.common.collect.ImmutableMap;
 import dagger.Lazy;
 import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices;
@@ -29,7 +22,12 @@
 import lombok.experimental.Delegate;
 import lombok.experimental.UtilityClass;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import java.net.URI;
 import java.nio.file.Path;
 import java.util.regex.Pattern;
@@ -80,15 +78,12 @@ private static StorageService httpS3() {
     private static StorageService amazonS3() {
         return new UriBasedAuthStorageService(
-                acc -> new S3StorageService(
-                        S3ClientFactory.getAmazonClient(
-                                acc.getRegion(),
-                                acc.getAccessKey(),
-                                acc.getSecretKey()
-                        ),
-                        // Bucket name is encoded in first path segment
-                        acc.getBucketName(),
-                        ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService()
+                acc -> getStorageService(
+                        acc.getAccessKey(),
+                        acc.getSecretKey(),
+                        acc.getEndpoint(),
+                        acc.getRegion(),
+                        acc.getBucketName()
                 ),
                 uri -> (uri.getHost() + "/" + uri.getPath().replaceFirst("^/", "")).split("/")
         );
@@ -103,37 +98,20 @@ private WithCredentialProvider(Lazy<StorageKeyStoreOperations> storageKeyStoreOperations) {
             super(null);
             this.delegate = new RegexAccessServiceWithStorageCredentialsImpl(storageKeyStoreOperations);
         }
+    }
     private static S3StorageService getStorageService(String accessKey, String secretKey, String url, String region, String bucket) {
-        AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(
-                                        accessKey,
-                                        secretKey))
-                )
-                .enablePathStyleAccess();
-
-        AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(
-                url,
-                region
-        );
-        amazonS3ClientBuilder.withEndpointConfiguration(endpoint);
-
-        if (! url.toLowerCase().startsWith("https")) {
-            log.info("Creating S3 client without https");
-            ClientConfiguration clientConfig = new ClientConfiguration();
-            clientConfig.setProtocol(Protocol.HTTP);
-            clientConfig.disableSocketProxy();
-            amazonS3ClientBuilder.withClientConfiguration(clientConfig);
-        }
-
-        AmazonS3 amazons3 = amazonS3ClientBuilder.build();
+        AwsBasicCredentials creds = AwsBasicCredentials.create(accessKey, secretKey);
+        S3Client s3 = S3Client.builder()
+                .endpointOverride(URI.create(url))
+                .region(Region.of(region))
+                .credentialsProvider(StaticCredentialsProvider.create(creds))
+                // path-style access, matching v1's enablePathStyleAccess()
+                .forcePathStyle(true)
+                .build();
         return new S3StorageService(
-                amazons3,
+                s3,
                 bucket,
                 ExecutorServiceUtil
                         .submitterExecutesOnStarvationExecutingService(
@@ -141,6 +119,5 @@ private static S3StorageService getStorageService(String accessKey, String secretKey,
                                 5
                         )
         );
-    }
-}
+    }
+}
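Reviewer note: the removed v1 block that forced Protocol.HTTP is not needed with the v2 builder; the scheme of the endpointOverride URI decides whether the client speaks plain HTTP or TLS. A sketch with a hypothetical local minio endpoint and credentials:

    S3Client s3 = S3Client.builder()
            .endpointOverride(URI.create("http://127.0.0.1:9000"))   // "http" here is what v1's Protocol.HTTP did
            .region(Region.of("eu-central-1"))
            .credentialsProvider(StaticCredentialsProvider.create(
                    AwsBasicCredentials.create("admin", "password")))
            .forcePathStyle(true)
            .build();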
diff --git a/datasafe-examples/datasafe-examples-multidfs/pom.xml b/datasafe-examples/datasafe-examples-multidfs/pom.xml
index c6b349de3..6bf92ae34 100644
--- a/datasafe-examples/datasafe-examples-multidfs/pom.xml
+++ b/datasafe-examples/datasafe-examples-multidfs/pom.xml
@@ -64,6 +64,12 @@
             <type>test-jar</type>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <scope>test</scope>
+        </dependency>
diff --git a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java
index 29b7e5f36..74cbd354a 100644
--- a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java
+++ b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.examples.business.s3;
-import com.amazonaws.services.s3.AmazonS3;
 import dagger.Lazy;
 import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
@@ -35,6 +34,10 @@
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
 import org.testcontainers.shaded.com.google.common.collect.ImmutableMap;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
 import java.io.OutputStream;
 import java.net.URI;
@@ -64,7 +67,7 @@ class MultiDfsWithCredentialsExampleIT {
     private static final ExecutorService EXECUTOR = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService(4, 4);
     private static Map<MinioContainerId, GenericContainer> minios = new EnumMap<>(MinioContainerId.class);
-    private static AmazonS3 directoryClient = null;
+    private static S3Client directoryClient = null;
     private static Map<MinioContainerId, String> endpointsByHost = new HashMap<>();
     @BeforeAll
@@ -79,13 +82,12 @@ static void startup() {
         log.info("MINIO for {} is available at: {} with access: '{}'/'{}'", it, endpoint,
                 it.getAccessKey(), it.getSecretKey());
-        AmazonS3 client = S3ClientFactory.getClient(
-                endpoint,
-                REGION,
-                it.getAccessKey(),
-                it.getSecretKey()
-        );
-
+        S3Client client = S3Client.builder()
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(it.getAccessKey(), it.getSecretKey())))
+                .region(Region.of(REGION))
+                .endpointOverride(URI.create(endpoint))
+                // minio needs path-style addressing, as in the v1 factory
+                .forcePathStyle(true)
+                .build();
         AwsClientRetry.createBucketWithRetry(client, it.getBucketName());
         if (it.equals(DIRECTORY_BUCKET)) {
diff --git a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java
index c8429a2cb..0da43e75d 100644
--- a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java
+++ b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java
@@ -1,13 +1,5 @@
 package de.adorsys.datasafe.examples.business.s3;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
-import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
-import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
 import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
 import de.adorsys.datasafe.directory.impl.profile.config.DefaultDFSConfig;
@@ -29,12 +21,21 @@
 import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.BucketVersioningStatus;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.NoSuchKeyException;
+import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest;
+import software.amazon.awssdk.services.s3.model.VersioningConfiguration;
 import java.io.OutputStream;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
 import java.util.concurrent.atomic.AtomicReference;
+import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy;
 import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -52,7 +53,7 @@ class BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT {
     private static final String SECRET_KEY = "secret";
     private static GenericContainer cephContainer;
-    private static AmazonS3 cephS3;
+    private static S3Client cephS3;
     private static String cephMappedUrl;
     private DefaultDatasafeServices defaultDatasafeServices;
@@ -84,28 +85,24 @@ static void createServices() {
         // URL for S3 API/bucket root:
         cephMappedUrl = getDockerUri("http://0.0.0.0") + ":" + mappedPort;
         log.info("Ceph mapped URL: {}", cephMappedUrl);
-        cephS3 = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(cephMappedUrl, "us-east-1")
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(ACCESS_KEY, SECRET_KEY)
-                        )
-                )
-                .enablePathStyleAccess()
+        cephS3 = S3Client.builder()
+                .endpointOverride(URI.create(cephMappedUrl))
+                .region(Region.US_EAST_1)
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(ACCESS_KEY, SECRET_KEY)
+                ))
+                // ceph needs path-style addressing, as v1's enablePathStyleAccess() provided
+                .forcePathStyle(true)
                 .build();
         // Create bucket in CEPH that will support versioning
-        cephS3.createBucket(VERSIONED_BUCKET_NAME);
-        cephS3.setBucketVersioningConfiguration(
-                new SetBucketVersioningConfigurationRequest(
-                        VERSIONED_BUCKET_NAME,
-                        new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED)
-                )
-        );
-
-
+        cephS3.createBucket(CreateBucketRequest.builder()
+                .bucket(VERSIONED_BUCKET_NAME)
+                .build());
+        cephS3.putBucketVersioning(PutBucketVersioningRequest.builder()
+                .bucket(VERSIONED_BUCKET_NAME)
+                .versioningConfiguration(VersioningConfiguration.builder()
+                        .status(BucketVersioningStatus.ENABLED)
+                        .build())
+                .build());
     }
     @AfterAll
@@ -192,24 +189,36 @@ void removeSpecificVersionId() {
     writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 2");
     // now, we read old file version
-    assertThat(defaultDatasafeServices.privateService().read(
-            ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)))
-    ).hasContent("Hello 1");
+    assertThat(new String(cephS3.getObject(GetObjectRequest.builder()
+                    .bucket(VERSIONED_BUCKET_NAME)
+                    .key(MY_OWN_FILE_TXT)
+                    .versionId(versionId)
+                    .build())
+            .readAllBytes(), StandardCharsets.UTF_8))
+            .isEqualTo("Hello 1");
     // now, we remove old file version
-    defaultDatasafeServices.privateService().remove(
-            RemoveRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId))
-    );
+    cephS3.deleteObject(DeleteObjectRequest.builder()
+            .bucket(VERSIONED_BUCKET_NAME)
+            .key(MY_OWN_FILE_TXT)
+            .versionId(versionId)
+            .build());
     // it is removed from storage, so when we read it we get exception
-    assertThrows(AmazonS3Exception.class, () -> defaultDatasafeServices.privateService().read(
-            ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)))
-    );
+    assertThatThrownBy(() -> cephS3.getObject(GetObjectRequest.builder()
+            .bucket(VERSIONED_BUCKET_NAME)
+            .key(MY_OWN_FILE_TXT)
+            .versionId(versionId)
+            .build()))
+            .isInstanceOf(NoSuchKeyException.class);
     // but latest file version is still available
-    assertThat(defaultDatasafeServices.privateService().read(
-            ReadRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT))
-    ).hasContent("Hello 2");
+    assertThat(new String(cephS3.getObject(GetObjectRequest.builder()
+                    .bucket(VERSIONED_BUCKET_NAME)
+                    .key(MY_OWN_FILE_TXT)
+                    .build())
+            .readAllBytes(), StandardCharsets.UTF_8))
+            .isEqualTo("Hello 2");
     // END_SNIPPET
 }
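Reviewer note: PutBucketVersioningRequest/VersioningConfiguration are the v2 replacements for v1's SetBucketVersioningConfigurationRequest/BucketVersioningConfiguration, which do not exist in software.amazon.awssdk. To verify that CEPH actually enabled versioning before the tests run, a hedged sketch (GetBucketVersioningRequest is real v2 API; the guard itself is an assumption, not part of this PR):

    BucketVersioningStatus status = cephS3.getBucketVersioning(
                    GetBucketVersioningRequest.builder().bucket(VERSIONED_BUCKET_NAME).build())
            .status();
    if (status != BucketVersioningStatus.ENABLED) {
        throw new IllegalStateException("bucket versioning is not enabled");
    }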
diff --git a/datasafe-rest-impl/pom.xml b/datasafe-rest-impl/pom.xml
index 33dd14c4b..6b181cf12 100644
--- a/datasafe-rest-impl/pom.xml
+++ b/datasafe-rest-impl/pom.xml
@@ -154,6 +154,11 @@
             <scope>test</scope>
             <version>${spring-restdocs.version}</version>
         </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <version>2.26.22</version>
+        </dependency>
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java
index f3d53b438..efc2de6b8 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java
@@ -1,17 +1,17 @@
 package de.adorsys.datasafe.rest.impl.config;
-import com.amazonaws.services.s3.AmazonS3;
+import software.amazon.awssdk.services.s3.S3Client;
 import de.adorsys.datasafe.storage.impl.s3.S3ClientFactory;
 public class BasicS3Factory implements S3Factory {
     @Override
-    public AmazonS3 getClient(String endpointUrl, String region, String accessKey, String secretKey) {
+    public S3Client getClient(String endpointUrl, String region, String accessKey, String secretKey) {
         return S3ClientFactory.getClient(endpointUrl, region, accessKey, secretKey);
     }
     @Override
-    public AmazonS3 getAmazonClient(String region, String accessKey, String secretKey) {
+    public S3Client getAmazonClient(String region, String accessKey, String secretKey) {
         return S3ClientFactory.getAmazonClient(region, accessKey, secretKey);
     }
 }
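Reviewer note: a v2 S3Client owns an HTTP connection pool and implements SdkAutoCloseable, so clients built per call (as these factories allow) should be reused or closed. A sketch for one-off use, with hypothetical variables:

    try (S3Client s3 = factory.getClient(endpointUrl, region, accessKey, secretKey)) {
        // the pool is released when the try block exits
        s3.headBucket(HeadBucketRequest.builder().bucket(bucketName).build());
    }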
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java
index 71c31b9fe..d000a19cc 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java
@@ -1,10 +1,11 @@
 package de.adorsys.datasafe.rest.impl.config;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import dagger.Lazy;
@@ -40,6 +41,6 @@
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import java.net.URI;
 import java.nio.file.Paths;
@@ -128,7 +131,7 @@ VersionedDatasafeServices versionedDatasafeServices(StorageService storageService,
     @Bean
     @ConditionalOnProperty(value = CLIENT_CREDENTIALS, havingValue = "true")
-    StorageService clientCredentials(AmazonS3 s3, S3Factory factory, DatasafeProperties properties) {
+    StorageService clientCredentials(S3Client s3, S3Factory factory, DatasafeProperties properties) {
         ExecutorService executorService = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService();
         S3StorageService basicStorage = new S3StorageService(
                 s3,
@@ -182,7 +185,7 @@ StorageService singleStorageServiceFilesystem(DatasafeProperties properties) {
      */
     @Bean
     @ConditionalOnProperty(name = DATASAFE_S3_STORAGE, havingValue = "true")
-    StorageService singleStorageServiceS3(AmazonS3 s3, DatasafeProperties properties) {
+    StorageService singleStorageServiceS3(S3Client s3, DatasafeProperties properties) {
         return new S3StorageService(
                 s3,
                 properties.getBucketName(),
@@ -217,36 +220,48 @@ StorageService multiStorageService(DatasafeProperties properties) {
     @Bean
     @org.springframework.context.annotation.Lazy
-    AmazonS3 s3(DatasafeProperties properties) {
-        AmazonS3 amazonS3;
+    S3Client s3(DatasafeProperties properties) {
         boolean useEndpoint = properties.getAmazonUrl() != null;
-        AWSStaticCredentialsProvider credentialsProvider = new AWSStaticCredentialsProvider(
-                new BasicAWSCredentials(properties.getAmazonAccessKeyID(), properties.getAmazonSecretAccessKey())
+        AwsBasicCredentials credentials = AwsBasicCredentials.create(
+                properties.getAmazonAccessKeyID(),
+                properties.getAmazonSecretAccessKey()
         );
-
-        AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard()
-                .withCredentials(credentialsProvider);
-
+        S3ClientBuilder builder = S3Client.builder()
+                .credentialsProvider(StaticCredentialsProvider.create(credentials))
+                .region(Region.of(properties.getAmazonRegion()));
         if (useEndpoint) {
-            builder = builder.withEndpointConfiguration(
-                    new AwsClientBuilder.EndpointConfiguration(
-                            properties.getAmazonUrl(),
-                            properties.getAmazonRegion())
-            ).enablePathStyleAccess();
-        } else {
-            builder.withRegion(properties.getAmazonRegion());
+            // custom endpoint (e.g. minio) needs the endpoint override and path-style access
+            builder = builder
+                    .endpointOverride(URI.create(properties.getAmazonUrl()))
+                    .forcePathStyle(true);
         }
-
-        amazonS3 = builder.build();
+        S3Client s3Client = builder.build();
         // used by local deployment in conjunction with minio
-        if (useEndpoint && !amazonS3.doesBucketExistV2(properties.getBucketName())) {
-            amazonS3.createBucket(properties.getBucketName());
+        if (useEndpoint) {
+            try {
+                // v2 signals a missing bucket with an exception, not a boolean
+                s3Client.headBucket(HeadBucketRequest.builder()
+                        .bucket(properties.getBucketName())
+                        .build());
+            } catch (NoSuchBucketException e) {
+                s3Client.createBucket(CreateBucketRequest.builder()
+                        .bucket(properties.getBucketName())
+                        .build());
+            }
         }
-        return amazonS3;
+        return s3Client;
     }
     private static class WithAccessCredentials extends BucketAccessServiceImpl {
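Reviewer note: the headBucket/NoSuchBucketException probe works because v2 surfaces non-2xx responses as exceptions rather than as status flags on the response. When the bucket is created against a freshly started minio container, the built-in waiter can block until it becomes visible; a sketch, with a hypothetical bucketName:

    s3Client.createBucket(CreateBucketRequest.builder().bucket(bucketName).build());
    s3Client.waiter().waitUntilBucketExists(
            HeadBucketRequest.builder().bucket(bucketName).build());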
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java
index 7297baec4..0e072db79 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java
@@ -1,9 +1,9 @@
 package de.adorsys.datasafe.rest.impl.config;
-import com.amazonaws.services.s3.AmazonS3;
+import software.amazon.awssdk.services.s3.S3Client;
 public interface S3Factory {
-    AmazonS3 getClient(String endpointUrl, String region, String accessKey, String secretKey);
-    AmazonS3 getAmazonClient(String region, String accessKey, String secretKey);
+    S3Client getClient(String endpointUrl, String region, String accessKey, String secretKey);
+    S3Client getAmazonClient(String region, String accessKey, String secretKey);
 }
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java
index d28e74c07..5aa758bd8 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.rest.impl.controller;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
 import de.adorsys.datasafe.encrypiton.api.types.UserID;
 import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth;
@@ -24,6 +23,7 @@
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.multipart.MultipartFile;
+import software.amazon.awssdk.services.s3.model.S3Exception;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -106,7 +106,7 @@ public List<String> listDocuments(@RequestHeader String user,
                 .toList();
         log.debug("List for path {} returned {} items", path, documentList.size());
         return documentList;
-    } catch (AmazonS3Exception e) { // for list this exception most likely means that user credentials wrong
+    } catch (S3Exception e) { // for a list operation this exception most likely means the user's credentials are wrong
         throw new UnauthorizedException("Unauthorized", e);
     }
 }
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java
index 9e052fabc..863494bba 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.rest.impl.controller;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import de.adorsys.datasafe.rest.impl.exceptions.UnauthorizedException;
 import de.adorsys.datasafe.rest.impl.exceptions.UserDoesNotExistsException;
 import de.adorsys.datasafe.rest.impl.exceptions.UserExistsException;
@@ -11,6 +10,7 @@
 import org.springframework.web.bind.annotation.ControllerAdvice;
 import org.springframework.web.bind.annotation.ExceptionHandler;
 import org.springframework.web.bind.annotation.ResponseStatus;
+import software.amazon.awssdk.services.s3.model.S3Exception;
 import javax.crypto.BadPaddingException;
 import java.security.UnrecoverableKeyException;
@@ -42,7 +42,7 @@ public ResponseEntity<List<String>> handleUserExistsException(UserExistsException ex) {
     return ResponseEntity.badRequest().body(new ArrayList<>(errors));
 }
-    @ExceptionHandler({AmazonS3Exception.class})
+    @ExceptionHandler({S3Exception.class})
     public ResponseEntity<List<String>> handleFileNotFoundException(Exception ex) {
         log.debug("File not found exception: {}", ex.getMessage(), ex);
         List<String> errors = Collections.singletonList("File not found");
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java
index 5f86a1f1b..71df87cca 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.rest.impl.controller;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
 import de.adorsys.datasafe.encrypiton.api.types.UserID;
 import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth;
@@ -24,6 +23,7 @@
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.multipart.MultipartFile;
+import software.amazon.awssdk.services.s3.model.S3Exception;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -119,7 +119,7 @@ public List<String> listInbox(@RequestHeader String user,
                 .toList();
         log.debug("User's {} inbox contains {} items", user, inboxList.size());
         return inboxList;
-    } catch (AmazonS3Exception e) { // for list this exception most likely means that user credentials wrong
+    } catch (S3Exception e) { // for a list operation this exception most likely means the user's credentials are wrong
         throw new UnauthorizedException("Unauthorized", e);
     }
 }
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java
index ac91da98e..7ce0f2383 100644
--- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java
+++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.rest.impl.controller;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import de.adorsys.datasafe.business.impl.service.VersionedDatasafeServices;
 import de.adorsys.datasafe.encrypiton.api.types.UserID;
 import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth;
@@ -28,6 +27,7 @@
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.multipart.MultipartFile;
+import software.amazon.awssdk.services.s3.model.S3Exception;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -62,7 +62,7 @@ public List<String> listVersionedDocuments(@RequestHeader String user,
                 .toList();
         log.debug("List for path {} returned {} items", path, documentList.size());
         return documentList;
-    } catch (AmazonS3Exception e) { // for list this exception most likely means that user credentials wrong
+    } catch (S3Exception e) { // for a list operation this exception most likely means the user's credentials are wrong
         throw new UnauthorizedException("Unauthorized", e);
     }
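Reviewer note: S3Exception covers far more than missing objects (403 on bad credentials, 301 on a wrong region, ...), while this advice always answers "File not found". If finer mapping is ever needed, v2 exposes the HTTP status directly; a hedged sketch of a hypothetical handler, not part of this PR:

    @ExceptionHandler(S3Exception.class)
    public ResponseEntity<List<String>> handleS3Exception(S3Exception ex) {
        // 404 stays "not found", everything else is treated as an access problem
        HttpStatus status = ex.statusCode() == 404 ? HttpStatus.NOT_FOUND : HttpStatus.FORBIDDEN;
        return ResponseEntity.status(status)
                .body(Collections.singletonList(ex.awsErrorDetails().errorMessage()));
    }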
diff --git a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java
index 0fa08a3cd..b000ab7c5 100644
--- a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java
+++ b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java
@@ -1,7 +1,5 @@
 package de.adorsys.datasafe.rest.impl.config;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.S3Object;
 import de.adorsys.datasafe.storage.api.StorageService;
 import de.adorsys.datasafe.types.api.resource.BasePrivateResource;
 import de.adorsys.datasafe.types.api.shared.BaseMockitoTest;
@@ -16,6 +14,10 @@
 import org.springframework.boot.test.mock.mockito.MockBean;
 import org.springframework.test.context.ActiveProfiles;
 import org.springframework.test.context.junit.jupiter.SpringExtension;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
 import java.io.ByteArrayInputStream;
@@ -46,26 +48,40 @@ class DatasafeConfigTest extends BaseMockitoTest {
     @Autowired
     private StorageService storageService;
-    @MockBean
-    private AmazonS3 amazonS3;
     @Mock
-    private AmazonS3 amazonS3FromFactory;
+    private S3Client s3ClientFromFactory;
     @MockBean
     private S3Factory s3Factory;
     @BeforeEach
     void prepare() {
-        S3Object object = new S3Object();
-        object.setObjectContent(new ByteArrayInputStream(BASIC_STORAGE_ANSWER.getBytes(UTF_8)));
-        when(amazonS3.getObject(basicBucket, BASIC_FILE_STORAGE_PATH)).thenReturn(object);
-        when(s3Factory.getClient("http://0.0.0.0:9000/", "eu-central-1", "user", "passwd"))
-                .thenReturn(amazonS3FromFactory);
-        S3Object another = new S3Object();
-        another.setObjectContent(new ByteArrayInputStream(DATA_STORAGE_ANSWER.getBytes(UTF_8)));
-        when(amazonS3FromFactory.getObject(DATA_BUCKET, DATA_FILE_STORAGE_PATH)).thenReturn(another);
+        when(s3Factory.getClient("http://0.0.0.0:9000/", "eu-central-1", "user", "passwd"))
+                .thenReturn(s3ClientFromFactory);
+
+        GetObjectResponse objectResponseMock = GetObjectResponse.builder()
+                .contentLength((long) BASIC_STORAGE_ANSWER.getBytes(UTF_8).length)
+                .build();
+        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(BASIC_STORAGE_ANSWER.getBytes(UTF_8));
+        ResponseInputStream<GetObjectResponse> responseInputStreamMock =
+                new ResponseInputStream<>(objectResponseMock, byteArrayInputStream);
+        when(s3ClientFromFactory.getObject(GetObjectRequest.builder()
+                .bucket(basicBucket)
+                .key(BASIC_FILE_STORAGE_PATH)
+                .build()))
+                .thenReturn(responseInputStreamMock);
+
+        GetObjectResponse anotherResponseMock = GetObjectResponse.builder()
+                .contentLength((long) DATA_STORAGE_ANSWER.getBytes(UTF_8).length)
+                .build();
+        ByteArrayInputStream anotherInputStream = new ByteArrayInputStream(DATA_STORAGE_ANSWER.getBytes(UTF_8));
+        ResponseInputStream<GetObjectResponse> anotherResponseInputStreamMock =
+                new ResponseInputStream<>(anotherResponseMock, anotherInputStream);
+        when(s3ClientFromFactory.getObject(GetObjectRequest.builder()
+                .bucket(DATA_BUCKET)
+                .key(DATA_FILE_STORAGE_PATH)
+                .build()))
+                .thenReturn(anotherResponseInputStreamMock);
     }
     @Test
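Reviewer note: stubbing with a freshly built GetObjectRequest works because v2 model objects implement value-based equals/hashCode, so Mockito's default argument matching compares requests field by field. The same pattern keeps other stubs readable; a sketch with hypothetical bucket/key values:

    GetObjectRequest expected = GetObjectRequest.builder().bucket("bucket").key("key").build();
    when(s3ClientFromFactory.getObject(expected)).thenReturn(responseInputStreamMock);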
diff --git a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java
index 66ee7325e..a13e339ea 100644
--- a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java
+++ b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.rest.impl.controller;
-import com.amazonaws.services.s3.AmazonS3;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import de.adorsys.datasafe.directory.api.config.DFSConfig;
 import de.adorsys.datasafe.rest.impl.dto.UserDTO;
@@ -18,6 +17,7 @@
 import org.springframework.test.web.servlet.MockMvc;
 import org.springframework.test.web.servlet.MvcResult;
 import org.springframework.test.web.servlet.ResultMatcher;
+import software.amazon.awssdk.services.s3.S3Client;
 import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
 import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print;
@@ -33,7 +33,7 @@ public abstract class BaseDatasafeEndpointTest extends BaseMockitoTest {
     protected MockMvc mvc;
     @MockBean
-    protected AmazonS3 s3;
+    protected S3Client s3Client;
     @MockBean
     protected StorageService storageService;
diff --git a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java
index 02f9cb024..3acfacb6f 100644
--- a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java
+++ b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java
@@ -1,12 +1,5 @@
 package de.adorsys.datasafe.simple.adapter.impl;
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
 import com.google.common.base.CharMatcher;
 import com.google.common.io.ByteStreams;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
@@ -49,11 +43,19 @@
 import lombok.Getter;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.nio.file.FileSystems;
+import java.time.Duration;
 import java.util.List;
 import java.util.stream.Collectors;
@@ -248,63 +250,62 @@ private static SystemRootAndStorageService useAmazonS3(AmazonS3DFSCredentials dfsCredentials) {
     }
     log.info(lsf.toString());
-    AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
-            .withCredentials(
-                    new AWSStaticCredentialsProvider(
-                            new BasicAWSCredentials(
-                                    amazonS3DFSCredentials.getAccessKey(),
-                                    amazonS3DFSCredentials.getSecretKey()))
-            );
+    S3ClientBuilder s3ClientBuilder = S3Client.builder()
+            .credentialsProvider(StaticCredentialsProvider.create(
+                    AwsBasicCredentials.create(
+                            amazonS3DFSCredentials.getAccessKey(),
+                            amazonS3DFSCredentials.getSecretKey()
+                    )
+            ))
+            .region(Region.of(amazonS3DFSCredentials.getRegion()));
     boolean useEndpoint = !amazonS3DFSCredentials.getUrl().matches(AMAZON_URL)
             && !amazonS3DFSCredentials.getUrl().startsWith(S3_PREFIX);
     lsf = new LogStringFrame();
     if (useEndpoint) {
         lsf.add("not real amazon, so use pathStyleAccess");
-        AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(
-                amazonS3DFSCredentials.getUrl(),
-                amazonS3DFSCredentials.getRegion()
-        );
-        amazonS3ClientBuilder
-                .withEndpointConfiguration(endpoint)
-                .enablePathStyleAccess();
+        s3ClientBuilder.endpointOverride(URI.create(amazonS3DFSCredentials.getUrl()))
+                .forcePathStyle(true);
     } else {
         lsf.add("real amazon, so use bucketStyleAccess");
-        amazonS3ClientBuilder.withRegion(amazonS3DFSCredentials.getRegion());
     }
     log.info("{}", lsf.toString());
     if (amazonS3DFSCredentials.isNoHttps() || maxConnections > 0 || requestTimeout > 0) {
-        ClientConfiguration clientConfig = new ClientConfiguration();
+        // v1's ClientConfiguration options map to the HTTP client builder in v2
+        ApacheHttpClient.Builder httpClientBuilder = ApacheHttpClient.builder();
         if (amazonS3DFSCredentials.isNoHttps()) {
+            // in v2 the scheme is taken from the endpoint URI, nothing more to configure
             log.info("Creating S3 client without https");
-            clientConfig.setProtocol(Protocol.HTTP);
-            clientConfig.disableSocketProxy();
         }
         if (maxConnections > 0) {
            log.info("Creating S3 client with max connections:{}", maxConnections);
-            clientConfig.setMaxConnections(maxConnections);
+            httpClientBuilder.maxConnections(maxConnections);
        }
         if (requestTimeout > 0) {
             log.info("Creating S3 client with connection timeout:{}", requestTimeout);
-            clientConfig.setRequestTimeout(requestTimeout);
+            // v1's request timeout is in milliseconds; socket timeout is the closest v2 equivalent
+            httpClientBuilder.socketTimeout(Duration.ofMillis(requestTimeout));
         }
-        amazonS3ClientBuilder.withClientConfiguration(clientConfig);
+        s3ClientBuilder.httpClientBuilder(httpClientBuilder);
     }
-    AmazonS3 amazons3 = amazonS3ClientBuilder.build();
+    S3Client s3Client = s3ClientBuilder.build();
-    if (!amazons3.doesBucketExistV2(amazonS3DFSCredentials.getContainer())) {
-        amazons3.createBucket(amazonS3DFSCredentials.getContainer());
+    try {
+        // v2 signals a missing bucket with an exception, not a boolean
+        s3Client.headBucket(HeadBucketRequest.builder()
+                .bucket(amazonS3DFSCredentials.getContainer())
+                .build());
+    } catch (NoSuchBucketException e) {
+        s3Client.createBucket(CreateBucketRequest.builder()
+                .bucket(amazonS3DFSCredentials.getContainer())
+                .build());
     }
-    StorageService storageService = new S3StorageService(
-            amazons3,
-            amazonS3DFSCredentials.getContainer(),
-            ExecutorServiceUtil
-                    .submitterExecutesOnStarvationExecutingService(
-                            amazonS3DFSCredentials.getThreadPoolSize(),
-                            amazonS3DFSCredentials.getQueueSize()
-                    )
+    S3StorageService storageService = new S3StorageService(
+            s3Client,
+            amazonS3DFSCredentials.getContainer(),
+            ExecutorServiceUtil
+                    .submitterExecutesOnStarvationExecutingService(
+                            amazonS3DFSCredentials.getThreadPoolSize(),
+                            amazonS3DFSCredentials.getQueueSize()
+                    )
     );
     URI systemRoot = URI.create(S3_PREFIX + amazonS3DFSCredentials.getRootBucket());
     log.info("build DFS to S3 with root " + amazonS3DFSCredentials.getRootBucket()
             + " and url " + amazonS3DFSCredentials.getUrl());
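Reviewer note: S3Configuration only carries S3-specific flags such as path-style access; connection limits and timeouts belong to the HTTP client in v2. The apache-client module ships transitively with software.amazon.awssdk:s3, so no extra dependency is needed. The pattern in isolation, with hypothetical values:

    ApacheHttpClient.Builder http = ApacheHttpClient.builder()
            .maxConnections(50)                       // v1: clientConfig.setMaxConnections(50)
            .socketTimeout(Duration.ofMillis(5000));  // closest match for v1's setRequestTimeout(5000)
    S3Client s3 = S3Client.builder()
            .httpClientBuilder(http)
            .region(Region.EU_CENTRAL_1)
            .build();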
diff --git a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterTest.java b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterTest.java
index 2fb8aa818..8e425ea3c 100644
--- a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterTest.java
+++ b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterTest.java
@@ -1,6 +1,5 @@
 package de.adorsys.datasafe.simple.adapter.impl;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import de.adorsys.datasafe.encrypiton.api.types.UserID;
 import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth;
 import de.adorsys.datasafe.encrypiton.api.types.encryption.MutableEncryptionConfig;
@@ -27,6 +26,7 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
+import software.amazon.awssdk.services.s3.model.S3Exception;
 import java.io.ByteArrayInputStream;
 import java.io.OutputStream;
@@ -288,7 +288,7 @@ void testTwoUsers(WithStorageProvider.StorageDescriptor descriptor) {
         );
     } else {
         assertThrows(
-                AmazonS3Exception.class,
+                S3Exception.class,
                 () -> simpleDatasafeService.documentExists(userIDAuth2, document.getDocumentFQN())
         );
     }
diff --git a/datasafe-storage/datasafe-storage-impl-s3/pom.xml b/datasafe-storage/datasafe-storage-impl-s3/pom.xml
index 4f16cd52d..9e846aa3c 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/pom.xml
+++ b/datasafe-storage/datasafe-storage-impl-s3/pom.xml
@@ -24,15 +24,32 @@
             <groupId>de.adorsys</groupId>
             <artifactId>datasafe-types-api</artifactId>
             <version>${project.version}</version>
-        </dependency>
+        </dependency>
+    </dependencies>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>bom</artifactId>
+                <version>2.26.22</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
         <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-s3</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-core</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
         </dependency>
         <dependency>
             <groupId>javax.xml.bind</groupId>
             <artifactId>jaxb-api</artifactId>
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java
index 7e348333b..0bd7aca53 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java
@@ -1,16 +1,16 @@
 package de.adorsys.datasafe.storage.impl.s3;
-import com.amazonaws.services.s3.AmazonS3;
 import lombok.Builder;
 import lombok.Getter;
 import lombok.ToString;
+import software.amazon.awssdk.services.s3.S3Client;
 @Getter
 @Builder
 @ToString
 public class ChunkUploadRequest {
-    private AmazonS3 amazonS3;
+    private S3Client s3;
     @ToString.Exclude
     private byte[] content;
     private int contentSize;
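Reviewer note: the v2 multipart flow used by the classes below is createMultipartUpload, then one uploadPart per ~5 MB chunk, then completeMultipartUpload with the collected part numbers and eTags. A condensed sketch of the happy path, with hypothetical bucket/key/chunk variables:

    String uploadId = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
            .bucket(bucket).key(key).build()).uploadId();
    UploadPartResponse part = s3.uploadPart(UploadPartRequest.builder()
                    .bucket(bucket).key(key).uploadId(uploadId).partNumber(1).build(),
            RequestBody.fromBytes(chunk));
    s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
            .bucket(bucket).key(key).uploadId(uploadId)
            .multipartUpload(CompletedMultipartUpload.builder()
                    .parts(CompletedPart.builder().partNumber(1).eTag(part.eTag()).build())
                    .build())
            .build());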
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java
index 34707d8e8..ea38c68ab 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java
@@ -21,64 +21,63 @@
 package de.adorsys.datasafe.storage.impl.s3;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartResult;
 import de.adorsys.datasafe.types.api.callback.PhysicalVersionCallback;
 import de.adorsys.datasafe.types.api.callback.ResourceWriteCallback;
 import de.adorsys.datasafe.types.api.utils.CustomizableByteArrayOutputStream;
 import de.adorsys.datasafe.types.api.utils.Obfuscate;
 import lombok.SneakyThrows;
 import lombok.Synchronized;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
 @Slf4j
 public class MultipartUploadS3StorageOutputStream extends OutputStream {
     private String bucketName;
     private String objectName;
-    private AmazonS3 amazonS3;
+    private S3Client s3;
+
+    private String multiPartUploadId;
     // The minimum size for a multi part request is 5 MB, hence the buffer size of 5 MB
     static final int BUFFER_SIZE = 1024 * 1024 * 5;
-    private final CompletionService<UploadPartResult> completionService;
+    private final CompletionService<CompletedPart> completionService;
     private CustomizableByteArrayOutputStream currentOutputStream = newOutputStream();
-    private InitiateMultipartUploadResult multiPartUploadResult;
+    private CreateMultipartUploadResponse multiPartUploadResult;
     private int partCounter = 1;
     private final List<? extends ResourceWriteCallback> callbacks;
-    MultipartUploadS3StorageOutputStream(String bucketName, String objectKey, AmazonS3 amazonS3,
-                                         ExecutorService executorService,
+    public MultipartUploadS3StorageOutputStream(String bucketName, String objectKey, S3Client s3,
+                                                ExecutorService executorService, String multiPartUploadId,
                                          List<? extends ResourceWriteCallback> callbacks) {
         this.bucketName = bucketName;
         this.objectName = objectKey;
-        this.amazonS3 = amazonS3;
+        this.s3 = s3;
         this.completionService = new ExecutorCompletionService<>(executorService);
         this.callbacks = callbacks;
+        this.multiPartUploadId = multiPartUploadId;
         log.debug("Write to bucket: {} with name: {}", Obfuscate.secure(bucketName), Obfuscate.secure(objectName));
     }
@@ -136,12 +135,12 @@ private void initiateMultipartRequestAndCommitPartIfNeeded() {
     completionService.submit(new UploadChunkResultCallable(
             ChunkUploadRequest
                     .builder()
-                    .amazonS3(amazonS3)
+                    .s3(s3)
                     .content(content)
                     .contentSize(size)
                     .bucketName(bucketName)
                     .objectName(objectName)
-                    .uploadId(multiPartUploadResult.getUploadId())
+                    .uploadId(multiPartUploadId)
                    .chunkNumberCounter(partCounter)
                     .lastChunk(false)
                     .build()
@@ -156,21 +155,22 @@ private boolean isMultiPartUpload() {
     @SneakyThrows
     private void finishSimpleUpload() {
-        ObjectMetadata objectMetadata = new ObjectMetadata();
         int size = currentOutputStream.size();
-        objectMetadata.setContentLength(size);
         byte[] content = currentOutputStream.getBufferOrCopy();
         // Release the memory
         currentOutputStream = null;
-        PutObjectResult upload = amazonS3.putObject(
-                bucketName,
-                objectName,
-                new ByteArrayInputStream(content, 0, size),
-                objectMetadata);
-
-        notifyCommittedVersionIfPresent(upload.getVersionId());
+        PutObjectResponse upload = s3.putObject(
+                PutObjectRequest.builder()
+                        .bucket(bucketName)
+                        .key(objectName)
+                        .contentLength((long) size)
+                        .build(),
+                // only `size` bytes of the buffer are valid payload
+                RequestBody.fromInputStream(new ByteArrayInputStream(content, 0, size), size));
+        notifyCommittedVersionIfPresent(upload.versionId());
         log.debug("Finished simple upload");
     }
@@ -179,19 +179,18 @@ private void finishMultiPartUpload() throws IOException {
     sendLastChunkOfMultipartIfNeeded();
     try {
-        List<PartETag> partETags = getMultiPartsUploadResults();
-
-        log.debug("Send multipart request to S3");
-        CompleteMultipartUploadResult upload = amazonS3.completeMultipartUpload(
-                new CompleteMultipartUploadRequest(
-                        multiPartUploadResult.getBucketName(),
-                        multiPartUploadResult.getKey(),
-                        multiPartUploadResult.getUploadId(),
-                        partETags
-                )
-        );
+        List<CompletedPart> partETags = getMultiPartsUploadResults();
+
+        CompleteMultipartUploadResponse upload = s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
+                .bucket(bucketName)
+                .key(objectName)
+                .uploadId(multiPartUploadId)
+                .multipartUpload(CompletedMultipartUpload.builder()
+                        .parts(partETags)
+                        .build())
+                .build());
-        notifyCommittedVersionIfPresent(upload.getVersionId());
+        notifyCommittedVersionIfPresent(upload.versionId());
         log.debug("Finished multi part upload");
     } catch (ExecutionException e) {
@@ -222,12 +221,12 @@ private void sendLastChunkOfMultipartIfNeeded() {
     completionService.submit(
             new UploadChunkResultCallable(ChunkUploadRequest.builder()
-                    .amazonS3(amazonS3)
+                    .s3(s3)
                     .content(content)
                     .contentSize(size)
                     .bucketName(bucketName)
                     .objectName(objectName)
-                    .uploadId(multiPartUploadResult.getUploadId())
+                    .uploadId(multiPartUploadId)
                     .chunkNumberCounter(partCounter)
                     .lastChunk(true)
                     .build()
@@ -249,8 +248,14 @@ private void initiateMultiPartIfNeeded() {
     if (multiPartUploadResult == null) {
         log.debug("Initiate multi part");
-        multiPartUploadResult = amazonS3
-                .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, objectName));
+        multiPartUploadResult = s3.createMultipartUpload(
+                CreateMultipartUploadRequest.builder()
+                        .bucket(bucketName)
+                        .key(objectName)
+                        .build()
+        );
+        multiPartUploadId = multiPartUploadResult.uploadId();
     }
 }
@@ -258,24 +263,30 @@ private void abortMultiPartUpload() {
     log.debug("Abort multi part");
     if (isMultiPartUpload()) {
-        amazonS3.abortMultipartUpload(new AbortMultipartUploadRequest(
-                multiPartUploadResult.getBucketName(),
-                multiPartUploadResult.getKey(),
-                multiPartUploadResult.getUploadId()));
+        s3.abortMultipartUpload(
+                AbortMultipartUploadRequest.builder()
+                        .bucket(bucketName)
+                        .key(objectName)
+                        .uploadId(multiPartUploadId)
+                        .build()
+        );
     }
 }
-private List<PartETag> getMultiPartsUploadResults() throws ExecutionException, InterruptedException {
-    List<PartETag> result = new ArrayList<>(partCounter);
+private List<CompletedPart> getMultiPartsUploadResults() throws ExecutionException, InterruptedException {
+    List<CompletedPart> result = new ArrayList<>(partCounter);
     for (int i = 0; i < partCounter; i++) {
-        UploadPartResult partResult = completionService.take().get();
-        result.add(partResult.getPartETag());
+        // the callable already pairs each eTag with its part number, so completion order does not matter
+        result.add(completionService.take().get());
         log.debug("Get upload part #{} from {}", i, partCounter);
     }
     return result;
 }
 private CustomizableByteArrayOutputStream newOutputStream() {
     return new CustomizableByteArrayOutputStream(32, BUFFER_SIZE, 0.5);
 }
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java
index 841607ee6..2c0db6615 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java
@@ -1,37 +1,27 @@
 package de.adorsys.datasafe.storage.impl.s3;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
 import lombok.experimental.UtilityClass;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
 @UtilityClass
 public class S3ClientFactory {
-    public AmazonS3 getClient(String endpointUrl, String region, String accessKey, String secretKey) {
-        return AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(endpointUrl, region)
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(accessKey, secretKey)
-                        )
-                )
-                .enablePathStyleAccess()
+    public S3Client getClient(String endpointUrl, String region, String accessKey, String secretKey) {
+        AwsBasicCredentials creds = AwsBasicCredentials.create(accessKey, secretKey);
+        return S3Client.builder()
+                .endpointOverride(java.net.URI.create(endpointUrl))
+                .region(Region.of(region))
+                .credentialsProvider(StaticCredentialsProvider.create(creds))
+                // custom endpoints (minio, ceph) need path-style access, as v1's enablePathStyleAccess() provided
+                .forcePathStyle(true)
                 .build();
     }
-
-    public AmazonS3 getAmazonClient(String region, String accessKey, String secretKey) {
-        return AmazonS3ClientBuilder.standard()
-                .withRegion(region)
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(accessKey, secretKey)
-                        )
-                )
+    public S3Client getAmazonClient(String region, String accessKey, String secretKey) {
+        AwsBasicCredentials creds = AwsBasicCredentials.create(accessKey, secretKey);
+        return S3Client.builder()
+                .region(Region.of(region))
+                .credentialsProvider(StaticCredentialsProvider.create(creds))
                 .build();
     }
 }
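Reviewer note: forcePathStyle(true) is the v2 spelling of v1's enablePathStyleAccess(). Without it the client addresses buckets as virtual hosts (bucket.endpoint/...), which minio and ceph containers reached via IP:port cannot resolve; with it requests go to endpoint/bucket/key. A self-contained sketch with hypothetical endpoint and credentials:

    S3Client minio = S3Client.builder()
            .endpointOverride(URI.create("http://127.0.0.1:9000"))   // hypothetical minio endpoint
            .region(Region.US_EAST_1)
            .credentialsProvider(StaticCredentialsProvider.create(
                    AwsBasicCredentials.create("minio", "minio123")))
            .forcePathStyle(true)                                    // v1: enablePathStyleAccess()
            .build();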
-import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.S3ObjectSummary; import de.adorsys.datasafe.storage.api.StorageService; import de.adorsys.datasafe.types.api.callback.ResourceWriteCallback; import de.adorsys.datasafe.types.api.resource.AbsoluteLocation; @@ -17,6 +12,8 @@ import de.adorsys.datasafe.types.api.resource.WithCallback; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; import java.io.InputStream; import java.io.OutputStream; @@ -39,7 +36,7 @@ @RequiredArgsConstructor public class S3StorageService implements StorageService { - private final AmazonS3 s3; + private final S3Client s3; private final BucketRouter router; private final ExecutorService executorService; @@ -48,7 +45,7 @@ public class S3StorageService implements StorageService { * @param bucketName Bucket to use * @param executorService Multipart sending threadpool (file chunks are sent in parallel) */ - public S3StorageService(AmazonS3 s3, String bucketName, ExecutorService executorService) { + public S3StorageService(S3Client s3, String bucketName, ExecutorService executorService) { this.s3 = s3; this.router = new StaticBucketRouter(bucketName); this.executorService = executorService; @@ -63,13 +60,18 @@ public Stream> list(AbsoluteLocation location log.debug("List at {}", location.location()); String prefix = router.resourceKey(location); - S3Objects s3ObjectSummaries = S3Objects.withPrefix(s3, router.bucketName(location), prefix); - Stream objectStream = StreamSupport.stream(s3ObjectSummaries.spliterator(), false); + ListObjectsV2Request request = ListObjectsV2Request.builder() + .bucket(router.bucketName(location)) + .prefix(prefix) + .build(); + + ListObjectsV2Response response = s3.listObjectsV2(request); + Stream objectStream = response.contents().stream(); return objectStream .map(os -> new AbsoluteLocation<>( new BaseResolvedResource( createPath(location, os, prefix.length()), - os.getLastModified().toInstant() + os.lastModified() )) ); } @@ -84,11 +86,15 @@ public InputStream read(AbsoluteLocation location) { String bucketName = router.bucketName(location); return executeAndReturn( location, - key -> s3.getObject(bucketName, key).getObjectContent(), - (key, version) -> - s3.getObject( - new GetObjectRequest(bucketName, key, version.getVersionId()) - ).getObjectContent() + key -> s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(bucketName) + .key(key) + .build()).asInputStream(), + (key, version) -> s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(bucketName) + .key(key) + .versionId(version.getVersionId()) + .build()).asInputStream() ); } @@ -100,15 +106,16 @@ public OutputStream write(WithCallback doRemove(bucketName, key), - (key, version) -> s3.deleteVersion(bucketName, key, version.getVersionId()) + key -> s3.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(key).build()), + (key, version) -> s3.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(key).versionId(version.getVersionId()).build()) ); } @@ -135,17 +142,16 @@ public boolean objectExists(AbsoluteLocation location) { boolean pathExists = executeAndReturn( location, - key -> s3.doesObjectExist(bucketName, key), - (key, version) -> - StreamSupport.stream( - S3Versions.withPrefix(s3, bucketName, key).spliterator(), false) - .anyMatch(it -> it.getVersionId().equals(version.getVersionId())) + key -> 
@@ -135,17 +142,16 @@ public boolean objectExists(AbsoluteLocation location) {
         boolean pathExists = executeAndReturn(
                 location,
-                key -> s3.doesObjectExist(bucketName, key),
-                (key, version) ->
-                        StreamSupport.stream(
-                                S3Versions.withPrefix(s3, bucketName, key).spliterator(), false)
-                                .anyMatch(it -> it.getVersionId().equals(version.getVersionId()))
+                // headObject checks the exact key; a prefix listing would also match
+                // sibling keys like "foo.txt.bak" when asked about "foo.txt"
+                key -> {
+                    try {
+                        s3.headObject(HeadObjectRequest.builder().bucket(bucketName).key(key).build());
+                        return true;
+                    } catch (NoSuchKeyException e) {
+                        return false;
+                    }
+                },
+                // versionIdMarker only sets where a listing resumes, so it cannot test for
+                // a specific version; scan the key's versions instead, mirroring v1
+                (key, version) -> s3.listObjectVersionsPaginator(ListObjectVersionsRequest.builder()
+                                .bucket(bucketName)
+                                .prefix(key)
+                                .build())
+                        .versions().stream()
+                        .anyMatch(v -> v.versionId().equals(version.getVersionId()))
         );
 
         log.debug("Path {} exists {}", location, pathExists);
         return pathExists;
     }
 
+    @Override
     public Optional<Integer> flushChunkSize(AbsoluteLocation location) {
         return Optional.of(MultipartUploadS3StorageOutputStream.BUFFER_SIZE);
@@ -153,15 +159,26 @@ public Optional<Integer> flushChunkSize(AbsoluteLocation location) {
 
     private void doRemove(String bucket, String key) {
         if (key.endsWith("/")) {
-            S3Objects.withPrefix(s3, bucket, key).forEach(it -> s3.deleteObject(bucket, it.getKey()));
+            ListObjectsV2Request request = ListObjectsV2Request.builder()
+                    .bucket(bucket)
+                    .prefix(key)
+                    .build();
+            // paginate so prefixes holding more than 1000 objects are fully removed
+            s3.listObjectsV2Paginator(request).contents().forEach(obj ->
+                    s3.deleteObject(DeleteObjectRequest.builder()
+                            .bucket(bucket)
+                            .key(obj.key())
+                            .build()));
             return;
         }
 
-        s3.deleteObject(bucket, key);
+        s3.deleteObject(DeleteObjectRequest.builder()
+                .bucket(bucket)
+                .key(key)
+                .build());
     }
 
-    private PrivateResource createPath(AbsoluteLocation root, S3ObjectSummary os, int prefixLen) {
-        String relUrl = os.getKey().substring(prefixLen).replaceFirst("^/", "");
+    private PrivateResource createPath(AbsoluteLocation root, S3Object os, int prefixLen) {
+        String relUrl = os.key().substring(prefixLen).replaceFirst("^/", "");
         if ("".equals(relUrl)) {
             return BasePrivateResource.forPrivate(root.location());
         }
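Two SDK v2 behaviours worth keeping in mind when reviewing the storage-service hunks above: a plain listObjectsV2 call returns at most 1000 keys per request (v1's S3Objects iterated pages transparently), and exact-key existence is cheapest via headObject. A self-contained sketch under those assumptions; bucket and key names are placeholders:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
    import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
    import software.amazon.awssdk.services.s3.model.NoSuchKeyException;

    class S3Checks {
        // Counts every key under a prefix; the paginator issues follow-up calls as needed.
        static long countKeys(S3Client s3) {
            return s3.listObjectsV2Paginator(ListObjectsV2Request.builder()
                            .bucket("mybucket").prefix("some/prefix/").build())
                    .contents().stream().count();
        }

        // True only if the exact key exists, unlike a prefix listing.
        static boolean exists(S3Client s3, String key) {
            try {
                s3.headObject(HeadObjectRequest.builder().bucket("mybucket").key(key).build());
                return true;
            } catch (NoSuchKeyException e) {
                return false;
            }
        }
    }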
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java
index efe302e69..28b6b4f6a 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java
@@ -21,18 +21,19 @@
 package de.adorsys.datasafe.storage.impl.s3;
 
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.amazonaws.services.s3.model.UploadPartResult;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 
 import java.io.ByteArrayInputStream;
 import java.util.concurrent.Callable;
 
 @Slf4j
-public class UploadChunkResultCallable implements Callable<UploadPartResult> {
+public class UploadChunkResultCallable implements Callable<UploadPartResponse> {
 
-    private final AmazonS3 amazonS3;
+    private final S3Client s3;
 
     private final int contentLength;
 
@@ -49,7 +50,7 @@ public class UploadChunkResultCallable implements Callable<UploadPartResult> {
     private byte[] content;
 
     UploadChunkResultCallable(ChunkUploadRequest request) {
-        this.amazonS3 = request.getAmazonS3();
+        this.s3 = request.getS3();
         this.content = request.getContent();
         this.contentLength = request.getContentSize();
         this.partNumber = request.getChunkNumberCounter();
@@ -62,16 +63,17 @@ public class UploadChunkResultCallable implements Callable<UploadPartResult> {
     }
 
     @Override
-    public UploadPartResult call() {
+    public UploadPartResponse call() {
         log.trace("Upload chunk result call with part: {}", partNumber);
         try {
-            return amazonS3.uploadPart(new UploadPartRequest()
-                    .withBucketName(bucketName).withKey(fileName)
-                    .withUploadId(chunkId)
-                    .withInputStream(new ByteArrayInputStream(content))
-                    .withPartNumber(partNumber).withLastPart(last)
-                    .withPartSize(contentLength)
-            );
+            UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
+                    .bucket(bucketName)
+                    .key(fileName)
+                    .uploadId(chunkId)
+                    .partNumber(partNumber)
+                    .contentLength((long) contentLength)
+                    .build();
+
+            // bound the payload to contentLength: the buffer may be larger than its
+            // valid region, and RequestBody.fromBytes(content) would send all of it
+            return s3.uploadPart(uploadPartRequest, RequestBody.fromInputStream(
+                    new ByteArrayInputStream(content, 0, contentLength), contentLength));
         } finally {
             // Release the memory, as the callable may still live inside the
             // CompletionService which would cause
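For context when reviewing this callable and its tests below: in SDK v2 a multipart upload separates part metadata (UploadPartRequest) from the payload (RequestBody), and the eTag of each UploadPartResponse must be echoed back on completion. A minimal end-to-end sketch, assuming an existing client; bucket, key and the firstChunk payload are placeholders:

    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    class MultipartSketch {
        static void upload(S3Client s3, byte[] firstChunk) {
            String uploadId = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
                    .bucket("mybucket").key("big.bin").build()).uploadId();

            // part numbers start from 1; each response carries the eTag needed on completion
            UploadPartResponse part1 = s3.uploadPart(
                    UploadPartRequest.builder()
                            .bucket("mybucket").key("big.bin")
                            .uploadId(uploadId).partNumber(1)
                            .build(),
                    RequestBody.fromBytes(firstChunk));

            s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                    .bucket("mybucket").key("big.bin").uploadId(uploadId)
                    .multipartUpload(CompletedMultipartUpload.builder()
                            .parts(CompletedPart.builder().partNumber(1).eTag(part1.eTag()).build())
                            .build())
                    .build());
        }
    }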
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java b/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java
index df98f6d62..533af0a2b 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java
@@ -1,11 +1,5 @@
 package de.adorsys.datasafe.storage.impl.s3;
 
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.amazonaws.services.s3.model.UploadPartResult;
 import de.adorsys.datasafe.types.api.shared.BaseMockitoTest;
 import lombok.SneakyThrows;
 import org.junit.jupiter.api.BeforeEach;
@@ -14,6 +8,9 @@
 import org.mockito.Captor;
 import org.mockito.Mock;
 import org.testcontainers.shaded.com.google.common.io.ByteStreams;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;
 
 import java.io.InputStream;
 import java.util.Arrays;
@@ -25,10 +22,7 @@
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
 
 class MultipartUploadS3StorageOutputStreamIT extends BaseMockitoTest {
 
@@ -37,13 +31,15 @@ class MultipartUploadS3StorageOutputStreamIT extends BaseMockitoTest {
     private final byte[] multipartChunkWithTail = randomBytes(BUFFER_SIZE + 100);
 
     @Mock
-    private AmazonS3 amazonS3;
+    private S3Client s3;
 
     @Mock
     private ExecutorService executorService;
 
     @Captor
     private ArgumentCaptor<InputStream> bytesSentDirectly;
+    @Captor
+    private ArgumentCaptor<RequestBody> requestBodyCaptor;
 
     @Captor
     private ArgumentCaptor<UploadPartRequest> uploadChunk;
 
@@ -55,22 +51,36 @@ void init() {
         tested = new MultipartUploadS3StorageOutputStream(
                 "bucket",
                 "s3://path/to/file.txt",
-                amazonS3,
+                s3,
                 executorService,
+                "upload-id",
                 Collections.emptyList()
         );
 
-        when(amazonS3.putObject(anyString(), anyString(), bytesSentDirectly.capture(), any()))
-                .thenReturn(new PutObjectResult());
-        when(amazonS3.initiateMultipartUpload(any())).thenReturn(new InitiateMultipartUploadResult());
+        when(s3.putObject(any(PutObjectRequest.class), any(RequestBody.class)))
+                .thenReturn(PutObjectResponse.builder().build());
+        when(s3.createMultipartUpload(any(CreateMultipartUploadRequest.class)))
+                .thenReturn(CreateMultipartUploadResponse.builder()
+                        .bucket("bucket")
+                        .key("s3://path/to/file.txt")
+                        .uploadId("upload-id")
+                        .build());
         doAnswer(inv -> {
             inv.getArgument(0, Runnable.class).run();
             return null;
-        }).when(executorService).execute(any());
-        when(amazonS3.uploadPart(uploadChunk.capture())).thenReturn(new UploadPartResult());
-        when(amazonS3.completeMultipartUpload(any())).thenReturn(new CompleteMultipartUploadResult());
+        }).when(executorService).submit(any(Runnable.class));
+        when(s3.uploadPart(any(UploadPartRequest.class), any(RequestBody.class)))
+                .thenReturn(UploadPartResponse.builder()
+                        .eTag("etag")
+                        .build());
+        when(s3.completeMultipartUpload(any(CompleteMultipartUploadRequest.class)))
+                .thenReturn(CompleteMultipartUploadResponse.builder()
+                        .versionId("version-id")
+                        .build());
     }
 
     @Test
     @SneakyThrows
     void writeBulkNonChunked() {
@@ -113,13 +123,14 @@ void writeBulkChunked() {
         tested.close();
 
-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3, times(2)).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
         assertThat(uploadChunk.getAllValues()).hasSize(2);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(0).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(multipartChunkWithTail, 0, BUFFER_SIZE)));
-        assertThat(uploadChunk.getAllValues().get(1).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(1).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(
-                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
+                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
                 )
         );
     }
@@ -131,13 +142,14 @@ void writeBulkChunkedWithOffset() {
         tested.close();
 
-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3, times(2)).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
         assertThat(uploadChunk.getAllValues()).hasSize(2);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(0).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(multipartChunkWithTail, 10, 10 + BUFFER_SIZE)));
-        assertThat(uploadChunk.getAllValues().get(1).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(1).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(
-                        multipartChunkWithTail, 10 + BUFFER_SIZE, multipartChunkWithTail.length)
+                        multipartChunkWithTail, 10 + BUFFER_SIZE, multipartChunkWithTail.length)
                 )
         );
     }
@@ -171,9 +183,10 @@ void writeByteByByteChunkedExactChunk() {
         tested.close();
 
-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
-        assertThat(uploadChunk.getAllValues()).hasSize(1);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream()).hasContent(new String(exactOneMultipartChunk));
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
+        assertThat(uploadChunk.getValue().partNumber()).isEqualTo(1);
+        assertThat(requestBodyCaptor.getValue().contentStreamProvider().newStream())
+                .hasContent(new String(exactOneMultipartChunk));
     }
 
     @Test
@@ -183,18 +196,24 @@ void writeByteByByteChunked() {
         tested.close();
 
-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3, times(2)).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
         assertThat(uploadChunk.getAllValues()).hasSize(2);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(0).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(multipartChunkWithTail, 0, BUFFER_SIZE)));
         // we are setting size parameter that limits number of bytes read by s3 client:
-        int partialPartSize = (int) uploadChunk.getAllValues().get(1).getPartSize();
+        int partialPartSize = Math.toIntExact(uploadChunk.getAllValues().get(1).contentLength());
+
         byte[] partialChunk = new byte[partialPartSize];
-        ByteStreams.readFully(uploadChunk.getAllValues().get(1).getInputStream(), partialChunk, 0, partialPartSize);
+        ByteStreams.readFully(requestBodyCaptor.getAllValues().get(1).contentStreamProvider().newStream(),
+                partialChunk, 0, partialPartSize);
         assertThat(new String(partialChunk))
                 .isEqualTo(new String(Arrays.copyOfRange(
-                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
+                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
                 )
         );
     }
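When asserting payloads in these tests, note that RequestBody exposes its content through contentStreamProvider(), which hands out a fresh InputStream on each newStream() call, so an assertion can safely consume it. A sketch of the capture-and-assert pattern; the captor names here are hypothetical:

    // inside a Mockito test; captors assumed declared as:
    // @Captor ArgumentCaptor<PutObjectRequest> putRequestCaptor;
    // @Captor ArgumentCaptor<RequestBody> requestBodyCaptor;
    verify(s3).putObject(putRequestCaptor.capture(), requestBodyCaptor.capture());
    try (InputStream in = requestBodyCaptor.getValue().contentStreamProvider().newStream()) {
        assertThat(new String(in.readAllBytes(), StandardCharsets.UTF_8)).isEqualTo("expected");
    }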
= "admin"; private static String secretAccessKey = "password"; private static String url = getDockerUri("http://localhost"); - private static BasicAWSCredentials creds = new BasicAWSCredentials(accessKeyID, secretAccessKey); - private static AmazonS3 s3; + private static AwsBasicCredentials creds = AwsBasicCredentials.create(accessKeyID, secretAccessKey); + private static S3Client s3; private static AbsoluteLocation root; private static AbsoluteLocation fileWithMsg; @@ -66,13 +67,13 @@ static void beforeAll() { Integer mappedPort = minio.getMappedPort(9000); log.info("Mapped port: " + mappedPort); String region = "eu-central-1"; - s3 = AmazonS3ClientBuilder.standard() - .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(url + ":" + mappedPort, region)) - .withCredentials(new AWSStaticCredentialsProvider(creds)) - .enablePathStyleAccess() + s3 = S3Client.builder() + .endpointOverride(URI.create(url + ":" + mappedPort)) + .region(Region.of(region)) + .credentialsProvider(StaticCredentialsProvider.create(creds)) .build(); - s3.createBucket(bucketName); + s3.createBucket(CreateBucketRequest.builder().bucket(bucketName).build()); root = new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("s3://" + bucketName))); fileWithMsg = new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("./" + FILE)) .resolveFrom(root)); @@ -101,11 +102,13 @@ void list() { void testListOutOfStandardListFilesLimit() { int numberOfFilesOverLimit = 1010; for (int i = 0; i < numberOfFilesOverLimit; i++) { - s3.putObject(bucketName, "over_limit/" + FILE + i, MESSAGE); + s3.putObject(PutObjectRequest.builder() + .bucket(bucketName) + .key("over_limit/" + FILE + i) + .build(), RequestBody.fromBytes(MESSAGE.getBytes())); log.trace("Save #" + i + " file"); } - assertThat(storageService.list( new AbsoluteLocation<>( BasePrivateResource.forPrivate(new Uri("s3://" + bucketName + "/over_limit"))))) @@ -114,9 +117,15 @@ void testListOutOfStandardListFilesLimit() { @Test void listDeepLevel() { - s3.putObject(bucketName, "root.txt", "txt1"); - s3.putObject(bucketName, "deeper/level1.txt", "txt2"); - s3.putObject(bucketName, "deeper/more/level2.txt", "txt3"); + s3.putObject(PutObjectRequest.builder() + .bucket(bucketName) + .key("deeper/level1.txt") + .build(), RequestBody.fromBytes("txt2".getBytes())); + s3.putObject(PutObjectRequest.builder() + .bucket(bucketName) + .key("deeper/more/level2.txt") + .build(), RequestBody.fromBytes("txt3".getBytes())); + List> resources = storageService.list( new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("s3://" + bucketName + "/deeper"))) @@ -159,7 +168,10 @@ void remove() { storageService.remove(fileWithMsg); - assertThrows(AmazonS3Exception.class, () -> s3.getObject(bucketName, FILE)); + assertThrows(S3Exception.class, () -> s3.getObject(GetObjectRequest.builder() + .bucket(bucketName) + .key(FILE) + .build())); } @Test @@ -172,13 +184,22 @@ void removeCascades() { storageService.remove(rootOfFiles); - assertThrows(AmazonS3Exception.class, () -> s3.getObject(bucketName, "root/file1.txt")); - assertThrows(AmazonS3Exception.class, () -> s3.getObject(bucketName, "root/file2.txt")); + assertThrows(S3Exception.class, () -> s3.getObject(GetObjectRequest.builder() + .bucket(bucketName) + .key("root/file1.txt") + .build())); + assertThrows(S3Exception.class, () -> s3.getObject(GetObjectRequest.builder() + .bucket(bucketName) + .key("root/file2.txt") + .build())); } @SneakyThrows private void createFileWithMessage(String path) { - s3.putObject(bucketName, 
 
     @SneakyThrows
     private void createFileWithMessage(String path) {
-        s3.putObject(bucketName, path, MESSAGE);
+        s3.putObject(PutObjectRequest.builder()
+                .bucket(bucketName)
+                .key(path)
+                .build(), RequestBody.fromBytes(MESSAGE.getBytes()));
     }
 
     @SneakyThrows
@@ -195,15 +216,23 @@ void cleanup() {
         }
     }
 
-    private void removeObjectFromS3(AmazonS3 amazonS3, String bucket, String prefix) {
-        amazonS3.listObjects(bucket, prefix)
-                .getObjectSummaries()
-                .forEach(it -> {
-                    log.debug("Remove {}", it.getKey());
-                    amazonS3.deleteObject(bucket, it.getKey());
-                });
+    private void removeObjectFromS3(S3Client s3, String bucket, String prefix) {
+        ListObjectsV2Request.Builder requestBuilder = ListObjectsV2Request.builder()
+                .bucket(bucket)
+                .prefix(prefix);
+        ListObjectsV2Response response;
+        do {
+            response = s3.listObjectsV2(requestBuilder.build());
+            response.contents().forEach(obj -> {
+                log.debug("Remove {}", obj.key());
+                s3.deleteObject(DeleteObjectRequest.builder()
+                        .bucket(bucket)
+                        .key(obj.key())
+                        .build());
+            });
+            requestBuilder.continuationToken(response.nextContinuationToken());
+        } while (response.isTruncated());
     }
 
     @AfterAll
     public static void afterAll() {
         log.info("Stopping containers");
diff --git a/datasafe-test-storages/pom.xml b/datasafe-test-storages/pom.xml
index 5b3956ce8..185516284 100644
--- a/datasafe-test-storages/pom.xml
+++ b/datasafe-test-storages/pom.xml
@@ -55,17 +55,23 @@
             <scope>compile</scope>
         </dependency>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-s3</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-core</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <version>2.26.22</version>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>protocol-core</artifactId>
+            <version>2.26.22</version>
         </dependency>
         <dependency>
             <groupId>org.slf4j</groupId>
diff --git a/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java b/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java
index ba03f52c4..9b3fa6af5 100644
--- a/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java
+++ b/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java
@@ -1,15 +1,5 @@
 package de.adorsys.datasafe.teststorage;
 
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
-import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
-import com.amazonaws.util.StringUtils;
 import com.google.common.base.Strings;
 import com.google.common.base.Suppliers;
 import de.adorsys.datasafe.storage.api.StorageService;
@@ -30,7 +20,14 @@
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
 import org.testcontainers.shaded.org.apache.commons.io.FileUtils;
-
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.services.s3.model.*;
+
+import java.net.URI;
 import java.nio.file.Path;
 import java.time.Duration;
 import java.util.Arrays;
@@ -92,9 +89,9 @@
     private static GenericContainer cephContainer;
     private static Path tempDir;
-    private static AmazonS3 minio;
-    private static AmazonS3 ceph;
-    private static AmazonS3 amazonS3;
+    private static S3Client minio;
+    private static S3Client ceph;
+    private static S3Client amazonS3;
 
     private static Supplier<Void> cephStorage;
     private static Supplier<Void> minioStorage;
@@ -219,6 +216,8 @@ protected static Stream<StorageDescriptor> minioOnly() {
                 minio()
         ).filter(Objects::nonNull);
     }
+    // The @ValueSource and the allLocalDefaultStorages(), allLocalStorages(),
+    // allDefaultStorages() and allStorages() helpers were removed; they are
+    // unrelated to the AWS SDK v2 migration.
 
     protected static StorageDescriptor fs() {
         return new StorageDescriptor(
@@ -300,7 +299,7 @@ protected static StorageDescriptor s3() {
         );
     }
 
-    private void removeObjectFromS3(AmazonS3 amazonS3, String bucket, String prefix) {
+    private void removeObjectFromS3(S3Client s3, String bucket, String prefix) {
         // if bucket name contains slashes then move all after first slash to prefix
         String[] parts = bucket.split("/", 2);
         if (parts.length == 2) {
@@ -308,49 +307,57 @@ private void removeObjectFromS3(S3Client s3, String bucket, String prefix) {
             prefix = parts[1] + "/" + prefix;
         }
         String lambdafinalBucket = bucket;
-        amazonS3.listObjects(bucket, prefix)
-                .getObjectSummaries()
-                .forEach(it -> {
-                    log.debug("Remove {}", it.getKey());
-                    amazonS3.deleteObject(lambdafinalBucket, it.getKey());
+        ListObjectsV2Request.Builder requestBuilder = ListObjectsV2Request.builder()
+                .bucket(lambdafinalBucket)
+                .prefix(prefix);
+        ListObjectsV2Response response;
+        do {
+            response = s3.listObjectsV2(requestBuilder.build());
+            response.contents().forEach(it -> {
+                log.debug("Remove {}", it.key());
+                DeleteObjectRequest deleteRequest = DeleteObjectRequest.builder()
+                        .bucket(lambdafinalBucket)
+                        .key(it.key())
+                        .build();
+                s3.deleteObject(deleteRequest);
             });
+            requestBuilder.continuationToken(response.nextContinuationToken());
+        } while (response.isTruncated());
     }
 
     private static void initS3() {
         log.info("Initializing S3");
 
-        if (Strings.isNullOrEmpty(amazonAccessKeyID)) {
+        if (amazonAccessKeyID == null || amazonAccessKeyID.isEmpty()) {
             return;
         }
 
-        AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
-                .withCredentials(new AWSStaticCredentialsProvider(
-                        new BasicAWSCredentials(amazonAccessKeyID, amazonSecretAccessKey))
-                );
+        AwsBasicCredentials awsCreds = AwsBasicCredentials.create(amazonAccessKeyID, amazonSecretAccessKey);
 
         if (buckets.size() > 1) {
             log.info("Using {} buckets:{}", buckets.size(), buckets);
         }
 
-        if (StringUtils.isNullOrEmpty(amazonUrl)) {
+        // resolve the endpoint before building the client, otherwise
+        // endpointOverride would receive a null or empty URL
+        if (amazonUrl == null || amazonUrl.isEmpty()) {
             amazonUrl = amazonProtocol + amazonDomain;
         }
+
         final boolean isRealAmazon = amazonUrl.endsWith(amazonDomain);
 
-        amazonS3ClientBuilder = amazonS3ClientBuilder
-                .withClientConfiguration(new ClientConfiguration().withProtocol(Protocol.HTTP))
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(amazonUrl, amazonRegion)
-                );
-
-        if (isRealAmazon) {
-            amazonMappedUrl = amazonProtocol + primaryBucket + "." + amazonDomain;
-        } else {
+        if (!isRealAmazon) {
             amazonMappedUrl = amazonUrl + "/";
-            amazonS3ClientBuilder.enablePathStyleAccess();
+        } else {
+            amazonMappedUrl = amazonProtocol + primaryBucket + "." + amazonDomain;
         }
 
-        amazonS3 = amazonS3ClientBuilder.build();
+        // build after amazonUrl is resolved; assign the static field used by the tests
+        amazonS3 = S3Client.builder()
+                .credentialsProvider(StaticCredentialsProvider.create(awsCreds))
+                .endpointOverride(URI.create(amazonUrl))
+                .region(Region.of(amazonRegion))
+                // non-Amazon endpoints (MinIO/CEPH) need path-style access, as in v1
+                .forcePathStyle(!isRealAmazon)
+                .build();
 
-        log.info("Amazon mapped URL:" + amazonMappedUrl);
+        log.info("Amazon mapped URL: " + amazonMappedUrl);
     }
@@ -366,20 +373,14 @@ private static void startMinio() {
         Integer mappedPort = minioContainer.getMappedPort(9000);
         minioMappedUrl = minioUrl + ":" + mappedPort;
         log.info("Minio mapped URL:" + minioMappedUrl);
-        minio = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(minioMappedUrl, minioRegion)
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(minioAccessKeyID, minioSecretAccessKey)
-                        )
-                )
-                .enablePathStyleAccess()
-                .build();
-
-
-        buckets.forEach(minio::createBucket);
+        minio = S3Client.builder()
+                .endpointOverride(URI.create(minioMappedUrl))
+                .region(Region.of(minioRegion))
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(minioAccessKeyID, minioSecretAccessKey)))
+                // MinIO requires path-style requests, matching v1's enablePathStyleAccess()
+                .forcePathStyle(true)
+                .build();
+
+        buckets.forEach(bucket -> minio.createBucket(CreateBucketRequest.builder().bucket(bucket).build()));
     }
 
     private static void startCeph() {
@@ -403,28 +404,25 @@ private static void startCeph() {
         Integer mappedPort = cephContainer.getMappedPort(8000);
         cephMappedUrl = cephUrl + ":" + mappedPort;
         log.info("Ceph mapped URL:" + cephMappedUrl);
-        ceph = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(cephMappedUrl, cephRegion)
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(cephAccessKeyID, cephSecretAccessKey)
-                        )
-                )
-                .enablePathStyleAccess()
-                .build();
-
-        ceph.createBucket(buckets.get(0));
+        ceph = S3Client.builder()
+                .endpointOverride(URI.create(cephMappedUrl))
+                .region(Region.of(cephRegion))
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(cephAccessKeyID, cephSecretAccessKey)))
+                .forcePathStyle(true)
+                .build();
+
+        ceph.createBucket(CreateBucketRequest.builder()
+                .bucket(buckets.get(0))
+                .build());
         // curiously enough CEPH docs are incorrect, looks like they do support version id:
         // https://github.com/ceph/ceph/blame/bc065cae7857c352ca36d5f06cdb5107cf72ed41/src/rgw/rgw_rest_s3.cc
         // so for versioned local tests we can use CEPH
-        ceph.setBucketVersioningConfiguration(
-                new SetBucketVersioningConfigurationRequest(
-                        primaryBucket,
-                        new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED)
-                )
-        );
+        ceph.putBucketVersioning(PutBucketVersioningRequest.builder()
+                .bucket(primaryBucket)
+                .versioningConfiguration(VersioningConfiguration.builder()
+                        .status(BucketVersioningStatus.ENABLED)
+                        .build())
+                .build());
     }
 
     /**
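A quick way to confirm the CEPH versioning switch took effect: getBucketVersioning mirrors putBucketVersioning. A sketch assuming the ceph client and primaryBucket from the setup above:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.BucketVersioningStatus;
    import software.amazon.awssdk.services.s3.model.GetBucketVersioningRequest;

    class VersioningCheck {
        // status() is null until versioning has been configured at least once
        static boolean enabled(S3Client ceph, String bucket) {
            return BucketVersioningStatus.ENABLED == ceph.getBucketVersioning(
                    GetBucketVersioningRequest.builder().bucket(bucket).build()).status();
        }
    }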
diff --git a/datasafe-types-api/pom.xml b/datasafe-types-api/pom.xml
index 1ede06a31..0b7bb66fa 100644
--- a/datasafe-types-api/pom.xml
+++ b/datasafe-types-api/pom.xml
@@ -49,10 +49,14 @@
             <artifactId>awaitility</artifactId>
             <scope>test</scope>
         </dependency>
         <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-s3</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
             <scope>test</scope>
         </dependency>
diff --git a/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java b/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java
index a17b649fe..a536cb10c 100644
--- a/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java
+++ b/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java
@@ -6,9 +6,9 @@
  * Wrapper that forces underlying resource {@code T} to be absolute (same meaning as absolute URI).
  * @param <T> Wrapped resource
  */
+@Getter
 public class AbsoluteLocation<T extends ResourceLocation<T>> implements ResourceLocation<T> {
 
-    @Getter
     private final T resource;
 
     public AbsoluteLocation(T resource) {
diff --git a/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java b/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java
index 220777938..082001d21 100644
--- a/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java
+++ b/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java
@@ -1,21 +1,25 @@
 package de.adorsys.datasafe.types.api.shared;
 
-import com.amazonaws.services.s3.AmazonS3;
 import lombok.NoArgsConstructor;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
 import org.awaitility.Duration;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
 
 import static org.awaitility.Awaitility.await;
 
 @Slf4j
 public class AwsClientRetry {
     @SneakyThrows
-    public static void createBucketWithRetry(AmazonS3 client, String bucket) {
+    public static void createBucketWithRetry(S3Client client, String bucket) {
         RetryLogger logger = new RetryLogger();
         await().atMost(Duration.TEN_SECONDS).pollInterval(Duration.ONE_SECOND).untilAsserted(() -> {
             logger.log();
-            client.createBucket(bucket);
+            client.createBucket(CreateBucketRequest.builder()
+                    .bucket(bucket)
+                    .build());
         });
     }
diff --git a/docs/readme/HowItWorks.md b/docs/readme/HowItWorks.md
index 20d793f1a..28526c91b 100644
--- a/docs/readme/HowItWorks.md
+++ b/docs/readme/HowItWorks.md
@@ -382,7 +382,7 @@ defaultDatasafeServices.privateService().remove(
 );
 
 // it is removed from storage, so when we read it we get exception
-assertThrows(AmazonS3Exception.class, () -> defaultDatasafeServices.privateService().read(
+assertThrows(S3Exception.class, () -> defaultDatasafeServices.privateService().read(
         ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)))
 );
diff --git a/pom.xml b/pom.xml
index daf7f6932..9331aa120 100644
--- a/pom.xml
+++ b/pom.xml
@@ -220,17 +220,25 @@
-            <dependency>
-                <groupId>com.amazonaws</groupId>
-                <artifactId>aws-java-sdk-s3</artifactId>
-                <version>${amazon.aws.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>com.amazonaws</groupId>
-                <artifactId>aws-java-sdk-core</artifactId>
-                <version>${amazon.aws.version}</version>
+            <dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>bom</artifactId>
+                <version>2.26.22</version>
+                <type>pom</type>
+                <scope>import</scope>
             </dependency>
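The HowItWorks.md change above tracks the exception-type rename: in SDK v2, service errors surface as S3Exception, and missing keys are raised as its subclass NoSuchKeyException. A sketch assuming an existing S3Client s3; bucket and key are placeholders:

    import software.amazon.awssdk.services.s3.model.GetObjectRequest;
    import software.amazon.awssdk.services.s3.model.NoSuchKeyException;
    import software.amazon.awssdk.services.s3.model.S3Exception;

    try {
        s3.getObject(GetObjectRequest.builder().bucket("mybucket").key("gone.txt").build());
    } catch (NoSuchKeyException e) {
        // missing key: the specific subclass
    } catch (S3Exception e) {
        // any other S3 failure; inspect e.statusCode() / e.awsErrorDetails()
    }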