diff --git a/datasafe-business/pom.xml b/datasafe-business/pom.xml index 315a0f278..3d7ba1e11 100644 --- a/datasafe-business/pom.xml +++ b/datasafe-business/pom.xml @@ -109,6 +109,14 @@ provided + + software.amazon.awssdk + s3 + 2.26.22 + runtime + + + de.adorsys datasafe-test-storages diff --git a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java index c58607ac9..6558a585e 100644 --- a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java +++ b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/MultiDFSFunctionalityIT.java @@ -1,7 +1,5 @@ package de.adorsys.datasafe.business.impl.e2e; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.S3ObjectSummary; import dagger.Lazy; import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices; import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; @@ -47,6 +45,10 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.S3Object; import java.io.InputStream; import java.io.OutputStream; @@ -112,11 +114,11 @@ static void initDistributedMinios() { log.info("ENDPOINT IS {}", endpoint); endpointsByHostNoBucket.put(it, endpoint); - AmazonS3 client = S3ClientFactory.getClient( - endpoint, - REGION, - accessKey(it), - secretKey(it) + S3Client client = S3ClientFactory.getClient( + endpoint, + REGION, + accessKey(it), + secretKey(it) ); AwsClientRetry.createBucketWithRetry(client, it); @@ -290,19 +292,24 @@ private void registerUser(UserIDAuth auth) { } private List listInBucket(String bucket) { - return S3ClientFactory.getClient( - endpointsByHostNoBucket.get(bucket), - REGION, - accessKey(bucket), - secretKey(bucket) - ) - .listObjects(bucket, "") - .getObjectSummaries() - .stream() - .map(S3ObjectSummary::getKey) - .collect(Collectors.toList()); + S3Client s3 = S3ClientFactory.getClient( + endpointsByHostNoBucket.get(bucket), + REGION, + accessKey(bucket), + secretKey(bucket) + ); + ListObjectsV2Request request = ListObjectsV2Request.builder() + .bucket(bucket) + .build(); + ListObjectsV2Response response = s3.listObjectsV2(request); + + return response.contents() + .stream() + .map(S3Object::key) + .collect(Collectors.toList()); } + @SneakyThrows private void writeToPrivate(UserIDAuth user, StorageIdentifier id, String path, String data) { try (OutputStream os = datasafeServices.privateService().write(WriteRequest.forPrivate(user, id, path))) { diff --git a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java index 5445cbaf3..467c1de04 100644 --- a/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java +++ b/datasafe-business/src/test/java/de/adorsys/datasafe/business/impl/e2e/StorageBasedVersioningIT.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.business.impl.e2e; -import 
com.amazonaws.services.s3.model.AmazonS3Exception; import com.google.common.io.ByteStreams; import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; @@ -14,6 +13,7 @@ import lombok.SneakyThrows; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.DisabledIfSystemProperty; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.io.ByteArrayOutputStream; import java.io.InputStream; @@ -73,7 +73,7 @@ void testVersionedRemoveManually() { writeAndGetVersion(jane, FILE, "Hello 3"); removeByVersion(jane, FILE, new StorageVersion(oldVersion)); - assertThrows(AmazonS3Exception.class, () -> readByVersion(jane, FILE, new StorageVersion(oldVersion))); + assertThrows(S3Exception.class, () -> readByVersion(jane, FILE, new StorageVersion(oldVersion))); assertThat(readPrivateUsingPrivateKey(jane, BasePrivateResource.forPrivate(FILE))).isEqualTo("Hello 3"); } diff --git a/datasafe-cli/pom.xml b/datasafe-cli/pom.xml index 13134382c..28315dfda 100644 --- a/datasafe-cli/pom.xml +++ b/datasafe-cli/pom.xml @@ -68,6 +68,15 @@ mockito-core test + + software.amazon.awssdk + auth + 2.26.22 + + + software.amazon.awssdk + protocol-core + 2.26.22 +
diff --git a/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java b/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java index c59e94ac9..118e92c8c 100644 --- a/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java +++ b/datasafe-cli/src/main/java/de/adorsys/datasafe/cli/config/DatasafeFactory.java
@@ -1,12 +1,5 @@
 package de.adorsys.datasafe.cli.config;

-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
 import com.google.common.collect.ImmutableMap;
 import dagger.Lazy;
 import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices;
@@ -29,7 +22,12 @@
 import lombok.experimental.Delegate;
 import lombok.experimental.UtilityClass;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;

+import java.net.URI;
 import java.nio.file.Path;
 import java.util.regex.Pattern;

@@ -103,37 +101,22 @@ private WithCredentialProvider(Lazy storageKeyStoreOp
             super(null);
             this.delegate = new RegexAccessServiceWithStorageCredentialsImpl(storageKeyStoreOperations);
         }
     }

     private static S3StorageService getStorageService(String accessKey, String secretKey, String url, String region, String bucket) {
-        AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(
-                                        accessKey,
-                                        secretKey))
-                )
-                .enablePathStyleAccess();
-
-        AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(
-                url,
-                region
-        );
-        amazonS3ClientBuilder.withEndpointConfiguration(endpoint);
-
-        if (!url.toLowerCase().startsWith("https")) {
-            log.info("Creating S3 client without https");
-            ClientConfiguration clientConfig = new ClientConfiguration();
-            clientConfig.setProtocol(Protocol.HTTP);
-            clientConfig.disableSocketProxy();
-            amazonS3ClientBuilder.withClientConfiguration(clientConfig);
-        }
+        S3Client s3 = S3Client.builder()
+                .endpointOverride(URI.create(url))
+                .region(Region.of(region))
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(accessKey, secretKey)
+                ))
+                // the v1 client enabled path-style access unconditionally; keep that behaviour,
+                // plain-http endpoints now follow from the scheme of the url
+                .forcePathStyle(true)
+                .build();

-        AmazonS3 amazons3 = amazonS3ClientBuilder.build();
         return new S3StorageService(
-                amazons3,
+                s3,
                 bucket,
                 ExecutorServiceUtil
                         .submitterExecutesOnStarvationExecutingService(
                                 5,
                                 5
                         )
         );
     }
 }
diff --git a/datasafe-cli/src/main/resources/META-INF/native-image/de.adorsys/datasafe-cli/resource-config.json b/datasafe-cli/src/main/resources/META-INF/native-image/de.adorsys/datasafe-cli/resource-config.json index d8a326c80..3f6156307 100644 --- a/datasafe-cli/src/main/resources/META-INF/native-image/de.adorsys/datasafe-cli/resource-config.json +++ b/datasafe-cli/src/main/resources/META-INF/native-image/de.adorsys/datasafe-cli/resource-config.json @@ -47,7 +47,8 @@ }, { "pattern": "lib/commons-logging-1.1.3.jar" - }, + } + , { "pattern": "lib/dagger-2.17.jar" }, diff --git a/datasafe-examples/datasafe-examples-multidfs/pom.xml b/datasafe-examples/datasafe-examples-multidfs/pom.xml index ed631312a..2782b3d47 100644 --- a/datasafe-examples/datasafe-examples-multidfs/pom.xml +++ b/datasafe-examples/datasafe-examples-multidfs/pom.xml @@ -76,6 +76,16 @@ test-jar test + + software.amazon.awssdk + s3 + 2.26.22 + diff --git a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java index 29b7e5f36..74cbd354a 100644 --- a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java +++ b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.examples.business.s3; -import com.amazonaws.services.s3.AmazonS3; import dagger.Lazy; import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices; import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; @@ -35,6 +34,10 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; import java.io.OutputStream; import java.net.URI; @@ -64,7 +67,7 @@ class MultiDfsWithCredentialsExampleIT{ private static final ExecutorService EXECUTOR = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService(4, 4); private static Map minios = new EnumMap<>(MinioContainerId.class); - private static AmazonS3 directoryClient = null; + private static S3Client directoryClient = null; private static Map endpointsByHost = new HashMap<>();
@BeforeAll
@@ -79,13 +82,12 @@ static void startup() {
             log.info("MINIO for {} is available at: {} with access: '{}'/'{}'", it, endpoint, it.getAccessKey(), it.getSecretKey());

-            AmazonS3 client = S3ClientFactory.getClient(
-                    endpoint,
-                    REGION,
-                    it.getAccessKey(),
-                    it.getSecretKey()
-            );
-
+            S3Client client = S3Client.builder()
+                    .credentialsProvider(StaticCredentialsProvider.create(
+                            AwsBasicCredentials.create(it.getAccessKey(), it.getSecretKey())))
+                    .region(Region.of(REGION))
+                    .endpointOverride(URI.create(endpoint))
+                    // minio is addressed by ip:port, so path-style access is required
+                    .forcePathStyle(true)
+                    .build();
             AwsClientRetry.createBucketWithRetry(client, it.getBucketName());

             if (it.equals(DIRECTORY_BUCKET)) {
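For reference, SDK v2 defaults to virtual-host-style addressing (bucket.host), which fails against MinIO or Ceph containers reached via ip:port; that is why the clients above force path-style access. A minimal, self-contained sketch of the client shape used across this migration (the class name and endpoint value are illustrative, not part of the patch):

import java.net.URI;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

public final class LocalS3Clients {
    // hypothetical endpoint, e.g. a testcontainers-mapped minio port
    private static final String ENDPOINT = "http://127.0.0.1:9000";

    private LocalS3Clients() {
    }

    public static S3Client minioStyleClient(String accessKey, String secretKey) {
        return S3Client.builder()
                .endpointOverride(URI.create(ENDPOINT))
                .region(Region.EU_CENTRAL_1)
                .credentialsProvider(StaticCredentialsProvider.create(
                        AwsBasicCredentials.create(accessKey, secretKey)))
                // v2 equivalent of the v1 enablePathStyleAccess()
                .forcePathStyle(true)
                .build();
    }
}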
diff --git a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java index c8429a2cb..016030765 100644 --- a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java +++ b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java
@@ -1,13 +1,5 @@
 package de.adorsys.datasafe.examples.business.s3;

-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
-import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
-import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
 import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
 import de.adorsys.datasafe.directory.impl.profile.config.DefaultDFSConfig;
@@ -29,12 +21,18 @@
 import org.junit.jupiter.api.condition.DisabledIfSystemProperty;
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;

 import java.io.OutputStream;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
 import java.util.concurrent.atomic.AtomicReference;

 import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
 import static org.junit.jupiter.api.Assertions.assertThrows;

@@ -52,7 +50,7 @@ class BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT{
     private static final String SECRET_KEY = "secret";

     private static GenericContainer cephContainer;
-    private static AmazonS3 cephS3;
+    private static S3Client cephS3;
     private static String cephMappedUrl;

     private DefaultDatasafeServices defaultDatasafeServices;
@@ -84,28 +82,20 @@ static void createServices() {
         // URL for S3 API/bucket root:
         cephMappedUrl = getDockerUri("http://0.0.0.0") + ":" + mappedPort;
         log.info("Ceph mapped URL: {}", cephMappedUrl);
-        cephS3 = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(cephMappedUrl, "us-east-1")
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(ACCESS_KEY, SECRET_KEY)
-                        )
-                )
-                .enablePathStyleAccess()
+        cephS3 = S3Client.builder()
+                .endpointOverride(URI.create(cephMappedUrl))
+                .region(Region.US_EAST_1)
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(ACCESS_KEY, SECRET_KEY)
+                ))
+                // equivalent of the v1 enablePathStyleAccess(): ceph is reached via ip:port
+                .forcePathStyle(true)
                 .build();

         // Create bucket in CEPH that will support versioning
-        cephS3.createBucket(VERSIONED_BUCKET_NAME);
-        cephS3.setBucketVersioningConfiguration(
-                new SetBucketVersioningConfigurationRequest(
-                        VERSIONED_BUCKET_NAME,
-                        new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED)
-                )
-        );
+        cephS3.createBucket(CreateBucketRequest.builder()
+                .bucket(VERSIONED_BUCKET_NAME)
+                .build());
+        cephS3.putBucketVersioning(req -> req
+                .bucket(VERSIONED_BUCKET_NAME)
+                .versioningConfiguration(cfg -> cfg.status(BucketVersioningStatus.ENABLED)));
     }

     @AfterAll
@@ -192,24 +182,24 @@ void removeSpecificVersionId() {
         writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 2");

         // now, we read old file version
         assertThat(defaultDatasafeServices.privateService().read(
                 ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)))
         ).hasContent("Hello 1");

         // now, we remove old file version
         defaultDatasafeServices.privateService().remove(
                 RemoveRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId))
         );

         // it is removed from storage, so when we read it we get exception
-        assertThrows(AmazonS3Exception.class, () -> defaultDatasafeServices.privateService().read(
+        assertThrows(S3Exception.class, () -> defaultDatasafeServices.privateService().read(
                 ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)))
         );

         // but latest file version is still available
         assertThat(defaultDatasafeServices.privateService().read(
                 ReadRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT))
         ).hasContent("Hello 2");
         // END_SNIPPET
     }
diff --git a/datasafe-rest-impl/pom.xml b/datasafe-rest-impl/pom.xml index 6c63ebf53..2d1e4cb12 100644 --- a/datasafe-rest-impl/pom.xml +++ b/datasafe-rest-impl/pom.xml @@ -160,6 +160,210 @@ test ${spring-restdocs.version} + + software.amazon.awssdk + s3 + 2.26.22 + + + org.springframework.security + spring-security-web + 6.3.1 + + + org.springframework.security + spring-security-config + 6.3.1 + + + software.amazon.awssdk + regions + 2.26.22 + + + org.slf4j + slf4j-api + 2.0.7 + + + org.springframework + spring-web + 6.1.10 + + + org.springframework.boot + spring-boot + 3.3.1 + + + org.assertj + assertj-core + 3.12.2 + test +
org.springframework.restdocs + spring-restdocs-core + 3.0.0 + test + + + com.fasterxml.jackson.core + jackson-databind + 2.17.1 + + + de.adorsys + datasafe-privatestore-api + 2.0.2-SNAPSHOT + + + org.springframework + spring-test + 6.1.10 + test + + + software.amazon.awssdk + auth + 2.26.22 + + + org.springframework + spring-core + 6.1.10 + + + de.adorsys + datasafe-directory-api + 2.0.2-SNAPSHOT + + + com.google.dagger + dagger + 2.50 + + + org.springframework.security + spring-security-crypto + 6.3.1 + + + de.adorsys + datasafe-types-api + 2.0.2-SNAPSHOT + + + com.google.code.gson + gson + 2.8.9 + + + de.adorsys + datasafe-inbox-impl + 2.0.2-SNAPSHOT + + + org.springframework.boot + spring-boot-test-autoconfigure + 3.3.1 + test + + + org.springframework.security + spring-security-core + 6.3.1 + + + org.springframework.boot + spring-boot-autoconfigure + 3.3.1 + + + de.adorsys + datasafe-privatestore-impl + 2.0.2-SNAPSHOT + + + org.springframework + spring-beans + 6.1.10 + + + org.apache.tomcat.embed + tomcat-embed-core + 10.1.25 + + + org.springframework + spring-context + 6.1.10 + + + org.springframework.boot + spring-boot-test + 3.3.1 + test + + + software.amazon.awssdk + sdk-core + 2.26.22 + + + de.adorsys + datasafe-directory-impl + 2.0.2-SNAPSHOT + + + de.adorsys + datasafe-inbox-api + 2.0.2-SNAPSHOT + + + org.springframework + spring-webmvc + 6.1.10 + + + de.adorsys + datasafe-encryption-api + 2.0.2-SNAPSHOT + + + org.mockito + mockito-core + 5.5.0 + test + + + software.amazon.awssdk + aws-core + 2.26.22 + + + com.google.guava + guava + 32.1.1-jre + + + jakarta.validation + jakarta.validation-api + 3.0.2 + + + software.amazon.awssdk + http-client-spi + 2.26.22 + + + jakarta.servlet + jakarta.servlet-api + 6.0.0 + test + + + diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java index f3d53b438..bf8032f81 100644 --- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java +++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/BasicS3Factory.java @@ -1,17 +1,21 @@ package de.adorsys.datasafe.rest.impl.config; -import com.amazonaws.services.s3.AmazonS3; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; import de.adorsys.datasafe.storage.impl.s3.S3ClientFactory; +import java.net.URI; + public class BasicS3Factory implements S3Factory { @Override - public AmazonS3 getClient(String endpointUrl, String region, String accessKey, String secretKey) { + public S3Client getClient(String endpointUrl, String region, String accessKey, String secretKey) { return S3ClientFactory.getClient(endpointUrl, region, accessKey, secretKey); } - @Override - public AmazonS3 getAmazonClient(String region, String accessKey, String secretKey) { + public S3Client getAmazonClient(String region, String accessKey, String secretKey) { return S3ClientFactory.getAmazonClient(region, accessKey, secretKey); } -} +} \ No newline at end of file diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java index 71c31b9fe..d000a19cc 100644 --- 
a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java +++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfig.java
@@ -1,10 +1,11 @@
 package de.adorsys.datasafe.rest.impl.config;

-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import dagger.Lazy;

 import java.net.URI;
 import java.nio.file.Paths;

@@ -128,7 +131,7 @@ VersionedDatasafeServices versionedDatasafeServices(StorageService storageServic
     @Bean
     @ConditionalOnProperty(value = CLIENT_CREDENTIALS, havingValue = "true")
-    StorageService clientCredentials(AmazonS3 s3, S3Factory factory, DatasafeProperties properties) {
+    StorageService clientCredentials(S3Client s3, S3Factory factory, DatasafeProperties properties) {
         ExecutorService executorService = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService();
         S3StorageService basicStorage = new S3StorageService(
                 s3,
@@ -182,7 +185,7 @@ StorageService singleStorageServiceFilesystem(DatasafeProperties properties) {
      */
     @Bean
     @ConditionalOnProperty(name = DATASAFE_S3_STORAGE, havingValue = "true")
-    StorageService singleStorageServiceS3(AmazonS3 s3, DatasafeProperties properties) {
+    StorageService singleStorageServiceS3(S3Client s3, DatasafeProperties properties) {
         return new S3StorageService(
                 s3,
                 properties.getBucketName(),
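The v1 doesBucketExistV2 check has no one-call counterpart in SDK v2; the replacement used below probes with headBucket and treats NoSuchBucketException as absence. A standalone sketch of that idiom (the helper name is hypothetical):

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;

final class BucketBootstrap {
    private BucketBootstrap() {
    }

    /** Creates the bucket only when a headBucket probe reports it missing. */
    static void ensureBucket(S3Client s3, String bucket) {
        try {
            s3.headBucket(HeadBucketRequest.builder().bucket(bucket).build());
        } catch (NoSuchBucketException e) {
            s3.createBucket(CreateBucketRequest.builder().bucket(bucket).build());
        }
    }
}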
@@ -217,36 +220,48 @@ StorageService multiStorageService(DatasafeProperties properties) {
     @Bean
     @org.springframework.context.annotation.Lazy
-    AmazonS3 s3(DatasafeProperties properties) {
-        AmazonS3 amazonS3;
+    S3Client s3(DatasafeProperties properties) {
         boolean useEndpoint = properties.getAmazonUrl() != null;

-        AWSStaticCredentialsProvider credentialsProvider = new AWSStaticCredentialsProvider(
-                new BasicAWSCredentials(properties.getAmazonAccessKeyID(), properties.getAmazonSecretAccessKey())
+        AwsBasicCredentials credentials = AwsBasicCredentials.create(
+                properties.getAmazonAccessKeyID(),
+                properties.getAmazonSecretAccessKey()
         );

-        AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard()
-                .withCredentials(credentialsProvider);
-
-        if (useEndpoint) {
-            builder = builder.withEndpointConfiguration(
-                    new AwsClientBuilder.EndpointConfiguration(
-                            properties.getAmazonUrl(),
-                            properties.getAmazonRegion())
-            ).enablePathStyleAccess();
-        } else {
-            builder.withRegion(properties.getAmazonRegion());
-        }
-
-        amazonS3 = builder.build();
+        S3ClientBuilder builder = S3Client.builder()
+                .credentialsProvider(StaticCredentialsProvider.create(credentials))
+                .region(Region.of(properties.getAmazonRegion()));
+
+        if (useEndpoint) {
+            // custom endpoints (e.g. minio) need the override plus path-style addressing
+            builder.endpointOverride(URI.create(properties.getAmazonUrl()))
+                    .forcePathStyle(true);
+        }
+
+        S3Client s3Client = builder.build();

         // used by local deployment in conjunction with minio
-        if (useEndpoint && !amazonS3.doesBucketExistV2(properties.getBucketName())) {
-            amazonS3.createBucket(properties.getBucketName());
+        if (useEndpoint && !bucketExists(s3Client, properties.getBucketName())) {
+            s3Client.createBucket(CreateBucketRequest.builder()
+                    .bucket(properties.getBucketName())
+                    .build());
         }

-        return amazonS3;
+        return s3Client;
+    }
+
+    private static boolean bucketExists(S3Client s3Client, String bucketName) {
+        try {
+            // headBucket throws NoSuchBucketException when the bucket is absent
+            s3Client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build());
+            return true;
+        } catch (NoSuchBucketException e) {
+            return false;
+        }
     }

     private static class WithAccessCredentials extends BucketAccessServiceImpl {
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java index 7297baec4..0e072db79 100644 --- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java +++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/config/S3Factory.java
@@ -1,9 +1,9 @@
 package de.adorsys.datasafe.rest.impl.config;

-import com.amazonaws.services.s3.AmazonS3;
+import software.amazon.awssdk.services.s3.S3Client;

 public interface S3Factory {

-    AmazonS3 getClient(String endpointUrl, String region, String accessKey, String secretKey);
-    AmazonS3 getAmazonClient(String region, String accessKey, String secretKey);
+    S3Client getClient(String endpointUrl, String region, String accessKey, String secretKey);
+    S3Client getAmazonClient(String region, String accessKey, String secretKey);
 }
diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java index 6c1209207..77cfb19ab 100644 --- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java +++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/DocumentController.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.rest.impl.controller; -import com.amazonaws.services.s3.model.AmazonS3Exception; import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; import de.adorsys.datasafe.encrypiton.api.types.UserID; import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; @@ -26,6 +25,7 @@ import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.multipart.MultipartFile; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.io.InputStream; import java.io.OutputStream; @@ -124,7 +124,7 @@ public List listDocuments(@RequestHeader @NotBlank String user, .toList(); log.debug("List for path {} returned {} items", path, documentList.size()); return documentList; - } catch (AmazonS3Exception e) { // for list this exception most likely means that user credentials wrong + } catch (S3Exception e) { // for list this exception most likely means that user credentials wrong throw new UnauthorizedException("Unauthorized", e); } } diff --git
a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java index 9e052fabc..863494bba 100644 --- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java +++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/GenericControllerAdvice.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.rest.impl.controller; -import com.amazonaws.services.s3.model.AmazonS3Exception; import de.adorsys.datasafe.rest.impl.exceptions.UnauthorizedException; import de.adorsys.datasafe.rest.impl.exceptions.UserDoesNotExistsException; import de.adorsys.datasafe.rest.impl.exceptions.UserExistsException; @@ -11,6 +10,7 @@ import org.springframework.web.bind.annotation.ControllerAdvice; import org.springframework.web.bind.annotation.ExceptionHandler; import org.springframework.web.bind.annotation.ResponseStatus; +import software.amazon.awssdk.services.s3.model.S3Exception; import javax.crypto.BadPaddingException; import java.security.UnrecoverableKeyException; @@ -42,7 +42,7 @@ public ResponseEntity> handleUserExistsException(UserExistsExceptio return ResponseEntity.badRequest().body(new ArrayList<>(errors)); } - @ExceptionHandler({AmazonS3Exception.class}) + @ExceptionHandler({S3Exception.class}) public ResponseEntity> handleFileNotFoundException(Exception ex) { log.debug("File not found exception: {}", ex.getMessage(), ex); List errors = Collections.singletonList("File not found"); diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java index bdaf2bc07..75c5b76d1 100644 --- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java +++ b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/InboxController.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.rest.impl.controller; -import com.amazonaws.services.s3.model.AmazonS3Exception; import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; import de.adorsys.datasafe.encrypiton.api.types.UserID; import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; @@ -25,6 +24,7 @@ import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.multipart.MultipartFile; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.io.InputStream; import java.io.OutputStream; @@ -119,7 +119,7 @@ public List listInbox(@RequestHeader @NotBlank String user, .toList(); log.debug("User's {} inbox contains {} items", user, inboxList.size()); return inboxList; - } catch (AmazonS3Exception e) { // for list this exception most likely means that user credentials wrong + } catch (S3Exception e) { // for list this exception most likely means that user credentials wrong throw new UnauthorizedException("Unauthorized", e); } } diff --git a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java index cb329260b..e59c21975 100644 --- a/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java +++ 
b/datasafe-rest-impl/src/main/java/de/adorsys/datasafe/rest/impl/controller/VersionController.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.rest.impl.controller; -import com.amazonaws.services.s3.model.AmazonS3Exception; import de.adorsys.datasafe.business.impl.service.VersionedDatasafeServices; import de.adorsys.datasafe.encrypiton.api.types.UserID; import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; @@ -29,6 +28,7 @@ import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.multipart.MultipartFile; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.io.InputStream; import java.io.OutputStream; @@ -63,7 +63,7 @@ public List listVersionedDocuments(@RequestHeader @NotBlank String user, .toList(); log.debug("List for path {} returned {} items", path, documentList.size()); return documentList; - } catch (AmazonS3Exception e) { // for list this exception most likely means that user credentials wrong + } catch (S3Exception e) { // for list this exception most likely means that user credentials wrong throw new UnauthorizedException("Unauthorized", e); } diff --git a/datasafe-rest-impl/src/main/resources/application.properties b/datasafe-rest-impl/src/main/resources/application.properties index 5b4490b30..7128e9f0b 100644 --- a/datasafe-rest-impl/src/main/resources/application.properties +++ b/datasafe-rest-impl/src/main/resources/application.properties @@ -28,6 +28,7 @@ datasafe.dbPassword=${MYSQL_PASSWORD} spring.liquibase.enabled=false + datasafe.encryption.keystore.type=BCFKS datasafe.encryption.keystore.encryptionAlgo=AES256_KWP datasafe.encryption.keystore.pbkdf.scrypt.cost=16384 diff --git a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java index 0fa08a3cd..b000ab7c5 100644 --- a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java +++ b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/config/DatasafeConfigTest.java
@@ -1,7 +1,5 @@
 package de.adorsys.datasafe.rest.impl.config;

-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.S3Object;
 import de.adorsys.datasafe.storage.api.StorageService;
 import de.adorsys.datasafe.types.api.resource.BasePrivateResource;
 import de.adorsys.datasafe.types.api.shared.BaseMockitoTest;
@@ -16,6 +14,10 @@
 import org.springframework.boot.test.mock.mockito.MockBean;
 import org.springframework.test.context.ActiveProfiles;
 import org.springframework.test.context.junit.jupiter.SpringExtension;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;

 import java.io.ByteArrayInputStream;

@@ -46,26 +48,40 @@ class DatasafeConfigTest extends BaseMockitoTest {
     @Autowired
     private StorageService storageService;

-    @MockBean
-    private AmazonS3 amazonS3;
-
     @Mock
-    private AmazonS3 amazonS3FromFactory;
+    private S3Client s3ClientFromFactory;

     @MockBean
     private S3Factory s3Factory;

     @BeforeEach
     void prepare() {
-        S3Object object = new S3Object();
-        object.setObjectContent(new ByteArrayInputStream(BASIC_STORAGE_ANSWER.getBytes(UTF_8)));
-        when(amazonS3.getObject(basicBucket, BASIC_FILE_STORAGE_PATH)).thenReturn(object);
-        when(s3Factory.getClient("http://0.0.0.0:9000/", "eu-central-1", "user", "passwd"))
-                .thenReturn(amazonS3FromFactory);
-        S3Object another = new S3Object();
-        another.setObjectContent(new ByteArrayInputStream(DATA_STORAGE_ANSWER.getBytes(UTF_8)));
-        when(amazonS3FromFactory.getObject(DATA_BUCKET, DATA_FILE_STORAGE_PATH)).thenReturn(another);
+        when(s3Factory.getClient("http://0.0.0.0:9000/", "eu-central-1", "user", "passwd"))
+                .thenReturn(s3ClientFromFactory);
+
+        GetObjectResponse objectResponseMock = GetObjectResponse.builder()
+                .contentLength((long) BASIC_STORAGE_ANSWER.getBytes(UTF_8).length)
+                .build();
+        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(BASIC_STORAGE_ANSWER.getBytes(UTF_8));
+        ResponseInputStream<GetObjectResponse> responseInputStreamMock =
+                new ResponseInputStream<>(objectResponseMock, byteArrayInputStream);
+        when(s3ClientFromFactory.getObject(GetObjectRequest.builder()
+                .bucket(basicBucket)
+                .key(BASIC_FILE_STORAGE_PATH)
+                .build()))
+                .thenReturn(responseInputStreamMock);
+
+        ByteArrayInputStream anotherInputStream = new ByteArrayInputStream(DATA_STORAGE_ANSWER.getBytes(UTF_8));
+        ResponseInputStream<GetObjectResponse> anotherResponseInputStreamMock =
+                new ResponseInputStream<>(objectResponseMock, anotherInputStream);
+        when(s3ClientFromFactory.getObject(GetObjectRequest.builder()
+                .bucket(DATA_BUCKET)
+                .key(DATA_FILE_STORAGE_PATH)
+                .build()))
+                .thenReturn(anotherResponseInputStreamMock);
     }

     @Test
diff --git a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java index 66ee7325e..a13e339ea 100644 --- a/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java +++ b/datasafe-rest-impl/src/test/java/de/adorsys/datasafe/rest/impl/controller/BaseDatasafeEndpointTest.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.rest.impl.controller; -import com.amazonaws.services.s3.AmazonS3; import com.fasterxml.jackson.databind.ObjectMapper; import de.adorsys.datasafe.directory.api.config.DFSConfig; import de.adorsys.datasafe.rest.impl.dto.UserDTO; @@ -18,6 +17,7 @@ import org.springframework.test.web.servlet.MockMvc; import org.springframework.test.web.servlet.MvcResult; import org.springframework.test.web.servlet.ResultMatcher; +import software.amazon.awssdk.services.s3.S3Client; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print; @@ -33,7 +33,7 @@ public abstract class BaseDatasafeEndpointTest extends BaseMockitoTest { protected MockMvc mvc; @MockBean - protected AmazonS3 s3; + protected S3Client s3Client; @MockBean protected StorageService storageService; diff --git a/datasafe-simple-adapter/datasafe-simple-adapter-impl/pom.xml b/datasafe-simple-adapter/datasafe-simple-adapter-impl/pom.xml index af98b24da..57a7c0bad 100644 --- a/datasafe-simple-adapter/datasafe-simple-adapter-impl/pom.xml +++ b/datasafe-simple-adapter/datasafe-simple-adapter-impl/pom.xml @@ -73,6 +73,21 @@ assertj-core test + + org.wildfly.core + wildfly-cli + 26.0.0.Beta1 + + + software.amazon.awssdk + apache-client + 2.26.22 + + + org.testcontainers + testcontainers + + diff --git a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java
b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java index 02f9cb024..d6e06fae0 100644 --- a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java +++ b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/main/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeServiceImpl.java
@@ -1,12 +1,6 @@
 package de.adorsys.datasafe.simple.adapter.impl;

-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
 import com.google.common.base.CharMatcher;
 import com.google.common.io.ByteStreams;
 import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices;
@@ -49,11 +43,23 @@
 import lombok.Getter;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;

 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.nio.file.FileSystems;
+import java.time.Duration;
 import java.util.List;
 import java.util.stream.Collectors;

@@ -248,63 +254,62 @@ private static SystemRootAndStorageService useAmazonS3(AmazonS3DFSCredentials df
         }
         log.info(lsf.toString());

-        AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(
-                                        amazonS3DFSCredentials.getAccessKey(),
-                                        amazonS3DFSCredentials.getSecretKey()))
-                );
+        S3ClientBuilder s3ClientBuilder = S3Client.builder()
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(
+                                amazonS3DFSCredentials.getAccessKey(),
+                                amazonS3DFSCredentials.getSecretKey()
+                        )
+                ))
+                .region(Region.of(amazonS3DFSCredentials.getRegion()));

         boolean useEndpoint = !amazonS3DFSCredentials.getUrl().matches(AMAZON_URL)
                 && !amazonS3DFSCredentials.getUrl().startsWith(S3_PREFIX);
         lsf = new LogStringFrame();
         if (useEndpoint) {
             lsf.add("not real amazon, so use pathStyleAccess");
-            AwsClientBuilder.EndpointConfiguration endpoint = new AwsClientBuilder.EndpointConfiguration(
-                    amazonS3DFSCredentials.getUrl(),
-                    amazonS3DFSCredentials.getRegion()
-            );
-            amazonS3ClientBuilder
-                    .withEndpointConfiguration(endpoint)
-                    .enablePathStyleAccess();
+            s3ClientBuilder.endpointOverride(URI.create(amazonS3DFSCredentials.getUrl()))
+                    .serviceConfiguration(S3Configuration.builder().pathStyleAccessEnabled(true).build());
         } else {
             lsf.add("real amazon, so use bucketStyleAccess");
-            amazonS3ClientBuilder.withRegion(amazonS3DFSCredentials.getRegion());
         }
         log.info("{}", lsf.toString());
         if (amazonS3DFSCredentials.isNoHttps() || maxConnections > 0 || requestTimeout > 0) {
-            ClientConfiguration clientConfig = new ClientConfiguration();
-            if (amazonS3DFSCredentials.isNoHttps()) {
-                log.info("Creating S3 client without https");
-                clientConfig.setProtocol(Protocol.HTTP);
-                clientConfig.disableSocketProxy();
-            }
+            // noHttps needs no special handling in SDK v2: the scheme of the endpoint URI decides
+            ApacheHttpClient.Builder httpClientBuilder = ApacheHttpClient.builder();
             if (maxConnections > 0) {
                 log.info("Creating S3 client with max connections:{}", maxConnections);
-                clientConfig.setMaxConnections(maxConnections);
+                httpClientBuilder.maxConnections(maxConnections);
             }
             if (requestTimeout > 0) {
                 log.info("Creating S3 client with connection timeout:{}", requestTimeout);
-                clientConfig.setRequestTimeout(requestTimeout);
+                // v1 setRequestTimeout took milliseconds; connectionTimeout is the closest analogue
+                httpClientBuilder.connectionTimeout(Duration.ofMillis(requestTimeout));
             }
-            amazonS3ClientBuilder.withClientConfiguration(clientConfig);
+            s3ClientBuilder.httpClient(httpClientBuilder.build());
         }

-        AmazonS3 amazons3 = amazonS3ClientBuilder.build();
-
-        if (!amazons3.doesBucketExistV2(amazonS3DFSCredentials.getContainer())) {
-            amazons3.createBucket(amazonS3DFSCredentials.getContainer());
+        S3Client s3Client = s3ClientBuilder.build();
+
+        try {
+            s3Client.headBucket(HeadBucketRequest.builder()
+                    .bucket(amazonS3DFSCredentials.getContainer())
+                    .build());
+            log.info("Bucket {} exists.", amazonS3DFSCredentials.getContainer());
+        } catch (NoSuchBucketException e) {
+            log.info("Bucket {} does not exist. Creating bucket.", amazonS3DFSCredentials.getContainer());
+            s3Client.createBucket(CreateBucketRequest.builder()
+                    .bucket(amazonS3DFSCredentials.getContainer())
+                    .build());
         }
-        StorageService storageService = new S3StorageService(
-                amazons3,
+        S3StorageService storageService = new S3StorageService(
+                s3Client,
                 amazonS3DFSCredentials.getContainer(),
                 ExecutorServiceUtil
                         .submitterExecutesOnStarvationExecutingService(
                                 amazonS3DFSCredentials.getThreadPoolSize(),
                                 amazonS3DFSCredentials.getQueueSize()
                         )
         );
         URI systemRoot = URI.create(S3_PREFIX + amazonS3DFSCredentials.getRootBucket());
         log.info("build DFS to S3 with root " + amazonS3DFSCredentials.getRootBucket() + " and url " + amazonS3DFSCredentials.getUrl());
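For orientation, the v1 ClientConfiguration knobs map onto the v2 Apache HTTP client roughly as sketched below; v1's setRequestTimeout took milliseconds and has no exact v2 equivalent, and plain HTTP is now selected by the endpoint URI scheme rather than a protocol setting. The helper name is hypothetical:

import java.time.Duration;
import software.amazon.awssdk.http.SdkHttpClient;
import software.amazon.awssdk.http.apache.ApacheHttpClient;

final class HttpClientMapping {
    private HttpClientMapping() {
    }

    /**
     * v1 ClientConfiguration knob      -> closest v2 Apache-client analogue:
     * setMaxConnections(n)             -> maxConnections(n)
     * setRequestTimeout(millis)        -> approximated here by connectionTimeout
     * setProtocol(Protocol.HTTP)       -> dropped; endpointOverride's scheme decides http vs https
     */
    static SdkHttpClient fromLegacySettings(int maxConnections, int requestTimeoutMillis) {
        ApacheHttpClient.Builder builder = ApacheHttpClient.builder();
        if (maxConnections > 0) {
            builder.maxConnections(maxConnections);
        }
        if (requestTimeoutMillis > 0) {
            builder.connectionTimeout(Duration.ofMillis(requestTimeoutMillis));
        }
        return builder.build();
    }
}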
diff --git a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterIT.java b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterIT.java index d8aa61c54..f7581b729 100644 --- a/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterIT.java +++ b/datasafe-simple-adapter/datasafe-simple-adapter-impl/src/test/java/de/adorsys/datasafe/simple/adapter/impl/SimpleDatasafeAdapterIT.java @@ -1,6 +1,5 @@ package de.adorsys.datasafe.simple.adapter.impl; -import com.amazonaws.services.s3.model.AmazonS3Exception; import de.adorsys.datasafe.encrypiton.api.types.UserID; import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; import de.adorsys.datasafe.encrypiton.api.types.encryption.MutableEncryptionConfig; @@ -27,6 +26,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.io.ByteArrayInputStream; import java.io.OutputStream; @@ -288,7 +288,7 @@ void testTwoUsers(WithStorageProvider.StorageDescriptor descriptor) { ); } else { assertThrows( - AmazonS3Exception.class, + S3Exception.class, () -> simpleDatasafeService.documentExists(userIDAuth2, document.getDocumentFQN()) ); } diff --git a/datasafe-storage/datasafe-storage-impl-s3/pom.xml b/datasafe-storage/datasafe-storage-impl-s3/pom.xml index 8fd486be2..b4f4e841d 100644 --- a/datasafe-storage/datasafe-storage-impl-s3/pom.xml +++ b/datasafe-storage/datasafe-storage-impl-s3/pom.xml @@ -13,7 +13,17 @@ + + software.amazon.awssdk + bom + 2.26.22 + pom + import + + de.adorsys @@ -24,14 +34,29 @@ de.adorsys datasafe-types-api ${project.version} - com.amazonaws - aws-java-sdk-s3 + software.amazon.awssdk + s3 - com.amazonaws - aws-java-sdk-core + software.amazon.awssdk + sdk-core javax.xml.bind @@ -92,6 +117,13 @@ mockito-core test diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java index 7e348333b..0bd7aca53 100644 --- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java +++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/ChunkUploadRequest.java
@@ -1,16 +1,16 @@
 package de.adorsys.datasafe.storage.impl.s3;

-import com.amazonaws.services.s3.AmazonS3;
 import lombok.Builder;
 import lombok.Getter;
 import lombok.ToString;
+import software.amazon.awssdk.services.s3.S3Client;

 @Getter
 @Builder
 @ToString
 public class ChunkUploadRequest {

-    private AmazonS3 amazonS3;
+    private S3Client s3;
     @ToString.Exclude
     private byte[] content;
     private int contentSize;
@@ -20,4 +20,4 @@ public class ChunkUploadRequest {
     private int chunkNumberCounter;
     private boolean lastChunk;
 }
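The next file implements the v2 multipart flow: create the upload, send parts of at least 5 MB each (except the last), then complete with the parts sorted by part number. A condensed single-threaded sketch of that lifecycle (class and method names are illustrative):

import java.util.ArrayList;
import java.util.List;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

final class MultipartSketch {
    private MultipartSketch() {
    }

    static void upload(S3Client s3, String bucket, String key, List<byte[]> chunks) {
        CreateMultipartUploadResponse created = s3.createMultipartUpload(
                CreateMultipartUploadRequest.builder().bucket(bucket).key(key).build());

        List<CompletedPart> parts = new ArrayList<>();
        int partNumber = 1;
        for (byte[] chunk : chunks) {
            UploadPartResponse part = s3.uploadPart(
                    UploadPartRequest.builder()
                            .bucket(bucket).key(key)
                            .uploadId(created.uploadId())
                            .partNumber(partNumber)
                            .contentLength((long) chunk.length)
                            .build(),
                    RequestBody.fromBytes(chunk));
            // UploadPartResponse carries only the eTag; the caller pairs it with its number
            parts.add(CompletedPart.builder().partNumber(partNumber).eTag(part.eTag()).build());
            partNumber++;
        }

        s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                .bucket(bucket).key(key)
                .uploadId(created.uploadId())
                .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build())
                .build());
    }
}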
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java index 34707d8e8..29636e054 100644 --- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java +++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStream.java
@@ -21,16 +21,6 @@
 package de.adorsys.datasafe.storage.impl.s3;

-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartResult;
 import de.adorsys.datasafe.types.api.callback.PhysicalVersionCallback;
 import de.adorsys.datasafe.types.api.callback.ResourceWriteCallback;
 import de.adorsys.datasafe.types.api.utils.CustomizableByteArrayOutputStream;
@@ -38,6 +28,9 @@
 import lombok.SneakyThrows;
 import lombok.Synchronized;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;

 import java.io.ByteArrayInputStream;
 import java.io.IOException;
@@ -50,33 +43,36 @@
+import java.util.Comparator;
 import java.util.concurrent.ExecutorService;

 @Slf4j
 public class MultipartUploadS3StorageOutputStream extends OutputStream {

     private String bucketName;

     private String objectName;

-    private AmazonS3 amazonS3;
+    private S3Client s3;

     // The minimum size for a multi part request is 5 MB, hence the buffer size of 5 MB
     static final int BUFFER_SIZE = 1024 * 1024 * 5;

-    private final CompletionService<UploadPartResult> completionService;
+    // each callable reports a CompletedPart that carries its own part number
+    private final CompletionService<CompletedPart> completionService;

     private CustomizableByteArrayOutputStream currentOutputStream = newOutputStream();

-    private InitiateMultipartUploadResult multiPartUploadResult;
+    private CreateMultipartUploadResponse multiPartUploadResult;

     private int partCounter = 1;

     private final List<? extends ResourceWriteCallback> callbacks;

-    MultipartUploadS3StorageOutputStream(String bucketName, String objectKey, AmazonS3 amazonS3,
+    public MultipartUploadS3StorageOutputStream(String bucketName, String objectKey, S3Client s3,
             ExecutorService executorService, List<? extends ResourceWriteCallback> callbacks) {
         this.bucketName = bucketName;
         this.objectName = objectKey;
-        this.amazonS3 = amazonS3;
+        this.s3 = s3;
         this.completionService = new ExecutorCompletionService<>(executorService);
         this.callbacks = callbacks;
@@ -136,12 +132,12 @@ private void initiateMultipartRequestAndCommitPartIfNeeded() {
         completionService.submit(new UploadChunkResultCallable(
                 ChunkUploadRequest
                         .builder()
-                        .amazonS3(amazonS3)
+                        .s3(s3)
                         .content(content)
                        .contentSize(size)
                         .bucketName(bucketName)
                         .objectName(objectName)
-                        .uploadId(multiPartUploadResult.getUploadId())
+                        .uploadId(multiPartUploadResult.uploadId())
                         .chunkNumberCounter(partCounter)
                         .lastChunk(false)
                         .build()
@@ -156,21 +152,22 @@ private boolean isMultiPartUpload() {
     @SneakyThrows
     private void finishSimpleUpload() {
-        ObjectMetadata objectMetadata = new ObjectMetadata();
         int size = currentOutputStream.size();
-        objectMetadata.setContentLength(size);
         byte[] content = currentOutputStream.getBufferOrCopy();

         // Release the memory
         currentOutputStream = null;

-        PutObjectResult upload = amazonS3.putObject(
-                bucketName,
-                objectName,
-                new ByteArrayInputStream(content, 0, size),
-                objectMetadata);
+        PutObjectRequest putObjectRequest = PutObjectRequest.builder()
+                .bucket(bucketName)
+                .key(objectName)
+                .contentLength((long) size)
+                .build();
+        PutObjectResponse upload = s3.putObject(
+                putObjectRequest,
+                RequestBody.fromInputStream(new ByteArrayInputStream(content, 0, size), size));

-        notifyCommittedVersionIfPresent(upload.getVersionId());
+        // propagate the storage version to callbacks, as the v1 code did
+        notifyCommittedVersionIfPresent(upload.versionId());

         log.debug("Finished simple upload");
     }
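On a versioned bucket, PutObjectResponse and CompleteMultipartUploadResponse expose versionId(), which is what the write callbacks above propagate; it is null when versioning is off. A minimal sketch (the helper name is hypothetical):

import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;

final class VersionCapture {
    private VersionCapture() {
    }

    /** Returns the new object version, or null when the bucket is unversioned. */
    static String putAndGetVersion(S3Client s3, String bucket, String key, byte[] body) {
        PutObjectResponse response = s3.putObject(
                PutObjectRequest.builder().bucket(bucket).key(key).build(),
                RequestBody.fromBytes(body));
        return response.versionId();
    }
}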
@@ -179,19 +180,21 @@ private void finishMultiPartUpload() throws IOException {
         sendLastChunkOfMultipartIfNeeded();

         try {
-            List<PartETag> partETags = getMultiPartsUploadResults();
+            List<CompletedPart> completedParts = getMultiPartsUploadResults();

             log.debug("Send multipart request to S3");
-            CompleteMultipartUploadResult upload = amazonS3.completeMultipartUpload(
-                    new CompleteMultipartUploadRequest(
-                            multiPartUploadResult.getBucketName(),
-                            multiPartUploadResult.getKey(),
-                            multiPartUploadResult.getUploadId(),
-                            partETags
-                    )
-            );
+            CompleteMultipartUploadResponse upload = s3.completeMultipartUpload(
+                    CompleteMultipartUploadRequest.builder()
+                            .bucket(bucketName)
+                            .key(objectName)
+                            .uploadId(multiPartUploadResult.uploadId())
+                            .multipartUpload(CompletedMultipartUpload.builder()
+                                    .parts(completedParts)
+                                    .build())
+                            .build());

-            notifyCommittedVersionIfPresent(upload.getVersionId());
+            // versionId(), not eTag(): callbacks expect the committed object version
+            notifyCommittedVersionIfPresent(upload.versionId());

             log.debug("Finished multi part upload");
         } catch (ExecutionException e) {
@@ -222,12 +225,12 @@ private void sendLastChunkOfMultipartIfNeeded() {
         completionService.submit(
                 new UploadChunkResultCallable(ChunkUploadRequest.builder()
-                        .amazonS3(amazonS3)
+                        .s3(s3)
                         .content(content)
                         .contentSize(size)
                         .bucketName(bucketName)
                         .objectName(objectName)
-                        .uploadId(multiPartUploadResult.getUploadId())
+                        .uploadId(multiPartUploadResult.uploadId())
                         .chunkNumberCounter(partCounter)
                         .lastChunk(true)
                         .build()
@@ -249,8 +252,13 @@ private void initiateMultiPartIfNeeded() {
         if (multiPartUploadResult == null) {
             log.debug("Initiate multi part");

-            multiPartUploadResult = amazonS3
-                    .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, objectName));
+            multiPartUploadResult = s3.createMultipartUpload(
+                    CreateMultipartUploadRequest.builder()
+                            .bucket(bucketName)
+                            .key(objectName)
+                            .build());
         }
     }
@@ -258,18 +271,24 @@ private void abortMultiPartUpload() {
         log.debug("Abort multi part");

         if (isMultiPartUpload()) {
-            amazonS3.abortMultipartUpload(new AbortMultipartUploadRequest(
-                    multiPartUploadResult.getBucketName(),
-                    multiPartUploadResult.getKey(),
-                    multiPartUploadResult.getUploadId()));
+            s3.abortMultipartUpload(AbortMultipartUploadRequest.builder()
+                    .bucket(bucketName)
+                    .key(objectName)
+                    .uploadId(multiPartUploadResult.uploadId())
+                    .build());
         }
     }

-    private List<PartETag> getMultiPartsUploadResults() throws ExecutionException, InterruptedException {
-        List<PartETag> result = new ArrayList<>(partCounter);
-        for (int i = 0; i < partCounter; i++) {
-            UploadPartResult partResult = completionService.take().get();
-            result.add(partResult.getPartETag());
+    private List<CompletedPart> getMultiPartsUploadResults() throws ExecutionException, InterruptedException {
+        List<CompletedPart> result = new ArrayList<>(partCounter);
+        for (int i = 0; i < partCounter; i++) {
+            // parts may complete in any order; each CompletedPart carries its part number
+            result.add(completionService.take().get());
             log.debug("Get upload part #{} from {}", i, partCounter);
         }
+        // CompleteMultipartUpload requires the parts sorted by part number
+        result.sort(Comparator.comparingInt(CompletedPart::partNumber));
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java index 841607ee6..ccfdec4ba 100644 --- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java +++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3ClientFactory.java @@ -1,35 +1,37 @@ package
de.adorsys.datasafe.storage.impl.s3; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import lombok.experimental.UtilityClass; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3Configuration; + +import java.net.URI; @UtilityClass public class S3ClientFactory { - public AmazonS3 getClient(String endpointUrl, String region, String accessKey, String secretKey) { - return AmazonS3ClientBuilder.standard() - .withEndpointConfiguration( - new AwsClientBuilder.EndpointConfiguration(endpointUrl, region) - ) - .withCredentials( - new AWSStaticCredentialsProvider( - new BasicAWSCredentials(accessKey, secretKey) + public S3Client getClient(String endpointUrl, String region, String accessKey, String secretKey) { + return S3Client.builder() + .endpointOverride(URI.create(endpointUrl)) + .region(Region.of(region)) + .credentialsProvider( + StaticCredentialsProvider.create( + AwsBasicCredentials.create(accessKey, secretKey) ) ) - .enablePathStyleAccess() + .serviceConfiguration(S3Configuration.builder() + .pathStyleAccessEnabled(true) + .build()) .build(); } - - public AmazonS3 getAmazonClient(String region, String accessKey, String secretKey) { - return AmazonS3ClientBuilder.standard() - .withRegion(region) - .withCredentials( - new AWSStaticCredentialsProvider( - new BasicAWSCredentials(accessKey, secretKey) + public S3Client getAmazonClient(String region, String accessKey, String secretKey) { + return S3Client.builder() + .region(Region.of(region)) + .credentialsProvider( + StaticCredentialsProvider.create( + AwsBasicCredentials.create(accessKey, secretKey) ) ) .build(); diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java index 59d204569..322740c40 100644 --- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java +++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java @@ -1,10 +1,5 @@ package de.adorsys.datasafe.storage.impl.s3; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.iterable.S3Objects; -import com.amazonaws.services.s3.iterable.S3Versions; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.S3ObjectSummary; import de.adorsys.datasafe.storage.api.StorageService; import de.adorsys.datasafe.types.api.callback.ResourceWriteCallback; import de.adorsys.datasafe.types.api.resource.AbsoluteLocation; @@ -17,6 +12,8 @@ import de.adorsys.datasafe.types.api.resource.WithCallback; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; import java.io.InputStream; import java.io.OutputStream; @@ -39,7 +36,7 @@ @RequiredArgsConstructor public class S3StorageService implements StorageService { - private final AmazonS3 s3; + private final S3Client s3; private 
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java
index 59d204569..322740c40 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/S3StorageService.java
@@ -1,10 +1,5 @@
 package de.adorsys.datasafe.storage.impl.s3;

-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.iterable.S3Objects;
-import com.amazonaws.services.s3.iterable.S3Versions;
-import com.amazonaws.services.s3.model.GetObjectRequest;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
 import de.adorsys.datasafe.storage.api.StorageService;
 import de.adorsys.datasafe.types.api.callback.ResourceWriteCallback;
 import de.adorsys.datasafe.types.api.resource.AbsoluteLocation;
@@ -17,6 +12,8 @@
 import de.adorsys.datasafe.types.api.resource.WithCallback;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;

 import java.io.InputStream;
 import java.io.OutputStream;
@@ -39,7 +36,7 @@
 @RequiredArgsConstructor
 public class S3StorageService implements StorageService {

-    private final AmazonS3 s3;
+    private final S3Client s3;
     private final BucketRouter router;
     private final ExecutorService executorService;

@@ -48,7 +45,7 @@ public class S3StorageService implements StorageService {
      * @param bucketName Bucket to use
      * @param executorService Multipart sending threadpool (file chunks are sent in parallel)
      */
-    public S3StorageService(AmazonS3 s3, String bucketName, ExecutorService executorService) {
+    public S3StorageService(S3Client s3, String bucketName, ExecutorService executorService) {
         this.s3 = s3;
         this.router = new StaticBucketRouter(bucketName);
         this.executorService = executorService;
@@ -63,13 +60,18 @@ public Stream<AbsoluteLocation<ResolvedResource>> list(AbsoluteLocation location) {
         log.debug("List at {}", location.location());
         String prefix = router.resourceKey(location);

-        S3Objects s3ObjectSummaries = S3Objects.withPrefix(s3, router.bucketName(location), prefix);
-        Stream<S3ObjectSummary> objectStream = StreamSupport.stream(s3ObjectSummaries.spliterator(), false);
+        ListObjectsV2Request request = ListObjectsV2Request.builder()
+                .bucket(router.bucketName(location))
+                .prefix(prefix)
+                .build();
+
+        // paginate like v1's S3Objects iterable did, so listings beyond 1000 keys stay complete
+        Stream<S3Object> objectStream = s3.listObjectsV2Paginator(request).contents().stream();
         return objectStream
                 .map(os -> new AbsoluteLocation<>(
                         new BaseResolvedResource(
                                 createPath(location, os, prefix.length()),
-                                os.getLastModified().toInstant()
+                                os.lastModified()
                         ))
                 );
     }
@@ -84,11 +86,15 @@ public InputStream read(AbsoluteLocation location) {
         String bucketName = router.bucketName(location);
         return executeAndReturn(
                 location,
-                key -> s3.getObject(bucketName, key).getObjectContent(),
-                (key, version) ->
-                        s3.getObject(
-                                new GetObjectRequest(bucketName, key, version.getVersionId())
-                        ).getObjectContent()
+                key -> s3.getObject(GetObjectRequest.builder()
+                        .bucket(bucketName)
+                        .key(key)
+                        .build()),
+                (key, version) -> s3.getObject(GetObjectRequest.builder()
+                        .bucket(bucketName)
+                        .key(key)
+                        .versionId(version.getVersionId())
+                        .build())
         );
     }

@@ -100,15 +106,16 @@ public OutputStream write(WithCallback<AbsoluteLocation<PrivateResource>, ? extends ResourceWriteCallback> locationWithCallback) {
         executeAndReturn(
                 location,
                 key -> doRemove(bucketName, key),
-                (key, version) -> s3.deleteVersion(bucketName, key, version.getVersionId())
+                (key, version) -> s3.deleteObject(DeleteObjectRequest.builder()
+                        .bucket(bucketName)
+                        .key(key)
+                        .versionId(version.getVersionId())
+                        .build())
         );
     }

@@ -135,17 +142,16 @@ public boolean objectExists(AbsoluteLocation location) {

         boolean pathExists = executeAndReturn(
                 location,
-                key -> s3.doesObjectExist(bucketName, key),
-                (key, version) ->
-                        StreamSupport.stream(
-                                S3Versions.withPrefix(s3, bucketName, key).spliterator(), false)
-                                .anyMatch(it -> it.getVersionId().equals(version.getVersionId()))
+                key -> {
+                    // headObject checks the exact key, matching v1 doesObjectExist semantics;
+                    // a prefix listing would also match sibling keys like "file2" for "file"
+                    try {
+                        s3.headObject(HeadObjectRequest.builder().bucket(bucketName).key(key).build());
+                        return true;
+                    } catch (NoSuchKeyException e) {
+                        return false;
+                    }
+                },
+                (key, version) -> s3.listObjectVersions(ListObjectVersionsRequest.builder()
+                                .bucket(bucketName)
+                                .prefix(key)
+                                .build())
+                        .versions().stream()
+                        .anyMatch(v -> v.versionId().equals(version.getVersionId()))
         );

         log.debug("Path {} exists {}", location, pathExists);
         return pathExists;
     }

     @Override
     public Optional<Integer> flushChunkSize(AbsoluteLocation location) {
         return Optional.of(MultipartUploadS3StorageOutputStream.BUFFER_SIZE);
@@ -153,15 +159,26 @@ public Optional<Integer> flushChunkSize(AbsoluteLocation location) {

     private void doRemove(String bucket, String key) {
         if (key.endsWith("/")) {
-            S3Objects.withPrefix(s3, bucket, key).forEach(it -> s3.deleteObject(bucket, it.getKey()));
+            ListObjectsV2Request request = ListObjectsV2Request.builder()
+                    .bucket(bucket)
+                    .prefix(key)
+                    .build();
+
+            // the paginator keeps fetching pages, so directories with >1000 entries are fully removed
+            s3.listObjectsV2Paginator(request).contents()
+                    .forEach(obj -> s3.deleteObject(DeleteObjectRequest.builder()
+                            .bucket(bucket)
+                            .key(obj.key())
+                            .build()));
             return;
         }

-        s3.deleteObject(bucket, key);
+        s3.deleteObject(DeleteObjectRequest.builder()
+                .bucket(bucket)
+                .key(key)
+                .build());
     }

-    private PrivateResource createPath(AbsoluteLocation root, S3ObjectSummary os, int prefixLen) {
-        String relUrl = os.getKey().substring(prefixLen).replaceFirst("^/", "");
+    private PrivateResource createPath(AbsoluteLocation root, S3Object os, int prefixLen) {
+        String relUrl = os.key().substring(prefixLen).replaceFirst("^/", "");
         if ("".equals(relUrl)) {
             return BasePrivateResource.forPrivate(root.location());
         }
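A note on the read path above: plain getObject(...) returns a ResponseInputStream that streams from S3, matching v1's getObjectContent() behaviour, whereas getObjectAsBytes(...) would materialize the whole object in memory. A minimal sketch of the two styles, assuming a hypothetical helper class:

    import java.io.InputStream;

    import software.amazon.awssdk.core.ResponseInputStream;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;
    import software.amazon.awssdk.services.s3.model.GetObjectResponse;

    class ReadStyles {

        // Streams from S3; suitable for large objects. The caller must close the stream.
        static InputStream streaming(S3Client s3, String bucket, String key) {
            ResponseInputStream<GetObjectResponse> in = s3.getObject(
                    GetObjectRequest.builder().bucket(bucket).key(key).build());
            return in;
        }

        // Buffers the whole object in memory; fine for small objects only.
        static byte[] buffered(S3Client s3, String bucket, String key) {
            return s3.getObjectAsBytes(
                    GetObjectRequest.builder().bucket(bucket).key(key).build()).asByteArray();
        }
    }

Since Datasafe pipes these streams through encryption layers of arbitrary size, the streaming form is the safer default here.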
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java
index efe302e69..6601c9f22 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/main/java/de/adorsys/datasafe/storage/impl/s3/UploadChunkResultCallable.java
@@ -21,18 +21,19 @@
 package de.adorsys.datasafe.storage.impl.s3;

-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.amazonaws.services.s3.model.UploadPartResult;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.UploadPartResponse;

 import java.io.ByteArrayInputStream;
 import java.util.concurrent.Callable;

 @Slf4j
-public class UploadChunkResultCallable implements Callable<UploadPartResult> {
+public class UploadChunkResultCallable implements Callable<UploadPartResponse> {

-    private final AmazonS3 amazonS3;
+    private final S3Client s3;

     private final int contentLength;

@@ -44,34 +45,36 @@ public class UploadChunkResultCallable implements Callable<UploadPartResponse> {

     private final String fileName;

-    private final String chunkId;
+    private final String uploadId;

     private byte[] content;

     UploadChunkResultCallable(ChunkUploadRequest request) {
-        this.amazonS3 = request.getAmazonS3();
+        this.s3 = request.getS3();
         this.content = request.getContent();
         this.contentLength = request.getContentSize();
         this.partNumber = request.getChunkNumberCounter();
         this.last = request.isLastChunk();
         this.bucketName = request.getBucketName();
         this.fileName = request.getObjectName();
-        this.chunkId = request.getUploadId();
+        this.uploadId = request.getUploadId();

-        log.debug("Chunk upload request: {}", request.toString());
+        log.debug("Chunk upload request: {}", request);
     }

     @Override
-    public UploadPartResult call() {
+    public UploadPartResponse call() {
         log.trace("Upload chunk result call with part: {}", partNumber);
         try {
-            return amazonS3.uploadPart(new UploadPartRequest()
-                    .withBucketName(bucketName).withKey(fileName)
-                    .withUploadId(chunkId)
-                    .withInputStream(new ByteArrayInputStream(content))
-                    .withPartNumber(partNumber).withLastPart(last)
-                    .withPartSize(contentLength)
-            );
+            UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
+                    .bucket(bucketName)
+                    .key(fileName)
+                    .uploadId(uploadId)
+                    .partNumber(partNumber)
+                    .contentLength((long) contentLength)
+                    .build();
+
+            return s3.uploadPart(uploadPartRequest, RequestBody.fromBytes(content));
         } finally {
             // Release the memory, as the callable may still live inside the
             // CompletionService which would cause
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java b/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java
index df98f6d62..313f48db5 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3StorageOutputStreamIT.java
@@ -1,11 +1,5 @@
 package de.adorsys.datasafe.storage.impl.s3;

-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.amazonaws.services.s3.model.UploadPartResult;
 import de.adorsys.datasafe.types.api.shared.BaseMockitoTest;
 import lombok.SneakyThrows;
 import org.junit.jupiter.api.BeforeEach;
@@ -14,6 +8,9 @@
 import org.mockito.Captor;
 import org.mockito.Mock;
 import org.testcontainers.shaded.com.google.common.io.ByteStreams;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;

 import java.io.InputStream;
 import java.util.Arrays;
@@ -22,13 +19,10 @@
 import java.util.concurrent.ExecutorService;

 import static de.adorsys.datasafe.storage.impl.s3.MultipartUploadS3StorageOutputStream.BUFFER_SIZE;
+import static org.apache.commons.io.IOUtils.toByteArray;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;

 class MultipartUploadS3StorageOutputStreamIT extends BaseMockitoTest {

@@ -37,13 +31,15 @@ class MultipartUploadS3StorageOutputStreamIT extends BaseMockitoTest {

     private final byte[] multipartChunkWithTail = randomBytes(BUFFER_SIZE + 100);

     @Mock
-    private AmazonS3 amazonS3;
+    private S3Client s3;

     @Mock
     private ExecutorService executorService;

     @Captor
-    private ArgumentCaptor<InputStream> bytesSentDirectly;
+    private ArgumentCaptor<RequestBody> requestBodyCaptor;

     @Captor
     private ArgumentCaptor<UploadPartRequest> uploadChunk;

@@ -55,31 +51,35 @@ void init() {
         tested = new MultipartUploadS3StorageOutputStream(
                 "bucket",
                 "s3://path/to/file.txt",
-                amazonS3,
+                s3,
                 executorService,
                 Collections.emptyList()
         );

-        when(amazonS3.putObject(anyString(), anyString(), bytesSentDirectly.capture(), any()))
-                .thenReturn(new PutObjectResult());
-        when(amazonS3.initiateMultipartUpload(any())).thenReturn(new InitiateMultipartUploadResult());
+        when(s3.putObject(any(PutObjectRequest.class), requestBodyCaptor.capture()))
+                .thenReturn(PutObjectResponse.builder().build());
+        when(s3.createMultipartUpload(any(CreateMultipartUploadRequest.class)))
+                .thenReturn(CreateMultipartUploadResponse.builder().uploadId("testUploadId").build());

         doAnswer(inv -> {
             inv.getArgument(0, Runnable.class).run();
             return null;
         }).when(executorService).execute(any());

-        when(amazonS3.uploadPart(uploadChunk.capture())).thenReturn(new UploadPartResult());
-        when(amazonS3.completeMultipartUpload(any())).thenReturn(new CompleteMultipartUploadResult());
+        when(s3.uploadPart(any(UploadPartRequest.class), requestBodyCaptor.capture()))
+                .thenReturn(UploadPartResponse.builder().eTag("testETag").build());
+        when(s3.completeMultipartUpload(any(CompleteMultipartUploadRequest.class)))
+                .thenReturn(CompleteMultipartUploadResponse.builder().build());
     }

     @Test
     @SneakyThrows
     void writeBulkNonChunked() {
         tested.write(shortChunk, 0 , shortChunk.length);

         tested.close();

-        verify(executorService, never()).submit(any(UploadChunkResultCallable.class));
-        assertThat(bytesSentDirectly.getValue()).hasContent(new String(shortChunk));
+        verify(s3).putObject(any(PutObjectRequest.class), requestBodyCaptor.capture());
+        verify(s3, never()).uploadPart(any(UploadPartRequest.class), any(RequestBody.class));
+        assertThat(requestBodyCaptor.getValue().contentStreamProvider().newStream())
+                .hasContent(new String(shortChunk));
     }
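The assertions in the tests below re-read the captured RequestBody repeatedly. That is safe because a RequestBody built from a byte array returns a fresh stream on every contentStreamProvider().newStream() call. A small helper in that spirit (hypothetical name, assuming Java 9+ for readAllBytes):

    import java.io.IOException;
    import java.io.InputStream;

    import software.amazon.awssdk.core.sync.RequestBody;

    class RequestBodies {

        // Reads a captured RequestBody back into bytes; for bytes-backed bodies
        // newStream() yields a fresh stream each time, so this can be called repeatedly.
        static byte[] drain(RequestBody body) throws IOException {
            try (InputStream in = body.contentStreamProvider().newStream()) {
                return in.readAllBytes();
            }
        }
    }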
@@ -89,10 +89,14 @@ void writeBulkNonChunkedWithOffset() {

         tested.close();

-        verify(executorService, never()).submit(any(UploadChunkResultCallable.class));
-        assertThat(bytesSentDirectly.getValue()).hasContent(
-                new String(Arrays.copyOfRange(shortChunk, 10, shortChunk.length))
-        );
+        verify(s3).putObject(any(PutObjectRequest.class), requestBodyCaptor.capture());
+        verify(s3, never()).uploadPart(any(UploadPartRequest.class), any(RequestBody.class));
+
+        RequestBody capturedBody = requestBodyCaptor.getValue();
+        byte[] capturedContent = toByteArray(capturedBody.contentStreamProvider().newStream());
+
+        assertThat(new String(capturedContent))
+                .isEqualTo(new String(Arrays.copyOfRange(shortChunk, 10, shortChunk.length)));
     }

@@ -102,8 +106,7 @@ void writeBulkChunkedExactlyOne() {

         tested.close();

-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
-        assertThat(uploadChunk.getAllValues()).hasSize(1);
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3).uploadPart(any(UploadPartRequest.class), any(RequestBody.class));
     }

@@ -113,13 +116,14 @@ void writeBulkChunked() {

         tested.close();

-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3, times(2)).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
         assertThat(uploadChunk.getAllValues()).hasSize(2);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(0).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(multipartChunkWithTail, 0, BUFFER_SIZE)));
-        assertThat(uploadChunk.getAllValues().get(1).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(1).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(
-                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
+                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
                 )
                 );
     }
@@ -131,13 +135,14 @@ void writeBulkChunkedWithOffset() {

         tested.close();

-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3, times(2)).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
         assertThat(uploadChunk.getAllValues()).hasSize(2);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(0).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(multipartChunkWithTail, 10, 10 + BUFFER_SIZE)));
-        assertThat(uploadChunk.getAllValues().get(1).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(1).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(
-                        multipartChunkWithTail, 10 + BUFFER_SIZE, multipartChunkWithTail.length)
+                        multipartChunkWithTail, 10 + BUFFER_SIZE, multipartChunkWithTail.length)
                 )
                 );
     }

@@ -149,8 +154,13 @@ void writeBulkZeroSized() {

         tested.close();

-        verify(executorService, never()).submit(any(UploadChunkResultCallable.class));
-        assertThat(bytesSentDirectly.getValue()).hasContent("");
+        verify(executorService, never()).execute(any(Runnable.class));
+        verify(s3).putObject(any(PutObjectRequest.class), requestBodyCaptor.capture());
+
+        RequestBody capturedBody = requestBodyCaptor.getValue();
+        byte[] capturedContent = toByteArray(capturedBody.contentStreamProvider().newStream());
+
+        assertThat(capturedContent).isEmpty();
     }

@@ -160,10 +170,14 @@ void writeByteByByteNoChunked() {

         tested.close();

-        verify(executorService, never()).submit(any(UploadChunkResultCallable.class));
-        assertThat(bytesSentDirectly.getValue()).hasContent(new String(shortChunk));
+        verify(executorService, never()).execute(any(Runnable.class));
+        verify(s3).putObject(any(PutObjectRequest.class), requestBodyCaptor.capture());
+
+        RequestBody capturedBody = requestBodyCaptor.getValue();
+        byte[] capturedContent = toByteArray(capturedBody.contentStreamProvider().newStream());
+
+        assertThat(new String(capturedContent)).isEqualTo(new String(shortChunk));
     }

@@ -171,9 +185,10 @@ void writeByteByByteChunkedExactChunk() {

         tested.close();

-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
-        assertThat(uploadChunk.getAllValues()).hasSize(1);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream()).hasContent(new String(exactOneMultipartChunk));
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
+        assertThat(uploadChunk.getValue().partNumber()).isEqualTo(1);
+        assertThat(requestBodyCaptor.getValue().contentStreamProvider().newStream())
+                .hasContent(new String(exactOneMultipartChunk));
     }
@@ -183,18 +198,24 @@ void writeByteByByteChunked() {

         tested.close();

-        assertThat(bytesSentDirectly.getAllValues()).isEmpty();
+        verify(s3, never()).putObject(any(PutObjectRequest.class), any(RequestBody.class));
+        verify(s3, times(2)).uploadPart(uploadChunk.capture(), requestBodyCaptor.capture());
         assertThat(uploadChunk.getAllValues()).hasSize(2);
-        assertThat(uploadChunk.getAllValues().get(0).getInputStream())
+        assertThat(requestBodyCaptor.getAllValues().get(0).contentStreamProvider().newStream())
                 .hasContent(new String(Arrays.copyOfRange(multipartChunkWithTail, 0, BUFFER_SIZE)));

         // we are setting size parameter that limits number of bytes read by s3 client:
-        int partialPartSize = (int) uploadChunk.getAllValues().get(1).getPartSize();
+        int partialPartSize = Math.toIntExact(uploadChunk.getAllValues().get(1).contentLength());
+
         byte[] partialChunk = new byte[partialPartSize];
-        ByteStreams.readFully(uploadChunk.getAllValues().get(1).getInputStream(), partialChunk, 0, partialPartSize);
+        ByteStreams.readFully(requestBodyCaptor.getAllValues().get(1).contentStreamProvider().newStream(),
+                partialChunk, 0, partialPartSize);

         assertThat(new String(partialChunk))
                 .isEqualTo(new String(Arrays.copyOfRange(
-                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
+                        multipartChunkWithTail, BUFFER_SIZE, multipartChunkWithTail.length)
                 )
                 );
     }

@@ -204,8 +225,13 @@ void writeByteByByteChunked() {
     void writeZeroSized() {
         tested.close();

-        verify(executorService, never()).submit(any(UploadChunkResultCallable.class));
-        assertThat(bytesSentDirectly.getValue()).hasContent("");
+        verify(executorService, never()).execute(any(Runnable.class));
+        verify(s3).putObject(any(PutObjectRequest.class), requestBodyCaptor.capture());
+
+        RequestBody capturedBody = requestBodyCaptor.getValue();
+        byte[] capturedContent = toByteArray(capturedBody.contentStreamProvider().newStream());
+
+        assertThat(capturedContent).isEmpty();
     }
diff --git a/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/S3SystemStorageServiceIT.java b/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/S3SystemStorageServiceIT.java
index ee66097af..78ef60d76 100644
--- a/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/S3SystemStorageServiceIT.java
+++ b/datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/S3SystemStorageServiceIT.java
@@ -1,11 +1,5 @@
 package de.adorsys.datasafe.storage.impl.s3;

-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import de.adorsys.datasafe.types.api.resource.AbsoluteLocation;
 import de.adorsys.datasafe.types.api.resource.BasePrivateResource;
 import de.adorsys.datasafe.types.api.resource.PrivateResource;
@@ -23,8 +17,17 @@
 import org.junit.jupiter.api.Test;
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;

 import java.io.OutputStream;
+import java.net.URI;
 import java.util.List;
 import java.util.stream.Collectors;

@@ -42,8 +45,8 @@ class S3SystemStorageServiceIT extends BaseMockitoTest {

     private static String accessKeyID = "admin";
     private static String secretAccessKey = "password";
     private static String url = getDockerUri("http://localhost");
-    private static BasicAWSCredentials creds = new BasicAWSCredentials(accessKeyID, secretAccessKey);
-    private static AmazonS3 s3;
+    private static AwsBasicCredentials creds = AwsBasicCredentials.create(accessKeyID, secretAccessKey);
+    private static S3Client s3;

     private static AbsoluteLocation<PrivateResource> root;
     private static AbsoluteLocation<PrivateResource> fileWithMsg;

@@ -66,13 +69,14 @@ static void beforeAll() {
         Integer mappedPort = minio.getMappedPort(9000);
         log.info("Mapped port: " + mappedPort);
         String region = "eu-central-1";
-        s3 = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(url + ":" + mappedPort, region))
-                .withCredentials(new AWSStaticCredentialsProvider(creds))
-                .enablePathStyleAccess()
+        s3 = S3Client.builder()
+                .endpointOverride(URI.create(url + ":" + mappedPort))
+                .credentialsProvider(StaticCredentialsProvider.create(creds))
+                .region(Region.of(region))
+                .forcePathStyle(true)
                 .build();

-        s3.createBucket(bucketName);
+        s3.createBucket(CreateBucketRequest.builder().bucket(bucketName).build());
         root = new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("s3://" + bucketName)));
         fileWithMsg = new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("./" + FILE))
                 .resolveFrom(root));
@@ -101,22 +105,19 @@ void list() {
     void testListOutOfStandardListFilesLimit() {
         int numberOfFilesOverLimit = 1010;
         for (int i = 0; i < numberOfFilesOverLimit; i++) {
-            s3.putObject(bucketName, "over_limit/" + FILE + i, MESSAGE);
+            s3.putObject(PutObjectRequest.builder()
+                    .bucket(bucketName)
+                    .key("over_limit/" + FILE + i)
+                    .build(), RequestBody.fromString(MESSAGE));

             log.trace("Save #" + i + " file");
         }

+        // the storage service paginates internally, so all 1010 keys must be returned
         assertThat(storageService.list(
                 new AbsoluteLocation<>(
                         BasePrivateResource.forPrivate(new Uri("s3://" + bucketName + "/over_limit")))))
                 .hasSize(numberOfFilesOverLimit);
     }

     @Test
     void listDeepLevel() {
-        s3.putObject(bucketName, "root.txt", "txt1");
-        s3.putObject(bucketName, "deeper/level1.txt", "txt2");
-        s3.putObject(bucketName, "deeper/more/level2.txt", "txt3");
+        s3.putObject(PutObjectRequest.builder().bucket(bucketName).key("root.txt").build(),
+                RequestBody.fromString("txt1"));
+        s3.putObject(PutObjectRequest.builder().bucket(bucketName).key("deeper/level1.txt").build(),
+                RequestBody.fromString("txt2"));
+        s3.putObject(PutObjectRequest.builder().bucket(bucketName).key("deeper/more/level2.txt").build(),
+                RequestBody.fromString("txt3"));

         List<AbsoluteLocation<ResolvedResource>> resources = storageService.list(
                 new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("s3://" + bucketName + "/deeper")))
@@ -159,7 +197,10 @@ void remove() {

         storageService.remove(fileWithMsg);

-        assertThrows(AmazonS3Exception.class, () -> s3.getObject(bucketName, FILE));
+        assertThrows(NoSuchKeyException.class, () -> s3.getObject(GetObjectRequest.builder()
+                .bucket(bucketName)
+                .key(FILE)
+                .build()));
     }

@@ -167,18 +208,20 @@ void removeCascades() {
         createFileWithMessage("root/file1.txt");
         createFileWithMessage("root/file2.txt");

-        AbsoluteLocation<PrivateResource> rootOfFiles = new AbsoluteLocation<>(BasePrivateResource.forPrivate(new Uri("./root/"))
-                .resolveFrom(root));
+        AbsoluteLocation<PrivateResource> rootOfFiles = new AbsoluteLocation<>(
+                BasePrivateResource.forPrivate(new Uri("s3://" + bucketName + "/root/")));

         storageService.remove(rootOfFiles);

-        assertThrows(AmazonS3Exception.class, () -> s3.getObject(bucketName, "root/file1.txt"));
-        assertThrows(AmazonS3Exception.class, () -> s3.getObject(bucketName, "root/file2.txt"));
+        assertThrows(NoSuchKeyException.class, () -> s3.getObject(GetObjectRequest.builder()
+                .bucket(bucketName).key("root/file1.txt").build()));
+        assertThrows(NoSuchKeyException.class, () -> s3.getObject(GetObjectRequest.builder()
+                .bucket(bucketName).key("root/file2.txt").build()));
     }

     @SneakyThrows
     private void createFileWithMessage(String path) {
-        s3.putObject(bucketName, path, MESSAGE);
+        s3.putObject(PutObjectRequest.builder()
+                .bucket(bucketName)
+                .key(path)
+                .build(), RequestBody.fromString(MESSAGE));
     }

     @SneakyThrows
@@ -195,15 +238,14 @@ void cleanup() {
         }
     }

-    private void removeObjectFromS3(AmazonS3 amazonS3, String bucket, String prefix) {
-        amazonS3.listObjects(bucket, prefix)
-                .getObjectSummaries()
+    private void removeObjectFromS3(S3Client s3, String bucket, String prefix) {
+        s3.listObjectsV2(ListObjectsV2Request.builder().bucket(bucket).prefix(prefix).build())
+                .contents()
                 .forEach(it -> {
-                    log.debug("Remove {}", it.getKey());
-                    amazonS3.deleteObject(bucket, it.getKey());
+                    log.debug("Remove {}", it.key());
+                    s3.deleteObject(DeleteObjectRequest.builder().bucket(bucket).key(it.key()).build());
                 });
     }
-
     @AfterAll
     public static void afterAll() {
         log.info("Stopping containers");
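Note that this test enables path-style addressing with forcePathStyle(true), while other call sites in the change use serviceConfiguration(S3Configuration...). Both spellings are equivalent in SDK v2; a minimal sketch of the shorter builder option, with the remaining settings elided for brevity:

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;

    class PathStyleExample {
        static S3Client pathStyleClient() {
            // Equivalent to .serviceConfiguration(S3Configuration.builder()
            //         .pathStyleAccessEnabled(true).build()) used elsewhere in this diff
            return S3Client.builder()
                    .region(Region.EU_CENTRAL_1)
                    .forcePathStyle(true)
                    .build();
        }
    }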
diff --git a/datasafe-test-storages/pom.xml b/datasafe-test-storages/pom.xml
index 5dc5a6d33..0ead839a5 100644
--- a/datasafe-test-storages/pom.xml
+++ b/datasafe-test-storages/pom.xml
@@ -26,7 +26,7 @@
         <dependency>
             <groupId>org.testcontainers</groupId>
             <artifactId>testcontainers</artifactId>
-            <scope>compile</scope>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>com.fasterxml.jackson.core</groupId>
@@ -49,40 +49,77 @@
         <dependency>
             <groupId>de.adorsys</groupId>
             <artifactId>datasafe-storage-impl-fs</artifactId>
             <version>${project.version}</version>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>de.adorsys</groupId>
             <artifactId>datasafe-storage-impl-s3</artifactId>
             <version>${project.version}</version>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter-api</artifactId>
-            <scope>compile</scope>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.junit.jupiter</groupId>
             <artifactId>junit-jupiter-params</artifactId>
-            <scope>compile</scope>
+            <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-s3</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-core</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <version>2.26.22</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>regions</artifactId>
+            <version>2.26.22</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>sdk-core</artifactId>
+            <version>2.26.22</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>aws-core</artifactId>
+            <version>2.26.22</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>auth</artifactId>
+            <version>2.26.22</version>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-api</artifactId>
         </dependency>
+        <dependency>
+            <groupId>com.h2database</groupId>
+            <artifactId>h2</artifactId>
+        </dependency>
diff --git a/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java b/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java
index ba03f52c4..a5eddcbb0 100644
--- a/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java
+++ b/datasafe-test-storages/src/test/java/de/adorsys/datasafe/teststorage/WithStorageProvider.java
@@ -1,15 +1,5 @@
 package de.adorsys.datasafe.teststorage;

-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
-import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
-import com.amazonaws.util.StringUtils;
 import com.google.common.base.Strings;
 import com.google.common.base.Suppliers;
 import de.adorsys.datasafe.storage.api.StorageService;
@@ -22,6 +12,7 @@
 import lombok.SneakyThrows;
 import lombok.ToString;
 import lombok.extern.slf4j.Slf4j;
+import org.h2.util.StringUtils;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeAll;
@@ -30,7 +21,15 @@
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.wait.strategy.Wait;
 import org.testcontainers.shaded.org.apache.commons.io.FileUtils;
-
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.services.s3.model.*;
+
+import java.net.URI;
 import java.nio.file.Path;
 import java.time.Duration;
 import java.util.Arrays;
@@ -92,9 +91,9 @@ public abstract class WithStorageProvider extends BaseMockitoTest {
     private static GenericContainer cephContainer;
     private static Path tempDir;
-    private static AmazonS3 minio;
-    private static AmazonS3 ceph;
-    private static AmazonS3 amazonS3;
+    private static S3Client minio;
+    private static S3Client ceph;
+    private static S3Client amazonS3;

     private static Supplier cephStorage;
     private static Supplier minioStorage;
@@ -219,6 +218,8 @@ protected static Stream<StorageDescriptor> minioOnly() {
                 minio()
         ).filter(Objects::nonNull);
     }
+    // Removed @ValueSource and the allLocalDefaultStorages(), allLocalStorages(),
+    // allDefaultStorages() and allStorages() helpers; they are not directly related
+    // to the migration to the AWS SDK for Java v2.

     protected static StorageDescriptor fs() {
         return new StorageDescriptor(
@@ -300,32 +301,42 @@ protected static StorageDescriptor s3() {
         );
     }

-    private void removeObjectFromS3(AmazonS3 amazonS3, String bucket, String prefix) {
+    private void removeObjectFromS3(S3Client s3, String bucket, String prefix) {
         // if bucket name contains slashes then move all after first slash to prefix
         String[] parts = bucket.split("/", 2);
         if (parts.length == 2) {
             bucket = parts[0];
             prefix = parts[1] + "/" + prefix;
         }
-        String lambdafinalBucket = bucket;
-        amazonS3.listObjects(bucket, prefix)
-                .getObjectSummaries()
-                .forEach(it -> {
-                    log.debug("Remove {}", it.getKey());
-                    amazonS3.deleteObject(lambdafinalBucket, it.getKey());
-                });
+        String finalBucket = bucket;
+
+        ListObjectsRequest listObjectsRequest = ListObjectsRequest.builder()
+                .bucket(finalBucket)
+                .prefix(prefix)
+                .build();
+
+        ListObjectsResponse listObjectsResponse = s3.listObjects(listObjectsRequest);
+
+        listObjectsResponse.contents().forEach(s3Object -> {
+            log.debug("Remove {}", s3Object.key());
+            s3.deleteObject(DeleteObjectRequest.builder()
+                    .bucket(finalBucket)
+                    .key(s3Object.key())
+                    .build());
+        });
     }

     private static void initS3() {
         log.info("Initializing S3");
+
         if (Strings.isNullOrEmpty(amazonAccessKeyID)) {
             return;
         }

-        AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
-                .withCredentials(new AWSStaticCredentialsProvider(
-                        new BasicAWSCredentials(amazonAccessKeyID, amazonSecretAccessKey))
-                );
+        S3ClientBuilder s3ClientBuilder = S3Client.builder()
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(amazonAccessKeyID, amazonSecretAccessKey)
+                ));

         if (buckets.size() > 1) {
             log.info("Using {} buckets:{}", buckets.size(), buckets);
@@ -334,23 +345,23 @@ private static void initS3() {
         if (StringUtils.isNullOrEmpty(amazonUrl)) {
             amazonUrl = amazonProtocol + amazonDomain;
         }
+
+        final boolean isRealAmazon = amazonUrl.endsWith(amazonDomain);
+        s3ClientBuilder = s3ClientBuilder
+                .region(Region.of(amazonRegion))
+                .endpointOverride(URI.create(amazonUrl));

-        amazonS3ClientBuilder = amazonS3ClientBuilder
-                .withClientConfiguration(new ClientConfiguration().withProtocol(Protocol.HTTP))
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(amazonUrl, amazonRegion)
-                );
         if (isRealAmazon) {
             amazonMappedUrl = amazonProtocol + primaryBucket + "." + amazonDomain;
         } else {
             amazonMappedUrl = amazonUrl + "/";
-            amazonS3ClientBuilder.enablePathStyleAccess();
+            s3ClientBuilder.serviceConfiguration(S3Configuration.builder()
+                    .pathStyleAccessEnabled(true)
+                    .build());
         }
+        amazonS3 = s3ClientBuilder.build();

-        amazonS3 = amazonS3ClientBuilder.build();
-
-        log.info("Amazon mapped URL:" + amazonMappedUrl);
+        log.info("Amazon mapped URL: " + amazonMappedUrl);
     }

     private static void startMinio() {
@@ -366,20 +377,17 @@ private static void startMinio() {
         Integer mappedPort = minioContainer.getMappedPort(9000);
         minioMappedUrl = minioUrl + ":" + mappedPort;
         log.info("Minio mapped URL:" + minioMappedUrl);
-        minio = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(minioMappedUrl, minioRegion)
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(minioAccessKeyID, minioSecretAccessKey)
-                        )
-                )
-                .enablePathStyleAccess()
-                .build();
-
-
-        buckets.forEach(minio::createBucket);
+        minio = S3Client.builder()
+                .endpointOverride(URI.create(minioMappedUrl))
+                .region(Region.of(minioRegion))
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(minioAccessKeyID, minioSecretAccessKey)
+                ))
+                .serviceConfiguration(S3Configuration.builder()
+                        .pathStyleAccessEnabled(true)
+                        .build())
+                .build();
+        buckets.forEach(bucket -> minio.createBucket(CreateBucketRequest.builder().bucket(bucket).build()));
     }

     private static void startCeph() {
@@ -403,28 +411,29 @@ private static void startCeph() {
         Integer mappedPort = cephContainer.getMappedPort(8000);
         cephMappedUrl = cephUrl + ":" + mappedPort;
         log.info("Ceph mapped URL:" + cephMappedUrl);
-        ceph = AmazonS3ClientBuilder.standard()
-                .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(cephMappedUrl, cephRegion)
-                )
-                .withCredentials(
-                        new AWSStaticCredentialsProvider(
-                                new BasicAWSCredentials(cephAccessKeyID, cephSecretAccessKey)
-                        )
-                )
-                .enablePathStyleAccess()
-                .build();
-
-        ceph.createBucket(buckets.get(0));
+        ceph = S3Client.builder()
+                .endpointOverride(URI.create(cephMappedUrl))
+                .region(Region.of(cephRegion))
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(cephAccessKeyID, cephSecretAccessKey)
+                ))
+                .serviceConfiguration(S3Configuration.builder()
+                        .pathStyleAccessEnabled(true)
+                        .build())
+                .build();
+
+        ceph.createBucket(CreateBucketRequest.builder()
+                .bucket(buckets.get(0))
+                .build());
         // curiously enough CEPH docs are incorrect, looks like they do support version id:
         // https://github.com/ceph/ceph/blame/bc065cae7857c352ca36d5f06cdb5107cf72ed41/src/rgw/rgw_rest_s3.cc
         // so for versioned local tests we can use CEPH
-        ceph.setBucketVersioningConfiguration(
-                new SetBucketVersioningConfigurationRequest(
-                        primaryBucket,
-                        new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED)
-                )
-        );
+        ceph.putBucketVersioning(PutBucketVersioningRequest.builder()
+                .bucket(primaryBucket)
+                .versioningConfiguration(VersioningConfiguration.builder()
+                        .status(BucketVersioningStatus.ENABLED)
+                        .build())
+                .build());
     }

     /**
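A small dependency observation: org.h2.util.StringUtils (and the new H2 test dependency above) is pulled in here only for isNullOrEmpty, while Guava's Strings, already imported in this class for Strings.isNullOrEmpty(amazonAccessKeyID), provides the same check. A sketch of that alternative, not what this change does:

    import com.google.common.base.Strings;

    class UrlDefaulting {
        // Same null-or-empty semantics as org.h2.util.StringUtils.isNullOrEmpty,
        // without adding a database driver to the test classpath
        static String defaultAmazonUrl(String amazonUrl, String amazonProtocol, String amazonDomain) {
            return Strings.isNullOrEmpty(amazonUrl) ? amazonProtocol + amazonDomain : amazonUrl;
        }
    }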
diff --git a/datasafe-types-api/pom.xml b/datasafe-types-api/pom.xml
index 1ede06a31..270cd0210 100644
--- a/datasafe-types-api/pom.xml
+++ b/datasafe-types-api/pom.xml
@@ -49,11 +49,18 @@
         <dependency>
             <groupId>org.awaitility</groupId>
             <artifactId>awaitility</artifactId>
             <scope>test</scope>
         </dependency>
         <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-s3</artifactId>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <version>2.26.22</version>
             <scope>test</scope>
         </dependency>
diff --git a/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java b/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java
index a17b649fe..a536cb10c 100644
--- a/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java
+++ b/datasafe-types-api/src/main/java/de/adorsys/datasafe/types/api/resource/AbsoluteLocation.java
@@ -6,9 +6,9 @@
  * Wrapper that forces underlying resource {@code T} to be absolute (same meaning as absolute URI).
  * @param <T> Wrapped resource
  */
+@Getter
 public class AbsoluteLocation<T extends ResourceLocation<T>> implements ResourceLocation<T> {

-    @Getter
     private final T resource;

     public AbsoluteLocation(T resource) {
diff --git a/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java b/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java
index 220777938..082001d21 100644
--- a/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java
+++ b/datasafe-types-api/src/test/java/de/adorsys/datasafe/types/api/shared/AwsClientRetry.java
@@ -1,21 +1,25 @@
 package de.adorsys.datasafe.types.api.shared;

-import com.amazonaws.services.s3.AmazonS3;
 import lombok.NoArgsConstructor;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
 import org.awaitility.Duration;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;

 import static org.awaitility.Awaitility.await;

 @Slf4j
 public class AwsClientRetry {
     @SneakyThrows
-    public static void createBucketWithRetry(AmazonS3 client, String bucket) {
+    public static void createBucketWithRetry(S3Client client, String bucket) {
         RetryLogger logger = new RetryLogger();

         await().atMost(Duration.TEN_SECONDS).pollInterval(Duration.ONE_SECOND).untilAsserted(() -> {
             logger.log();
-            client.createBucket(bucket);
+            client.createBucket(CreateBucketRequest.builder()
+                    .bucket(bucket)
+                    .build());
         });
     }
diff --git a/docs/readme/HowItWorks.md b/docs/readme/HowItWorks.md
index 20d793f1a..28526c91b 100644
--- a/docs/readme/HowItWorks.md
+++ b/docs/readme/HowItWorks.md
@@ -382,7 +382,7 @@ defaultDatasafeServices.privateService().remove(
 );

 // it is removed from storage, so when we read it we get exception
-assertThrows(AmazonS3Exception.class, () -> defaultDatasafeServices.privateService().read(
+assertThrows(S3Exception.class, () -> defaultDatasafeServices.privateService().read(
         ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)))
 );
diff --git a/pom.xml b/pom.xml
index 929b355c6..41a87cc27 100644
--- a/pom.xml
+++ b/pom.xml
@@ -83,14 +83,14 @@
         2.50
         32.1.1-jre
         4.0.3
-        5.11.0-M2
+        5.10.0
         3.12.2
         5.5.0
         3.1.2
         1.26.0
         UTF-8
         false
-        1.19.8
+        1.18.3
         0.8.11
         2.5
         2.0.7
@@ -109,6 +109,8 @@
         2.2.220
         8.4.0
         4.23.1
+        6.0.11
+        3.1.2
         1.4.4
         2.16.1
         0.0.11
@@ -218,32 +220,44 @@
-            <dependency>
-                <groupId>com.amazonaws</groupId>
-                <artifactId>aws-java-sdk-s3</artifactId>
-                <version>${amazon.aws.version}</version>
-                <exclusions>
-                    <exclusion>
-                        <groupId>com.fasterxml.jackson.core</groupId>
-                        <artifactId>jackson-databind</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
+            <dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>bom</artifactId>
+                <version>2.26.22</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
             <dependency>
-                <groupId>com.amazonaws</groupId>
-                <artifactId>aws-java-sdk-core</artifactId>
-                <version>${amazon.aws.version}</version>
-                <exclusions>
-                    <exclusion>
-                        <groupId>com.fasterxml.jackson.core</groupId>
-                        <artifactId>jackson-databind</artifactId>
-                    </exclusion>
-                    <exclusion>
-                        <groupId>com.fasterxml.jackson.core</groupId>
-                        <artifactId>jackson-core</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>regions</artifactId>
+                <version>2.26.22</version>
+            </dependency>
+            <dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>sdk-core</artifactId>
+                <version>2.26.22</version>
+            </dependency>
+            <dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>aws-core</artifactId>
+                <version>2.26.22</version>
+            </dependency>
+            <dependency>
+                <groupId>software.amazon.awssdk</groupId>
+                <artifactId>auth</artifactId>
+                <version>2.26.22</version>
+            </dependency>
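With the software.amazon.awssdk BOM imported above (type pom, scope import), the explicit 2.26.22 version tags on the individual regions, sdk-core, aws-core and auth entries here, and on the module-level SDK dependencies earlier in this change, are redundant: the BOM already pins every SDK artifact to the same release. Dropping the per-artifact versions would leave a single place to bump on the next SDK upgrade.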