From 375a04c596f3d410f83e556957da3055273bf03b Mon Sep 17 00:00:00 2001 From: forkimenjeckayang Date: Thu, 11 Apr 2024 13:18:51 +0100 Subject: [PATCH 1/5] Renamed file(datasafe-examples): Renamed MultiDfsWithCredentialsExampleTest file to MultiDfsWithCredentialsExampleIT since it contains an integration test --- .../s3/MultiDfsWithCredentialsExampleIT.java | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java diff --git a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java new file mode 100644 index 000000000..cf779ff93 --- /dev/null +++ b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java @@ -0,0 +1,255 @@ +package de.adorsys.datasafe.examples.business.s3; + +import com.amazonaws.services.s3.AmazonS3; +import dagger.Lazy; +import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices; +import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; +import de.adorsys.datasafe.directory.api.profile.keys.StorageKeyStoreOperations; +import de.adorsys.datasafe.directory.api.types.StorageCredentials; +import de.adorsys.datasafe.directory.api.types.UserPrivateProfile; +import de.adorsys.datasafe.directory.impl.profile.config.DFSConfigWithStorageCreds; +import de.adorsys.datasafe.directory.impl.profile.dfs.BucketAccessServiceImpl; +import de.adorsys.datasafe.directory.impl.profile.dfs.BucketAccessServiceImplRuntimeDelegatable; +import de.adorsys.datasafe.directory.impl.profile.dfs.RegexAccessServiceWithStorageCredentialsImpl; +import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; +import de.adorsys.datasafe.storage.api.RegexDelegatingStorage; +import de.adorsys.datasafe.storage.api.StorageService; +import de.adorsys.datasafe.storage.api.UriBasedAuthStorageService; +import de.adorsys.datasafe.storage.impl.s3.S3ClientFactory; +import de.adorsys.datasafe.storage.impl.s3.S3StorageService; +import de.adorsys.datasafe.types.api.actions.ReadRequest; +import de.adorsys.datasafe.types.api.actions.WriteRequest; +import de.adorsys.datasafe.types.api.context.BaseOverridesRegistry; +import de.adorsys.datasafe.types.api.context.overrides.OverridesRegistry; +import de.adorsys.datasafe.types.api.resource.AbsoluteLocation; +import de.adorsys.datasafe.types.api.resource.BasePrivateResource; +import de.adorsys.datasafe.types.api.resource.StorageIdentifier; +import de.adorsys.datasafe.types.api.shared.AwsClientRetry; +import de.adorsys.datasafe.types.api.utils.ExecutorServiceUtil; +import lombok.SneakyThrows; +import lombok.experimental.Delegate; +import lombok.extern.slf4j.Slf4j; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; + +import java.io.OutputStream; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.EnumMap; +import java.util.HashMap; +import java.util.Map; +import 
java.util.concurrent.ExecutorService; +import java.util.regex.Pattern; + +import static de.adorsys.datasafe.examples.business.s3.MinioContainerId.DIRECTORY_BUCKET; +import static de.adorsys.datasafe.examples.business.s3.MinioContainerId.FILES_BUCKET_ONE; +import static de.adorsys.datasafe.examples.business.s3.MinioContainerId.FILES_BUCKET_TWO; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This example shows how a client can register a storage system and securely store its access details. + * Here, we will use 2 Datasafe class instances - one for securely storing user access credentials + * - configBucket - and the other for accessing users' private files stored in + * filesBucketOne, filesBucketTwo. + */ +@Slf4j +class MultiDfsWithCredentialsExampleTest { + + private static final String REGION = "eu-central-1"; + private static final ExecutorService EXECUTOR = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService(4, 4); + + private static Map<MinioContainerId, GenericContainer> minios = new EnumMap<>(MinioContainerId.class); + private static AmazonS3 directoryClient = null; + private static Map<MinioContainerId, String> endpointsByHost = new HashMap<>(); + + @BeforeAll + static void startup() { + // Create all required minio-backed S3 buckets: + Arrays.stream(MinioContainerId.values()).forEach(it -> { + GenericContainer minio = createAndStartMinio(it.getAccessKey(), it.getSecretKey()); + minios.put(it, minio); + + String endpoint = getDockerUri("http://127.0.0.1") + ":" + minio.getFirstMappedPort() + "/"; + endpointsByHost.put(it, endpoint + REGION + "/" + it.getBucketName() + "/"); + log.info("MINIO for {} is available at: {} with access: '{}'/'{}'", it, endpoint, it.getAccessKey(), + it.getSecretKey()); + + AmazonS3 client = S3ClientFactory.getClient( + endpoint, + REGION, + it.getAccessKey(), + it.getSecretKey() + ); + + AwsClientRetry.createBucketWithRetry(client, it.getBucketName()); + + if (it.equals(DIRECTORY_BUCKET)) { + directoryClient = client; + } + }); + } + + @AfterAll + static void shutdown() { + minios.values().forEach(GenericContainer::stop); + } + + @Test + @SneakyThrows + void testMultiUserStorageUserSetup() { + // BEGIN_SNIPPET:Datasafe with multi-dfs setup + String directoryBucketS3Uri = "s3://" + DIRECTORY_BUCKET.getBucketName() + "/"; + // static client that will be used to access `directory` bucket: + StorageService directoryStorage = new S3StorageService( + directoryClient, + DIRECTORY_BUCKET.getBucketName(), + EXECUTOR + ); + + OverridesRegistry registry = new BaseOverridesRegistry(); + DefaultDatasafeServices multiDfsDatasafe = DaggerDefaultDatasafeServices + .builder() + .config(new DFSConfigWithStorageCreds(directoryBucketS3Uri, "PAZZWORT"::toCharArray)) + // This storage service will route requests to the proper bucket based on URI content: + // URI with directoryBucket goes to `directoryStorage` + // URI with filesBucketOne will get dynamically generated S3Storage + // URI with filesBucketTwo will get dynamically generated S3Storage + .storage( + new RegexDelegatingStorage( + ImmutableMap.<Pattern, StorageService>builder() + // bind URI that contains `directoryBucket` to directoryStorage + .put(Pattern.compile(directoryBucketS3Uri + ".+"), directoryStorage) + .put( + Pattern.compile(getDockerUri("http://127.0.0.1") + ".+"), + // Dynamically creates S3 client with bucket name equal to host value + new UriBasedAuthStorageService( + acc -> new S3StorageService( + S3ClientFactory.getClient( + acc.getEndpoint(), + acc.getRegion(), + acc.getAccessKey(), + acc.getSecretKey() + ), + // Bucket name is encoded in first path segment + 
acc.getBucketName(), + EXECUTOR + ) + ) + ).build() + ) + ) + .overridesRegistry(registry) + .build(); + // Instead of the default BucketAccessService we will use a service that reads storage access credentials from + // the keystore + BucketAccessServiceImplRuntimeDelegatable.overrideWith( + registry, args -> new WithCredentialProvider(args.getStorageKeyStoreOperations()) + ); + + // John will have all his private files stored on `filesBucketOne` and `filesBucketTwo`. + // Depending on the path of the file - filesBucketOne or filesBucketTwo - requests will be routed to the proper bucket. + // I.e. path filesBucketOne/path/to/file will end up in `filesBucketOne` with key path/to/file; + // his profile and access credentials for `filesBucketOne` will be in `configBucket` + UserIDAuth john = new UserIDAuth("john", "secret"::toCharArray); + // At this point, nothing suggests that John has his own storage credentials: + multiDfsDatasafe.userProfile().registerUsingDefaults(john); + + // Tell the system that John will use his own storage credentials - regex match: + StorageIdentifier bucketOne = new StorageIdentifier(endpointsByHost.get(FILES_BUCKET_ONE) + ".+"); + StorageIdentifier bucketTwo = new StorageIdentifier(endpointsByHost.get(FILES_BUCKET_TWO) + ".+"); + // Set location for John's credentials keystore and put storage credentials into it: + UserPrivateProfile profile = multiDfsDatasafe.userProfile().privateProfile(john); + profile.getPrivateStorage().put( + bucketOne, + new AbsoluteLocation<>(BasePrivateResource.forPrivate(endpointsByHost.get(FILES_BUCKET_ONE) + "/")) + ); + profile.getPrivateStorage().put( + bucketTwo, + new AbsoluteLocation<>(BasePrivateResource.forPrivate(endpointsByHost.get(FILES_BUCKET_TWO) + "/")) + ); + multiDfsDatasafe.userProfile().updatePrivateProfile(john, profile); + + // register John's DFS access for `filesBucketOne` minio bucket + multiDfsDatasafe.userProfile().registerStorageCredentials( + john, + bucketOne, + new StorageCredentials( + FILES_BUCKET_ONE.getAccessKey(), + FILES_BUCKET_ONE.getSecretKey() + ) + ); + // register John's DFS access for `filesBucketTwo` minio bucket + multiDfsDatasafe.userProfile().registerStorageCredentials( + john, + bucketTwo, + new StorageCredentials( + FILES_BUCKET_TWO.getAccessKey(), + FILES_BUCKET_TWO.getSecretKey() + ) + ); + + // Configuring multi-storage is done; the user can now use it: + + // store this file on `filesBucketOne` + try (OutputStream os = multiDfsDatasafe.privateService() + .write(WriteRequest.forPrivate(john, bucketOne, "my/file.txt"))) { + os.write("Content on bucket number ONE".getBytes(StandardCharsets.UTF_8)); + } + + // store this file on `filesBucketTwo` + try (OutputStream os = multiDfsDatasafe.privateService() + .write(WriteRequest.forPrivate(john, bucketTwo, "my/file.txt"))) { + os.write("Content on bucket number TWO".getBytes(StandardCharsets.UTF_8)); + } + + // read file from `filesBucketOne` + assertThat(multiDfsDatasafe.privateService() + .read(ReadRequest.forPrivate(john, bucketOne, "my/file.txt")) + ).hasContent("Content on bucket number ONE"); + + // read file from `filesBucketTwo` + assertThat(multiDfsDatasafe.privateService() + .read(ReadRequest.forPrivate(john, bucketTwo, "my/file.txt")) + ).hasContent("Content on bucket number TWO"); + // END_SNIPPET + } + + private static GenericContainer createAndStartMinio(String accessKey, String secretKey) { + GenericContainer minioContainer = new GenericContainer("minio/minio") + .withExposedPorts(9000) + .withEnv("MINIO_ACCESS_KEY", accessKey) + .withEnv("MINIO_SECRET_KEY", 
secretKey) + .withCommand("server /data") + .waitingFor(Wait.defaultWaitStrategy()); + + minioContainer.start(); + return minioContainer; + } + + private static class WithCredentialProvider extends BucketAccessServiceImpl { + + @Delegate + private final RegexAccessServiceWithStorageCredentialsImpl delegate; + + private WithCredentialProvider(Lazy storageKeyStoreOperations) { + super(null); + this.delegate = new RegexAccessServiceWithStorageCredentialsImpl(storageKeyStoreOperations); + } + } + + @SneakyThrows + private static String getDockerUri(String defaultUri) { + String dockerHost = System.getenv("DOCKER_HOST"); + if (dockerHost == null) { + return defaultUri; + } + + URI dockerUri = new URI(dockerHost); + return "http://" + dockerUri.getHost(); + } +} From 29b6c3602f94ab132233c8d1f41bd3c16aed9153 Mon Sep 17 00:00:00 2001 From: forkimenjeckayang Date: Thu, 11 Apr 2024 13:21:14 +0100 Subject: [PATCH 2/5] Renamed file(datasafe-examples): Renamed BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest file to BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT since it contains an integration test --- ...thDefaultDatasafeOnVersionedStorageIT.java | 247 ++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java diff --git a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java new file mode 100644 index 000000000..0f41f74b9 --- /dev/null +++ b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java @@ -0,0 +1,247 @@ +package de.adorsys.datasafe.examples.business.s3; + +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.AmazonS3Exception; +import com.amazonaws.services.s3.model.BucketVersioningConfiguration; +import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest; +import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices; +import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; +import de.adorsys.datasafe.directory.impl.profile.config.DefaultDFSConfig; +import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; +import de.adorsys.datasafe.storage.impl.s3.S3StorageService; +import de.adorsys.datasafe.types.api.actions.ListRequest; +import de.adorsys.datasafe.types.api.actions.ReadRequest; +import de.adorsys.datasafe.types.api.actions.RemoveRequest; +import de.adorsys.datasafe.types.api.actions.WriteRequest; +import de.adorsys.datasafe.types.api.callback.PhysicalVersionCallback; +import de.adorsys.datasafe.types.api.resource.StorageVersion; +import de.adorsys.datasafe.types.api.utils.ExecutorServiceUtil; +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import 
org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledIfSystemProperty; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.OutputStream; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.atomic.AtomicReference; + +import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; + +/** + * This test shows simplistic usage of Datasafe default services that reside on a versioned storage system. + */ +@Slf4j +@DisabledIfSystemProperty(named = "SKIP_CEPH", matches = "true") +class BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest { + + private static final String MY_OWN_FILE_TXT = "my/own/file.txt"; + + private static final String VERSIONED_BUCKET_NAME = "home"; + private static final String ACCESS_KEY = "access"; + private static final String SECRET_KEY = "secret"; + + private static GenericContainer cephContainer; + private static AmazonS3 cephS3; + private static String cephMappedUrl; + + private DefaultDatasafeServices defaultDatasafeServices; + + /** + * This creates a CEPH Rados gateway in a docker container and creates an S3 client for it. + */ + @BeforeAll + static void createServices() { + log.info("Starting CEPH"); + // Create CEPH container: + cephContainer = new GenericContainer("ceph/daemon") + .withExposedPorts(8000, 5000) + .withEnv("RGW_FRONTEND_PORT", "8000") + .withEnv("SREE_PORT", "5000") + .withEnv("DEBUG", "verbose") + .withEnv("CEPH_DEMO_UID", "nano") + .withEnv("MON_IP", "127.0.0.1") + .withEnv("CEPH_PUBLIC_NETWORK", "0.0.0.0/0") + .withEnv("CEPH_DAEMON", "demo") + .withEnv("DEMO_DAEMONS", "mon,mgr,osd,rgw") + .withEnv("CEPH_DEMO_ACCESS_KEY", ACCESS_KEY) + .withEnv("CEPH_DEMO_SECRET_KEY", SECRET_KEY) + .withCommand("mkdir -p /etc/ceph && mkdir -p /var/lib/ceph && /entrypoint.sh") + .waitingFor(Wait.defaultWaitStrategy()); + + cephContainer.start(); + Integer mappedPort = cephContainer.getMappedPort(8000); + // URL for S3 API/bucket root: + cephMappedUrl = getDockerUri("http://0.0.0.0") + ":" + mappedPort; + log.info("Ceph mapped URL: {}", cephMappedUrl); + cephS3 = AmazonS3ClientBuilder.standard() + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration(cephMappedUrl, "us-east-1") + ) + .withCredentials( + new AWSStaticCredentialsProvider( + new BasicAWSCredentials(ACCESS_KEY, SECRET_KEY) + ) + ) + .enablePathStyleAccess() + .build(); + + // Create a bucket in CEPH that will support versioning + cephS3.createBucket(VERSIONED_BUCKET_NAME); + cephS3.setBucketVersioningConfiguration( + new SetBucketVersioningConfigurationRequest( + VERSIONED_BUCKET_NAME, + new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED) + ) + ); + } + + @AfterAll + static void stopCeph() { + cephContainer.stop(); + } + + @BeforeEach + void init() { + // this will create all Datasafe files and user documents under the S3 bucket root; we assume that + // the versioned S3 bucket was already created + defaultDatasafeServices = DaggerDefaultDatasafeServices.builder() + .config(new DefaultDFSConfig(cephMappedUrl, "secret"::toCharArray)) + .storage(new S3StorageService( + cephS3, + VERSIONED_BUCKET_NAME, + ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService())) + .build(); + } + + /** + * The S3 storage adapter supports sending back the file version (if the S3 storage returns it) when storing an object + * to a bucket, and it allows reading an object by its version 
too. + */ + @Test + @SneakyThrows + void writeFileThenReadLatestAndReadByVersion() { + // BEGIN_SNIPPET:Versioned storage support - writing file and reading back + // creating new user + UserIDAuth user = registerUser("john"); + + // writing data to my/own/file.txt 3 times with different content: + // 1st time, writing into my/own/file.txt: + // Expanded snippet of how to capture the file version when writing an object: + AtomicReference<String> version = new AtomicReference<>(); + try (OutputStream os = defaultDatasafeServices.privateService() + .write(WriteRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT) + .toBuilder() + .callback((PhysicalVersionCallback) version::set) + .build()) + ) { + // Initial version will contain "Hello 1": + os.write("Hello 1".getBytes(StandardCharsets.UTF_8)); + } + // this variable has our initial file version: + String version1 = version.get(); + + // Write different data to the same file - my/own/file.txt - 2 more times: + String version2 = writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 2"); + // Last version will contain "Hello 3": + String version3 = writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 3"); + + // now, when we read the file without specifying a version - we see the latest file content: + assertThat(defaultDatasafeServices.privateService().read( + ReadRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT)) + ).hasContent("Hello 3"); + + // but if we specify a file version - we get its content: + assertThat(defaultDatasafeServices.privateService().read( + ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(version1))) + ).hasContent("Hello 1"); + // END_SNIPPET + + log.debug("version 1 " + version1); + log.debug("version 2 " + version2); + log.debug("version 3 " + version3); + assertThat(defaultDatasafeServices.privateService().list(ListRequest.forDefaultPrivate(user, ""))).hasSize(1); + assertThat(version1.equals(version2)).isFalse(); + assertThat(version1.equals(version3)).isFalse(); + } + + /** + * Example of how to remove a specific version id + */ + @Test + @SneakyThrows + void removeSpecificVersionId() { + // BEGIN_SNIPPET:Versioned storage support - removing specific version + // creating new user + UserIDAuth user = registerUser("john"); + + // writing data to my/own/file.txt 2 times with different content: + String versionId = writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 1"); + writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 2"); + + // now, we read the old file version + assertThat(defaultDatasafeServices.privateService().read( + ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId))) + ).hasContent("Hello 1"); + + // now, we remove the old file version + defaultDatasafeServices.privateService().remove( + RemoveRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)) + ); + + // it is removed from storage, so reading it yields an exception + assertThrows(AmazonS3Exception.class, () -> defaultDatasafeServices.privateService().read( + ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId))) + ); + + // but the latest file version is still available + assertThat(defaultDatasafeServices.privateService().read( + ReadRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT)) + ).hasContent("Hello 2"); + // END_SNIPPET + } + + @SneakyThrows + private String writeToPrivate(UserIDAuth user, String path, String fileContent) { + AtomicReference<String> version = new AtomicReference<>(); + try (OutputStream os = defaultDatasafeServices.privateService() + 
.write(WriteRequest.forDefaultPrivate(user, path) + .toBuilder() + .callback((PhysicalVersionCallback) version::set) + .build()) + ) { + os.write(fileContent.getBytes(StandardCharsets.UTF_8)); + } + + return version.get(); + } + + private UserIDAuth registerUser(String username) { + UserIDAuth creds = new UserIDAuth(username, ("passwrd" + username)::toCharArray); + defaultDatasafeServices.userProfile().registerUsingDefaults(creds); + return creds; + } + + @SneakyThrows + private static String getDockerUri(String defaultUri) { + String dockerHost = System.getenv("DOCKER_HOST"); + if (dockerHost == null) { + return defaultUri; + } + + URI dockerUri = new URI(dockerHost); + return "http://" + dockerUri.getHost(); + } +} From c1548d6ef50bc43e60edb876700d44f0fb4129f7 Mon Sep 17 00:00:00 2001 From: forkimenjeckayang Date: Thu, 11 Apr 2024 14:43:18 +0100 Subject: [PATCH 3/5] Renamed Duplicates(datasafe-examples):removed duplicate files --- .../MultiDfsWithCredentialsExampleTest.java | 255 ------------------ ...DefaultDatasafeOnVersionedStorageTest.java | 247 ----------------- 2 files changed, 502 deletions(-) delete mode 100644 datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleTest.java delete mode 100644 datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest.java diff --git a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleTest.java b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleTest.java deleted file mode 100644 index cf779ff93..000000000 --- a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleTest.java +++ /dev/null @@ -1,255 +0,0 @@ -package de.adorsys.datasafe.examples.business.s3; - -import com.amazonaws.services.s3.AmazonS3; -import dagger.Lazy; -import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices; -import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; -import de.adorsys.datasafe.directory.api.profile.keys.StorageKeyStoreOperations; -import de.adorsys.datasafe.directory.api.types.StorageCredentials; -import de.adorsys.datasafe.directory.api.types.UserPrivateProfile; -import de.adorsys.datasafe.directory.impl.profile.config.DFSConfigWithStorageCreds; -import de.adorsys.datasafe.directory.impl.profile.dfs.BucketAccessServiceImpl; -import de.adorsys.datasafe.directory.impl.profile.dfs.BucketAccessServiceImplRuntimeDelegatable; -import de.adorsys.datasafe.directory.impl.profile.dfs.RegexAccessServiceWithStorageCredentialsImpl; -import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; -import de.adorsys.datasafe.storage.api.RegexDelegatingStorage; -import de.adorsys.datasafe.storage.api.StorageService; -import de.adorsys.datasafe.storage.api.UriBasedAuthStorageService; -import de.adorsys.datasafe.storage.impl.s3.S3ClientFactory; -import de.adorsys.datasafe.storage.impl.s3.S3StorageService; -import de.adorsys.datasafe.types.api.actions.ReadRequest; -import de.adorsys.datasafe.types.api.actions.WriteRequest; -import de.adorsys.datasafe.types.api.context.BaseOverridesRegistry; -import de.adorsys.datasafe.types.api.context.overrides.OverridesRegistry; -import de.adorsys.datasafe.types.api.resource.AbsoluteLocation; 
-import de.adorsys.datasafe.types.api.resource.BasePrivateResource; -import de.adorsys.datasafe.types.api.resource.StorageIdentifier; -import de.adorsys.datasafe.types.api.shared.AwsClientRetry; -import de.adorsys.datasafe.types.api.utils.ExecutorServiceUtil; -import lombok.SneakyThrows; -import lombok.experimental.Delegate; -import lombok.extern.slf4j.Slf4j; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.wait.strategy.Wait; -import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; - -import java.io.OutputStream; -import java.net.URI; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.regex.Pattern; - -import static de.adorsys.datasafe.examples.business.s3.MinioContainerId.DIRECTORY_BUCKET; -import static de.adorsys.datasafe.examples.business.s3.MinioContainerId.FILES_BUCKET_ONE; -import static de.adorsys.datasafe.examples.business.s3.MinioContainerId.FILES_BUCKET_TWO; -import static org.assertj.core.api.Assertions.assertThat; - -/** - * This example shows how client can register storage system and securely store its access details. - * Here, we will use 2 Datasafe class instances - one for securely storing user access credentials - * - configBucket and another is for accessing users' private files stored in - * filesBucketOne, filesBucketTwo. - */ -@Slf4j -class MultiDfsWithCredentialsExampleTest { - - private static final String REGION = "eu-central-1"; - private static final ExecutorService EXECUTOR = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService(4, 4); - - private static Map minios = new EnumMap<>(MinioContainerId.class); - private static AmazonS3 directoryClient = null; - private static Map endpointsByHost = new HashMap<>(); - - @BeforeAll - static void startup() { - // Create all required minio-backed S3 buckets: - Arrays.stream(MinioContainerId.values()).forEach(it -> { - GenericContainer minio = createAndStartMinio(it.getAccessKey(), it.getSecretKey()); - minios.put(it, minio); - - String endpoint = getDockerUri("http://127.0.0.1") + ":" + minio.getFirstMappedPort() + "/"; - endpointsByHost.put(it, endpoint + REGION + "/" + it.getBucketName() + "/"); - log.info("MINIO for {} is available at: {} with access: '{}'/'{}'", it, endpoint, it.getAccessKey(), - it.getSecretKey()); - - AmazonS3 client = S3ClientFactory.getClient( - endpoint, - REGION, - it.getAccessKey(), - it.getSecretKey() - ); - - AwsClientRetry.createBucketWithRetry(client, it.getBucketName()); - - if (it.equals(DIRECTORY_BUCKET)) { - directoryClient = client; - } - }); - } - - @AfterAll - static void shutdown() { - minios.values().forEach(GenericContainer::stop); - } - - @Test - @SneakyThrows - void testMultiUserStorageUserSetup() { - // BEGIN_SNIPPET:Datasafe with multi-dfs setup - String directoryBucketS3Uri = "s3://" + DIRECTORY_BUCKET.getBucketName() + "/"; - // static client that will be used to access `directory` bucket: - StorageService directoryStorage = new S3StorageService( - directoryClient, - DIRECTORY_BUCKET.getBucketName(), - EXECUTOR - ); - - OverridesRegistry registry = new BaseOverridesRegistry(); - DefaultDatasafeServices multiDfsDatasafe = DaggerDefaultDatasafeServices - .builder() - .config(new DFSConfigWithStorageCreds(directoryBucketS3Uri, 
"PAZZWORT"::toCharArray)) - // This storage service will route requests to proper bucket based on URI content: - // URI with directoryBucket to `directoryStorage` - // URI with filesBucketOne will get dynamically generated S3Storage - // URI with filesBucketTwo will get dynamically generated S3Storage - .storage( - new RegexDelegatingStorage( - ImmutableMap.builder() - // bind URI that contains `directoryBucket` to directoryStorage - .put(Pattern.compile(directoryBucketS3Uri + ".+"), directoryStorage) - .put( - Pattern.compile(getDockerUri("http://127.0.0.1") + ".+"), - // Dynamically creates S3 client with bucket name equal to host value - new UriBasedAuthStorageService( - acc -> new S3StorageService( - S3ClientFactory.getClient( - acc.getEndpoint(), - acc.getRegion(), - acc.getAccessKey(), - acc.getSecretKey() - ), - // Bucket name is encoded in first path segment - acc.getBucketName(), - EXECUTOR - ) - ) - ).build() - ) - ) - .overridesRegistry(registry) - .build(); - // Instead of default BucketAccessService we will use service that reads storage access credentials from - // keystore - BucketAccessServiceImplRuntimeDelegatable.overrideWith( - registry, args -> new WithCredentialProvider(args.getStorageKeyStoreOperations()) - ); - - // John will have all his private files stored on `filesBucketOne` and `filesBucketOne`. - // Depending on path of file - filesBucketOne or filesBucketTwo - requests will be routed to proper bucket. - // I.e. path filesBucketOne/path/to/file will end up in `filesBucketOne` with key path/to/file - // his profile and access credentials for `filesBucketOne` will be in `configBucket` - UserIDAuth john = new UserIDAuth("john", "secret"::toCharArray); - // Here, nothing expects John has own storage credentials: - multiDfsDatasafe.userProfile().registerUsingDefaults(john); - - // Tell system that John will use his own storage credentials - regex match: - StorageIdentifier bucketOne = new StorageIdentifier(endpointsByHost.get(FILES_BUCKET_ONE) + ".+"); - StorageIdentifier bucketTwo = new StorageIdentifier(endpointsByHost.get(FILES_BUCKET_TWO) + ".+"); - // Set location for John's credentials keystore and put storage credentials into it: - UserPrivateProfile profile = multiDfsDatasafe.userProfile().privateProfile(john); - profile.getPrivateStorage().put( - bucketOne, - new AbsoluteLocation<>(BasePrivateResource.forPrivate(endpointsByHost.get(FILES_BUCKET_ONE) + "/")) - ); - profile.getPrivateStorage().put( - bucketTwo, - new AbsoluteLocation<>(BasePrivateResource.forPrivate(endpointsByHost.get(FILES_BUCKET_TWO) + "/")) - ); - multiDfsDatasafe.userProfile().updatePrivateProfile(john, profile); - - // register John's DFS access for `filesBucketOne` minio bucket - multiDfsDatasafe.userProfile().registerStorageCredentials( - john, - bucketOne, - new StorageCredentials( - FILES_BUCKET_ONE.getAccessKey(), - FILES_BUCKET_ONE.getSecretKey() - ) - ); - // register John's DFS access for `filesBucketTwo` minio bucket - multiDfsDatasafe.userProfile().registerStorageCredentials( - john, - bucketTwo, - new StorageCredentials( - FILES_BUCKET_TWO.getAccessKey(), - FILES_BUCKET_TWO.getSecretKey() - ) - ); - - // Configuring multi-storage is done, user can use his multi-storage: - - // store this file on `filesBucketOne` - try (OutputStream os = multiDfsDatasafe.privateService() - .write(WriteRequest.forPrivate(john, bucketOne, "my/file.txt"))) { - os.write("Content on bucket number ONE".getBytes(StandardCharsets.UTF_8)); - } - - // store this file on `filesBucketTwo` - try 
(OutputStream os = multiDfsDatasafe.privateService() - .write(WriteRequest.forPrivate(john, bucketTwo, "my/file.txt"))) { - os.write("Content on bucket number TWO".getBytes(StandardCharsets.UTF_8)); - } - - // read file from `filesBucketOne` - assertThat(multiDfsDatasafe.privateService() - .read(ReadRequest.forPrivate(john, bucketOne, "my/file.txt")) - ).hasContent("Content on bucket number ONE"); - - // read file from `filesBucketTwo` - assertThat(multiDfsDatasafe.privateService() - .read(ReadRequest.forPrivate(john, bucketTwo, "my/file.txt")) - ).hasContent("Content on bucket number TWO"); - // END_SNIPPET - } - - private static GenericContainer createAndStartMinio(String accessKey, String secretKey) { - GenericContainer minioContainer = new GenericContainer("minio/minio") - .withExposedPorts(9000) - .withEnv("MINIO_ACCESS_KEY", accessKey) - .withEnv("MINIO_SECRET_KEY", secretKey) - .withCommand("server /data") - .waitingFor(Wait.defaultWaitStrategy()); - - minioContainer.start(); - return minioContainer; - } - - private static class WithCredentialProvider extends BucketAccessServiceImpl { - - @Delegate - private final RegexAccessServiceWithStorageCredentialsImpl delegate; - - private WithCredentialProvider(Lazy storageKeyStoreOperations) { - super(null); - this.delegate = new RegexAccessServiceWithStorageCredentialsImpl(storageKeyStoreOperations); - } - } - - @SneakyThrows - private static String getDockerUri(String defaultUri) { - String dockerHost = System.getenv("DOCKER_HOST"); - if (dockerHost == null) { - return defaultUri; - } - - URI dockerUri = new URI(dockerHost); - return "http://" + dockerUri.getHost(); - } -} diff --git a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest.java b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest.java deleted file mode 100644 index 0f41f74b9..000000000 --- a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest.java +++ /dev/null @@ -1,247 +0,0 @@ -package de.adorsys.datasafe.examples.business.s3; - -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.BucketVersioningConfiguration; -import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest; -import de.adorsys.datasafe.business.impl.service.DaggerDefaultDatasafeServices; -import de.adorsys.datasafe.business.impl.service.DefaultDatasafeServices; -import de.adorsys.datasafe.directory.impl.profile.config.DefaultDFSConfig; -import de.adorsys.datasafe.encrypiton.api.types.UserIDAuth; -import de.adorsys.datasafe.storage.impl.s3.S3StorageService; -import de.adorsys.datasafe.types.api.actions.ListRequest; -import de.adorsys.datasafe.types.api.actions.ReadRequest; -import de.adorsys.datasafe.types.api.actions.RemoveRequest; -import de.adorsys.datasafe.types.api.actions.WriteRequest; -import de.adorsys.datasafe.types.api.callback.PhysicalVersionCallback; -import de.adorsys.datasafe.types.api.resource.StorageVersion; -import 
de.adorsys.datasafe.types.api.utils.ExecutorServiceUtil; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledIfSystemProperty; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.wait.strategy.Wait; - -import java.io.OutputStream; -import java.net.URI; -import java.nio.charset.StandardCharsets; -import java.util.concurrent.atomic.AtomicReference; - -import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; -import static org.junit.jupiter.api.Assertions.assertThrows; - -/** - * This test shows simplistic usage of Datasafe default services that reside on versioned storage system. - */ -@Slf4j -@DisabledIfSystemProperty(named = "SKIP_CEPH", matches = "true") -class BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest { - - private static final String MY_OWN_FILE_TXT = "my/own/file.txt"; - - private static final String VERSIONED_BUCKET_NAME = "home"; - private static final String ACCESS_KEY = "access"; - private static final String SECRET_KEY = "secret"; - - private static GenericContainer cephContainer; - private static AmazonS3 cephS3; - private static String cephMappedUrl; - - private DefaultDatasafeServices defaultDatasafeServices; - - /** - * This creates CEPH Rados gateway in docker container and creates S3 client for it. - */ - @BeforeAll - static void createServices() { - log.info("Starting CEPH"); - // Create CEPH container: - cephContainer = new GenericContainer("ceph/daemon") - .withExposedPorts(8000, 5000) - .withEnv("RGW_FRONTEND_PORT", "8000") - .withEnv("SREE_PORT", "5000") - .withEnv("DEBUG", "verbose") - .withEnv("CEPH_DEMO_UID", "nano") - .withEnv("MON_IP", "127.0.0.1") - .withEnv("CEPH_PUBLIC_NETWORK", "0.0.0.0/0") - .withEnv("CEPH_DAEMON", "demo") - .withEnv("DEMO_DAEMONS", "mon,mgr,osd,rgw") - .withEnv("CEPH_DEMO_ACCESS_KEY", ACCESS_KEY) - .withEnv("CEPH_DEMO_SECRET_KEY", SECRET_KEY) - .withCommand("mkdir -p /etc/ceph && mkdir -p /var/lib/ceph && /entrypoint.sh") - .waitingFor(Wait.defaultWaitStrategy()); - - cephContainer.start(); - Integer mappedPort = cephContainer.getMappedPort(8000); - // URL for S3 API/bucket root: - cephMappedUrl = getDockerUri("http://0.0.0.0") + ":" + mappedPort; - log.info("Ceph mapped URL: {}", cephMappedUrl); - cephS3 = AmazonS3ClientBuilder.standard() - .withEndpointConfiguration( - new AwsClientBuilder.EndpointConfiguration(cephMappedUrl, "us-east-1") - ) - .withCredentials( - new AWSStaticCredentialsProvider( - new BasicAWSCredentials(ACCESS_KEY, SECRET_KEY) - ) - ) - .enablePathStyleAccess() - .build(); - - // Create bucket in CEPH that will support versioning - cephS3.createBucket(VERSIONED_BUCKET_NAME); - cephS3.setBucketVersioningConfiguration( - new SetBucketVersioningConfigurationRequest( - VERSIONED_BUCKET_NAME, - new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED) - ) - ); - - - } - - @AfterAll - static void stopCeph() { - cephContainer.stop(); - } - - @BeforeEach - void init() { - // this will create all Datasafe files and user documents under S3 bucket root, we assume that - // S3 versioned bucket was already created - defaultDatasafeServices = DaggerDefaultDatasafeServices.builder() - .config(new DefaultDFSConfig(cephMappedUrl, "secret"::toCharArray)) - .storage(new S3StorageService( - cephS3, - VERSIONED_BUCKET_NAME, - 
ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService())) - .build(); - } - - /** - * S3 storage adapter supports sending back file version (if S3 storage returns it) when storing object to - * bucket and it allows reading object using its version too. - */ - @Test - @SneakyThrows - void writeFileThenReadLatestAndReadByVersion() { - // BEGIN_SNIPPET:Versioned storage support - writing file and reading back - // creating new user - UserIDAuth user = registerUser("john"); - - // writing data to my/own/file.txt 3 times with different content: - // 1st time, writing into my/own/file.txt: - // Expanded snippet of how to capture file version when writing object: - AtomicReference version = new AtomicReference<>(); - try (OutputStream os = defaultDatasafeServices.privateService() - .write(WriteRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT) - .toBuilder() - .callback((PhysicalVersionCallback) version::set) - .build()) - ) { - // Initial version will contain "Hello 1": - os.write("Hello 1".getBytes(StandardCharsets.UTF_8)); - } - // this variable has our initial file version: - String version1 = version.get(); - - // Write 2 more times different data to same file - my/own/file.txt: - String version2 = writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 2"); - // Last version will contain "Hello 3": - String version3 = writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 3"); - - // now, when we read file without specifying version - we see latest file content: - assertThat(defaultDatasafeServices.privateService().read( - ReadRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT)) - ).hasContent("Hello 3"); - - // but if we specify file version - we get content for it: - assertThat(defaultDatasafeServices.privateService().read( - ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(version1))) - ).hasContent("Hello 1"); - // END_SNIPPET - - log.debug("version 1 " + version1); - log.debug("version 2 " + version2); - log.debug("version 3 " + version3); - assertThat(defaultDatasafeServices.privateService().list(ListRequest.forDefaultPrivate(user, ""))).hasSize(1); - assertThat(version1.equals(version2)).isFalse(); - assertThat(version1.equals(version3)).isFalse(); - } - - /** - * Example of how to remove specific version id - */ - @Test - @SneakyThrows - void removeSpecificVersionId() { - // BEGIN_SNIPPET:Versioned storage support - removing specific version - // creating new user - UserIDAuth user = registerUser("john"); - - // writing data to my/own/file.txt 2 times with different content: - String versionId = writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 1"); - writeToPrivate(user, MY_OWN_FILE_TXT, "Hello 2"); - - // now, we read old file version - assertThat(defaultDatasafeServices.privateService().read( - ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId))) - ).hasContent("Hello 1"); - - // now, we remove old file version - defaultDatasafeServices.privateService().remove( - RemoveRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId)) - ); - - // it is removed from storage, so when we read it we get exception - assertThrows(AmazonS3Exception.class, () -> defaultDatasafeServices.privateService().read( - ReadRequest.forDefaultPrivateWithVersion(user, MY_OWN_FILE_TXT, new StorageVersion(versionId))) - ); - - // but latest file version is still available - assertThat(defaultDatasafeServices.privateService().read( - ReadRequest.forDefaultPrivate(user, MY_OWN_FILE_TXT)) - ).hasContent("Hello 2"); - // 
END_SNIPPET - } - - @SneakyThrows - private String writeToPrivate(UserIDAuth user, String path, String fileContent) { - AtomicReference version = new AtomicReference<>(); - try (OutputStream os = defaultDatasafeServices.privateService() - .write(WriteRequest.forDefaultPrivate(user, path) - .toBuilder() - .callback((PhysicalVersionCallback) version::set) - .build()) - ) { - os.write(fileContent.getBytes(StandardCharsets.UTF_8)); - } - - return version.get(); - } - - private UserIDAuth registerUser(String username) { - UserIDAuth creds = new UserIDAuth(username, ("passwrd" + username)::toCharArray); - defaultDatasafeServices.userProfile().registerUsingDefaults(creds); - return creds; - } - - @SneakyThrows - private static String getDockerUri(String defaultUri) { - String dockerHost = System.getenv("DOCKER_HOST"); - if (dockerHost == null) { - return defaultUri; - } - - URI dockerUri = new URI(dockerHost); - return "http://" + dockerUri.getHost(); - } -} From f37d1d510f6902af5a044e5e8c449008a193e822 Mon Sep 17 00:00:00 2001 From: forkimenjeckayang Date: Thu, 11 Apr 2024 17:14:05 +0100 Subject: [PATCH 4/5] Modification(datasafe-example):Modified MultiDfsWithCredentialsExample file --- .../examples/business/s3/MultiDfsWithCredentialsExampleIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java index cf779ff93..29b7e5f36 100644 --- a/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java +++ b/datasafe-examples/datasafe-examples-multidfs/src/test/java/de/adorsys/datasafe/examples/business/s3/MultiDfsWithCredentialsExampleIT.java @@ -58,7 +58,7 @@ * filesBucketOne, filesBucketTwo. 
*/ @Slf4j -class MultiDfsWithCredentialsExampleTest { +class MultiDfsWithCredentialsExampleIT { private static final String REGION = "eu-central-1"; private static final ExecutorService EXECUTOR = ExecutorServiceUtil.submitterExecutesOnStarvationExecutingService(4, 4); From f0cbb26403e8f1c09608786bb881b4afc5acc1d6 Mon Sep 17 00:00:00 2001 From: forkimenjeckayang Date: Thu, 11 Apr 2024 17:36:18 +0100 Subject: [PATCH 5/5] Modification(datasafe-examples): Modified BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT file --- ...seUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java index 0f41f74b9..c8429a2cb 100644 --- a/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java +++ b/datasafe-examples/datasafe-examples-versioned-s3/src/test/java/de/adorsys/datasafe/examples/business/s3/BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT.java @@ -43,7 +43,7 @@ */ @Slf4j @DisabledIfSystemProperty(named = "SKIP_CEPH", matches = "true") -class BaseUserOperationsWithDefaultDatasafeOnVersionedStorageTest { +class BaseUserOperationsWithDefaultDatasafeOnVersionedStorageIT { private static final String MY_OWN_FILE_TXT = "my/own/file.txt";
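Why the Test-to-IT rename matters: Maven selects test classes by naming convention. maven-surefire-plugin runs unit tests during the `test` phase (by default **/Test*.java, **/*Test.java, **/*Tests.java, **/*TestCase.java), while maven-failsafe-plugin runs integration tests during the `integration-test` and `verify` phases (by default **/IT*.java, **/*IT.java, **/*ITCase.java). The snippet below is a minimal sketch of the Failsafe wiring these modules would rely on, assuming the standard Surefire/Failsafe pairing; the version number is a placeholder and the repository's actual pom.xml configuration may differ:

<build>
  <plugins>
    <!-- Picks up the renamed *IT.java classes and runs them during integration-test;
         the verify goal fails the build afterwards if any integration test failed -->
    <plugin>
      <groupId>org.apache.maven.plugins</groupId>
      <artifactId>maven-failsafe-plugin</artifactId>
      <version>3.2.5</version> <!-- placeholder version, not taken from this repo -->
      <executions>
        <execution>
          <goals>
            <goal>integration-test</goal>
            <goal>verify</goal>
          </goals>
        </execution>
      </executions>
    </plugin>
  </plugins>
</build>

With this split, `mvn test` skips the Docker-dependent classes renamed in this series, while `mvn verify` executes them.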