Merge pull request #2173 from adobe/2143-fix-file-deletion
Refactor deletion of all files on exit
afranken authored Dec 19, 2024
2 parents 069d87b + 8a742fa commit b30d8bc
Showing 12 changed files with 111 additions and 101 deletions.
6 changes: 5 additions & 1 deletion CHANGELOG.md
@@ -138,13 +138,17 @@ Version 4.x is JDK17 LTS bytecode compatible, with Docker and JUnit / direct Java integration
 * Features and fixes
   * Allow overriding headers in head object
   * Implement If-(Un)modified-Since handling (fixes #829)
+  * Close all InputStreams and OutputStreams
 * Refactorings
   * Use Tomcat instead of Jetty as the application container (fixes #2136)
   * "FROM" in Dockerfile did not match "as"
+  * Delete files on shutdown using a `DisposableBean` instead of `File#deleteOnExit()`
 * Version updates (deliverable dependencies)
-  * none
+  * Bump spring-boot.version from 3.3.5 to 3.4.1
 * Version updates (build dependencies)
   * Bump github/codeql-action from 3.27.6 to 3.27.9
   * Bump actions/upload-artifact from 4.4.3 to 4.5.0
   * Bump actions/setup-java from 4.5.0 to 4.6.0
   * Bump com.puppycrawl.tools:checkstyle from 10.20.2 to 10.21.0
   * Jackson 2.18.2 to 2.17.2 (remove override, use Spring-Boot supplied version)

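The headline change replaces per-file `File#deleteOnExit()` registrations with a single Spring `DisposableBean`. `deleteOnExit()` appends every registered path to a JVM-internal list that is kept in memory for the life of the process and only processed on normal JVM termination (in reverse registration order, and a directory is only deleted if it is empty by then); a `DisposableBean` instead runs when the application context shuts down and can clean the whole root folder in one pass. A minimal sketch of the two approaches — the `RootCleaner` name is illustrative, the PR's real implementation is the `StoreCleaner` class further down:

```java
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.springframework.beans.factory.DisposableBean;

class CleanupSketch {

  // Before: each file/folder registered itself with the JVM. The JVM keeps
  // these paths in memory until exit and deletes them only on normal
  // termination; directories must already be empty to disappear.
  static void registerPerFile(File file, boolean retainFilesOnExit) {
    if (!retainFilesOnExit) {
      file.deleteOnExit();
    }
  }

  // After: one bean empties the root folder when the Spring context closes.
  record RootCleaner(File rootFolder) implements DisposableBean {
    @Override
    public void destroy() throws IOException {
      FileUtils.cleanDirectory(rootFolder); // recursive, handles non-empty dirs
    }
  }
}
```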

(file name not shown)
@@ -99,7 +99,6 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpRange;
@@ -785,7 +784,13 @@ private static void extractBytesToOutputStream(HttpRange range, S3ObjectMetadata
try (var fis = Files.newInputStream(s3ObjectMetadata.dataPath())) {
var skip = fis.skip(range.getRangeStart(fileSize));
if (skip == range.getRangeStart(fileSize)) {
-      IOUtils.copy(new BoundedInputStream(fis, bytesToRead), outputStream);
+      try (var bis = BoundedInputStream
+          .builder()
+          .setInputStream(fis)
+          .setMaxCount(bytesToRead)
+          .get()) {
+        bis.transferTo(outputStream);
+      }
} else {
throw new IllegalStateException("Could not skip exact byte range");
}
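Both `IOUtils.copy` call sites in this PR move to commons-io's `BoundedInputStream.Builder` (recent commons-io releases deprecate the `BoundedInputStream` constructors in favor of the builder) plus the JDK's `InputStream#transferTo`, and they now close the bounded stream via try-with-resources. A self-contained sketch of the idiom, assuming commons-io 2.16+ on the classpath:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.input.BoundedInputStream;

public class BoundedCopyDemo {
  public static void main(String[] args) throws IOException {
    var source = new ByteArrayInputStream("hello world".getBytes(StandardCharsets.UTF_8));
    var target = new ByteArrayOutputStream();

    // Read at most 5 bytes from source; closing bis also closes source.
    try (var bis = BoundedInputStream.builder()
        .setInputStream(source)
        .setMaxCount(5)
        .get()) {
      bis.transferTo(target); // JDK 9+ equivalent of IOUtils.copy
    }

    System.out.println(target.toString(StandardCharsets.UTF_8)); // prints "hello"
  }
}
```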
(file name not shown)
@@ -58,8 +58,8 @@ public void verifyChecksum(Path path, String checksum, ChecksumAlgorithm checksu
public Pair<Path, String> toTempFile(InputStream inputStream, HttpHeaders httpHeaders) {
try {
var tempFile = Files.createTempFile("ObjectService", "toTempFile");
-      try (OutputStream os = Files.newOutputStream(tempFile)) {
-        InputStream wrappedStream = wrapStream(inputStream, httpHeaders);
+      try (var os = Files.newOutputStream(tempFile);
+          var wrappedStream = wrapStream(inputStream, httpHeaders)) {
wrappedStream.transferTo(os);
ChecksumAlgorithm algorithmFromSdk = checksumAlgorithmFromSdk(httpHeaders);
if (algorithmFromSdk != null
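The `toTempFile` hunk above also fixes a leak: the old code never closed the stream returned by `wrapStream`. Declaring both resources in a single try-with-resources closes them in reverse order of declaration (wrapped stream first, then the file output stream); the same "Close all InputStreams and OutputStreams" theme recurs below in `MultipartStore#completeMultipartUpload`, where the old code never closed the `Files.newOutputStream(tempFile)` stream. A tiny runnable demo of the close-order guarantee:

```java
public class CloseOrderDemo {
  record Resource(String name) implements AutoCloseable {
    @Override
    public void close() {
      System.out.println("closing " + name);
    }
  }

  public static void main(String[] args) {
    try (var os = new Resource("outputStream");
        var wrapped = new Resource("wrappedStream")) {
      System.out.println("copying");
    }
    // Prints: copying, closing wrappedStream, closing outputStream
  }
}
```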
BucketStore.java
@@ -51,16 +51,13 @@ public class BucketStore {
*/
private final Map<String, Object> lockStore = new ConcurrentHashMap<>();
private final File rootFolder;
-  private final boolean retainFilesOnExit;
private final DateTimeFormatter s3ObjectDateFormat;
private final ObjectMapper objectMapper;

public BucketStore(File rootFolder,
-      boolean retainFilesOnExit,
DateTimeFormatter s3ObjectDateFormat,
ObjectMapper objectMapper) {
this.rootFolder = rootFolder;
-    this.retainFilesOnExit = retainFilesOnExit;
this.s3ObjectDateFormat = s3ObjectDateFormat;
this.objectMapper = objectMapper;
}
@@ -309,9 +306,6 @@ List<UUID> loadBuckets(List<String> bucketNames) {
private void writeToDisk(BucketMetadata bucketMetadata) {
try {
var metaFile = getMetaFilePath(bucketMetadata.name()).toFile();
-      if (!retainFilesOnExit) {
-        metaFile.deleteOnExit();
-      }
synchronized (lockStore.get(bucketMetadata.name())) {
objectMapper.writeValue(metaFile, bucketMetadata);
}
@@ -328,9 +322,6 @@ private File createBucketFolder(String bucketName) {
try {
var bucketFolder = getBucketFolderPath(bucketName).toFile();
FileUtils.forceMkdir(bucketFolder);
-      if (!retainFilesOnExit) {
-        bucketFolder.deleteOnExit();
-      }
return bucketFolder;
} catch (final IOException e) {
throw new IllegalStateException("Can't create bucket directory!", e);
MultipartStore.java
@@ -50,7 +50,6 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.StreamSupport;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;
import org.apache.commons.lang3.stream.Streams;
import org.slf4j.Logger;
@@ -71,14 +70,10 @@ public class MultipartStore extends StoreBase {
* Any method modifying the underlying file must acquire the lock object before the modification.
*/
private final Map<UUID, Object> lockStore = new ConcurrentHashMap<>();
-  private final boolean retainFilesOnExit;
private final ObjectStore objectStore;
private final ObjectMapper objectMapper;

-  public MultipartStore(boolean retainFilesOnExit,
-      ObjectStore objectStore,
-      ObjectMapper objectMapper) {
-    this.retainFilesOnExit = retainFilesOnExit;
+  public MultipartStore(ObjectStore objectStore, ObjectMapper objectMapper) {
this.objectStore = objectStore;
this.objectMapper = objectMapper;
}
@@ -217,9 +212,7 @@ public String putPart(BucketMetadata bucket,
String partNumber,
Path path,
Map<String, String> encryptionHeaders) {
-    var file = inputPathToFile(path,
-        getPartPath(bucket, uploadId, partNumber),
-        retainFilesOnExit);
+    var file = inputPathToFile(path, getPartPath(bucket, uploadId, partNumber));

return hexDigest(encryptionHeaders.get(X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID), file);
}
@@ -255,29 +248,33 @@ public CompleteMultipartUploadResult completeMultipartUpload(BucketMetadata buck
)
.toList();
    Path tempFile = null;
-    try (var inputStream = toInputStream(partsPaths)) {
+    try {
      tempFile = Files.createTempFile("completeMultipartUpload", "");
-      inputStream.transferTo(Files.newOutputStream(tempFile));
-      var checksumFor = checksumFor(partsPaths, uploadInfo);
-      var etag = hexDigestMultipart(partsPaths);
-      objectStore.storeS3ObjectMetadata(bucket,
-          id,
-          key,
-          uploadInfo.contentType(),
-          uploadInfo.storeHeaders(),
-          tempFile,
-          uploadInfo.userMetadata(),
-          encryptionHeaders,
-          etag,
-          Collections.emptyList(), //TODO: no tags for multi part uploads?
-          uploadInfo.checksumAlgorithm(),
-          checksumFor,
-          uploadInfo.upload().owner(),
-          uploadInfo.storageClass()
-      );
-      FileUtils.deleteDirectory(partFolder.toFile());
-      return new CompleteMultipartUploadResult(location, uploadInfo.bucket(),
-          key, etag, uploadInfo, checksumFor);
+
+      try (var is = toInputStream(partsPaths);
+          var os = newOutputStream(tempFile)) {
+        is.transferTo(os);
+        var checksumFor = checksumFor(partsPaths, uploadInfo);
+        var etag = hexDigestMultipart(partsPaths);
+        objectStore.storeS3ObjectMetadata(bucket,
+            id,
+            key,
+            uploadInfo.contentType(),
+            uploadInfo.storeHeaders(),
+            tempFile,
+            uploadInfo.userMetadata(),
+            encryptionHeaders,
+            etag,
+            Collections.emptyList(), //TODO: no tags for multi part uploads?
+            uploadInfo.checksumAlgorithm(),
+            checksumFor,
+            uploadInfo.upload().owner(),
+            uploadInfo.storageClass()
+        );
+        FileUtils.deleteDirectory(partFolder.toFile());
+        return new CompleteMultipartUploadResult(location, uploadInfo.bucket(),
+            key, etag, uploadInfo, checksumFor);
+      }
} catch (IOException e) {
throw new IllegalStateException(String.format(
"Error finishing multipart upload bucket=%s, key=%s, id=%s, uploadId=%s",
@@ -387,13 +384,13 @@ private String copyPartToFile(BucketMetadata bucket,
var targetStream = newOutputStream(partFile.toPath())) {
var skip = sourceStream.skip(from);
if (skip == from) {
-        IOUtils.copy(BoundedInputStream
+        try (var bis = BoundedInputStream
            .builder()
            .setInputStream(sourceStream)
            .setMaxCount(len)
-            .get(),
-            targetStream
-        );
+            .get()) {
+          bis.transferTo(targetStream);
+        }
} else {
throw new IllegalStateException("Could not skip exact byte range");
}
@@ -447,11 +444,7 @@ private void verifyMultipartUploadPreparation(BucketMetadata bucket, UUID id, St

private boolean createPartsFolder(BucketMetadata bucket, String uploadId) {
var partsFolder = getPartsFolder(bucket, uploadId).toFile();
-    var created = partsFolder.mkdirs();
-    if (created && !retainFilesOnExit) {
-      partsFolder.deleteOnExit();
-    }
-    return created;
+    return partsFolder.mkdirs();
}

private Path getMultipartsFolder(BucketMetadata bucket) {
@@ -490,9 +483,6 @@ private void writeMetafile(BucketMetadata bucket, MultipartUploadInfo uploadInfo
try {
synchronized (lockStore.get(UUID.fromString(uploadId))) {
var metaFile = getUploadMetadataPath(bucket, uploadId).toFile();
-        if (!retainFilesOnExit) {
-          metaFile.deleteOnExit();
-        }
objectMapper.writeValue(metaFile, uploadInfo);
}
} catch (IOException e) {
ObjectStore.java
@@ -63,15 +63,12 @@ public class ObjectStore extends StoreBase {
*/
private final Map<UUID, Object> lockStore = new ConcurrentHashMap<>();

-  private final boolean retainFilesOnExit;
private final DateTimeFormatter s3ObjectDateFormat;

private final ObjectMapper objectMapper;

-  public ObjectStore(boolean retainFilesOnExit,
-      DateTimeFormatter s3ObjectDateFormat,
+  public ObjectStore(DateTimeFormatter s3ObjectDateFormat,
      ObjectMapper objectMapper) {
-    this.retainFilesOnExit = retainFilesOnExit;
this.s3ObjectDateFormat = s3ObjectDateFormat;
this.objectMapper = objectMapper;
}
@@ -109,7 +106,7 @@ public S3ObjectMetadata storeS3ObjectMetadata(BucketMetadata bucket,
lockStore.putIfAbsent(id, new Object());
synchronized (lockStore.get(id)) {
createObjectRootFolder(bucket, id);
-      var dataFile = inputPathToFile(path, getDataFilePath(bucket, id), retainFilesOnExit);
+      var dataFile = inputPathToFile(path, getDataFilePath(bucket, id));
var now = Instant.now();
var s3ObjectMetadata = new S3ObjectMetadata(
id,
@@ -445,9 +442,7 @@ void loadObjects(BucketMetadata bucketMetadata, Collection<UUID> ids) {
*/
private void createObjectRootFolder(BucketMetadata bucket, UUID id) {
var objectRootFolder = getObjectFolderPath(bucket, id).toFile();
-    if (objectRootFolder.mkdirs() && !retainFilesOnExit) {
-      objectRootFolder.deleteOnExit();
-    }
+    objectRootFolder.mkdirs();
}

private Path getObjectFolderPath(BucketMetadata bucket, UUID id) {
@@ -471,9 +466,6 @@ private void writeMetafile(BucketMetadata bucket, S3ObjectMetadata s3ObjectMetad
try {
synchronized (lockStore.get(id)) {
var metaFile = getMetaFilePath(bucket, id).toFile();
-        if (!retainFilesOnExit) {
-          metaFile.deleteOnExit();
-        }
objectMapper.writeValue(metaFile, s3ObjectMetadata);
}
} catch (IOException e) {
@@ -500,9 +492,6 @@ private void writeAclFile(BucketMetadata bucket, UUID id, AccessControlPolicy po
try {
synchronized (lockStore.get(id)) {
var aclFile = getAclFilePath(bucket, id).toFile();
-        if (!retainFilesOnExit) {
-          aclFile.deleteOnExit();
-        }
FileUtils.write(aclFile, objectMapper.writeValueAsString(policy), Charset.defaultCharset());
}
} catch (IOException e) {
StoreBase.java
@@ -34,13 +34,10 @@ abstract class StoreBase {
*
* @return the newly created File.
*/
-  File inputPathToFile(Path inputPath, Path filePath, boolean retainFilesOnExit) {
+  File inputPathToFile(Path inputPath, Path filePath) {
    var targetFile = filePath.toFile();
    try {
-      if (targetFile.createNewFile() && (!retainFilesOnExit)) {
-        targetFile.deleteOnExit();
-      }
-
+      targetFile.createNewFile();
try (var is = newInputStream(inputPath);
var os = newOutputStream(targetFile.toPath())) {
is.transferTo(os);
StoreCleaner.java (new file)
@@ -0,0 +1,42 @@
/*
* Copyright 2017-2024 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.adobe.testing.s3mock.store;

import static org.apache.commons.io.FileUtils.cleanDirectory;
import static org.apache.commons.io.FileUtils.deleteDirectory;
import static org.apache.commons.io.FileUtils.isSymlink;

import java.io.File;
import org.springframework.beans.factory.DisposableBean;

public class StoreCleaner implements DisposableBean {

private final File rootFolder;
private final boolean retainFilesOnExit;

public StoreCleaner(File rootFolder, boolean retainFilesOnExit) {
this.rootFolder = rootFolder;
this.retainFilesOnExit = retainFilesOnExit;
}

@Override
public void destroy() throws Exception {
if (!retainFilesOnExit && rootFolder.exists()) {
cleanDirectory(rootFolder);
}
}
}
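Spring calls `DisposableBean#destroy()` when the application context closes (Spring Boot also registers a JVM shutdown hook that closes the context), so cleanup now happens deterministically at context shutdown rather than best-effort at JVM exit. Note that `cleanDirectory` empties `rootFolder` but keeps the folder itself. A hedged sketch of the lifecycle, using a hand-built context rather than S3Mock's actual wiring:

```java
import java.io.File;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;

public class StoreCleanerLifecycleDemo {
  public static void main(String[] args) {
    var rootFolder = new File(System.getProperty("java.io.tmpdir"), "s3mock-demo");
    rootFolder.mkdirs(); // ignore result for this sketch

    var context = new AnnotationConfigApplicationContext();
    context.registerBean(StoreCleaner.class, () -> new StoreCleaner(rootFolder, false));
    context.refresh();

    // ... stores create buckets, objects, and multipart data under rootFolder ...

    context.close(); // invokes StoreCleaner#destroy() -> cleanDirectory(rootFolder)
  }
}
```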
StoreConfiguration.java
@@ -46,8 +46,7 @@ public class StoreConfiguration {
@Bean
ObjectStore objectStore(StoreProperties properties, List<String> bucketNames,
BucketStore bucketStore, ObjectMapper objectMapper) {
-    var objectStore = new ObjectStore(properties.retainFilesOnExit(),
-        S3_OBJECT_DATE_FORMAT, objectMapper);
+    var objectStore = new ObjectStore(S3_OBJECT_DATE_FORMAT, objectMapper);
for (var bucketName : bucketNames) {
var bucketMetadata = bucketStore.getBucketMetadata(bucketName);
if (bucketMetadata != null) {
@@ -60,8 +59,7 @@ ObjectStore objectStore(StoreProperties properties, List<String> bucketNames,
@Bean
BucketStore bucketStore(StoreProperties properties, File rootFolder, List<String> bucketNames,
ObjectMapper objectMapper) {
-    var bucketStore = new BucketStore(rootFolder, properties.retainFilesOnExit(),
-        S3_OBJECT_DATE_FORMAT, objectMapper);
+    var bucketStore = new BucketStore(rootFolder, S3_OBJECT_DATE_FORMAT, objectMapper);
//load existing buckets first
bucketStore.loadBuckets(bucketNames);

@@ -112,7 +110,7 @@ List<String> bucketNames(File rootFolder) {
MultipartStore multipartStore(StoreProperties properties,
ObjectStore objectStore,
ObjectMapper objectMapper) {
-    return new MultipartStore(properties.retainFilesOnExit(), objectStore, objectMapper);
+    return new MultipartStore(objectStore, objectMapper);
}

@Bean
@@ -151,11 +149,11 @@ File rootFolder(StoreProperties properties) {
root.getAbsolutePath(), properties.retainFilesOnExit());
}
}

-    if (!properties.retainFilesOnExit()) {
-      root.deleteOnExit();
-    }
-
    return root;
  }
+
+  @Bean
+  StoreCleaner storeCleaner(File rootFolder, StoreProperties storeProperties) {
+    return new StoreCleaner(rootFolder, storeProperties.retainFilesOnExit());
+  }
}
(diffs for the remaining changed files were not loaded on this page)