12 changes: 9 additions & 3 deletions Sources/Containerization/ContainerManager.swift
@@ -370,11 +370,13 @@ public struct ContainerManager: Sendable {
/// - reference: The image reference.
/// - rootfsSizeInBytes: The size of the root filesystem in bytes. Defaults to 8 GiB.
/// - readOnly: Whether to mount the root filesystem as read-only.
/// - progress: Optional handler for tracking rootfs unpacking progress.
public mutating func create(
_ id: String,
reference: String,
rootfsSizeInBytes: UInt64 = 8.gib(),
readOnly: Bool = false,
progress: ProgressHandler? = nil,
configuration: (inout LinuxContainer.Configuration) throws -> Void
) async throws -> LinuxContainer {
let image = try await imageStore.get(reference: reference, pull: true)
@@ -383,6 +385,7 @@
image: image,
rootfsSizeInBytes: rootfsSizeInBytes,
readOnly: readOnly,
progress: progress,
configuration: configuration
)
}
@@ -393,19 +396,22 @@
/// - image: The image.
/// - rootfsSizeInBytes: The size of the root filesystem in bytes. Defaults to 8 GiB.
/// - readOnly: Whether to mount the root filesystem as read-only.
/// - progress: Optional handler for tracking rootfs unpacking progress.
public mutating func create(
_ id: String,
image: Image,
rootfsSizeInBytes: UInt64 = 8.gib(),
readOnly: Bool = false,
progress: ProgressHandler? = nil,
configuration: (inout LinuxContainer.Configuration) throws -> Void
) async throws -> LinuxContainer {
let path = try createContainerRoot(id)

var rootfs = try await unpack(
image: image,
destination: path.appendingPathComponent("rootfs.ext4"),
size: rootfsSizeInBytes
size: rootfsSizeInBytes,
progress: progress
)
if readOnly {
rootfs.options.append("ro")
@@ -462,10 +468,10 @@ public struct ContainerManager: Sendable {
return path
}

private func unpack(image: Image, destination: URL, size: UInt64) async throws -> Mount {
private func unpack(image: Image, destination: URL, size: UInt64, progress: ProgressHandler? = nil) async throws -> Mount {
do {
let unpacker = EXT4Unpacker(blockSizeInBytes: size)
return try await unpacker.unpack(image, for: .current, at: destination)
return try await unpacker.unpack(image, for: .current, at: destination, progress: progress)
} catch let err as ContainerizationError {
if err.code == .exists {
return .block(
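The new `progress:` parameter threads a handler from both `create` overloads down to the private `unpack` helper. A minimal caller sketch, assuming `ProgressHandler` is an async closure over a batch of `ProgressEvent` values (as the tests below use it); `makeManager()` and the image reference are placeholders:

```swift
import Containerization

// Sketch only: observe rootfs unpack progress while creating a container.
var manager = try await makeManager()  // placeholder for however the manager is constructed
let container = try await manager.create(
    "example",
    reference: "docker.io/library/alpine:latest",
    progress: { events in
        for event in events {
            // Event names seen in the unpack path: "add-total-size", "add-size", "add-items".
            print("unpack: \(event.event) = \(String(describing: event.value))")
        }
    }
) { _ in
    // Customize LinuxContainer.Configuration here if needed.
}
```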
22 changes: 22 additions & 0 deletions Sources/ContainerizationEXT4/Formatter+Unpack.swift
@@ -122,6 +122,28 @@ extension EXT4.Formatter {
compression: ContainerizationArchive.Filter = .gzip,
progress: ProgressHandler? = nil
) throws {
// Optional first pass: scan headers to get total size (fast, metadata only)
if let progress {
let sizeReader = try ArchiveReader(
format: format,
filter: compression,
file: source
)
var totalSize: Int64 = 0
for (entry, _) in sizeReader.makeStreamingIterator() {
try Task.checkCancellation()
if entry.fileType == .regular, let size = entry.size {
totalSize += Int64(size)
}
}
if totalSize > 0 {
Task {
await progress([ProgressEvent(event: "add-total-size", value: totalSize)])
}
}
}

// Second pass: unpack
let reader = try ArchiveReader(
format: format,
filter: compression,
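Only the new first pass appears in this hunk; the per-entry reporting during the second pass lives in the collapsed remainder of `unpack`. A rough sketch of the contract the tests below verify (one `add-items` event per entry, one `add-size` event per regular file), assuming the same streaming iterator API as the first pass:

```swift
// Sketch of the assumed per-entry reporting in the second pass (not shown in this hunk).
for (entry, _) in reader.makeStreamingIterator() {
    try Task.checkCancellation()
    // ... write the entry into the ext4 image (elided) ...
    if let progress {
        var batch = [ProgressEvent(event: "add-items", value: 1)]
        if entry.fileType == .regular, let size = entry.size {
            batch.append(ProgressEvent(event: "add-size", value: Int64(size)))
        }
        let events = batch
        Task { await progress(events) }
    }
}
```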
154 changes: 154 additions & 0 deletions Tests/ContainerizationEXT4Tests/TestFormatterUnpack.swift
@@ -18,6 +18,7 @@

#if os(macOS)
import ContainerizationArchive
import ContainerizationExtras
import Foundation
import Testing
import SystemPackage
@@ -130,6 +131,159 @@ struct Tar2EXT4Test: ~Copyable {
}
}

/// Collects progress events in a thread-safe manner.
private actor ProgressCollector {
var events: [ProgressEvent] = []

func append(_ newEvents: [ProgressEvent]) {
events.append(contentsOf: newEvents)
}

func allEvents() -> [ProgressEvent] {
events
}
}

struct UnpackProgressTest {
@Test func progressReportsAccurateSizes() async throws {
// Create an archive with files of known sizes
let tempDir = FileManager.default.uniqueTemporaryDirectory()
let archivePath = tempDir.appendingPathComponent("test.tar.gz", isDirectory: false)
let fsPath = FilePath(tempDir.appendingPathComponent("test.ext4.img", isDirectory: false))

defer {
try? FileManager.default.removeItem(at: tempDir)
}

// Create test data with specific sizes
let file1Data = Data(repeating: 0xAA, count: 1024) // 1 KiB
let file2Data = Data(repeating: 0xBB, count: 4096) // 4 KiB
let file3Data = Data(repeating: 0xCC, count: 512) // 512 bytes
let expectedTotalSize: Int64 = 1024 + 4096 + 512 // 5632 bytes

// Build the archive
let archiver = try ArchiveWriter(
configuration: ArchiveWriterConfiguration(format: .paxRestricted, filter: .gzip))
try archiver.open(file: archivePath)

try archiver.writeEntry(entry: WriteEntry.dir(path: "/data", permissions: 0o755), data: nil)
try archiver.writeEntry(
entry: WriteEntry.file(path: "/data/file1.bin", permissions: 0o644, size: Int64(file1Data.count)),
data: file1Data)
try archiver.writeEntry(
entry: WriteEntry.file(path: "/data/file2.bin", permissions: 0o644, size: Int64(file2Data.count)),
data: file2Data)
try archiver.writeEntry(
entry: WriteEntry.file(path: "/data/file3.bin", permissions: 0o644, size: Int64(file3Data.count)),
data: file3Data)
// Include an empty file to verify it doesn't break size calculations
try archiver.writeEntry(
entry: WriteEntry.file(path: "/data/empty.bin", permissions: 0o644, size: 0),
data: Data())
try archiver.finishEncoding()

// Set up progress collection
let collector = ProgressCollector()
let progressHandler: ProgressHandler = { events in
await collector.append(events)
}

// Unpack with progress tracking
let formatter = try EXT4.Formatter(fsPath)
try formatter.unpack(source: archivePath, progress: progressHandler)
try formatter.close()

// Allow async progress tasks to complete
try await Task.sleep(for: .milliseconds(100))

// Analyze collected events
let allEvents = await collector.allEvents()

var reportedTotalSize: Int64 = 0
var cumulativeSize: Int64 = 0
var itemCount: Int64 = 0

for event in allEvents {
switch event.event {
case "add-total-size":
let value = try #require(event.value as? Int64, "add-total-size value should be Int64")
reportedTotalSize += value
case "add-size":
let value = try #require(event.value as? Int64, "add-size value should be Int64")
cumulativeSize += value
case "add-items":
let value = try #require(event.value as? Int, "add-items value should be Int")
itemCount += Int64(value)
default:
break
}
}

// Verify the progress contract
#expect(
reportedTotalSize == expectedTotalSize,
"Total size should be \(expectedTotalSize) bytes, got \(reportedTotalSize)")
#expect(
cumulativeSize == expectedTotalSize,
"Cumulative size should equal total size (\(expectedTotalSize)), got \(cumulativeSize)")
#expect(
itemCount == 5,
"Should have processed 5 entries (1 dir + 4 files), got \(itemCount)")

// Verify incremental progress: we should get separate add-size events for each file
let addSizeEvents = allEvents.filter { $0.event == "add-size" }
#expect(
addSizeEvents.count == 4,
"Should have 4 add-size events (one per file, including empty), got \(addSizeEvents.count)")

// Verify individual file sizes were reported correctly
let reportedSizes = addSizeEvents.compactMap { $0.value as? Int64 }.sorted()
#expect(
reportedSizes == [0, 512, 1024, 4096],
"Individual file sizes should be [0, 512, 1024, 4096], got \(reportedSizes)")

// Verify add-total-size comes before add-size events (first pass before second pass)
if let totalSizeIndex = allEvents.firstIndex(where: { $0.event == "add-total-size" }),
let firstAddSizeIndex = allEvents.firstIndex(where: { $0.event == "add-size" }) {
#expect(
totalSizeIndex < firstAddSizeIndex,
"add-total-size should be reported before add-size events")
}
}

@Test func progressHandlerIsOptional() throws {
// Verify that unpacking works without a progress handler (existing behavior)
let tempDir = FileManager.default.uniqueTemporaryDirectory()
let archivePath = tempDir.appendingPathComponent("test.tar.gz", isDirectory: false)
let fsPath = FilePath(tempDir.appendingPathComponent("test.ext4.img", isDirectory: false))

defer {
try? FileManager.default.removeItem(at: tempDir)
}

let archiver = try ArchiveWriter(
configuration: ArchiveWriterConfiguration(format: .paxRestricted, filter: .gzip))
try archiver.open(file: archivePath)
try archiver.writeEntry(entry: WriteEntry.dir(path: "/test", permissions: 0o755), data: nil)
let data = Data(repeating: 0x42, count: 100)
try archiver.writeEntry(
entry: WriteEntry.file(path: "/test/file.bin", permissions: 0o644, size: Int64(data.count)),
data: data)
try archiver.finishEncoding()

// Unpack without progress handler - should not throw
let formatter = try EXT4.Formatter(fsPath)
try formatter.unpack(source: archivePath)
try formatter.close()

// Verify the file was unpacked correctly
let reader = try EXT4.EXT4Reader(blockDevice: fsPath)
let children = try reader.children(of: EXT4.RootInode)
let childNames = Set(children.map { $0.0 })
#expect(childNames.contains("test"), "Directory 'test' should exist in unpacked filesystem")
}
}

extension ContainerizationArchive.WriteEntry {
static func dir(path: String, permissions: UInt16) -> WriteEntry {
let entry = WriteEntry()