diff --git a/cmd/buildah/manifest.go b/cmd/buildah/manifest.go index 233ccf9b5b..bb73cacd7d 100644 --- a/cmd/buildah/manifest.go +++ b/cmd/buildah/manifest.go @@ -42,7 +42,10 @@ type manifestAnnotateOpts = struct { os, arch, variant, osVersion string features, osFeatures, annotations []string } -type manifestInspectOpts = struct{} +type manifestInspectOpts = struct { + authfile string + tlsVerify bool +} func init() { var ( @@ -199,6 +202,9 @@ func init() { Example: `buildah manifest inspect mylist:v1.11`, Args: cobra.MinimumNArgs(1), } + flags = manifestInspectCommand.Flags() + flags.StringVar(&manifestInspectOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + flags.BoolVar(&manifestInspectOpts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.") manifestInspectCommand.SetUsageTemplate(UsageTemplate()) manifestCommand.AddCommand(manifestInspectCommand) @@ -221,6 +227,7 @@ func init() { flags.StringVar(&manifestPushOpts.creds, "creds", "", "use `[username[:password]]` for accessing the registry") flags.StringVar(&manifestPushOpts.digestfile, "digestfile", "", "after copying the image, write the digest of the resulting digest to the file") flags.StringVarP(&manifestPushOpts.format, "format", "f", "", "manifest type (oci or v2s2) to attempt to use when pushing the manifest list (default is manifest type of source)") + flags.StringSliceVar(&manifestPushOpts.addCompression, "add-compression", nil, "add instances with selected compression while pushing") flags.BoolVarP(&manifestPushOpts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pushing images") flags.StringVar(&manifestPushOpts.signBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`") flags.StringVar(&manifestPushOpts.signaturePolicy, 
"signature-policy", "", "`pathname` of signature policy file (not usually used)") @@ -696,6 +703,11 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO } func manifestInspectCmd(c *cobra.Command, args []string, opts manifestInspectOpts) error { + if c.Flag("authfile").Changed { + if err := auth.CheckAuthFile(opts.authfile); err != nil { + return err + } + } imageSpec := "" switch len(args) { case 0: @@ -831,7 +843,7 @@ func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error { return errors.New("At least a source list ID must be specified") case 1: listImageSpec = args[0] - destSpec = "docker://"+listImageSpec + destSpec = "docker://" + listImageSpec case 2: listImageSpec = args[0] destSpec = args[1] @@ -897,6 +909,7 @@ func manifestPush(systemContext *types.SystemContext, store storage.Store, listI RemoveSignatures: opts.removeSignatures, SignBy: opts.signBy, ManifestType: manifestType, + AddCompression: opts.addCompression, } if opts.all { options.ImageListSelection = cp.CopyAllImages diff --git a/cmd/buildah/push.go b/cmd/buildah/push.go index cc59c7976c..4fc815576b 100644 --- a/cmd/buildah/push.go +++ b/cmd/buildah/push.go @@ -46,6 +46,7 @@ type pushOptions struct { encryptionKeys []string encryptLayers []int insecure bool + addCompression []string } func init() { diff --git a/docs/buildah-manifest-inspect.1.md b/docs/buildah-manifest-inspect.1.md index 2f73ce9693..8bdf3d3425 100644 --- a/docs/buildah-manifest-inspect.1.md +++ b/docs/buildah-manifest-inspect.1.md @@ -16,6 +16,17 @@ Displays the manifest list or image index stored using the specified image name. A formatted JSON representation of the manifest list or image index. +## OPTIONS + +**--authfile** *path* + +Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `buildah login`. +If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. 
+ +**--tls-verify** *bool-value* + +Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry. + ## EXAMPLE ``` diff --git a/docs/buildah-manifest-push.1.md b/docs/buildah-manifest-push.1.md index 4b55cac124..66f67217e9 100644 --- a/docs/buildah-manifest-push.1.md +++ b/docs/buildah-manifest-push.1.md @@ -18,6 +18,15 @@ The list image's ID and the digest of the image's manifest. ## OPTIONS +**--add-compression** *compression* + +Makes sure that the requested compression variant for each platform is added to the manifest list, keeping the original instance +intact in the same manifest list. Supported values are `gzip`, `zstd` and `zstd:chunked`. + +Note: This is different from `--compression`, which replaces the instance with one using the specified compression, +while `--add-compression` makes sure that each instance has its variant added to the manifest list without modifying the +original instance. 
+ **--all** Push the images mentioned in the manifest list or image index, in addition to diff --git a/go.mod b/go.mod index 2d78c4e5d3..f1223aade0 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/containerd/containerd v1.7.3 github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.3.0 - github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2 - github.com/containers/image/v5 v5.26.1 + github.com/containers/common v0.55.1-0.20230727095721-647ed1d4d79a + github.com/containers/image/v5 v5.26.1-0.20230727122416-da7899237198 github.com/containers/ocicrypt v1.1.7 github.com/containers/storage v1.48.1-0.20230721123825-4a3a3019d765 github.com/cyphar/filepath-securejoin v0.2.3 @@ -115,7 +115,7 @@ require ( github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.11 // indirect - github.com/vbatts/tar-split v0.11.3 // indirect + github.com/vbatts/tar-split v0.11.5 // indirect github.com/vbauerster/mpb/v8 v8.5.2 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect diff --git a/go.sum b/go.sum index 26d17534d0..e940d9888a 100644 --- a/go.sum +++ b/go.sum @@ -6,7 +6,6 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Microsoft/go-winio 
v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -51,10 +50,10 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM= github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0= -github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2 h1:4B42HUIAghFGSqej5RADTNf0WlOBFiGGzmGjNa3Do78= -github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2/go.mod h1:O/JSRY1dLfwgBxVvn3yJfKvF63KEjbNJcJAtjpNvO90= -github.com/containers/image/v5 v5.26.1 h1:8y3xq8GO/6y8FR+nAedHPsAFiAtOrab9qHTBpbqaX8g= -github.com/containers/image/v5 v5.26.1/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA= +github.com/containers/common v0.55.1-0.20230727095721-647ed1d4d79a h1:w314+pqd43cO35iRzJW8dB3tX6JOQH8aSP8a2wMoT2g= +github.com/containers/common v0.55.1-0.20230727095721-647ed1d4d79a/go.mod h1:kZWakZy8Ep9Zhdg+qrPRyWIZj4AyborpXqjuxt5ADfk= +github.com/containers/image/v5 v5.26.1-0.20230727122416-da7899237198 h1:H1YlMbFrkoBOWzgChilbuSZkPj149U9v/zkBAnyC1XU= +github.com/containers/image/v5 v5.26.1-0.20230727122416-da7899237198/go.mod h1:Zg7m6YHPZRl/wbUDZ6vt+yAyXAjAvALVUelmsIPpMcE= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U= @@ -359,7 +358,6 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus 
v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -398,9 +396,8 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= -github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= -github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= +github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= +github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/vbauerster/mpb/v8 v8.5.2 h1:zanzt1cZpSEG5uGNYKcv43+97f0IgEnXpuBFaMxKbM0= github.com/vbauerster/mpb/v8 v8.5.2/go.mod h1:YqKyR4ZR6Gd34yD3cDHPMmQxc+uUQMwjgO/LkxiJQ6I= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= @@ -510,7 +507,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/tests/bud.bats b/tests/bud.bats index 0fa5db3216..01d1752f3f 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -11,6 +11,34 @@ load helpers run_buildah build $BUDFILES/stdio } +@test "bud: build manifest list and --add-compression zstd" { + local contextdir=${TEST_SCRATCH_DIR}/bud/platform + mkdir -p $contextdir + + cat > $contextdir/Dockerfile1 << _EOF +FROM alpine +_EOF + + start_registry + run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT} + run_buildah build $WITH_POLICY_JSON -t image1 --platform linux/amd64 -f $contextdir/Dockerfile1 + run_buildah build $WITH_POLICY_JSON -t image2 --platform linux/arm64 -f $contextdir/Dockerfile1 + + run_buildah manifest create foo + run_buildah manifest add foo image1 + run_buildah manifest add foo image2 + + run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list + + run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list + list="$output" + + validate_instance_compression "0" "$list" "amd64" "gzip" + validate_instance_compression "1" "$list" "arm64" "gzip" + validate_instance_compression "2" "$list" "amd64" "zstd" + validate_instance_compression "3" "$list" "arm64" "zstd" +} + @test "bud with --dns* flags" { _prefetch alpine @@ -2103,6 +2131,39 @@ function _test_http() { run_buildah from ${target} } +# Helper function for several of the tests which verifies compression. 
+# +# Usage: validate_instance_compression INDEX MANIFEST ARCH COMPRESSION +# +# INDEX instance which needs to be verified in +# provided manifest list. +# +# MANIFEST OCI manifest specification in json format +# +# ARCH instance architecture +# +# COMPRESSION compression algorithm name; e.g "zstd". +# +function validate_instance_compression { + case $4 in + + gzip) + run jq -r '.manifests['$1'].annotations' <<< $2 + # annotation is `null` for gzip compression + assert "$output" = "null" ".manifests[$1].annotations (null means gzip)" + ;; + + zstd) + # annotation `'"io.github.containers.compression.zstd": "true"'` must be there for zstd compression + run jq -r '.manifests['$1'].annotations."io.github.containers.compression.zstd"' <<< $2 + assert "$output" = "true" ".manifests[$1].annotations.'io.github.containers.compression.zstd' (io.github.containers.compression.zstd must be set)" + ;; + esac + + run jq -r '.manifests['$1'].platform.architecture' <<< $2 + assert "$output" = $3 ".manifests[$1].platform.architecture" +} + @test "bud-http-Dockerfile" { _test_http from-scratch Containerfile } diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go deleted file mode 100644 index 07aca4a1d3..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright © 2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package multierror - -import ( - "strings" -) - -// New combines several errors into a single error. Parameters that are nil are -// ignored. If no errors are passed in or all parameters are nil, then the -// result is also nil. -func New(errors ...error) error { - // Filter out nil entries. - numErrors := 0 - for _, err := range errors { - if err != nil { - errors[numErrors] = err - numErrors++ - } - } - if numErrors == 0 { - return nil - } - return multiError(errors[0:numErrors]) -} - -// multiError is the underlying implementation used by New. -// -// Beware that a null multiError is not the same as a nil error. -type multiError []error - -// multiError returns all individual error strings concatenated with "\n" -func (e multiError) Error() string { - var builder strings.Builder - for i, err := range e { - if i > 0 { - _, _ = builder.WriteString("\n") - } - _, _ = builder.WriteString(err.Error()) - } - return builder.String() -} - -// Append returns a new multi error all errors concatenated. Errors that are -// multi errors get flattened, nil is ignored. -func Append(err error, errors ...error) error { - var result multiError - if m, ok := err.(multiError); ok { - result = m - } else if err != nil { - result = append(result, err) - } - - for _, e := range errors { - if e == nil { - continue - } - if m, ok := e.(multiError); ok { - result = append(result, m...) 
- } else { - result = append(result, e) - } - } - if len(result) == 0 { - return nil - } - return result -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/objectmeta.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/objectmeta.go deleted file mode 100644 index b8a6487f0e..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/objectmeta.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Adapted from k8s.io/apimachinery/pkg/api/validation: -// https://github.com/kubernetes/apimachinery/blob/7687996c715ee7d5c8cf1e3215e607eb065a4221/pkg/api/validation/objectmeta.go - -package k8s - -import ( - "fmt" - "strings" - - "github.com/container-orchestrated-devices/container-device-interface/internal/multierror" -) - -// TotalAnnotationSizeLimitB defines the maximum size of all annotations in characters. -const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB - -// ValidateAnnotations validates that a set of annotations are correctly defined. -func ValidateAnnotations(annotations map[string]string, path string) error { - errors := multierror.New() - for k := range annotations { - // The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking. 
- for _, msg := range IsQualifiedName(strings.ToLower(k)) { - errors = multierror.Append(errors, fmt.Errorf("%v.%v is invalid: %v", path, k, msg)) - } - } - if err := ValidateAnnotationsSize(annotations); err != nil { - errors = multierror.Append(errors, fmt.Errorf("%v is too long: %v", path, err)) - } - return errors -} - -// ValidateAnnotationsSize validates that a set of annotations is not too large. -func ValidateAnnotationsSize(annotations map[string]string) error { - var totalSize int64 - for k, v := range annotations { - totalSize += (int64)(len(k)) + (int64)(len(v)) - } - if totalSize > (int64)(TotalAnnotationSizeLimitB) { - return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, TotalAnnotationSizeLimitB) - } - return nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/validation.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/validation.go deleted file mode 100644 index 5ad6ce2776..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/validation.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Adapted from k8s.io/apimachinery/pkg/util/validation: -// https://github.com/kubernetes/apimachinery/blob/7687996c715ee7d5c8cf1e3215e607eb065a4221/pkg/util/validation/validation.go - -package k8s - -import ( - "fmt" - "regexp" - "strings" -) - -const qnameCharFmt string = "[A-Za-z0-9]" -const qnameExtCharFmt string = "[-A-Za-z0-9_.]" -const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt -const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" -const qualifiedNameMaxLength int = 63 - -var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") - -// IsQualifiedName tests whether the value passed is what Kubernetes calls a -// "qualified name". This is a format used in various places throughout the -// system. If the value is not valid, a list of error strings is returned. -// Otherwise an empty list (or nil) is returned. -func IsQualifiedName(value string) []string { - var errs []string - parts := strings.Split(value, "/") - var name string - switch len(parts) { - case 1: - name = parts[0] - case 2: - var prefix string - prefix, name = parts[0], parts[1] - if len(prefix) == 0 { - errs = append(errs, "prefix part "+EmptyError()) - } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { - errs = append(errs, prefixEach(msgs, "prefix part ")...) - } - default: - return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ - " with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName')") - } - - if len(name) == 0 { - errs = append(errs, "name part "+EmptyError()) - } else if len(name) > qualifiedNameMaxLength { - errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) - } - if !qualifiedNameRegexp.MatchString(name) { - errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) - } - return errs -} - -const labelValueFmt string = "(" + qualifiedNameFmt + ")?" -const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" - -// LabelValueMaxLength is a label's max length -const LabelValueMaxLength int = 63 - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// IsValidLabelValue tests whether the value passed is a valid label value. If -// the value is not valid, a list of error strings is returned. Otherwise an -// empty list (or nil) is returned. -func IsValidLabelValue(value string) []string { - var errs []string - if len(value) > LabelValueMaxLength { - errs = append(errs, MaxLenError(LabelValueMaxLength)) - } - if !labelValueRegexp.MatchString(value) { - errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) - } - return errs -} - -const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) -const DNS1123LabelMaxLength int = 63 - -var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") - -// IsDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). 
-func IsDNS1123Label(value string) []string { - var errs []string - if len(value) > DNS1123LabelMaxLength { - errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" - -// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) -const DNS1123SubdomainMaxLength int = 253 - -var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") - -// IsDNS1123Subdomain tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123). -func IsDNS1123Subdomain(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !dns1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) - } - return errs -} - -const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" - -// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) -const DNS1035LabelMaxLength int = 63 - -var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") - -// IsDNS1035Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1035). 
-func IsDNS1035Label(value string) []string { - var errs []string - if len(value) > DNS1035LabelMaxLength { - errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) - } - if !dns1035LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) - } - return errs -} - -// wildcard definition - RFC 1034 section 4.3.3. -// examples: -// - valid: *.bar.com, *.foo.bar.com -// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * -const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt -const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" - -// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a -// wildcard subdomain in DNS (RFC 1034 section 4.3.3). -func IsWildcardDNS1123Subdomain(value string) []string { - wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") - - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !wildcardDNS1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) - } - return errs -} - -// MaxLenError returns a string explanation of a "string too long" validation -// failure. -func MaxLenError(length int) string { - return fmt.Sprintf("must be no more than %d characters", length) -} - -// RegexError returns a string explanation of a regex validation failure. -func RegexError(msg string, fmt string, examples ...string) string { - if len(examples) == 0 { - return msg + " (regex used for validation is '" + fmt + "')" - } - msg += " (e.g. 
" - for i := range examples { - if i > 0 { - msg += " or " - } - msg += "'" + examples[i] + "', " - } - msg += "regex used for validation is '" + fmt + "')" - return msg -} - -// EmptyError returns a string explanation of a "must not be empty" validation -// failure. -func EmptyError() string { - return "must be non-empty" -} - -func prefixEach(msgs []string, prefix string) []string { - for i := range msgs { - msgs[i] = prefix + msgs[i] - } - return msgs -} - -// InclusiveRangeError returns a string explanation of a numeric "must be -// between" validation failure. -func InclusiveRangeError(lo, hi int) string { - return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/validate.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/validate.go deleted file mode 100644 index 59c14c2022..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright © The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package validation - -import ( - "fmt" - "strings" - - "github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s" -) - -// ValidateSpecAnnotations checks whether spec annotations are valid. 
-func ValidateSpecAnnotations(name string, any interface{}) error { - if any == nil { - return nil - } - - switch v := any.(type) { - case map[string]interface{}: - annotations := make(map[string]string) - for k, v := range v { - if s, ok := v.(string); ok { - annotations[k] = s - } else { - return fmt.Errorf("invalid annotation %v.%v; %v is not a string", name, k, any) - } - } - return validateSpecAnnotations(name, annotations) - } - - return nil -} - -// validateSpecAnnotations checks whether spec annotations are valid. -func validateSpecAnnotations(name string, annotations map[string]string) error { - path := "annotations" - if name != "" { - path = strings.Join([]string{name, path}, ".") - } - - return k8s.ValidateAnnotations(annotations, path) -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go deleted file mode 100644 index 69b69663cb..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright © 2021-2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "errors" - "fmt" - "strings" - - "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" -) - -const ( - // AnnotationPrefix is the prefix for CDI container annotation keys. 
- AnnotationPrefix = "cdi.k8s.io/" -) - -// UpdateAnnotations updates annotations with a plugin-specific CDI device -// injection request for the given devices. Upon any error a non-nil error -// is returned and annotations are left intact. By convention plugin should -// be in the format of "vendor.device-type". -func UpdateAnnotations(annotations map[string]string, plugin string, deviceID string, devices []string) (map[string]string, error) { - key, err := AnnotationKey(plugin, deviceID) - if err != nil { - return annotations, fmt.Errorf("CDI annotation failed: %w", err) - } - if _, ok := annotations[key]; ok { - return annotations, fmt.Errorf("CDI annotation failed, key %q used", key) - } - value, err := AnnotationValue(devices) - if err != nil { - return annotations, fmt.Errorf("CDI annotation failed: %w", err) - } - - if annotations == nil { - annotations = make(map[string]string) - } - annotations[key] = value - - return annotations, nil -} - -// ParseAnnotations parses annotations for CDI device injection requests. -// The keys and devices from all such requests are collected into slices -// which are returned as the result. All devices are expected to be fully -// qualified CDI device names. If any device fails this check empty slices -// are returned along with a non-nil error. The annotations are expected -// to be formatted by, or in a compatible fashion to UpdateAnnotations(). -func ParseAnnotations(annotations map[string]string) ([]string, []string, error) { - var ( - keys []string - devices []string - ) - - for key, value := range annotations { - if !strings.HasPrefix(key, AnnotationPrefix) { - continue - } - for _, d := range strings.Split(value, ",") { - if !IsQualifiedName(d) { - return nil, nil, fmt.Errorf("invalid CDI device name %q", d) - } - devices = append(devices, d) - } - keys = append(keys, key) - } - - return keys, devices, nil -} - -// AnnotationKey returns a unique annotation key for an device allocation -// by a K8s device plugin. 
pluginName should be in the format of -// "vendor.device-type". deviceID is the ID of the device the plugin is -// allocating. It is used to make sure that the generated key is unique -// even if multiple allocations by a single plugin needs to be annotated. -func AnnotationKey(pluginName, deviceID string) (string, error) { - const maxNameLen = 63 - - if pluginName == "" { - return "", errors.New("invalid plugin name, empty") - } - if deviceID == "" { - return "", errors.New("invalid deviceID, empty") - } - - name := pluginName + "_" + strings.ReplaceAll(deviceID, "/", "_") - - if len(name) > maxNameLen { - return "", fmt.Errorf("invalid plugin+deviceID %q, too long", name) - } - - if c := rune(name[0]); !parser.IsAlphaNumeric(c) { - return "", fmt.Errorf("invalid name %q, first '%c' should be alphanumeric", - name, c) - } - if len(name) > 2 { - for _, c := range name[1 : len(name)-1] { - switch { - case parser.IsAlphaNumeric(c): - case c == '_' || c == '-' || c == '.': - default: - return "", fmt.Errorf("invalid name %q, invalid character '%c'", - name, c) - } - } - } - if c := rune(name[len(name)-1]); !parser.IsAlphaNumeric(c) { - return "", fmt.Errorf("invalid name %q, last '%c' should be alphanumeric", - name, c) - } - - return AnnotationPrefix + name, nil -} - -// AnnotationValue returns an annotation value for the given devices. 
-func AnnotationValue(devices []string) (string, error) { - value, sep := "", "" - for _, d := range devices { - if _, _, _, err := ParseQualifiedName(d); err != nil { - return "", err - } - value += sep + d - sep = "," - } - - return value, nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go deleted file mode 100644 index cb495ebb36..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go +++ /dev/null @@ -1,581 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/container-orchestrated-devices/container-device-interface/internal/multierror" - cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" - "github.com/fsnotify/fsnotify" - oci "github.com/opencontainers/runtime-spec/specs-go" -) - -// Option is an option to change some aspect of default CDI behavior. -type Option func(*Cache) error - -// Cache stores CDI Specs loaded from Spec directories. 
-type Cache struct { - sync.Mutex - specDirs []string - specs map[string][]*Spec - devices map[string]*Device - errors map[string][]error - dirErrors map[string]error - - autoRefresh bool - watch *watch -} - -// WithAutoRefresh returns an option to control automatic Cache refresh. -// By default auto-refresh is enabled, the list of Spec directories are -// monitored and the Cache is automatically refreshed whenever a change -// is detected. This option can be used to disable this behavior when a -// manually refreshed mode is preferable. -func WithAutoRefresh(autoRefresh bool) Option { - return func(c *Cache) error { - c.autoRefresh = autoRefresh - return nil - } -} - -// NewCache creates a new CDI Cache. The cache is populated from a set -// of CDI Spec directories. These can be specified using a WithSpecDirs -// option. The default set of directories is exposed in DefaultSpecDirs. -func NewCache(options ...Option) (*Cache, error) { - c := &Cache{ - autoRefresh: true, - watch: &watch{}, - } - - WithSpecDirs(DefaultSpecDirs...)(c) - c.Lock() - defer c.Unlock() - - return c, c.configure(options...) -} - -// Configure applies options to the Cache. Updates and refreshes the -// Cache if options have changed. -func (c *Cache) Configure(options ...Option) error { - if len(options) == 0 { - return nil - } - - c.Lock() - defer c.Unlock() - - return c.configure(options...) -} - -// Configure the Cache. Start/stop CDI Spec directory watch, refresh -// the Cache if necessary. -func (c *Cache) configure(options ...Option) error { - var err error - - for _, o := range options { - if err = o(c); err != nil { - return fmt.Errorf("failed to apply cache options: %w", err) - } - } - - c.dirErrors = make(map[string]error) - - c.watch.stop() - if c.autoRefresh { - c.watch.setup(c.specDirs, c.dirErrors) - c.watch.start(&c.Mutex, c.refresh, c.dirErrors) - } - c.refresh() - - return nil -} - -// Refresh rescans the CDI Spec directories and refreshes the Cache. 
-// In manual refresh mode the cache is always refreshed. In auto- -// refresh mode the cache is only refreshed if it is out of date. -func (c *Cache) Refresh() error { - c.Lock() - defer c.Unlock() - - // force a refresh in manual mode - if refreshed, err := c.refreshIfRequired(!c.autoRefresh); refreshed { - return err - } - - // collect and return cached errors, much like refresh() does it - var result error - for _, errors := range c.errors { - result = multierror.Append(result, errors...) - } - return result -} - -// Refresh the Cache by rescanning CDI Spec directories and files. -func (c *Cache) refresh() error { - var ( - specs = map[string][]*Spec{} - devices = map[string]*Device{} - conflicts = map[string]struct{}{} - specErrors = map[string][]error{} - result []error - ) - - // collect errors per spec file path and once globally - collectError := func(err error, paths ...string) { - result = append(result, err) - for _, path := range paths { - specErrors[path] = append(specErrors[path], err) - } - } - // resolve conflicts based on device Spec priority (order of precedence) - resolveConflict := func(name string, dev *Device, old *Device) bool { - devSpec, oldSpec := dev.GetSpec(), old.GetSpec() - devPrio, oldPrio := devSpec.GetPriority(), oldSpec.GetPriority() - switch { - case devPrio > oldPrio: - return false - case devPrio == oldPrio: - devPath, oldPath := devSpec.GetPath(), oldSpec.GetPath() - collectError(fmt.Errorf("conflicting device %q (specs %q, %q)", - name, devPath, oldPath), devPath, oldPath) - conflicts[name] = struct{}{} - } - return true - } - - _ = scanSpecDirs(c.specDirs, func(path string, priority int, spec *Spec, err error) error { - path = filepath.Clean(path) - if err != nil { - collectError(fmt.Errorf("failed to load CDI Spec %w", err), path) - return nil - } - - vendor := spec.GetVendor() - specs[vendor] = append(specs[vendor], spec) - - for _, dev := range spec.devices { - qualified := dev.GetQualifiedName() - other, ok := 
devices[qualified] - if ok { - if resolveConflict(qualified, dev, other) { - continue - } - } - devices[qualified] = dev - } - - return nil - }) - - for conflict := range conflicts { - delete(devices, conflict) - } - - c.specs = specs - c.devices = devices - c.errors = specErrors - - return multierror.New(result...) -} - -// RefreshIfRequired triggers a refresh if necessary. -func (c *Cache) refreshIfRequired(force bool) (bool, error) { - // We need to refresh if - // - it's forced by an explicitly call to Refresh() in manual mode - // - a missing Spec dir appears (added to watch) in auto-refresh mode - if force || (c.autoRefresh && c.watch.update(c.dirErrors)) { - return true, c.refresh() - } - return false, nil -} - -// InjectDevices injects the given qualified devices to an OCI Spec. It -// returns any unresolvable devices and an error if injection fails for -// any of the devices. -func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, error) { - var unresolved []string - - if ociSpec == nil { - return devices, fmt.Errorf("can't inject devices, nil OCI Spec") - } - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - edits := &ContainerEdits{} - specs := map[*Spec]struct{}{} - - for _, device := range devices { - d := c.devices[device] - if d == nil { - unresolved = append(unresolved, device) - continue - } - if _, ok := specs[d.GetSpec()]; !ok { - specs[d.GetSpec()] = struct{}{} - edits.Append(d.GetSpec().edits()) - } - edits.Append(d.edits()) - } - - if unresolved != nil { - return unresolved, fmt.Errorf("unresolvable CDI devices %s", - strings.Join(devices, ", ")) - } - - if err := edits.Apply(ociSpec); err != nil { - return nil, fmt.Errorf("failed to inject devices: %w", err) - } - - return nil, nil -} - -// highestPrioritySpecDir returns the Spec directory with highest priority -// and its priority. 
-func (c *Cache) highestPrioritySpecDir() (string, int) { - if len(c.specDirs) == 0 { - return "", -1 - } - - prio := len(c.specDirs) - 1 - dir := c.specDirs[prio] - - return dir, prio -} - -// WriteSpec writes a Spec file with the given content into the highest -// priority Spec directory. If name has a "json" or "yaml" extension it -// choses the encoding. Otherwise the default YAML encoding is used. -func (c *Cache) WriteSpec(raw *cdi.Spec, name string) error { - var ( - specDir string - path string - prio int - spec *Spec - err error - ) - - specDir, prio = c.highestPrioritySpecDir() - if specDir == "" { - return errors.New("no Spec directories to write to") - } - - path = filepath.Join(specDir, name) - if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { - path += defaultSpecExt - } - - spec, err = newSpec(raw, path, prio) - if err != nil { - return err - } - - return spec.write(true) -} - -// RemoveSpec removes a Spec with the given name from the highest -// priority Spec directory. This function can be used to remove a -// Spec previously written by WriteSpec(). If the file exists and -// its removal fails RemoveSpec returns an error. -func (c *Cache) RemoveSpec(name string) error { - var ( - specDir string - path string - err error - ) - - specDir, _ = c.highestPrioritySpecDir() - if specDir == "" { - return errors.New("no Spec directories to remove from") - } - - path = filepath.Join(specDir, name) - if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { - path += defaultSpecExt - } - - err = os.Remove(path) - if err != nil && errors.Is(err, fs.ErrNotExist) { - err = nil - } - - return err -} - -// GetDevice returns the cached device for the given qualified name. -func (c *Cache) GetDevice(device string) *Device { - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - return c.devices[device] -} - -// ListDevices lists all cached devices by qualified name. 
-func (c *Cache) ListDevices() []string { - var devices []string - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - for name := range c.devices { - devices = append(devices, name) - } - sort.Strings(devices) - - return devices -} - -// ListVendors lists all vendors known to the cache. -func (c *Cache) ListVendors() []string { - var vendors []string - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - for vendor := range c.specs { - vendors = append(vendors, vendor) - } - sort.Strings(vendors) - - return vendors -} - -// ListClasses lists all device classes known to the cache. -func (c *Cache) ListClasses() []string { - var ( - cmap = map[string]struct{}{} - classes []string - ) - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - for _, specs := range c.specs { - for _, spec := range specs { - cmap[spec.GetClass()] = struct{}{} - } - } - for class := range cmap { - classes = append(classes, class) - } - sort.Strings(classes) - - return classes -} - -// GetVendorSpecs returns all specs for the given vendor. -func (c *Cache) GetVendorSpecs(vendor string) []*Spec { - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - return c.specs[vendor] -} - -// GetSpecErrors returns all errors encountered for the spec during the -// last cache refresh. -func (c *Cache) GetSpecErrors(spec *Spec) []error { - var errors []error - - c.Lock() - defer c.Unlock() - - if errs, ok := c.errors[spec.GetPath()]; ok { - errors = make([]error, len(errs)) - copy(errors, errs) - } - - return errors -} - -// GetErrors returns all errors encountered during the last -// cache refresh. -func (c *Cache) GetErrors() map[string][]error { - c.Lock() - defer c.Unlock() - - errors := map[string][]error{} - for path, errs := range c.errors { - errors[path] = errs - } - for path, err := range c.dirErrors { - errors[path] = []error{err} - } - - return errors -} - -// GetSpecDirectories returns the CDI Spec directories currently in use. 
-func (c *Cache) GetSpecDirectories() []string { - c.Lock() - defer c.Unlock() - - dirs := make([]string, len(c.specDirs)) - copy(dirs, c.specDirs) - return dirs -} - -// GetSpecDirErrors returns any errors related to configured Spec directories. -func (c *Cache) GetSpecDirErrors() map[string]error { - if c.dirErrors == nil { - return nil - } - - c.Lock() - defer c.Unlock() - - errors := make(map[string]error) - for dir, err := range c.dirErrors { - errors[dir] = err - } - return errors -} - -// Our fsnotify helper wrapper. -type watch struct { - watcher *fsnotify.Watcher - tracked map[string]bool -} - -// Setup monitoring for the given Spec directories. -func (w *watch) setup(dirs []string, dirErrors map[string]error) { - var ( - dir string - err error - ) - w.tracked = make(map[string]bool) - for _, dir = range dirs { - w.tracked[dir] = false - } - - w.watcher, err = fsnotify.NewWatcher() - if err != nil { - for _, dir := range dirs { - dirErrors[dir] = fmt.Errorf("failed to create watcher: %w", err) - } - return - } - - w.update(dirErrors) -} - -// Start watching Spec directories for relevant changes. -func (w *watch) start(m *sync.Mutex, refresh func() error, dirErrors map[string]error) { - go w.watch(w.watcher, m, refresh, dirErrors) -} - -// Stop watching directories. -func (w *watch) stop() { - if w.watcher == nil { - return - } - - w.watcher.Close() - w.tracked = nil -} - -// Watch Spec directory changes, triggering a refresh if necessary. 
-func (w *watch) watch(fsw *fsnotify.Watcher, m *sync.Mutex, refresh func() error, dirErrors map[string]error) { - watch := fsw - if watch == nil { - return - } - for { - select { - case event, ok := <-watch.Events: - if !ok { - return - } - - if (event.Op & (fsnotify.Rename | fsnotify.Remove | fsnotify.Write)) == 0 { - continue - } - if event.Op == fsnotify.Write { - if ext := filepath.Ext(event.Name); ext != ".json" && ext != ".yaml" { - continue - } - } - - m.Lock() - if event.Op == fsnotify.Remove && w.tracked[event.Name] { - w.update(dirErrors, event.Name) - } else { - w.update(dirErrors) - } - refresh() - m.Unlock() - - case _, ok := <-watch.Errors: - if !ok { - return - } - } - } -} - -// Update watch with pending/missing or removed directories. -func (w *watch) update(dirErrors map[string]error, removed ...string) bool { - var ( - dir string - ok bool - err error - update bool - ) - - for dir, ok = range w.tracked { - if ok { - continue - } - - err = w.watcher.Add(dir) - if err == nil { - w.tracked[dir] = true - delete(dirErrors, dir) - update = true - } else { - w.tracked[dir] = false - dirErrors[dir] = fmt.Errorf("failed to monitor for changes: %w", err) - } - } - - for _, dir = range removed { - w.tracked[dir] = false - dirErrors[dir] = errors.New("directory removed") - update = true - } - - return update -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_unix.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_unix.go deleted file mode 100644 index 0ee5fb86f5..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import "syscall" - -func osSync() { - syscall.Sync() -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_windows.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_windows.go deleted file mode 100644 index c6dabf5fa8..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache_test_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build windows -// +build windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cdi - -func osSync() {} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go deleted file mode 100644 index 55c748fc42..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go +++ /dev/null @@ -1,332 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/container-orchestrated-devices/container-device-interface/specs-go" - oci "github.com/opencontainers/runtime-spec/specs-go" - ocigen "github.com/opencontainers/runtime-tools/generate" -) - -const ( - // PrestartHook is the name of the OCI "prestart" hook. - PrestartHook = "prestart" - // CreateRuntimeHook is the name of the OCI "createRuntime" hook. - CreateRuntimeHook = "createRuntime" - // CreateContainerHook is the name of the OCI "createContainer" hook. - CreateContainerHook = "createContainer" - // StartContainerHook is the name of the OCI "startContainer" hook. - StartContainerHook = "startContainer" - // PoststartHook is the name of the OCI "poststart" hook. - PoststartHook = "poststart" - // PoststopHook is the name of the OCI "poststop" hook. - PoststopHook = "poststop" -) - -var ( - // Names of recognized hooks. 
- validHookNames = map[string]struct{}{ - PrestartHook: {}, - CreateRuntimeHook: {}, - CreateContainerHook: {}, - StartContainerHook: {}, - PoststartHook: {}, - PoststopHook: {}, - } -) - -// ContainerEdits represent updates to be applied to an OCI Spec. -// These updates can be specific to a CDI device, or they can be -// specific to a CDI Spec. In the former case these edits should -// be applied to all OCI Specs where the corresponding CDI device -// is injected. In the latter case, these edits should be applied -// to all OCI Specs where at least one devices from the CDI Spec -// is injected. -type ContainerEdits struct { - *specs.ContainerEdits -} - -// Apply edits to the given OCI Spec. Updates the OCI Spec in place. -// Returns an error if the update fails. -func (e *ContainerEdits) Apply(spec *oci.Spec) error { - if spec == nil { - return errors.New("can't edit nil OCI Spec") - } - if e == nil || e.ContainerEdits == nil { - return nil - } - - specgen := ocigen.NewFromSpec(spec) - if len(e.Env) > 0 { - specgen.AddMultipleProcessEnv(e.Env) - } - - for _, d := range e.DeviceNodes { - dn := DeviceNode{d} - - err := dn.fillMissingInfo() - if err != nil { - return err - } - dev := d.ToOCI() - if dev.UID == nil && spec.Process != nil { - if uid := spec.Process.User.UID; uid > 0 { - dev.UID = &uid - } - } - if dev.GID == nil && spec.Process != nil { - if gid := spec.Process.User.GID; gid > 0 { - dev.GID = &gid - } - } - - specgen.RemoveDevice(dev.Path) - specgen.AddDevice(dev) - - if dev.Type == "b" || dev.Type == "c" { - access := d.Permissions - if access == "" { - access = "rwm" - } - specgen.AddLinuxResourcesDevice(true, dev.Type, &dev.Major, &dev.Minor, access) - } - } - - if len(e.Mounts) > 0 { - for _, m := range e.Mounts { - specgen.RemoveMount(m.ContainerPath) - specgen.AddMount(m.ToOCI()) - } - sortMounts(&specgen) - } - - for _, h := range e.Hooks { - switch h.HookName { - case PrestartHook: - specgen.AddPreStartHook(h.ToOCI()) - case PoststartHook: - 
specgen.AddPostStartHook(h.ToOCI()) - case PoststopHook: - specgen.AddPostStopHook(h.ToOCI()) - // TODO: Maybe runtime-tools/generate should be updated with these... - case CreateRuntimeHook: - ensureOCIHooks(spec) - spec.Hooks.CreateRuntime = append(spec.Hooks.CreateRuntime, h.ToOCI()) - case CreateContainerHook: - ensureOCIHooks(spec) - spec.Hooks.CreateContainer = append(spec.Hooks.CreateContainer, h.ToOCI()) - case StartContainerHook: - ensureOCIHooks(spec) - spec.Hooks.StartContainer = append(spec.Hooks.StartContainer, h.ToOCI()) - default: - return fmt.Errorf("unknown hook name %q", h.HookName) - } - } - - return nil -} - -// Validate container edits. -func (e *ContainerEdits) Validate() error { - if e == nil || e.ContainerEdits == nil { - return nil - } - - if err := ValidateEnv(e.Env); err != nil { - return fmt.Errorf("invalid container edits: %w", err) - } - for _, d := range e.DeviceNodes { - if err := (&DeviceNode{d}).Validate(); err != nil { - return err - } - } - for _, h := range e.Hooks { - if err := (&Hook{h}).Validate(); err != nil { - return err - } - } - for _, m := range e.Mounts { - if err := (&Mount{m}).Validate(); err != nil { - return err - } - } - - return nil -} - -// Append other edits into this one. If called with a nil receiver, -// allocates and returns newly allocated edits. -func (e *ContainerEdits) Append(o *ContainerEdits) *ContainerEdits { - if o == nil || o.ContainerEdits == nil { - return e - } - if e == nil { - e = &ContainerEdits{} - } - if e.ContainerEdits == nil { - e.ContainerEdits = &specs.ContainerEdits{} - } - - e.Env = append(e.Env, o.Env...) - e.DeviceNodes = append(e.DeviceNodes, o.DeviceNodes...) - e.Hooks = append(e.Hooks, o.Hooks...) - e.Mounts = append(e.Mounts, o.Mounts...) - - return e -} - -// isEmpty returns true if these edits are empty. This is valid in a -// global Spec context but invalid in a Device context. 
-func (e *ContainerEdits) isEmpty() bool { - if e == nil { - return false - } - return len(e.Env)+len(e.DeviceNodes)+len(e.Hooks)+len(e.Mounts) == 0 -} - -// ValidateEnv validates the given environment variables. -func ValidateEnv(env []string) error { - for _, v := range env { - if strings.IndexByte(v, byte('=')) <= 0 { - return fmt.Errorf("invalid environment variable %q", v) - } - } - return nil -} - -// DeviceNode is a CDI Spec DeviceNode wrapper, used for validating DeviceNodes. -type DeviceNode struct { - *specs.DeviceNode -} - -// Validate a CDI Spec DeviceNode. -func (d *DeviceNode) Validate() error { - validTypes := map[string]struct{}{ - "": {}, - "b": {}, - "c": {}, - "u": {}, - "p": {}, - } - - if d.Path == "" { - return errors.New("invalid (empty) device path") - } - if _, ok := validTypes[d.Type]; !ok { - return fmt.Errorf("device %q: invalid type %q", d.Path, d.Type) - } - for _, bit := range d.Permissions { - if bit != 'r' && bit != 'w' && bit != 'm' { - return fmt.Errorf("device %q: invalid permissions %q", - d.Path, d.Permissions) - } - } - return nil -} - -// Hook is a CDI Spec Hook wrapper, used for validating hooks. -type Hook struct { - *specs.Hook -} - -// Validate a hook. -func (h *Hook) Validate() error { - if _, ok := validHookNames[h.HookName]; !ok { - return fmt.Errorf("invalid hook name %q", h.HookName) - } - if h.Path == "" { - return fmt.Errorf("invalid hook %q with empty path", h.HookName) - } - if err := ValidateEnv(h.Env); err != nil { - return fmt.Errorf("invalid hook %q: %w", h.HookName, err) - } - return nil -} - -// Mount is a CDI Mount wrapper, used for validating mounts. -type Mount struct { - *specs.Mount -} - -// Validate a mount. -func (m *Mount) Validate() error { - if m.HostPath == "" { - return errors.New("invalid mount, empty host path") - } - if m.ContainerPath == "" { - return errors.New("invalid mount, empty container path") - } - return nil -} - -// Ensure OCI Spec hooks are not nil so we can add hooks. 
-func ensureOCIHooks(spec *oci.Spec) { - if spec.Hooks == nil { - spec.Hooks = &oci.Hooks{} - } -} - -// sortMounts sorts the mounts in the given OCI Spec. -func sortMounts(specgen *ocigen.Generator) { - mounts := specgen.Mounts() - specgen.ClearMounts() - sort.Sort(orderedMounts(mounts)) - specgen.Config.Mounts = mounts -} - -// orderedMounts defines how to sort an OCI Spec Mount slice. -// This is the almost the same implementation sa used by CRI-O and Docker, -// with a minor tweak for stable sorting order (easier to test): -// -// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26 -type orderedMounts []oci.Mount - -// Len returns the number of mounts. Used in sorting. -func (m orderedMounts) Len() int { - return len(m) -} - -// Less returns true if the number of parts (a/b/c would be 3 parts) in the -// mount indexed by parameter 1 is less than that of the mount indexed by -// parameter 2. Used in sorting. -func (m orderedMounts) Less(i, j int) bool { - ip, jp := m.parts(i), m.parts(j) - if ip < jp { - return true - } - if jp < ip { - return false - } - return m[i].Destination < m[j].Destination -} - -// Swap swaps two items in an array of mounts. Used in sorting -func (m orderedMounts) Swap(i, j int) { - m[i], m[j] = m[j], m[i] -} - -// parts returns the number of parts in the destination of a mount. Used in sorting. 
-func (m orderedMounts) parts(i int) int { - return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go deleted file mode 100644 index 11a4cfe8c1..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "fmt" - - runc "github.com/opencontainers/runc/libcontainer/devices" -) - -// fillMissingInfo fills in missing mandatory attributes from the host device. 
-func (d *DeviceNode) fillMissingInfo() error { - if d.HostPath == "" { - d.HostPath = d.Path - } - - if d.Type != "" && (d.Major != 0 || d.Type == "p") { - return nil - } - - hostDev, err := runc.DeviceFromPath(d.HostPath, "rwm") - if err != nil { - return fmt.Errorf("failed to stat CDI host device %q: %w", d.HostPath, err) - } - - if d.Type == "" { - d.Type = string(hostDev.Type) - } else { - if d.Type != string(hostDev.Type) { - return fmt.Errorf("CDI device (%q, %q), host type mismatch (%s, %s)", - d.Path, d.HostPath, d.Type, string(hostDev.Type)) - } - } - if d.Major == 0 && d.Type != "p" { - d.Major = hostDev.Major - d.Minor = hostDev.Minor - } - - return nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_windows.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_windows.go deleted file mode 100644 index fd91afa926..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build windows -// +build windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import "fmt" - -// fillMissingInfo fills in missing mandatory attributes from the host device. 
-func (d *DeviceNode) fillMissingInfo() error { - return fmt.Errorf("unimplemented") -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go deleted file mode 100644 index d93ddd0256..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "fmt" - - "github.com/container-orchestrated-devices/container-device-interface/internal/validation" - "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" - cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" - oci "github.com/opencontainers/runtime-spec/specs-go" -) - -// Device represents a CDI device of a Spec. -type Device struct { - *cdi.Device - spec *Spec -} - -// Create a new Device, associate it with the given Spec. -func newDevice(spec *Spec, d cdi.Device) (*Device, error) { - dev := &Device{ - Device: &d, - spec: spec, - } - - if err := dev.validate(); err != nil { - return nil, err - } - - return dev, nil -} - -// GetSpec returns the Spec this device is defined in. -func (d *Device) GetSpec() *Spec { - return d.spec -} - -// GetQualifiedName returns the qualified name for this device. 
-func (d *Device) GetQualifiedName() string { - return parser.QualifiedName(d.spec.GetVendor(), d.spec.GetClass(), d.Name) -} - -// ApplyEdits applies the device-speific container edits to an OCI Spec. -func (d *Device) ApplyEdits(ociSpec *oci.Spec) error { - return d.edits().Apply(ociSpec) -} - -// edits returns the applicable container edits for this spec. -func (d *Device) edits() *ContainerEdits { - return &ContainerEdits{&d.ContainerEdits} -} - -// Validate the device. -func (d *Device) validate() error { - if err := ValidateDeviceName(d.Name); err != nil { - return err - } - name := d.Name - if d.spec != nil { - name = d.GetQualifiedName() - } - if err := validation.ValidateSpecAnnotations(name, d.Annotations); err != nil { - return err - } - edits := d.edits() - if edits.isEmpty() { - return fmt.Errorf("invalid device, empty device edits") - } - if err := edits.Validate(); err != nil { - return fmt.Errorf("invalid device %q: %w", d.Name, err) - } - return nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go deleted file mode 100644 index c5cce0c87c..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go +++ /dev/null @@ -1,272 +0,0 @@ -// Package cdi has the primary purpose of providing an API for -// interacting with CDI and consuming CDI devices. -// -// For more information about Container Device Interface, please refer to -// https://github.com/container-orchestrated-devices/container-device-interface -// -// Container Device Interface -// -// Container Device Interface, or CDI for short, provides comprehensive -// third party device support for container runtimes. 
CDI uses vendor -// provided specification files, CDI Specs for short, to describe how a -// container's runtime environment should be modified when one or more -// of the vendor-specific devices is injected into the container. Beyond -// describing the low level platform-specific details of how to gain -// basic access to a device, CDI Specs allow more fine-grained device -// initialization, and the automatic injection of any necessary vendor- -// or device-specific software that might be required for a container -// to use a device or take full advantage of it. -// -// In the CDI device model containers request access to a device using -// fully qualified device names, qualified names for short, consisting of -// a vendor identifier, a device class and a device name or identifier. -// These pieces of information together uniquely identify a device among -// all device vendors, classes and device instances. -// -// This package implements an API for easy consumption of CDI. The API -// implements discovery, loading and caching of CDI Specs and injection -// of CDI devices into containers. This is the most common functionality -// the vast majority of CDI consumers need. The API should be usable both -// by OCI runtime clients and runtime implementations. -// -// CDI Registry -// -// The primary interface to interact with CDI devices is the Registry. It -// is essentially a cache of all Specs and devices discovered in standard -// CDI directories on the host. The registry has two main functionality, -// injecting devices into an OCI Spec and refreshing the cache of CDI -// Specs and devices. 
-// -// Device Injection -// -// Using the Registry one can inject CDI devices into a container with code -// similar to the following snippet: -// -// import ( -// "fmt" -// "strings" -// -// log "github.com/sirupsen/logrus" -// -// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" -// oci "github.com/opencontainers/runtime-spec/specs-go" -// ) -// -// func injectCDIDevices(spec *oci.Spec, devices []string) error { -// log.Debug("pristine OCI Spec: %s", dumpSpec(spec)) -// -// unresolved, err := cdi.GetRegistry().InjectDevices(spec, devices) -// if err != nil { -// return fmt.Errorf("CDI device injection failed: %w", err) -// } -// -// log.Debug("CDI-updated OCI Spec: %s", dumpSpec(spec)) -// return nil -// } -// -// Cache Refresh -// -// By default the CDI Spec cache monitors the configured Spec directories -// and automatically refreshes itself when necessary. This behavior can be -// disabled using the WithAutoRefresh(false) option. -// -// Failure to set up monitoring for a Spec directory causes the directory to -// get ignored and an error to be recorded among the Spec directory errors. -// These errors can be queried using the GetSpecDirErrors() function. If the -// error condition is transient, for instance a missing directory which later -// gets created, the corresponding error will be removed once the condition -// is over. -// -// With auto-refresh enabled injecting any CDI devices can be done without -// an explicit call to Refresh(), using a code snippet similar to the -// following: -// -// In a runtime implementation one typically wants to make sure the -// CDI Spec cache is up to date before performing device injection. 
-// A code snippet similar to the following accmplishes that: -// -// import ( -// "fmt" -// "strings" -// -// log "github.com/sirupsen/logrus" -// -// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" -// oci "github.com/opencontainers/runtime-spec/specs-go" -// ) -// -// func injectCDIDevices(spec *oci.Spec, devices []string) error { -// registry := cdi.GetRegistry() -// -// if err := registry.Refresh(); err != nil { -// // Note: -// // It is up to the implementation to decide whether -// // to abort injection on errors. A failed Refresh() -// // does not necessarily render the registry unusable. -// // For instance, a parse error in a Spec file for -// // vendor A does not have any effect on devices of -// // vendor B... -// log.Warnf("pre-injection Refresh() failed: %v", err) -// } -// -// log.Debug("pristine OCI Spec: %s", dumpSpec(spec)) -// -// unresolved, err := registry.InjectDevices(spec, devices) -// if err != nil { -// return fmt.Errorf("CDI device injection failed: %w", err) -// } -// -// log.Debug("CDI-updated OCI Spec: %s", dumpSpec(spec)) -// return nil -// } -// -// Generated Spec Files, Multiple Directories, Device Precedence -// -// It is often necessary to generate Spec files dynamically. On some -// systems the available or usable set of CDI devices might change -// dynamically which then needs to be reflected in CDI Specs. For -// some device classes it makes sense to enumerate the available -// devices at every boot and generate Spec file entries for each -// device found. Some CDI devices might need special client- or -// request-specific configuration which can only be fulfilled by -// dynamically generated client-specific entries in transient Spec -// files. -// -// CDI can collect Spec files from multiple directories. Spec files are -// automatically assigned priorities according to which directory they -// were loaded from. 
The later a directory occurs in the list of CDI -// directories to scan, the higher priority Spec files loaded from that -// directory are assigned to. When two or more Spec files define the -// same device, conflict is resolved by choosing the definition from the -// Spec file with the highest priority. -// -// The default CDI directory configuration is chosen to encourage -// separating dynamically generated CDI Spec files from static ones. -// The default directories are '/etc/cdi' and '/var/run/cdi'. By putting -// dynamically generated Spec files under '/var/run/cdi', those take -// precedence over static ones in '/etc/cdi'. With this scheme, static -// Spec files, typically installed by distro-specific packages, go into -// '/etc/cdi' while all the dynamically generated Spec files, transient -// or other, go into '/var/run/cdi'. -// -// Spec File Generation -// -// CDI offers two functions for writing and removing dynamically generated -// Specs from CDI Spec directories. These functions, WriteSpec() and -// RemoveSpec() implicitly follow the principle of separating dynamic Specs -// from the rest and therefore always write to and remove Specs from the -// last configured directory. -// -// Corresponding functions are also provided for generating names for Spec -// files. These functions follow a simple naming convention to ensure that -// multiple entities generating Spec files simultaneously on the same host -// do not end up using conflicting Spec file names. GenerateSpecName(), -// GenerateNameForSpec(), GenerateTransientSpecName(), and -// GenerateTransientNameForSpec() all generate names which can be passed -// as such to WriteSpec() and subsequently to RemoveSpec(). -// -// Generating a Spec file for a vendor/device class can be done with a -// code snippet similar to the following: -// -// import ( -// "fmt" -// ... 
-// "github.com/container-orchestrated-devices/container-device-interface/specs-go" -// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" -// ) -// -// func generateDeviceSpecs() error { -// registry := cdi.GetRegistry() -// spec := &specs.Spec{ -// Version: specs.CurrentVersion, -// Kind: vendor+"/"+class, -// } -// -// for _, dev := range enumerateDevices() { -// spec.Devices = append(spec.Devices, specs.Device{ -// Name: dev.Name, -// ContainerEdits: getContainerEditsForDevice(dev), -// }) -// } -// -// specName, err := cdi.GenerateNameForSpec(spec) -// if err != nil { -// return fmt.Errorf("failed to generate Spec name: %w", err) -// } -// -// return registry.SpecDB().WriteSpec(spec, specName) -// } -// -// Similarly, generating and later cleaning up transient Spec files can be -// done with code fragments similar to the following. These transient Spec -// files are temporary Spec files with container-specific parametrization. -// They are typically created before the associated container is created -// and removed once that container is removed. -// -// import ( -// "fmt" -// ... -// "github.com/container-orchestrated-devices/container-device-interface/specs-go" -// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" -// ) -// -// func generateTransientSpec(ctr Container) error { -// registry := cdi.GetRegistry() -// devices := getContainerDevs(ctr, vendor, class) -// spec := &specs.Spec{ -// Version: specs.CurrentVersion, -// Kind: vendor+"/"+class, -// } -// -// for _, dev := range devices { -// spec.Devices = append(spec.Devices, specs.Device{ -// // the generated name needs to be unique within the -// // vendor/class domain on the host/node. -// Name: generateUniqueDevName(dev, ctr), -// ContainerEdits: getEditsForContainer(dev), -// }) -// } -// -// // transientID is expected to guarantee that the Spec file name -// // generated using is unique within -// // the host/node. 
If more than one device is allocated with the -// // same vendor/class domain, either all generated Spec entries -// // should go to a single Spec file (like in this sample snippet), -// // or transientID should be unique for each generated Spec file. -// transientID := getSomeSufficientlyUniqueIDForContainer(ctr) -// specName, err := cdi.GenerateNameForTransientSpec(vendor, class, transientID) -// if err != nil { -// return fmt.Errorf("failed to generate Spec name: %w", err) -// } -// -// return registry.SpecDB().WriteSpec(spec, specName) -// } -// -// func removeTransientSpec(ctr Container) error { -// registry := cdi.GetRegistry() -// transientID := getSomeSufficientlyUniqueIDForContainer(ctr) -// specName := cdi.GenerateNameForTransientSpec(vendor, class, transientID) -// -// return registry.SpecDB().RemoveSpec(specName) -// } -// -// CDI Spec Validation -// -// This package performs both syntactic and semantic validation of CDI -// Spec file data when a Spec file is loaded via the registry or using -// the ReadSpec API function. As part of the semantic verification, the -// Spec file is verified against the CDI Spec JSON validation schema. -// -// If a valid externally provided JSON validation schema is found in -// the filesystem at /etc/cdi/schema/schema.json it is loaded and used -// as the default validation schema. If such a file is not found or -// fails to load, an embedded no-op schema is used. -// -// The used validation schema can also be changed programmatically using -// the SetSchema API convenience function. This function also accepts -// the special "builtin" (BuiltinSchemaName) and "none" (NoneSchemaName) -// schema names which switch the used schema to the in-repo validation -// schema embedded into the binary or the now default no-op schema -// correspondingly. Other names are interpreted as the path to the actual -// validation schema to load and use. 
-package cdi diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go deleted file mode 100644 index 16e889a7ae..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" -) - -// QualifiedName returns the qualified name for a device. -// The syntax for a qualified device names is -// -// "/=". -// -// A valid vendor and class name may contain the following runes: -// -// 'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'. -// -// A valid device name may contain the following runes: -// -// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':' -// -// Deprecated: use parser.QualifiedName instead -func QualifiedName(vendor, class, name string) string { - return parser.QualifiedName(vendor, class, name) -} - -// IsQualifiedName tests if a device name is qualified. -// -// Deprecated: use parser.IsQualifiedName instead -func IsQualifiedName(device string) bool { - return parser.IsQualifiedName(device) -} - -// ParseQualifiedName splits a qualified name into device vendor, class, -// and name. 
If the device fails to parse as a qualified name, or if any -// of the split components fail to pass syntax validation, vendor and -// class are returned as empty, together with the verbatim input as the -// name and an error describing the reason for failure. -// -// Deprecated: use parser.ParseQualifiedName instead -func ParseQualifiedName(device string) (string, string, string, error) { - return parser.ParseQualifiedName(device) -} - -// ParseDevice tries to split a device name into vendor, class, and name. -// If this fails, for instance in the case of unqualified device names, -// ParseDevice returns an empty vendor and class together with name set -// to the verbatim input. -// -// Deprecated: use parser.ParseDevice instead -func ParseDevice(device string) (string, string, string) { - return parser.ParseDevice(device) -} - -// ParseQualifier splits a device qualifier into vendor and class. -// The syntax for a device qualifier is -// -// "/" -// -// If parsing fails, an empty vendor and the class set to the -// verbatim input is returned. -// -// Deprecated: use parser.ParseQualifier instead -func ParseQualifier(kind string) (string, string) { - return parser.ParseQualifier(kind) -} - -// ValidateVendorName checks the validity of a vendor name. -// A vendor name may contain the following ASCII characters: -// - upper- and lowercase letters ('A'-'Z', 'a'-'z') -// - digits ('0'-'9') -// - underscore, dash, and dot ('_', '-', and '.') -// -// Deprecated: use parser.ValidateVendorName instead -func ValidateVendorName(vendor string) error { - return parser.ValidateVendorName(vendor) -} - -// ValidateClassName checks the validity of class name. 
-// A class name may contain the following ASCII characters: -// - upper- and lowercase letters ('A'-'Z', 'a'-'z') -// - digits ('0'-'9') -// - underscore, dash, and dot ('_', '-', and '.') -// -// Deprecated: use parser.ValidateClassName instead -func ValidateClassName(class string) error { - return parser.ValidateClassName(class) -} - -// ValidateDeviceName checks the validity of a device name. -// A device name may contain the following ASCII characters: -// - upper- and lowercase letters ('A'-'Z', 'a'-'z') -// - digits ('0'-'9') -// - underscore, dash, dot, colon ('_', '-', '.', ':') -// -// Deprecated: use parser.ValidateDeviceName instead -func ValidateDeviceName(name string) error { - return parser.ValidateDeviceName(name) -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go deleted file mode 100644 index e13ce60b55..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "sync" - - cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" - oci "github.com/opencontainers/runtime-spec/specs-go" -) - -// Registry keeps a cache of all CDI Specs installed or generated on -// the host. 
Registry is the primary interface clients should use to -// interact with CDI. -// -// The most commonly used Registry functions are for refreshing the -// registry and injecting CDI devices into an OCI Spec. -type Registry interface { - RegistryResolver - RegistryRefresher - DeviceDB() RegistryDeviceDB - SpecDB() RegistrySpecDB -} - -// RegistryRefresher is the registry interface for refreshing the -// cache of CDI Specs and devices. -// -// Configure reconfigures the registry with the given options. -// -// Refresh rescans all CDI Spec directories and updates the -// state of the cache to reflect any changes. It returns any -// errors encountered during the refresh. -// -// GetErrors returns all errors encountered for any of the scanned -// Spec files during the last cache refresh. -// -// GetSpecDirectories returns the set up CDI Spec directories -// currently in use. The directories are returned in the scan -// order of Refresh(). -// -// GetSpecDirErrors returns any errors related to the configured -// Spec directories. -type RegistryRefresher interface { - Configure(...Option) error - Refresh() error - GetErrors() map[string][]error - GetSpecDirectories() []string - GetSpecDirErrors() map[string]error -} - -// RegistryResolver is the registry interface for injecting CDI -// devices into an OCI Spec. -// -// InjectDevices takes an OCI Spec and injects into it a set of -// CDI devices given by qualified name. It returns the names of -// any unresolved devices and an error if injection fails. -type RegistryResolver interface { - InjectDevices(spec *oci.Spec, device ...string) (unresolved []string, err error) -} - -// RegistryDeviceDB is the registry interface for querying devices. -// -// GetDevice returns the CDI device for the given qualified name. If -// the device is not GetDevice returns nil. -// -// ListDevices returns a slice with the names of qualified device -// known. The returned slice is sorted. 
-type RegistryDeviceDB interface { - GetDevice(device string) *Device - ListDevices() []string -} - -// RegistrySpecDB is the registry interface for querying CDI Specs. -// -// ListVendors returns a slice with all vendors known. The returned -// slice is sorted. -// -// ListClasses returns a slice with all classes known. The returned -// slice is sorted. -// -// GetVendorSpecs returns a slice of all Specs for the vendor. -// -// GetSpecErrors returns any errors for the Spec encountered during -// the last cache refresh. -// -// WriteSpec writes the Spec with the given content and name to the -// last Spec directory. -type RegistrySpecDB interface { - ListVendors() []string - ListClasses() []string - GetVendorSpecs(vendor string) []*Spec - GetSpecErrors(*Spec) []error - WriteSpec(raw *cdi.Spec, name string) error - RemoveSpec(name string) error -} - -type registry struct { - *Cache -} - -var _ Registry = ®istry{} - -var ( - reg *registry - initOnce sync.Once -) - -// GetRegistry returns the CDI registry. If any options are given, those -// are applied to the registry. -func GetRegistry(options ...Option) Registry { - var new bool - initOnce.Do(func() { - reg, _ = getRegistry(options...) - new = true - }) - if !new && len(options) > 0 { - reg.Configure(options...) - reg.Refresh() - } - return reg -} - -// DeviceDB returns the registry interface for querying devices. -func (r *registry) DeviceDB() RegistryDeviceDB { - return r -} - -// SpecDB returns the registry interface for querying Specs. -func (r *registry) SpecDB() RegistrySpecDB { - return r -} - -func getRegistry(options ...Option) (*registry, error) { - c, err := NewCache(options...) 
- return ®istry{c}, err -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec-dirs.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec-dirs.go deleted file mode 100644 index f339349bba..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec-dirs.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "errors" - "io/fs" - "os" - "path/filepath" -) - -const ( - // DefaultStaticDir is the default directory for static CDI Specs. - DefaultStaticDir = "/etc/cdi" - // DefaultDynamicDir is the default directory for generated CDI Specs - DefaultDynamicDir = "/var/run/cdi" -) - -var ( - // DefaultSpecDirs is the default Spec directory configuration. - // While altering this variable changes the package defaults, - // the preferred way of overriding the default directories is - // to use a WithSpecDirs options. Otherwise the change is only - // effective if it takes place before creating the Registry or - // other Cache instances. - DefaultSpecDirs = []string{DefaultStaticDir, DefaultDynamicDir} - // ErrStopScan can be returned from a ScanSpecFunc to stop the scan. - ErrStopScan = errors.New("stop Spec scan") -) - -// WithSpecDirs returns an option to override the CDI Spec directories. 
-func WithSpecDirs(dirs ...string) Option { - return func(c *Cache) error { - specDirs := make([]string, len(dirs)) - for i, dir := range dirs { - specDirs[i] = filepath.Clean(dir) - } - c.specDirs = specDirs - return nil - } -} - -// scanSpecFunc is a function for processing CDI Spec files. -type scanSpecFunc func(string, int, *Spec, error) error - -// ScanSpecDirs scans the given directories looking for CDI Spec files, -// which are all files with a '.json' or '.yaml' suffix. For every Spec -// file discovered, ScanSpecDirs loads a Spec from the file then calls -// the scan function passing it the path to the file, the priority (the -// index of the directory in the slice of directories given), the Spec -// itself, and any error encountered while loading the Spec. -// -// Scanning stops once all files have been processed or when the scan -// function returns an error. The result of ScanSpecDirs is the error -// returned by the scan function, if any. The special error ErrStopScan -// can be used to terminate the scan gracefully without ScanSpecDirs -// returning an error. ScanSpecDirs silently skips any subdirectories. 
-func scanSpecDirs(dirs []string, scanFn scanSpecFunc) error { - var ( - spec *Spec - err error - ) - - for priority, dir := range dirs { - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - // for initial stat failure Walk calls us with nil info - if info == nil { - if errors.Is(err, fs.ErrNotExist) { - return nil - } - return err - } - // first call from Walk is for dir itself, others we skip - if info.IsDir() { - if path == dir { - return nil - } - return filepath.SkipDir - } - - // ignore obviously non-Spec files - if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { - return nil - } - - if err != nil { - return scanFn(path, priority, nil, err) - } - - spec, err = ReadSpec(path, priority) - return scanFn(path, priority, spec, err) - }) - - if err != nil && err != ErrStopScan { - return err - } - } - - return nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go deleted file mode 100644 index 62693c1bda..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go +++ /dev/null @@ -1,352 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cdi - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - - oci "github.com/opencontainers/runtime-spec/specs-go" - "sigs.k8s.io/yaml" - - "github.com/container-orchestrated-devices/container-device-interface/internal/validation" - cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" -) - -const ( - // defaultSpecExt is the file extension for the default encoding. - defaultSpecExt = ".yaml" -) - -var ( - // Externally set CDI Spec validation function. - specValidator func(*cdi.Spec) error - validatorLock sync.RWMutex -) - -// Spec represents a single CDI Spec. It is usually loaded from a -// file and stored in a cache. The Spec has an associated priority. -// This priority is inherited from the associated priority of the -// CDI Spec directory that contains the CDI Spec file and is used -// to resolve conflicts if multiple CDI Spec files contain entries -// for the same fully qualified device. -type Spec struct { - *cdi.Spec - vendor string - class string - path string - priority int - devices map[string]*Device -} - -// ReadSpec reads the given CDI Spec file. The resulting Spec is -// assigned the given priority. If reading or parsing the Spec -// data fails ReadSpec returns a nil Spec and an error. -func ReadSpec(path string, priority int) (*Spec, error) { - data, err := ioutil.ReadFile(path) - switch { - case os.IsNotExist(err): - return nil, err - case err != nil: - return nil, fmt.Errorf("failed to read CDI Spec %q: %w", path, err) - } - - raw, err := ParseSpec(data) - if err != nil { - return nil, fmt.Errorf("failed to parse CDI Spec %q: %w", path, err) - } - if raw == nil { - return nil, fmt.Errorf("failed to parse CDI Spec %q, no Spec data", path) - } - - spec, err := newSpec(raw, path, priority) - if err != nil { - return nil, err - } - - return spec, nil -} - -// newSpec creates a new Spec from the given CDI Spec data. 
The -// Spec is marked as loaded from the given path with the given -// priority. If Spec data validation fails newSpec returns a nil -// Spec and an error. -func newSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) { - err := validateSpec(raw) - if err != nil { - return nil, err - } - - spec := &Spec{ - Spec: raw, - path: filepath.Clean(path), - priority: priority, - } - - if ext := filepath.Ext(spec.path); ext != ".yaml" && ext != ".json" { - spec.path += defaultSpecExt - } - - spec.vendor, spec.class = ParseQualifier(spec.Kind) - - if spec.devices, err = spec.validate(); err != nil { - return nil, fmt.Errorf("invalid CDI Spec: %w", err) - } - - return spec, nil -} - -// Write the CDI Spec to the file associated with it during instantiation -// by newSpec() or ReadSpec(). -func (s *Spec) write(overwrite bool) error { - var ( - data []byte - dir string - tmp *os.File - err error - ) - - err = validateSpec(s.Spec) - if err != nil { - return err - } - - if filepath.Ext(s.path) == ".yaml" { - data, err = yaml.Marshal(s.Spec) - data = append([]byte("---\n"), data...) - } else { - data, err = json.Marshal(s.Spec) - } - if err != nil { - return fmt.Errorf("failed to marshal Spec file: %w", err) - } - - dir = filepath.Dir(s.path) - err = os.MkdirAll(dir, 0o755) - if err != nil { - return fmt.Errorf("failed to create Spec dir: %w", err) - } - - tmp, err = os.CreateTemp(dir, "spec.*.tmp") - if err != nil { - return fmt.Errorf("failed to create Spec file: %w", err) - } - _, err = tmp.Write(data) - tmp.Close() - if err != nil { - return fmt.Errorf("failed to write Spec file: %w", err) - } - - err = renameIn(dir, filepath.Base(tmp.Name()), filepath.Base(s.path), overwrite) - - if err != nil { - os.Remove(tmp.Name()) - err = fmt.Errorf("failed to write Spec file: %w", err) - } - - return err -} - -// GetVendor returns the vendor of this Spec. -func (s *Spec) GetVendor() string { - return s.vendor -} - -// GetClass returns the device class of this Spec. 
-func (s *Spec) GetClass() string { - return s.class -} - -// GetDevice returns the device for the given unqualified name. -func (s *Spec) GetDevice(name string) *Device { - return s.devices[name] -} - -// GetPath returns the filesystem path of this Spec. -func (s *Spec) GetPath() string { - return s.path -} - -// GetPriority returns the priority of this Spec. -func (s *Spec) GetPriority() int { - return s.priority -} - -// ApplyEdits applies the Spec's global-scope container edits to an OCI Spec. -func (s *Spec) ApplyEdits(ociSpec *oci.Spec) error { - return s.edits().Apply(ociSpec) -} - -// edits returns the applicable global container edits for this spec. -func (s *Spec) edits() *ContainerEdits { - return &ContainerEdits{&s.ContainerEdits} -} - -// Validate the Spec. -func (s *Spec) validate() (map[string]*Device, error) { - if err := validateVersion(s.Version); err != nil { - return nil, err - } - - minVersion, err := MinimumRequiredVersion(s.Spec) - if err != nil { - return nil, fmt.Errorf("could not determine minimum required version: %v", err) - } - if newVersion(minVersion).IsGreaterThan(newVersion(s.Version)) { - return nil, fmt.Errorf("the spec version must be at least v%v", minVersion) - } - - if err := ValidateVendorName(s.vendor); err != nil { - return nil, err - } - if err := ValidateClassName(s.class); err != nil { - return nil, err - } - if err := validation.ValidateSpecAnnotations(s.Kind, s.Annotations); err != nil { - return nil, err - } - if err := s.edits().Validate(); err != nil { - return nil, err - } - - devices := make(map[string]*Device) - for _, d := range s.Devices { - dev, err := newDevice(s, d) - if err != nil { - return nil, fmt.Errorf("failed add device %q: %w", d.Name, err) - } - if _, conflict := devices[d.Name]; conflict { - return nil, fmt.Errorf("invalid spec, multiple device %q", d.Name) - } - devices[d.Name] = dev - } - - return devices, nil -} - -// validateVersion checks whether the specified spec version is supported. 
-func validateVersion(version string) error { - if !validSpecVersions.isValidVersion(version) { - return fmt.Errorf("invalid version %q", version) - } - - return nil -} - -// ParseSpec parses CDI Spec data into a raw CDI Spec. -func ParseSpec(data []byte) (*cdi.Spec, error) { - var raw *cdi.Spec - err := yaml.UnmarshalStrict(data, &raw) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal CDI Spec: %w", err) - } - return raw, nil -} - -// SetSpecValidator sets a CDI Spec validator function. This function -// is used for extra CDI Spec content validation whenever a Spec file -// loaded (using ReadSpec() or written (using WriteSpec()). -func SetSpecValidator(fn func(*cdi.Spec) error) { - validatorLock.Lock() - defer validatorLock.Unlock() - specValidator = fn -} - -// validateSpec validates the Spec using the extneral validator. -func validateSpec(raw *cdi.Spec) error { - validatorLock.RLock() - defer validatorLock.RUnlock() - - if specValidator == nil { - return nil - } - err := specValidator(raw) - if err != nil { - return fmt.Errorf("Spec validation failed: %w", err) - } - return nil -} - -// GenerateSpecName generates a vendor+class scoped Spec file name. The -// name can be passed to WriteSpec() to write a Spec file to the file -// system. -// -// vendor and class should match the vendor and class of the CDI Spec. -// The file name is generated without a ".json" or ".yaml" extension. -// The caller can append the desired extension to choose a particular -// encoding. Otherwise WriteSpec() will use its default encoding. -// -// This function always returns the same name for the same vendor/class -// combination. Therefore it cannot be used as such to generate multiple -// Spec file names for a single vendor and class. -func GenerateSpecName(vendor, class string) string { - return vendor + "-" + class -} - -// GenerateTransientSpecName generates a vendor+class scoped transient -// Spec file name. 
The name can be passed to WriteSpec() to write a Spec -// file to the file system. -// -// Transient Specs are those whose lifecycle is tied to that of some -// external entity, for instance a container. vendor and class should -// match the vendor and class of the CDI Spec. transientID should be -// unique among all CDI users on the same host that might generate -// transient Spec files using the same vendor/class combination. If -// the external entity to which the lifecycle of the transient Spec -// is tied to has a unique ID of its own, then this is usually a -// good choice for transientID. -// -// The file name is generated without a ".json" or ".yaml" extension. -// The caller can append the desired extension to choose a particular -// encoding. Otherwise WriteSpec() will use its default encoding. -func GenerateTransientSpecName(vendor, class, transientID string) string { - transientID = strings.ReplaceAll(transientID, "/", "_") - return GenerateSpecName(vendor, class) + "_" + transientID -} - -// GenerateNameForSpec generates a name for the given Spec using -// GenerateSpecName with the vendor and class taken from the Spec. -// On success it returns the generated name and a nil error. If -// the Spec does not contain a valid vendor or class, it returns -// an empty name and a non-nil error. -func GenerateNameForSpec(raw *cdi.Spec) (string, error) { - vendor, class := ParseQualifier(raw.Kind) - if vendor == "" { - return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind) - } - - return GenerateSpecName(vendor, class), nil -} - -// GenerateNameForTransientSpec generates a name for the given transient -// Spec using GenerateTransientSpecName with the vendor and class taken -// from the Spec. On success it returns the generated name and a nil error. -// If the Spec does not contain a valid vendor or class, it returns an -// an empty name and a non-nil error. 
-func GenerateNameForTransientSpec(raw *cdi.Spec, transientID string) (string, error) { - vendor, class := ParseQualifier(raw.Kind) - if vendor == "" { - return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind) - } - - return GenerateTransientSpecName(vendor, class, transientID), nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_linux.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_linux.go deleted file mode 100644 index 9ad2739256..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_linux.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright © 2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -// Rename src to dst, both relative to the directory dir. If dst already exists -// refuse renaming with an error unless overwrite is explicitly asked for. 
-func renameIn(dir, src, dst string, overwrite bool) error { - var flags uint - - dirf, err := os.Open(dir) - if err != nil { - return fmt.Errorf("rename failed: %w", err) - } - defer dirf.Close() - - if !overwrite { - flags = unix.RENAME_NOREPLACE - } - - dirFd := int(dirf.Fd()) - err = unix.Renameat2(dirFd, src, dirFd, dst, flags) - if err != nil { - return fmt.Errorf("rename failed: %w", err) - } - - return nil -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_other.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_other.go deleted file mode 100644 index 285e04e27a..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec_other.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !linux -// +build !linux - -/* - Copyright © 2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "os" - "path/filepath" -) - -// Rename src to dst, both relative to the directory dir. If dst already exists -// refuse renaming with an error unless overwrite is explicitly asked for. 
-func renameIn(dir, src, dst string, overwrite bool) error { - src = filepath.Join(dir, src) - dst = filepath.Join(dir, dst) - - _, err := os.Stat(dst) - if err == nil && !overwrite { - return os.ErrExist - } - - return os.Rename(src, dst) -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go deleted file mode 100644 index 22534d9204..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - Copyright © The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "strings" - - "golang.org/x/mod/semver" - - "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" - cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" -) - -const ( - // CurrentVersion is the current version of the CDI Spec. 
- CurrentVersion = cdi.CurrentVersion - - // vCurrent is the current version as a semver-comparable type - vCurrent version = "v" + CurrentVersion - - // These represent the released versions of the CDI specification - v010 version = "v0.1.0" - v020 version = "v0.2.0" - v030 version = "v0.3.0" - v040 version = "v0.4.0" - v050 version = "v0.5.0" - v060 version = "v0.6.0" - - // vEarliest is the earliest supported version of the CDI specification - vEarliest version = v030 -) - -// validSpecVersions stores a map of spec versions to functions to check the required versions. -// Adding new fields / spec versions requires that a `requiredFunc` be implemented and -// this map be updated. -var validSpecVersions = requiredVersionMap{ - v010: nil, - v020: nil, - v030: nil, - v040: requiresV040, - v050: requiresV050, - v060: requiresV060, -} - -// MinimumRequiredVersion determines the minimum spec version for the input spec. -func MinimumRequiredVersion(spec *cdi.Spec) (string, error) { - minVersion := validSpecVersions.requiredVersion(spec) - return minVersion.String(), nil -} - -// version represents a semantic version string -type version string - -// newVersion creates a version that can be used for semantic version comparisons. -func newVersion(v string) version { - return version("v" + strings.TrimPrefix(v, "v")) -} - -// String returns the string representation of the version. -// This trims a leading v if present. -func (v version) String() string { - return strings.TrimPrefix(string(v), "v") -} - -// IsGreaterThan checks with a version is greater than the specified version. 
-func (v version) IsGreaterThan(o version) bool { - return semver.Compare(string(v), string(o)) > 0 -} - -// IsLatest checks whether the version is the latest supported version -func (v version) IsLatest() bool { - return v == vCurrent -} - -type requiredFunc func(*cdi.Spec) bool - -type requiredVersionMap map[version]requiredFunc - -// isValidVersion checks whether the specified version is valid. -// A version is valid if it is contained in the required version map. -func (r requiredVersionMap) isValidVersion(specVersion string) bool { - _, ok := validSpecVersions[newVersion(specVersion)] - - return ok -} - -// requiredVersion returns the minimum version required for the given spec -func (r requiredVersionMap) requiredVersion(spec *cdi.Spec) version { - minVersion := vEarliest - - for v, isRequired := range validSpecVersions { - if isRequired == nil { - continue - } - if isRequired(spec) && v.IsGreaterThan(minVersion) { - minVersion = v - } - // If we have already detected the latest version then no later version could be detected - if minVersion.IsLatest() { - break - } - } - - return minVersion -} - -// requiresV060 returns true if the spec uses v0.6.0 features -func requiresV060(spec *cdi.Spec) bool { - // The v0.6.0 spec allows annotations to be specified at a spec level - for range spec.Annotations { - return true - } - - // The v0.6.0 spec allows annotations to be specified at a device level - for _, d := range spec.Devices { - for range d.Annotations { - return true - } - } - - // The v0.6.0 spec allows dots "." 
in Kind name label (class) - vendor, class := parser.ParseQualifier(spec.Kind) - if vendor != "" { - if strings.ContainsRune(class, '.') { - return true - } - } - - return false -} - -// requiresV050 returns true if the spec uses v0.5.0 features -func requiresV050(spec *cdi.Spec) bool { - var edits []*cdi.ContainerEdits - - for _, d := range spec.Devices { - // The v0.5.0 spec allowed device names to start with a digit instead of requiring a letter - if len(d.Name) > 0 && !parser.IsLetter(rune(d.Name[0])) { - return true - } - edits = append(edits, &d.ContainerEdits) - } - - edits = append(edits, &spec.ContainerEdits) - for _, e := range edits { - for _, dn := range e.DeviceNodes { - // The HostPath field was added in v0.5.0 - if dn.HostPath != "" { - return true - } - } - } - return false -} - -// requiresV040 returns true if the spec uses v0.4.0 features -func requiresV040(spec *cdi.Spec) bool { - var edits []*cdi.ContainerEdits - - for _, d := range spec.Devices { - edits = append(edits, &d.ContainerEdits) - } - - edits = append(edits, &spec.ContainerEdits) - for _, e := range edits { - for _, m := range e.Mounts { - // The Type field was added in v0.4.0 - if m.Type != "" { - return true - } - } - } - return false -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go deleted file mode 100644 index 4043b858f2..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go +++ /dev/null @@ -1,62 +0,0 @@ -package specs - -import "os" - -// CurrentVersion is the current version of the Spec. -const CurrentVersion = "0.6.0" - -// Spec is the base configuration for CDI -type Spec struct { - Version string `json:"cdiVersion"` - Kind string `json:"kind"` - // Annotations add meta information per CDI spec. Note these are CDI-specific and do not affect container metadata. 
- Annotations map[string]string `json:"annotations,omitempty"` - Devices []Device `json:"devices"` - ContainerEdits ContainerEdits `json:"containerEdits,omitempty"` -} - -// Device is a "Device" a container runtime can add to a container -type Device struct { - Name string `json:"name"` - // Annotations add meta information per device. Note these are CDI-specific and do not affect container metadata. - Annotations map[string]string `json:"annotations,omitempty"` - ContainerEdits ContainerEdits `json:"containerEdits"` -} - -// ContainerEdits are edits a container runtime must make to the OCI spec to expose the device. -type ContainerEdits struct { - Env []string `json:"env,omitempty"` - DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty"` - Hooks []*Hook `json:"hooks,omitempty"` - Mounts []*Mount `json:"mounts,omitempty"` -} - -// DeviceNode represents a device node that needs to be added to the OCI spec. -type DeviceNode struct { - Path string `json:"path"` - HostPath string `json:"hostPath,omitempty"` - Type string `json:"type,omitempty"` - Major int64 `json:"major,omitempty"` - Minor int64 `json:"minor,omitempty"` - FileMode *os.FileMode `json:"fileMode,omitempty"` - Permissions string `json:"permissions,omitempty"` - UID *uint32 `json:"uid,omitempty"` - GID *uint32 `json:"gid,omitempty"` -} - -// Mount represents a mount that needs to be added to the OCI spec. -type Mount struct { - HostPath string `json:"hostPath"` - ContainerPath string `json:"containerPath"` - Options []string `json:"options,omitempty"` - Type string `json:"type,omitempty"` -} - -// Hook represents a hook that needs to be added to the OCI spec. 
-type Hook struct { - HookName string `json:"hookName"` - Path string `json:"path"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` - Timeout *int `json:"timeout,omitempty"` -} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go deleted file mode 100644 index d709ecbc73..0000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go +++ /dev/null @@ -1,113 +0,0 @@ -package specs - -import ( - "errors" - "fmt" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -// ApplyOCIEditsForDevice applies devices OCI edits, in other words -// it finds the device in the CDI spec and applies the OCI patches that device -// requires to the OCI specification. -func ApplyOCIEditsForDevice(config *spec.Spec, cdi *Spec, dev string) error { - for _, d := range cdi.Devices { - if d.Name != dev { - continue - } - - return ApplyEditsToOCISpec(config, &d.ContainerEdits) - } - - return fmt.Errorf("CDI: device %q not found for spec %q", dev, cdi.Kind) -} - -// ApplyOCIEdits applies the OCI edits the CDI spec declares globally -func ApplyOCIEdits(config *spec.Spec, cdi *Spec) error { - return ApplyEditsToOCISpec(config, &cdi.ContainerEdits) -} - -// ApplyEditsToOCISpec applies the specified edits to the OCI spec. -func ApplyEditsToOCISpec(config *spec.Spec, edits *ContainerEdits) error { - if config == nil { - return errors.New("spec is nil") - } - if edits == nil { - return nil - } - - if len(edits.Env) > 0 { - if config.Process == nil { - config.Process = &spec.Process{} - } - config.Process.Env = append(config.Process.Env, edits.Env...) 
- } - - for _, d := range edits.DeviceNodes { - if config.Linux == nil { - config.Linux = &spec.Linux{} - } - config.Linux.Devices = append(config.Linux.Devices, d.ToOCI()) - } - - for _, m := range edits.Mounts { - config.Mounts = append(config.Mounts, m.ToOCI()) - } - - for _, h := range edits.Hooks { - if config.Hooks == nil { - config.Hooks = &spec.Hooks{} - } - switch h.HookName { - case "prestart": - config.Hooks.Prestart = append(config.Hooks.Prestart, h.ToOCI()) - case "createRuntime": - config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, h.ToOCI()) - case "createContainer": - config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, h.ToOCI()) - case "startContainer": - config.Hooks.StartContainer = append(config.Hooks.StartContainer, h.ToOCI()) - case "poststart": - config.Hooks.Poststart = append(config.Hooks.Poststart, h.ToOCI()) - case "poststop": - config.Hooks.Poststop = append(config.Hooks.Poststop, h.ToOCI()) - default: - fmt.Printf("CDI: Unknown hook %q\n", h.HookName) - } - } - - return nil -} - -// ToOCI returns the opencontainers runtime Spec Hook for this Hook. -func (h *Hook) ToOCI() spec.Hook { - return spec.Hook{ - Path: h.Path, - Args: h.Args, - Env: h.Env, - Timeout: h.Timeout, - } -} - -// ToOCI returns the opencontainers runtime Spec Mount for this Mount. -func (m *Mount) ToOCI() spec.Mount { - return spec.Mount{ - Source: m.HostPath, - Destination: m.ContainerPath, - Options: m.Options, - Type: m.Type, - } -} - -// ToOCI returns the opencontainers runtime Spec LinuxDevice for this DeviceNode. 
-func (d *DeviceNode) ToOCI() spec.LinuxDevice { - return spec.LinuxDevice{ - Path: d.Path, - Type: d.Type, - Major: d.Major, - Minor: d.Minor, - FileMode: d.FileMode, - UID: d.UID, - GID: d.GID, - } -} diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 7a51b84237..68f9914856 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/signature/signer" is "github.com/containers/image/v5/storage" @@ -70,6 +71,7 @@ type PushOptions struct { RemoveSignatures bool // true to discard signatures in images ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 SourceFilter LookupReferenceFunc // filter the list source + AddCompression []string // add existing instances with requested compression algorithms to manifest list } // Create creates a new list containing information about the specified image, @@ -239,6 +241,10 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push return nil, "", err } } + compressionVariants, err := prepareAddWithCompression(options.AddCompression) + if err != nil { + return nil, "", err + } copyOptions := &cp.Options{ ImageListSelection: options.ImageListSelection, Instances: options.Instances, @@ -252,6 +258,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push SignBySigstorePrivateKeyFile: options.SignBySigstorePrivateKeyFile, SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, ForceManifestMIMEType: singleImageManifestType, + 
EnsureCompressionVariantsExist: compressionVariants, } // Copy whatever we were asked to copy. @@ -266,6 +273,18 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push return nil, manifestDigest, nil } +func prepareAddWithCompression(variants []string) ([]cp.OptionCompressionVariant, error) { + res := []cp.OptionCompressionVariant{} + for _, name := range variants { + algo, err := compression.AlgorithmByName(name) + if err != nil { + return nil, fmt.Errorf("requested algorithm %s is not supported for replication: %w", name, err) + } + res = append(res, cp.OptionCompressionVariant{Algorithm: algo}) + } + return res, nil +} + // Add adds information about the specified image to the list, computing the // image's manifest's digest, retrieving OS and architecture information from // the image's configuration, and recording the image's reference so that it diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index d5cf153e4f..2b005d39f8 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -185,6 +185,9 @@ type ContainersConfig struct { // Containers logs default to truncated container ID as a tag. LogTag string `toml:"log_tag,omitempty"` + // Mount to add to all containers + Mounts []string `toml:"mounts,omitempty"` + // NetNS indicates how to create a network namespace for the container NetNS string `toml:"netns,omitempty"` @@ -266,6 +269,17 @@ type EngineConfig struct { // in containers-registries.conf(5). CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"` + // ComposeProviders specifies one or more external providers for the + // compose command. The first found provider is used for execution. + // Can be an absolute and relative path or a (file) name. Make sure to + // expand the return items via `os.ExpandEnv`. 
+ ComposeProviders []string `toml:"compose_providers,omitempty"` + + // ComposeWarningLogs emits logs on each invocation of the compose + // command indicating that an external compose provider is being + // executed. + ComposeWarningLogs bool `toml:"compose_warning_logs,omitempty"` + // DBBackend is the database backend to be used by Podman. DBBackend string `toml:"database_backend,omitempty"` @@ -1010,17 +1024,7 @@ func (c *NetworkConfig) Validate() error { } } - if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) { - return nil - } - - for _, pluginDir := range c.CNIPluginDirs { - if err := isDirectory(pluginDir); err == nil { - return nil - } - } - - return fmt.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ",")) + return nil } // FindConmon iterates over (*Config).ConmonPath and returns the path diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index e101b06219..4e2a0abc95 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -11,7 +11,7 @@ import ( "strings" "syscall" - "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" + "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" units "github.com/docker/go-units" ) @@ -58,7 +58,7 @@ func (c *EngineConfig) validatePaths() error { func (c *ContainersConfig) validateDevices() error { for _, d := range c.Devices { - if cdi.IsQualifiedName(d) { + if parser.IsQualifiedName(d) { continue } _, _, _, err := Device(d) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 35553a96ee..329b45aae0 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -196,6 +196,13 @@ default_sysctls = 
[ # #log_tag = "" +# List of mounts. Specified as +# "type=TYPE,source=,destination=,", for example: +# "type=bind,source=/var/lib/foobar,destination=/var/lib/foobar,ro". +# If it is empty or commented out, no mounts will be added +# +#mounts = [] + # Default way to to create a Network namespace for the container # Options are: # `private` Create private Network Namespace for the container. @@ -276,7 +283,7 @@ default_sysctls = [ # If it is empty or commented out, no volumes will be added # #volumes = [] -# + #[engine.platform_to_oci_runtime] #"wasi/wasm" = ["crun-wasm"] #"wasi/wasm32" = ["crun-wasm"] @@ -376,6 +383,20 @@ default_sysctls = [ # #active_service = "production" +# Enforces using docker.io for completing short names in Podman's compatibility +# REST API. Note that this will ignore unqualified-search-registries and +# short-name aliases defined in containers-registries.conf(5). +#compat_api_enforce_docker_hub = true + +# Specify one or more external providers for the compose command. The first +# found provider is used for execution. Can be an absolute and relative path +# or a (file) name. +#compose_providers=[] + +# Emit logs on each invocation of the compose command indicating that an +# external compose provider is being executed. +#compose_warning_logs = true + # The compression format to use when pushing an image. # Valid options are: `gzip`, `zstd` and `zstd:chunked`. # diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 5e187893b2..e580e05fb3 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -500,7 +500,7 @@ default_sysctls = [ # List of the OCI runtimes that support --format=json. When json is supported # engine will use it for reporting nicer errors. 
# -#runtime_supports_json = ["crun", "runc", "kata", "runsc", "youki", "krun"] +#runtime_supports_json = ["crun", "runc", "kata", "runsc", "youki", "krun", "ocijail"] # List of the OCI runtimes that supports running containers with KVM Separation. # diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index a3e885f5c4..ff22f244f4 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -87,6 +87,16 @@ var ( // should be set during link-time, if different packagers put their // helper binary in a different location. additionalHelperBinariesDir string + + defaultUnixComposeProviders = []string{ + "docker-compose", + "$HOME/.docker/cli-plugins/docker-compose", + "/usr/local/lib/docker/cli-plugins/docker-compose", + "/usr/local/libexec/docker/cli-plugins/docker-compose", + "/usr/lib/docker/cli-plugins/docker-compose", + "/usr/libexec/docker/cli-plugins/docker-compose", + "podman-compose", + } ) // nolint:unparam @@ -176,19 +186,18 @@ func DefaultConfig() (*Config, error) { return &Config{ Containers: ContainersConfig{ - Devices: []string{}, - Volumes: []string{}, Annotations: []string{}, ApparmorProfile: DefaultApparmorProfile, BaseHostsFile: "", CgroupNS: cgroupNS, Cgroups: getDefaultCgroupsMode(), + DNSOptions: []string{}, + DNSSearches: []string{}, + DNSServers: []string{}, DefaultCapabilities: DefaultCapabilities, DefaultSysctls: []string{}, DefaultUlimits: getDefaultProcessLimits(), - DNSServers: []string{}, - DNSOptions: []string{}, - DNSSearches: []string{}, + Devices: []string{}, EnableKeyring: true, EnableLabeling: selinuxEnabled(), Env: []string{ @@ -197,20 +206,22 @@ func DefaultConfig() (*Config, error) { }, EnvHost: false, HTTPProxy: true, + IPCNS: "shareable", Init: false, InitPath: "", - IPCNS: "shareable", LogDriver: defaultLogDriver(), LogSizeMax: DefaultLogSizeMax, + Mounts: []string{}, NetNS: 
"private", NoHosts: false, - PidsLimit: DefaultPidsLimit, PidNS: "private", + PidsLimit: DefaultPidsLimit, ShmSize: DefaultShmSize, TZ: "", - Umask: "0022", UTSNS: "private", + Umask: "0022", UserNSSize: DefaultUserNSSize, // Deprecated + Volumes: []string{}, }, Network: NetworkConfig{ DefaultNetwork: "podman", @@ -260,6 +271,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) c.CompatAPIEnforceDockerHub = true + c.ComposeProviders = getDefaultComposeProviders() // may vary across supported platforms + c.ComposeWarningLogs = true if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { if err := types.SetDefaultConfigFilePath(path); err != nil { @@ -406,6 +419,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { "runsc", "youki", "krun", + "ocijail", } c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"} @@ -487,6 +501,11 @@ func (c *Config) Volumes() []string { return c.Containers.Volumes } +// Mounts returns the default set of mounts that should be mounted in containers. +func (c *Config) Mounts() []string { + return c.Containers.Mounts +} + // Devices returns the default additional devices for containers. 
func (c *Config) Devices() []string { return c.Containers.Devices diff --git a/vendor/github.com/containers/common/pkg/config/default_darwin.go b/vendor/github.com/containers/common/pkg/config/default_darwin.go index 7557666209..86fa6d5087 100644 --- a/vendor/github.com/containers/common/pkg/config/default_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/default_darwin.go @@ -20,3 +20,14 @@ func getDefaultMachineVolumes() []string { "/var/folders:/var/folders", } } + +func getDefaultComposeProviders() []string { + return []string{ + "docker-compose", + "$HOME/.docker/cli-plugins/docker-compose", + "/opt/homebrew/bin/docker-compose", + "/usr/local/bin/docker-compose", + "/Applications/Docker.app/Contents/Resources/cli-plugins/docker-compose", + "podman-compose", + } +} diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_freebsd.go index 637abf9811..1110edd03d 100644 --- a/vendor/github.com/containers/common/pkg/config/default_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/default_freebsd.go @@ -26,3 +26,7 @@ func getLibpodTmpDir() string { func getDefaultMachineVolumes() []string { return []string{"$HOME:$HOME"} } + +func getDefaultComposeProviders() []string { + return defaultUnixComposeProviders +} diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index d4d04764a1..ee2c49d130 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -74,3 +74,7 @@ func getLibpodTmpDir() string { func getDefaultMachineVolumes() []string { return []string{"$HOME:$HOME"} } + +func getDefaultComposeProviders() []string { + return defaultUnixComposeProviders +} diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go 
b/vendor/github.com/containers/common/pkg/config/default_windows.go index 08a0bf2239..c79bc70087 100644 --- a/vendor/github.com/containers/common/pkg/config/default_windows.go +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -49,3 +49,8 @@ func getLibpodTmpDir() string { func getDefaultMachineVolumes() []string { return []string{} } + +func getDefaultComposeProviders() []string { + // Rely on os.LookPath to do the trick on Windows. + return []string{"docker-compose", "podman-compose"} +} diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index e396f0fc08..dd79f91fb4 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -27,7 +27,16 @@ func queryPackageVersion(cmdArg ...string) string { cmd := exec.Command(cmdArg[0], cmdArg[1:]...) if outp, err := cmd.Output(); err == nil { output = string(outp) - if cmdArg[0] == "/usr/bin/dpkg" { + deb := false + if cmdArg[0] == "/usr/bin/dlocate" { + // can return multiple matches + l := strings.Split(output, "\n") + output = l[0] + deb = true + } else if cmdArg[0] == "/usr/bin/dpkg" { + deb = true + } + if deb { r := strings.Split(output, ": ") queryFormat := `${Package}_${Version}_${Architecture}` cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) @@ -47,9 +56,14 @@ func queryPackageVersion(cmdArg ...string) string { // Note: This function is copied from containers/podman libpod/util.go // Please see https://github.com/containers/common/pull/1460 func PackageVersion(program string) string { // program is full path + _, err := os.Stat(program) + if err != nil { + return UnknownPackage + } packagers := [][]string{ {"/usr/bin/rpm", "-q", "-f"}, - {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu + {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) {"/usr/bin/pacman", "-Qo"}, // Arch 
{"/usr/bin/qfile", "-qv"}, // Gentoo (quick) {"/usr/bin/equery", "b"}, // Gentoo (slow) diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go index f45b97f56c..8d5580d7cb 100644 --- a/vendor/github.com/containers/image/v5/copy/blob.go +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -83,12 +83,12 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read return types.BlobInfo{}, err } - // === Report progress using the ic.c.progress channel, if required. - if ic.c.progress != nil && ic.c.progressInterval > 0 { + // === Report progress using the ic.c.options.Progress channel, if required. + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { progressReader := newProgressReader( stream.reader, - ic.c.progress, - ic.c.progressInterval, + ic.c.options.Progress, + ic.c.options.ProgressInterval, srcInfo, ) defer progressReader.reportDone() diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index bf8f4015b6..ac0e6f2fa2 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -17,6 +17,7 @@ import ( "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" + compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/signature/signer" "github.com/containers/image/v5/transports" @@ -126,36 +127,46 @@ type Options struct { // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type // to not indicate "nondistributable". 
DownloadForeignLayers bool + + // Contains slice of OptionCompressionVariant, where copy will ensure that for each platform + // in the manifest list, a variant with the requested compression will exist. + // Invalid when copying a non-multi-architecture image. That will probably + // change in the future. + EnsureCompressionVariantsExist []OptionCompressionVariant +} + +// OptionCompressionVariant allows to supply information about +// selected compression algorithm and compression level by the +// end-user. Refer to EnsureCompressionVariantsExist to know +// more about its usage. +type OptionCompressionVariant struct { + Algorithm compression.Algorithm + Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance } // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. // The owner must call close() when done. type copier struct { - dest private.ImageDestination - rawSource private.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties + policyContext *signature.PolicyContext + dest private.ImageDestination + rawSource private.ImageSource + options *Options // never nil + + reportWriter io.Writer + progressOutput io.Writer + + unparsedToplevel *image.UnparsedImage // for rawSource blobInfoCache internalblobinfocache.BlobInfoCache2 - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs - downloadForeignLayers bool - signers []*signer.Signer // Signers to use to create new signatures for the image - signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. 
+ signers []*signer.Signer // Signers to use to create new signatures for the image + signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. } // Image copies image from srcRef to destRef, using policyContext to validate // source image admissibility. It returns the manifest which was written to // the new copy of the image. func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { - // NOTE this function uses an output parameter for the error return value. - // Setting this and returning is the ideal way to return an error. - // - // the defers in this routine will wrap the error return with its own errors - // which can be valuable context in the middle of a multi-streamed copy. if options == nil { options = &Options{} } @@ -209,27 +220,27 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } c := &copier{ - dest: dest, - rawSource: rawSource, - reportWriter: reportWriter, - progressOutput: progressOutput, - progressInterval: options.ProgressInterval, - progress: options.Progress, + policyContext: policyContext, + dest: dest, + rawSource: rawSource, + options: options, + + reportWriter: reportWriter, + progressOutput: progressOutput, + + unparsedToplevel: image.UnparsedInstance(rawSource, nil), // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? 
- blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), - ociDecryptConfig: options.OciDecryptConfig, - ociEncryptConfig: options.OciEncryptConfig, - downloadForeignLayers: options.DownloadForeignLayers, + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), } defer c.close() // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { - c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore + c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore if c.concurrentBlobCopiesSemaphore == nil { - max := options.MaxParallelDownloads + max := c.options.MaxParallelDownloads if max == 0 { max = maxParallelDownloads } @@ -237,33 +248,40 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } } else { c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) - if options.ConcurrentBlobCopiesSemaphore != nil { - if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + if c.options.ConcurrentBlobCopiesSemaphore != nil { + if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) } - defer options.ConcurrentBlobCopiesSemaphore.Release(1) + defer c.options.ConcurrentBlobCopiesSemaphore.Release(1) } } - if err := c.setupSigners(options); err != nil { + if err := c.setupSigners(); err != nil { return nil, err } - unparsedToplevel := image.UnparsedInstance(rawSource, nil) - multiImage, err := isMultiImage(ctx, unparsedToplevel) + multiImage, err := isMultiImage(ctx, c.unparsedToplevel) if err != nil { return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err) } if !multiImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, 
fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } // The simple case: just copy a single image. - if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil { + single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false}) + if err != nil { return nil, err } - } else if options.ImageListSelection == CopySystemImage { + copiedManifest = single.manifest + } else if c.options.ImageListSelection == CopySystemImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that // matches the current system to copy, and copy it. - mfest, manifestType, err := unparsedToplevel.Manifest(ctx) + mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx) if err != nil { return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) } @@ -271,34 +289,35 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if err != nil { return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) } - instanceDigest, err := manifestList.ChooseInstanceByCompression(options.SourceCtx, options.PreferGzipInstances) // try to pick one that matches options.SourceCtx + instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx if err != nil { return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) } logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) unparsedInstance := 
image.UnparsedInstance(rawSource, &instanceDigest) - - if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil { + single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false}) + if err != nil { return nil, fmt.Errorf("copying system image from manifest list: %w", err) } - } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */ + copiedManifest = single.manifest + } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */ // If we were asked to copy multiple images and can't, that's an error. if !supportsMultipleImages(c.dest) { return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) } // Copy some or all of the images. - switch options.ImageListSelection { + switch c.options.ImageListSelection { case CopyAllImages: logrus.Debugf("Source is a manifest list; copying all instances") case CopySpecificImages: logrus.Debugf("Source is a manifest list; copying some instances") } - if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil { + if copiedManifest, err = c.copyMultipleImages(ctx); err != nil { return nil, err } } - if err := c.dest.Commit(ctx, unparsedToplevel); err != nil { + if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil { return nil, fmt.Errorf("committing the finished image: %w", err) } diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 86fadff66e..b406b0c316 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -34,7 +34,7 @@ type bpDecryptionStepData struct { // srcInfo is only used for 
error messages. // Returns data for other steps; the caller should eventually use updateCryptoOperation. func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { - if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil { + if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil { return &bpDecryptionStepData{ decrypting: false, }, nil @@ -47,7 +47,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo desc := imgspecv1.Descriptor{ Annotations: stream.info.Annotations, } - reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false) + reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false) if err != nil { return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) } @@ -81,7 +81,7 @@ type bpEncryptionStepData struct { // Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations. 
func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) { - if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil { + if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil { return &bpEncryptionStepData{ encrypting: false, }, nil @@ -101,7 +101,7 @@ func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncryp Size: srcInfo.Size, Annotations: annotations, } - reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc) + reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc) if err != nil { return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) } diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 41ea1b11b0..34f2129d69 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -5,16 +5,19 @@ import ( "context" "errors" "fmt" + "sort" "strings" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/image" internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/pkg/compression" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) @@ -28,30 +31,125 @@ const ( type instanceCopy struct { op instanceCopyKind sourceDigest digest.Digest + + // Fields which can be used by callers when operation + // is `instanceCopyClone` + cloneCompressionVariant OptionCompressionVariant + 
clonePlatform *imgspecv1.Platform + cloneAnnotations map[string]string +} + +// internal type only to make imgspecv1.Platform comparable +type platformComparable struct { + architecture string + os string + osVersion string + osFeatures string + variant string +} + +// Converts imgspecv1.Platform to a comparable format. +func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable { + if platform == nil { + return platformComparable{} + } + osFeatures := slices.Clone(platform.OSFeatures) + sort.Strings(osFeatures) + return platformComparable{architecture: platform.Architecture, + os: platform.OS, + // This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now. + osFeatures: strings.Join(osFeatures, ","), + osVersion: platform.OSVersion, + variant: platform.Variant, + } +} + +// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests +func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) { + res := make(map[platformComparable]*set.Set[string]) + for _, instanceDigest := range instanceDigests { + instanceDetails, err := list.Instance(instanceDigest) + if err != nil { + return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err) + } + platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform) + platformSet, ok := res[platform] + if !ok { + platformSet = set.New[string]() + res[platform] = platformSet + } + platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames) + } + return res, nil +} + +func validateCompressionVariantExists(input []OptionCompressionVariant) error { + for _, option := range input { + _, err := compression.AlgorithmByName(option.Algorithm.Name()) + if err != nil { + return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err) + } + 
} + return nil } // prepareInstanceCopies prepares a list of instances which needs to copied to the manifest list. -func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy { +func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) { res := []instanceCopy{} + if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 { + // List can already contain compressed instance for a compression selected in `EnsureCompressionVariantsExist` + // It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances, + // EnsureCompressionVariantsExist asks for an instance with some compression, + // an instance with that compression already exists, but is not included in options.Instances. + // We might define the semantics and implement this in the future. + return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages") + } + err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist) + if err != nil { + return res, err + } + compressionsByPlatform, err := platformCompressionMap(list, instanceDigests) + if err != nil { + return nil, err + } for i, instanceDigest := range instanceDigests { if options.ImageListSelection == CopySpecificImages && !slices.Contains(options.Instances, instanceDigest) { logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) continue } + instanceDetails, err := list.Instance(instanceDigest) + if err != nil { + return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err) + } res = append(res, instanceCopy{ op: instanceCopyCopy, sourceDigest: instanceDigest, }) + platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform) + compressionList := compressionsByPlatform[platform] + for _, compressionVariant := range options.EnsureCompressionVariantsExist 
{ + if !compressionList.Contains(compressionVariant.Algorithm.Name()) { + res = append(res, instanceCopy{ + op: instanceCopyClone, + sourceDigest: instanceDigest, + cloneCompressionVariant: compressionVariant, + clonePlatform: instanceDetails.ReadOnly.Platform, + cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations), + }) + // add current compression to the list so that we don’t create duplicate clones + compressionList.Add(compressionVariant.Algorithm.Name()) + } + } } - return res + return res, nil } // copyMultipleImages copies some or all of an image list's instances, using -// policyContext to validate source image admissibility. -func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) { +// c.policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) { // Parse the list and get a copy of the original value after it's re-encoded. 
- manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) + manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx) if err != nil { return nil, fmt.Errorf("reading manifest list: %w", err) } @@ -61,7 +159,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } updatedList := originalList.CloneInternal() - sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options, + sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel, "Getting image list signatures", "Checking if image list destination supports signatures") if err != nil { @@ -94,12 +192,12 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur if destIsDigestedReference { cannotModifyManifestListReason = "Destination specifies a digest" } - if options.PreserveDigests { + if c.options.PreserveDigests { cannotModifyManifestListReason = "Instructed to preserve digests" } // Determine if we'll need to convert the manifest list to a different format. - forceListMIMEType := options.ForceManifestMIMEType + forceListMIMEType := c.options.ForceManifestMIMEType switch forceListMIMEType { case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: forceListMIMEType = manifest.DockerV2ListMediaType @@ -119,8 +217,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Copy each image, or just the ones we want to copy, in turn. 
instanceDigests := updatedList.Instances() instanceEdits := []internalManifest.ListEdit{} - instanceCopyList := prepareInstanceCopies(instanceDigests, options) - c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests)) + instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options) + if err != nil { + return nil, fmt.Errorf("preparing instances for copy: %w", err) + } + c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests)) for i, instance := range instanceCopyList { // Update instances to be edited by their `ListOperation` and // populate necessary fields. @@ -129,17 +230,39 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) - updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false}) if err != nil { return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) } // Record the result of a possible conversion here. 
instanceEdits = append(instanceEdits, internalManifest.ListEdit{ - ListOperation: internalManifest.ListOpUpdate, - UpdateOldDigest: instance.sourceDigest, - UpdateDigest: updatedManifestDigest, - UpdateSize: int64(len(updatedManifest)), - UpdateMediaType: updatedManifestType}) + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updated.manifestDigest, + UpdateSize: int64(len(updated.manifest)), + UpdateCompressionAlgorithms: updated.compressionAlgorithms, + UpdateMediaType: updated.manifestMIMEType}) + case instanceCopyClone: + logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{ + requireCompressionFormatMatch: true, + compressionFormat: &instance.cloneCompressionVariant.Algorithm, + compressionLevel: instance.cloneCompressionVariant.Level}) + if err != nil { + return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpAdd, + AddDigest: updated.manifestDigest, + AddSize: int64(len(updated.manifest)), + AddMediaType: updated.manifestMIMEType, + AddPlatform: instance.clonePlatform, + AddAnnotations: instance.cloneAnnotations, + AddCompressionAlgorithms: updated.compressionAlgorithms, + }) default: return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) } @@ -204,7 +327,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } // Sign the manifest list. 
- newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity) + newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 25f2463630..ce078234cb 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -84,6 +84,8 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. ), mpb.AppendDecorators( decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + decor.Name(" | "), + decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""), ), ) } @@ -94,6 +96,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. mpb.PrependDecorators( decor.OnComplete(decor.Name(prefix), onComplete), ), + mpb.AppendDecorators( + decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""), + ), ) } return &progressBar{ diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index fd19e18cd5..0ec54ded24 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -13,20 +13,20 @@ import ( "github.com/containers/image/v5/transports" ) -// setupSigners initializes c.signers based on options. -func (c *copier) setupSigners(options *Options) error { - c.signers = append(c.signers, options.Signers...) - // c.signersToClose is intentionally not updated with options.Signers. +// setupSigners initializes c.signers. +func (c *copier) setupSigners() error { + c.signers = append(c.signers, c.options.Signers...) + // c.signersToClose is intentionally not updated with c.options.Signers. 
// We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need // to clean up any created signers on failure. - if options.SignBy != "" { + if c.options.SignBy != "" { opts := []simplesigning.Option{ - simplesigning.WithKeyFingerprint(options.SignBy), + simplesigning.WithKeyFingerprint(c.options.SignBy), } - if options.SignPassphrase != "" { - opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase)) + if c.options.SignPassphrase != "" { + opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase)) } signer, err := simplesigning.NewSigner(opts...) if err != nil { @@ -36,9 +36,9 @@ func (c *copier) setupSigners(options *Options) error { c.signersToClose = append(c.signersToClose, signer) } - if options.SignBySigstorePrivateKeyFile != "" { + if c.options.SignBySigstorePrivateKeyFile != "" { signer, err := sigstore.NewSigner( - sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase), + sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase), ) if err != nil { return err @@ -50,13 +50,13 @@ func (c *copier) setupSigners(options *Options) error { return nil } -// sourceSignatures returns signatures from unparsedSource based on options, +// sourceSignatures returns signatures from unparsedSource, // and verifies that they can be used (to avoid copying a large image when we // can tell in advance that it would ultimately fail) -func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options, +func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) { var sigs []internalsig.Signature - if options.RemoveSignatures { + if c.options.RemoveSignatures { sigs = []internalsig.Signature{} } else { c.Printf("%s\n", 
gettingSignaturesMessage) diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index b8569a70c0..5297f019f7 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -18,7 +18,6 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" @@ -30,40 +29,54 @@ import ( // imageCopier tracks state specific to a single image (possibly an item of a manifest list) type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src *image.SourcedImage - diffIDsAreNeeded bool - cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can - canSubstituteBlobs bool - compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. - compressionLevel *int - ociEncryptLayers *[]int + c *copier + manifestUpdates *types.ManifestUpdateOptions + src *image.SourcedImage + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + requireCompressionFormatMatch bool } -// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate +type copySingleImageOptions struct { + requireCompressionFormatMatch bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. 
+ compressionLevel *int +} + +// copySingleImageResult carries data produced by copySingleImage +type copySingleImageResult struct { + manifest []byte + manifestMIMEType string + manifestDigest digest.Digest + compressionAlgorithms []compressiontypes.Algorithm +} + +// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate // source image admissibility. -func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) { +func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) { // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. // Make sure we fail cleanly in such cases. multiImage, err := isMultiImage(ctx, unparsedImage) if err != nil { // FIXME FIXME: How to name a reference for the sub-image? - return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) + return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) } if multiImage { - return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") } // Please keep this policy check BEFORE reading any other information about the image. // (The multiImage check above only matches the MIME type, which we have received anyway. // Actual parsing of anything should be deferred.) 
- if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. - return nil, "", "", fmt.Errorf("Source image rejected: %w", err) + if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err) } - src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) + src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage) if err != nil { - return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) + return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) } // If the destination is a digested reference, make a note of that, determine what digest value we're @@ -75,33 +88,33 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P destIsDigestedReference = true matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) if err != nil { - return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) } if !matches { - manifestList, _, err := unparsedToplevel.Manifest(ctx) + manifestList, _, err := c.unparsedToplevel.Manifest(ctx) if err != nil { - return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err) + return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err) } matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) if err != nil { - return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) + return copySingleImageResult{}, fmt.Errorf("computing digest of 
source image's manifest: %w", err) } if !matches { - return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") + return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference") } } } } - if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil { - return nil, "", "", err + if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + return copySingleImageResult{}, err } - sigs, err := c.sourceSignatures(ctx, src, options, + sigs, err := c.sourceSignatures(ctx, src, "Getting image source signatures", "Checking if image destination supports signatures") if err != nil { - return nil, "", "", err + return copySingleImageResult{}, err } // Determine if we're allowed to modify the manifest. @@ -114,7 +127,7 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P if destIsDigestedReference { cannotModifyManifestReason = "Destination specifies a digest" } - if options.PreserveDigests { + if c.options.PreserveDigests { cannotModifyManifestReason = "Instructed to preserve digests" } @@ -123,13 +136,16 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, // diffIDsAreNeeded is computed later - cannotModifyManifestReason: cannotModifyManifestReason, - ociEncryptLayers: options.OciEncryptLayers, + cannotModifyManifestReason: cannotModifyManifestReason, + requireCompressionFormatMatch: opts.requireCompressionFormatMatch, } - if options.DestinationCtx != nil { + if opts.compressionFormat != nil { + ic.compressionFormat = opts.compressionFormat + ic.compressionLevel = opts.compressionLevel + } else if c.options.DestinationCtx != nil { // Note that compressionFormat and compressionLevel can be nil. 
- ic.compressionFormat = options.DestinationCtx.CompressionFormat - ic.compressionLevel = options.DestinationCtx.CompressionLevel + ic.compressionFormat = c.options.DestinationCtx.CompressionFormat + ic.compressionLevel = c.options.DestinationCtx.CompressionLevel } // Decide whether we can substitute blobs with semantic equivalents: // - Don’t do that if we can’t modify the manifest at all @@ -142,20 +158,20 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 if err := ic.updateEmbeddedDockerReference(); err != nil { - return nil, "", "", err + return copySingleImageResult{}, err } - destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ srcMIMEType: ic.src.ManifestMIMEType, destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), - forceManifestMIMEType: options.ForceManifestMIMEType, + forceManifestMIMEType: c.options.ForceManifestMIMEType, requiresOCIEncryption: destRequiresOciEncryption, cannotModifyManifestReason: ic.cannotModifyManifestReason, }) if err != nil { - return nil, "", "", err + return copySingleImageResult{}, err } // We set up this part of ic.manifestUpdates quite early, not just around the // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code @@ -169,27 +185,28 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) // If enabled, fetch and compare the destination's manifest. 
And as an optimization skip updating the destination iff equal - if options.OptimizeDestinationImageAlreadyExists { + if c.options.OptimizeDestinationImageAlreadyExists { shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible noPendingManifestUpdates := ic.noPendingManifestUpdates() - logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates) - if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates { - isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest) + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch) + if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch { + matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance) if err != nil { logrus.Warnf("Failed to compare destination image manifest: %v", err) - return nil, "", "", err + return copySingleImageResult{}, err } - if isSrcDestManifestEqual { + if matchedResult != nil { c.Printf("Skipping: image already present at destination\n") - return retManifest, retManifestType, retManifestDigest, nil + return *matchedResult, nil } } } - if err := ic.copyLayers(ctx); err != nil { - return nil, "", "", err + compressionAlgos, err := ic.copyLayers(ctx) + if err != nil { + return copySingleImageResult{}, err } // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; @@ -197,8 +214,12 @@ func (c *copier) 
copySingleImage(ctx context.Context, policyContext *signature.P // without actually trying to upload something and getting a types.ManifestTypeRejectedError. // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if // we're altering how they're compressed. If the process succeeds, fine… - manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) - retManifestType = manifestConversionPlan.preferredMIMEType + manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + wipResult := copySingleImageResult{ + manifest: manifestBytes, + manifestMIMEType: manifestConversionPlan.preferredMIMEType, + manifestDigest: manifestDigest, + } if err != nil { logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) // … if it fails, and the failure is either because the manifest is rejected by the registry, or @@ -213,14 +234,14 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. - return nil, "", "", err + return copySingleImageResult{}, err } // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. // So if we are here, we will definitely be trying to convert the manifest. // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. 
if ic.cannotModifyManifestReason != "" { - return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) + return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. @@ -236,34 +257,37 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P } // We have successfully uploaded a manifest. - manifestBytes = attemptedManifest - retManifestDigest = attemptedManifestDigest - retManifestType = manifestMIMEType + wipResult = copySingleImageResult{ + manifest: attemptedManifest, + manifestMIMEType: manifestMIMEType, + manifestDigest: attemptedManifestDigest, + } errs = nil // Mark this as a success so that we don't abort below. break } if errs != nil { - return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) } } if targetInstance != nil { - targetInstance = &retManifestDigest + targetInstance = &wipResult.manifestDigest } - newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity) + newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity) if err != nil { - return nil, "", "", err + return copySingleImageResult{}, err } sigs = append(sigs, newSigs...) 
if len(sigs) > 0 { c.Printf("Storing signatures\n") if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { - return nil, "", "", fmt.Errorf("writing signatures: %w", err) + return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err) } } - - return manifestBytes, retManifestType, retManifestDigest, nil + wipResult.compressionAlgorithms = compressionAlgos + res := wipResult // We are done + return res, nil } // checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. @@ -323,52 +347,69 @@ func (ic *imageCopier) noPendingManifestUpdates() bool { return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) } -// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the -// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal. -func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { - srcManifestDigest, err := manifest.Digest(src.ManifestBlob) +// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the +// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise. 
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) { + srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob) if err != nil { - return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) + return nil, fmt.Errorf("calculating manifest digest: %w", err) } - destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx) + destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx) if err != nil { - logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err) - return false, nil, "", "", nil + logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err) + return nil, nil } + defer destImageSource.Close() destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) if err != nil { logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err) - return false, nil, "", "", nil + return nil, nil } destManifestDigest, err := manifest.Digest(destManifest) if err != nil { - return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) + return nil, fmt.Errorf("calculating manifest digest: %w", err) } logrus.Debugf("Comparing source and destination manifest digests: %v vs. 
%v", srcManifestDigest, destManifestDigest) if srcManifestDigest != destManifestDigest { - return false, nil, "", "", nil + return nil, nil + } + + compressionAlgos := set.New[string]() + for _, srcInfo := range ic.src.LayerInfos() { + compression := compressionAlgorithmFromMIMEType(srcInfo) + compressionAlgos.Add(compression.Name()) + } + + algos, err := algorithmsByNames(compressionAlgos.Values()) + if err != nil { + return nil, err + } // Destination and source manifests, types and digests should all be equivalent - return true, destManifest, destManifestType, destManifestDigest, nil + return &copySingleImageResult{ + manifest: destManifest, + manifestMIMEType: destManifestType, + manifestDigest: srcManifestDigest, + compressionAlgorithms: algos, + }, nil } // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". -func (ic *imageCopier) copyLayers(ctx context.Context) error { +func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) { srcInfos := ic.src.LayerInfos() numLayers := len(srcInfos) updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) if err != nil { - return err + return nil, err } srcInfosUpdated := false if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { if ic.cannotModifyManifestReason != "" { - return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) + return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) } srcInfos = updatedSrcInfos srcInfosUpdated = true @@ -384,7 +425,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // layer is empty. 
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) if err != nil { - return err + return nil, err } manifestLayerInfos := man.LayerInfos() @@ -396,7 +437,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { defer ic.c.concurrentBlobCopiesSemaphore.Release(1) defer copyGroup.Done() cld := copyLayerData{} - if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. @@ -415,10 +456,10 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // Decide which layers to encrypt layersToEncrypt := set.New[int]() var encryptAll bool - if ic.ociEncryptLayers != nil { - encryptAll = len(*ic.ociEncryptLayers) == 0 + if ic.c.options.OciEncryptLayers != nil { + encryptAll = len(*ic.c.options.OciEncryptLayers) == 0 totalLayers := len(srcInfos) - for _, l := range *ic.ociEncryptLayers { + for _, l := range *ic.c.options.OciEncryptLayers { // if layer is negative, it is reverse indexed. layersToEncrypt.Add((totalLayers + l) % totalLayers) } @@ -450,14 +491,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // A call to copyGroup.Wait() is done at this point by the defer above. 
return nil }(); err != nil { - return err + return nil, err } + compressionAlgos := set.New[string]() destInfos := make([]types.BlobInfo, numLayers) diffIDs := make([]digest.Digest, numLayers) for i, cld := range data { if cld.err != nil { - return cld.err + return nil, cld.err + } + if cld.destInfo.CompressionAlgorithm != nil { + compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name()) } destInfos[i] = cld.destInfo diffIDs[i] = cld.diffID @@ -472,7 +517,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { ic.manifestUpdates.LayerInfos = destInfos } - return nil + algos, err := algorithmsByNames(compressionAlgos.Values()) + if err != nil { + return nil, err + } + return algos, nil } // layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) @@ -577,6 +626,19 @@ type diffIDResult struct { err error } +func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm { + // This MIME type → compression mapping belongs in manifest-specific code in our manifest + // package (but we should preferably replace/change UpdatedImage instead of productizing + // this workaround). + switch srcInfo.MediaType { + case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: + return &compression.Gzip + case imgspecv1.MediaTypeImageLayerZstd: + return &compression.Zstd + } + return nil +} + // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded // srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil. 
@@ -588,17 +650,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // which uses the compression information to compute the updated MediaType values. // (Sadly UpdatedImage() is documented to not update MediaTypes from // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.) - // - // This MIME type → compression mapping belongs in manifest-specific code in our manifest - // package (but we should preferably replace/change UpdatedImage instead of productizing - // this workaround). if srcInfo.CompressionAlgorithm == nil { - switch srcInfo.MediaType { - case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: - srcInfo.CompressionAlgorithm = &compression.Gzip - case imgspecv1.MediaTypeImageLayerZstd: - srcInfo.CompressionAlgorithm = &compression.Zstd - } + srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo) } ic.c.printCopyInfo("blob", srcInfo) @@ -608,7 +661,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // When encrypting to decrypting, only use the simple code path. We might be able to optimize more // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. - encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil) canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. 
@@ -623,12 +676,20 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. + var requiredCompression *compressiontypes.Algorithm + var originalCompression *compressiontypes.Algorithm + if ic.requireCompressionFormatMatch { + requiredCompression = ic.compressionFormat + originalCompression = srcInfo.CompressionAlgorithm + } reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: canSubstitute, - EmptyLayer: emptyLayer, - LayerIndex: &layerIndex, - SrcRef: srcRef, + Cache: ic.c.blobInfoCache, + CanSubstitute: canSubstitute, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + RequiredCompression: requiredCompression, + OriginalCompression: originalCompression, }) if err != nil { return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err) @@ -642,8 +703,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to }() // Throw an event that the layer has been skipped - if ic.c.progress != nil && ic.c.progressInterval > 0 { - ic.c.progress <- types.ProgressProperties{ + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + ic.c.options.Progress <- types.ProgressProperties{ Event: types.ProgressEventSkipped, Artifact: srcInfo, } @@ -818,3 +879,16 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF return digest.Canonical.FromReader(stream) } + +// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names +func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) { + result := []compressiontypes.Algorithm{} 
+ for _, name := range names { + algo, err := compression.AlgorithmByName(name) + if err != nil { + return nil, err + } + result = append(result, algo) + } + return result, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 974d23d5fa..222723a8f5 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -190,6 +190,9 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } if info.Digest == "" { return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest") } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 44e2aea23d..63e372d677 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -321,13 +321,21 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } - // First, check whether the blob happens to already exist at the destination. 
- haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) - if err != nil { - return false, private.ReusedBlob{}, err - } - if haveBlob { - return true, reusedInfo, nil + if impl.OriginalBlobMatchesRequiredCompression(options) { + // First, check whether the blob happens to already exist at the destination. + haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) + if err != nil { + return false, private.ReusedBlob{}, err + } + if haveBlob { + return true, reusedInfo, nil + } + } else { + requiredCompression := "nil" + if options.OriginalCompression != nil { + requiredCompression = options.OriginalCompression.Name() + } + logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression) } // Then try reusing blobs from other locations. @@ -338,6 +346,19 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } + compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) + if err != nil { + logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err) + continue + } + if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) { + requiredCompression := "nil" + if compressionAlgorithm != nil { + requiredCompression = compressionAlgorithm.Name() + } + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + continue + } if candidate.CompressorName != blobinfocache.Uncompressed { logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) } else { @@ -388,12 +409,6 @@ func (d *dockerImageDestination) 
TryReusingBlobWithOptions(ctx context.Context, options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) - compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) - if err != nil { - logrus.Debugf("... Failed: %v", err) - continue - } - return true, private.ReusedBlob{ Digest: candidate.Digest, Size: size, diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 00e25748bd..7507d85595 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -129,6 +129,9 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } if err := d.archive.lock(); err != nil { return false, private.ReusedBlob{}, err } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 3b986f503d..6845893bfb 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -57,7 +57,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // The caller should call .Close() on the returned archive when done. 
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { // Save inputStream to a temporary file - tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar") if err != nil { return nil, fmt.Errorf("creating temporary file: %w", err) } diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go index 5d42c38706..e1f1f1f2b7 100644 --- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -40,7 +40,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string { // then in its parent "docker.io/library"; in none of "busybox", // un-namespaced "library" nor in "" supposedly implicitly representing "library/". // - // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last + // ref.Name() == ref.Domain() + "/" + ref.Path(), so the last // iteration matches the host name (for any namespace). 
res := []string{} name := ref.Name() diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go new file mode 100644 index 0000000000..d5de81a613 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go @@ -0,0 +1,20 @@ +package impl + +import ( + "github.com/containers/image/v5/internal/private" + compression "github.com/containers/image/v5/pkg/compression/types" +) + +// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required +// then function performs a match against the compression requested by the caller and compression of existing blob +// (which can be nil to represent uncompressed or unknown) +func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool { + if options.RequiredCompression == nil { + return true // no requirement imposed + } + return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name()) +} + +func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool { + return BlobMatchesRequiredCompression(opts, opts.OriginalCompression) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 41a81628bd..17e1870c19 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -64,6 +64,9 @@ func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inpu // If the blob has been successfully reused, returns (true, info, nil). 
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if options.RequiredCompression != nil { + return false, private.ReusedBlob{}, nil + } reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) if !reused || err != nil { return reused, private.ReusedBlob{}, err diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 516ca7ac94..14a476642e 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -5,6 +5,7 @@ import ( "fmt" platform "github.com/containers/image/v5/internal/pkg/platform" + compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -57,11 +58,20 @@ func (list *Schema2ListPublic) Instances() []digest.Digest { func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { for _, manifest := range list.Manifests { if manifest.Digest == instanceDigest { - return ListUpdate{ + ret := ListUpdate{ Digest: manifest.Digest, Size: manifest.Size, MediaType: manifest.MediaType, - }, nil + } + ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName} + ret.ReadOnly.Platform = &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSVersion: manifest.Platform.OSVersion, + OSFeatures: manifest.Platform.OSFeatures, + Variant: manifest.Platform.Variant, + } + return ret, nil } } return 
ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go index 8786324ea4..189f1a7186 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -68,6 +68,12 @@ type ListUpdate struct { Digest digest.Digest Size int64 MediaType string + // ReadOnly fields: may be set by Instance(), ignored by UpdateInstance() + ReadOnly struct { + Platform *imgspecv1.Platform + Annotations map[string]string + CompressionAlgorithmNames []string + } } type ListOp int diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index fd251d9512..3038d81243 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -53,11 +53,15 @@ func (index *OCI1IndexPublic) Instances() []digest.Digest { func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { for _, manifest := range index.Manifests { if manifest.Digest == instanceDigest { - return ListUpdate{ + ret := ListUpdate{ Digest: manifest.Digest, Size: manifest.Size, MediaType: manifest.MediaType, - }, nil + } + ret.ReadOnly.Platform = manifest.Platform + ret.ReadOnly.Annotations = manifest.Annotations + ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations) + return ret, nil } } return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) @@ -78,14 +82,29 @@ func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error { return index.editInstances(editInstances) } -func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, 
annotationsMap map[string]string) { +func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string { + result := make([]string, 0, 1) + if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue { + result = append(result, compression.ZstdAlgorithmName) + } + // No compression was detected, hence assume instance has default compression `Gzip` + if len(result) == 0 { + result = append(result, compression.GzipAlgorithmName) + } + return result +} + +func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) { // TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm // list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable // and full compressionAlghorithms list. + if *annotationsMap == nil && len(compressionAlgorithms) > 0 { + *annotationsMap = map[string]string{} + } for _, algo := range compressionAlgorithms { switch algo.Name() { case compression.ZstdAlgorithmName: - annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue + (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue default: continue } @@ -130,13 +149,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations) } } - addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations) + addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations) case ListOpAdd: annotations := map[string]string{} if editInstance.AddAnnotations != nil { annotations = maps.Clone(editInstance.AddAnnotations) } - addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations) + 
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) addedEntries = append(addedEntries, imgspecv1.Descriptor{ MediaType: editInstance.AddMediaType, Size: editInstance.AddSize, diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index b1dd4ceb0d..95d561fcdd 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -112,10 +112,11 @@ type TryReusingBlobOptions struct { // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers // if they use internal/imagedestination/impl.Compat; // in that case, they will all be consistently zero-valued. - - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. - SrcRef reference.Named // A reference to the source image that contains the input blob. + RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go + OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”. + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. + SrcRef reference.Named // A reference to the source image that contains the input blob. } // ReusedBlob is information about a blob reused in a destination. 
diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go index 3e777fe124..acf30343e0 100644 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -28,6 +28,12 @@ func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } +func (s *Set[E]) AddSlice(slice []E) { + for _, v := range slice { + s.Add(v) + } +} + func (s *Set[E]) Delete(v E) { delete(s.m, v) } diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go index 84bb656ac7..d5a5436a4d 100644 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -15,7 +15,7 @@ import ( // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. // If an error occurs, inputInfo is not modified. func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob") if err != nil { return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) } diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go index 809446e189..bab73ee334 100644 --- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go +++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go @@ -17,10 +17,12 @@ var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles // DO NOT change this, instead see unixTempDirForBigFiles above. 
const builtinUnixTempDirForBigFiles = "/var/tmp" +const prefix = "container_images_" + // TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. // On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp // which on systemd based systems could be the unsuitable tmpfs filesystem. -func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string { +func temporaryDirectoryForBigFiles(sys *types.SystemContext) string { if sys != nil && sys.BigFilesTemporaryDir != "" { return sys.BigFilesTemporaryDir } @@ -32,3 +34,11 @@ func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string { } return temporaryDirectoryForBigFiles } + +func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) { + return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name) +} + +func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) { + return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name) +} diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 53371796fb..2a03feeeac 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -156,7 +156,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { - dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + dir, err := tmpdir.MkDirBigFileTemp(sys, "oci") if err != nil { return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go 
b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 0a9e4eab91..8ff43d4480 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -172,6 +172,9 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } if info.Digest == "" { return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index f3d5662e66..2c69afbe94 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -65,6 +65,10 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { }, nil } +func (c *openshiftClient) close() { + c.httpClient.CloseIdleConnections() +} + // doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { requestURL := *c.baseURL diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 7b1b5dfcde..50a5339e1b 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -71,7 +71,9 @@ func (d *openshiftImageDestination) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageDestination, if any. func (d *openshiftImageDestination) Close() error { - return d.docker.Close() + err := d.docker.Close() + d.client.close() + return err } func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go index 93ba8d10e3..0ac0127ee7 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_src.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go @@ -60,14 +60,15 @@ func (s *openshiftImageSource) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageSource, if any. func (s *openshiftImageSource) Close() error { + var err error if s.docker != nil { - err := s.docker.Close() + err = s.docker.Close() s.docker = nil - - return err } - return nil + s.client.close() + + return err } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). 
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 48f3ee5a72..d00a0cdf86 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -335,6 +335,9 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } if d.repo == nil { repo, err := openRepo(d.ref.repo) if err != nil { diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index a0e353d46f..9bda085158 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -237,6 +237,9 @@ func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options) if err != nil || present { return present, reusedInfo, err diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 2e79d0ffbc..b987c58060 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -519,11 +519,12 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, if sys.LegacyFormatAuthFilePath != "" { return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil } - if sys.RootForImplicitAbsolutePaths != "" { + // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME + if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" { return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil } } - if goOS == "windows" || goOS == "darwin" { + if goOS != "linux" { return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil } diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go index 1f6ab7f3b9..261cfbe771 100644 --- a/vendor/github.com/containers/image/v5/sif/src.go +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -73,7 +73,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere _ = sifImg.UnloadContainer() }() - workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + workDir, err := tmpdir.MkDirBigFileTemp(sys, 
"sif") if err != nil { return nil, fmt.Errorf("creating temp directory: %w", err) } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 576d510cc5..628564559f 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -95,7 +95,7 @@ type addedLayerInfo struct { // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until // it's time to Commit() the image func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + directory, err := tmpdir.MkDirBigFileTemp(sys, "storage") if err != nil { return nil, fmt.Errorf("creating a temporary directory: %w", err) } @@ -307,6 +307,9 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options) if err != nil || !reused || options.LayerIndex == nil { return reused, info, err diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 03c2fa28c6..66d04da315 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -124,7 +124,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c } defer rc.Close() - tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") + tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "") if err != nil { return nil, 0, err } diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 1d9c2dc35d..a8f1c13adc 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -4,8 +4,11 @@ import ( "fmt" "strings" - // register all known transports - // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + + // Register all known transports. + // NOTE: Make sure docs/containers-transports.5.md and docs/containers-policy.json.5.md are updated when adding or updating // a transport. 
_ "github.com/containers/image/v5/directory" _ "github.com/containers/image/v5/docker" @@ -15,11 +18,9 @@ import ( _ "github.com/containers/image/v5/openshift" _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" - + // The docker-daemon transport is registeredy by docker_daemon*.go // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go - "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" ) // ParseImageName converts a URL-like image name to a types.ImageReference. diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 3c8fc094d0..c270910b0f 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 26 + VersionMinor = 27 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. 
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index fcf3215539..af006fc92e 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -7,7 +7,6 @@ package tar import ( "bytes" "io" - "io/ioutil" "strconv" "strings" "time" @@ -140,7 +139,7 @@ func (tr *Reader) next() (*Header, error) { continue // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: format.mayOnlyBe(FormatGNU) - realname, err := ioutil.ReadAll(tr) + realname, err := io.ReadAll(tr) if err != nil { return nil, err } @@ -334,7 +333,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { // parsePAX parses PAX headers. // If an extended header (type 'x') is invalid, ErrHeader is returned func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { return nil, err } @@ -916,7 +915,7 @@ func discard(tr *Reader, n int64) error { } } - copySkipped, err = io.CopyN(ioutil.Discard, r, n-seekSkipped) + copySkipped, err = io.CopyN(io.Discard, r, n-seekSkipped) out: if err == io.EOF && seekSkipped+copySkipped < n { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go index 009b3f5d81..80c2522afe 100644 --- a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go +++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go @@ -135,13 +135,15 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io } isEOF = true } - _, err = p.AddEntry(storage.Entry{ - Type: storage.SegmentType, - Payload: paddingChunk[:n], - }) - if err != nil { - pW.CloseWithError(err) - return + if n != 0 { + _, err = p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: paddingChunk[:n], + }) + if err != nil { + 
pW.CloseWithError(err) + return + } } if isEOF { break diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go index aba6948185..4ba62d9b7a 100644 --- a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go +++ b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go @@ -24,13 +24,6 @@ type Unpacker interface { Next() (*Entry, error) } -/* TODO(vbatts) figure out a good model for this -type PackUnpacker interface { - Packer - Unpacker -} -*/ - type jsonUnpacker struct { seen seenNames dec *json.Decoder @@ -115,13 +108,3 @@ func NewJSONPacker(w io.Writer) Packer { seen: seenNames{}, } } - -/* -TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgapck -(https://github.com/ugorji/go) - - -Even though, since our jsonUnpacker and jsonPacker just take -io.Reader/io.Writer, then we can get away with passing them a -gzip.Reader/gzip.Writer -*/ diff --git a/vendor/modules.txt b/vendor/modules.txt index 74b99c1974..eba9eb48c8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -60,12 +60,7 @@ github.com/asaskevich/govalidator github.com/chzyer/readline # github.com/container-orchestrated-devices/container-device-interface v0.6.0 ## explicit; go 1.17 -github.com/container-orchestrated-devices/container-device-interface/internal/multierror -github.com/container-orchestrated-devices/container-device-interface/internal/validation -github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s -github.com/container-orchestrated-devices/container-device-interface/pkg/cdi github.com/container-orchestrated-devices/container-device-interface/pkg/parser -github.com/container-orchestrated-devices/container-device-interface/specs-go # github.com/containerd/cgroups v1.1.0 ## explicit; go 1.17 github.com/containerd/cgroups/stats/v1 @@ -94,7 +89,7 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.3.0 ## explicit; 
go 1.20 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2 +# github.com/containers/common v0.55.1-0.20230727095721-647ed1d4d79a ## explicit; go 1.18 github.com/containers/common/libimage github.com/containers/common/libimage/define @@ -137,7 +132,7 @@ github.com/containers/common/pkg/timetype github.com/containers/common/pkg/umask github.com/containers/common/pkg/util github.com/containers/common/version -# github.com/containers/image/v5 v5.26.1 +# github.com/containers/image/v5 v5.26.1-0.20230727122416-da7899237198 ## explicit; go 1.18 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -650,8 +645,8 @@ github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -# github.com/vbatts/tar-split v0.11.3 -## explicit; go 1.15 +# github.com/vbatts/tar-split v0.11.5 +## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage