From 8fcd8574f4ffaad05340c8379996a5f258533167 Mon Sep 17 00:00:00 2001
From: Aleksandr Zimin
Date: Sun, 24 Nov 2024 19:29:27 +0300
Subject: [PATCH] revert all fixes to code

Signed-off-by: Aleksandr Zimin
---
 .../agent/src/pkg/controller/block_device.go  |  9 +--
 .../src/pkg/controller/block_device_test.go   | 69 ++++---------
 .../controller/controller_reconcile_test.go   |  8 +--
 .../controller/lvm_volume_group_discover.go   |  2 +-
 .../controller/lvm_volume_group_watcher.go    |  7 +-
 .../lvm_volume_group_watcher_func.go          |  1 -
 images/agent/src/pkg/scanner/scanner.go       | 12 ++--
 images/agent/werf.inc.yaml                    |  2 +-
 .../sds-health-watcher-controller/src/go.sum  |  4 ++
 .../werf.inc.yaml                             |  2 +-
 images/sds-utils-installer/werf.inc.yaml      |  2 +-
 templates/agent/daemonset.yaml                |  2 +-
 12 files changed, 38 insertions(+), 82 deletions(-)

diff --git a/images/agent/src/pkg/controller/block_device.go b/images/agent/src/pkg/controller/block_device.go
index a7aa6efe..d86ebeb4 100644
--- a/images/agent/src/pkg/controller/block_device.go
+++ b/images/agent/src/pkg/controller/block_device.go
@@ -93,7 +93,7 @@ func BlockDeviceReconcile(ctx context.Context, cl client.Client, log logger.Logg
 		return false
 	}
 
-	apiBlockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil, "")
+	apiBlockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil)
 	if err != nil {
 		log.Error(err, "[RunBlockDeviceController] unable to GetAPIBlockDevices")
 		return true
@@ -164,8 +164,8 @@ func hasBlockDeviceDiff(blockDevice v1alpha1.BlockDevice, candidate internal.Blo
 }
 
 // GetAPIBlockDevices returns map of BlockDevice resources with BlockDevice as a key. You might specify a selector to get a subset or
-// leave it as nil to get all the resources. Also you can specify a node name to get only resources that are related to the node. If nodeName is empty, all resources that match the selector will be returned.
-func GetAPIBlockDevices(ctx context.Context, cl client.Client, metrics monitoring.Metrics, selector *metav1.LabelSelector, nodeName string) (map[string]v1alpha1.BlockDevice, error) {
+// leave it as nil to get all the resources.
+func GetAPIBlockDevices(ctx context.Context, cl client.Client, metrics monitoring.Metrics, selector *metav1.LabelSelector) (map[string]v1alpha1.BlockDevice, error) {
 	list := &v1alpha1.BlockDeviceList{}
 	s, err := metav1.LabelSelectorAsSelector(selector)
 	if err != nil {
@@ -185,9 +185,6 @@ func GetAPIBlockDevices(ctx context.Context, cl client.Client, metrics monitorin
 
 	result := make(map[string]v1alpha1.BlockDevice, len(list.Items))
 	for _, item := range list.Items {
-		if nodeName != "" && item.Status.NodeName != nodeName {
-			continue
-		}
 		result[item.Name] = item
 	}
 
diff --git a/images/agent/src/pkg/controller/block_device_test.go b/images/agent/src/pkg/controller/block_device_test.go
index f6878a60..497836b3 100644
--- a/images/agent/src/pkg/controller/block_device_test.go
+++ b/images/agent/src/pkg/controller/block_device_test.go
@@ -120,7 +120,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 			},
 		}
 
-		actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector, lvg.Spec.Local.NodeName)
+		actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector)
 		if assert.NoError(t, err) {
 			assert.Equal(t, 2, len(actualBd))
 
@@ -132,14 +132,13 @@ func TestBlockDeviceCtrl(t *testing.T) {
 			assert.False(t, ok)
 		}
 	})
+
 	t.Run("bds_exist_only_match_labels_return_bds", func(t *testing.T) {
 		const (
-			name1         = "name11"
-			name2         = "name22"
-			name3         = "name33"
-			name4         = "name44"
-			hostName      = "test-host"
-			otherHostName = "other-host"
+			name1    = "name11"
+			name2    = "name22"
+			name3    = "name33"
+			hostName = "test-host"
 		)
 
 		bds := []v1alpha1.BlockDevice{
@@ -147,61 +146,28 @@ func TestBlockDeviceCtrl(t *testing.T) {
 			{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: name1,
 					Labels: map[string]string{
-						"kubernetes.io/hostname":      hostName,
-						"kubernetes.io/metadata.name": name1,
-						"status.blockdevice.storage.deckhouse.io/size": "1G",
+						"kubernetes.io/hostname":      hostName,
+						"kubernetes.io/metadata.name": name1,
 					},
 				},
-				Status: v1alpha1.BlockDeviceStatus{
-					Size:       resource.MustParse("1G"),
-					NodeName:   hostName,
-					Consumable: true,
-				},
 			},
 			{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: name2,
 					Labels: map[string]string{
-						"kubernetes.io/hostname":      hostName,
-						"kubernetes.io/metadata.name": name2,
-						"status.blockdevice.storage.deckhouse.io/size": "1G",
+						"kubernetes.io/hostname":      hostName,
+						"kubernetes.io/metadata.name": name2,
 					},
 				},
-				Status: v1alpha1.BlockDeviceStatus{
-					Size:       resource.MustParse("1G"),
-					NodeName:   hostName,
-					Consumable: true,
-				},
 			},
 			{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: name3,
 					Labels: map[string]string{
-						"kubernetes.io/hostname":      hostName,
-						"kubernetes.io/metadata.name": name3,
-						"status.blockdevice.storage.deckhouse.io/size": "2G",
-					},
-				},
-				Status: v1alpha1.BlockDeviceStatus{
-					Size:       resource.MustParse("1G"),
-					NodeName:   hostName,
-					Consumable: true,
-				},
-			},
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: name4,
-					Labels: map[string]string{
-						"kubernetes.io/hostname":      otherHostName,
-						"kubernetes.io/metadata.name": name4,
-						"status.blockdevice.storage.deckhouse.io/size": "1G",
+						"kubernetes.io/hostname":      "other-host",
+						"kubernetes.io/metadata.name": name3,
 					},
 				},
-				Status: v1alpha1.BlockDeviceStatus{
-					Size:       resource.MustParse("1G"),
-					NodeName:   otherHostName,
-					Consumable: true,
-				},
 			},
 		}
@@ -224,15 +190,12 @@ func TestBlockDeviceCtrl(t *testing.T) {
 		lvg := &v1alpha1.LVMVolumeGroup{
 			Spec: v1alpha1.LVMVolumeGroupSpec{
 				BlockDeviceSelector: &metav1.LabelSelector{
-					MatchLabels: map[string]string{"status.blockdevice.storage.deckhouse.io/size": "1G"},
-				},
-				Local: v1alpha1.LVMVolumeGroupLocalSpec{
-					NodeName: hostName,
+					MatchLabels: map[string]string{"kubernetes.io/hostname": hostName},
 				},
 			},
 		}
 
-		actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector, lvg.Spec.Local.NodeName)
+		actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector)
 		if assert.NoError(t, err) {
 			assert.Equal(t, 2, len(actualBd))
 
@@ -242,8 +205,6 @@ func TestBlockDeviceCtrl(t *testing.T) {
 			assert.True(t, ok)
 			_, ok = actualBd[name3]
 			assert.False(t, ok)
-			_, ok = actualBd[name4]
-			assert.False(t, ok)
 		}
 	})
 
@@ -315,7 +276,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 			},
 		}
 
-		actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector, lvg.Spec.Local.NodeName)
+		actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector)
 		if assert.NoError(t, err) {
 			assert.Equal(t, 2, len(actualBd))
 			_, ok := actualBd[name1]
diff --git a/images/agent/src/pkg/controller/controller_reconcile_test.go b/images/agent/src/pkg/controller/controller_reconcile_test.go
index 2e1f22c1..603cc6ed 100644
--- a/images/agent/src/pkg/controller/controller_reconcile_test.go
+++ b/images/agent/src/pkg/controller/controller_reconcile_test.go
@@ -82,7 +82,7 @@ var _ = Describe("Storage Controller", func() {
 	})
 
 	It("GetAPIBlockDevices", func() {
-		listDevice, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil, "")
+		listDevice, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(listDevice).NotTo(BeNil())
 		Expect(len(listDevice)).To(Equal(1))
@@ -115,7 +115,7 @@ var _ = Describe("Storage Controller", func() {
 			MachineID: "1234",
 		}
 
-		resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil, "")
+		resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(resources).NotTo(BeNil())
 		Expect(len(resources)).To(Equal(1))
@@ -127,7 +127,7 @@ var _ = Describe("Storage Controller", func() {
 		err = controller.UpdateAPIBlockDevice(ctx, cl, testMetrics, oldResource, newCandidate)
 		Expect(err).NotTo(HaveOccurred())
 
-		resources, err = controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil, "")
+		resources, err = controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(resources).NotTo(BeNil())
 		Expect(len(resources)).To(Equal(1))
@@ -147,7 +147,7 @@ var _ = Describe("Storage Controller", func() {
 		})
 		Expect(err).NotTo(HaveOccurred())
 
-		devices, err := controller.GetAPIBlockDevices(context.Background(), cl, testMetrics, nil, "")
+		devices, err := controller.GetAPIBlockDevices(context.Background(), cl, testMetrics, nil)
 		Expect(err).NotTo(HaveOccurred())
 		for name := range devices {
 			Expect(name).NotTo(Equal(deviceName))
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
index 7a7ed1ec..fa00a3db 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
@@ -92,7 +92,7 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl client.Client, metr
 		log.Debug("[RunLVMVolumeGroupDiscoverController] no current LVMVolumeGroups found")
 	}
 
-	blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil, cfg.NodeName)
+	blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil)
 	if err != nil {
 		log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to GetAPIBlockDevices")
 		for _, lvg := range currentLVMVGs {
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
index 6f34246d..f1bc33e4 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
@@ -118,8 +118,8 @@ func RunLVMVolumeGroupWatcherController(
 			log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the label %s from the LVMVolumeGroup %s", internal.LVGUpdateTriggerLabel, lvg.Name))
 		}
 
-		log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to get block device resources for the LVMVolumeGroup %s by the selector %v on the node %s", lvg.Name, lvg.Spec.BlockDeviceSelector, lvg.Spec.Local.NodeName))
-		blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector, lvg.Spec.Local.NodeName)
+		log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to get block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels))
+		blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector)
 		if err != nil {
 			log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get BlockDevices. Retry in %s", cfg.BlockDeviceScanIntervalSec.String()))
 			err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error()))
@@ -129,8 +129,7 @@ func RunLVMVolumeGroupWatcherController(
 			return reconcile.Result{RequeueAfter: cfg.BlockDeviceScanIntervalSec}, nil
 		}
 
-		log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully got block device resources for the LVMVolumeGroup %s by the selector %v on the node %s", lvg.Name, lvg.Spec.BlockDeviceSelector, lvg.Spec.Local.NodeName))
-		log.Trace(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] block devices: %v", blockDevices))
+		log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully got block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels))
 
 		valid, reason := validateSpecBlockDevices(lvg, blockDevices)
 		if !valid {
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
index 95f28e52..85751f06 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
@@ -841,7 +841,6 @@ func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring
 
 func DeleteVGIfExist(log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, vgName string) error {
 	vgs, _ := sdsCache.GetVGs()
-	log.Trace(fmt.Sprintf("[DeleteVGIfExist] check if VG %s exists in vgs %v", vgName, vgs))
 	if !checkIfVGExist(vgName, vgs) {
 		log.Debug(fmt.Sprintf("[DeleteVGIfExist] no VG %s found, nothing to delete", vgName))
 		return nil
diff --git a/images/agent/src/pkg/scanner/scanner.go b/images/agent/src/pkg/scanner/scanner.go
index 80b48de5..a44bec86 100644
--- a/images/agent/src/pkg/scanner/scanner.go
+++ b/images/agent/src/pkg/scanner/scanner.go
@@ -163,28 +163,28 @@ func fillTheCache(ctx context.Context, log logger.Logger, cache *cache.Cache, cf
 	realClock := clock.RealClock{}
 	now := time.Now()
 	lvs, lvsErr, err := scanLVs(ctx, log, cfg)
-	log.Debug(fmt.Sprintf("[fillTheCache] LVS command runs for: %s", realClock.Since(now).String()))
+	log.Trace(fmt.Sprintf("[fillTheCache] LVS command runs for: %s", realClock.Since(now).String()))
 	if err != nil {
 		return err
 	}
 
 	now = time.Now()
 	vgs, vgsErr, err := scanVGs(ctx, log, cfg)
-	log.Debug(fmt.Sprintf("[fillTheCache] VGS command runs for: %s", realClock.Since(now).String()))
+	log.Trace(fmt.Sprintf("[fillTheCache] VGS command runs for: %s", realClock.Since(now).String()))
 	if err != nil {
 		return err
 	}
 
 	now = time.Now()
 	pvs, pvsErr, err := scanPVs(ctx, log, cfg)
-	log.Debug(fmt.Sprintf("[fillTheCache] PVS command runs for: %s", realClock.Since(now).String()))
+	log.Trace(fmt.Sprintf("[fillTheCache] PVS command runs for: %s", realClock.Since(now).String()))
 	if err != nil {
 		return err
 	}
 
 	now = time.Now()
 	devices, devErr, err := scanDevices(ctx, log, cfg)
-	log.Debug(fmt.Sprintf("[fillTheCache] LSBLK command runs for: %s", realClock.Since(now).String()))
+	log.Trace(fmt.Sprintf("[fillTheCache] LSBLK command runs for: %s", realClock.Since(now).String()))
 	if err != nil {
 		return err
 	}
@@ -208,7 +208,6 @@ func scanDevices(ctx context.Context, log logger.Logger, cfg config.Options) ([]
 		log.Error(err, fmt.Sprintf("[ScanDevices] unable to scan the devices, cmd: %s", cmdStr))
 		return nil, stdErr, err
 	}
-	log.Trace(fmt.Sprintf("[scanDevices] Devices: %v", devices))
 
 	return devices, stdErr, nil
 }
@@ -221,7 +220,6 @@ func scanPVs(ctx context.Context, log logger.Logger, cfg config.Options) ([]inte
 		log.Error(err, fmt.Sprintf("[ScanPVs] unable to scan the PVs, cmd: %s", cmdStr))
 		return nil, stdErr, err
 	}
-	log.Trace(fmt.Sprintf("[scanPVs] PVs: %v", pvs))
 
 	return pvs, stdErr, nil
 }
@@ -234,7 +232,6 @@ func scanVGs(ctx context.Context, log logger.Logger, cfg config.Options) ([]inte
 		log.Error(err, fmt.Sprintf("[ScanVGs] unable to scan the VGs, cmd: %s", cmdStr))
 		return nil, stdErr, err
 	}
-	log.Trace(fmt.Sprintf("[scanVGs] VGs: %v", vgs))
 
 	return vgs, stdErr, nil
 }
@@ -247,7 +244,6 @@ func scanLVs(ctx context.Context, log logger.Logger, cfg config.Options) ([]inte
 		log.Error(err, fmt.Sprintf("[ScanLVs] unable to scan LVs, cmd: %s", cmdStr))
 		return nil, stdErr, err
 	}
-	log.Trace(fmt.Sprintf("[scanLVs] LVs: %v", lvs))
 
 	return lvs, stdErr, nil
 }
diff --git a/images/agent/werf.inc.yaml b/images/agent/werf.inc.yaml
index 0128eabe..ea3a4be1 100644
--- a/images/agent/werf.inc.yaml
+++ b/images/agent/werf.inc.yaml
@@ -1,4 +1,4 @@
-{{- $_ := set . "BASE_GOLANG" "registry.deckhouse.io/base_images/golang:1.22.8-alpine@sha256:54bb7313917c733191a079ccae2e52bd3b80664e46c7879efa06513d4221d804" }}
+{{- $_ := set . "BASE_GOLANG" "registry.deckhouse.io/base_images/golang:1.22.6-bullseye@sha256:260918a3795372a6d33225d361fe5349723be9667de865a23411b50fbcc76c5a" }}
 {{- $_ := set . "BASE_SCRATCH" "registry.deckhouse.ru/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc" }}
 {{- $_ := set . "BASE_ALPINE_DEV" "registry.deckhouse.ru/base_images/dev-alpine:3.16.3@sha256:c706fa83cc129079e430480369a3f062b8178cac9ec89266ebab753a574aca8e" }}
 {{- $_ := set . "BASE_ALT_DEV" "registry.deckhouse.ru/base_images/dev-alt:p10@sha256:76e6e163fa982f03468166203488b569e6d9fc10855d6a259c662706436cdcad" }}
diff --git a/images/sds-health-watcher-controller/src/go.sum b/images/sds-health-watcher-controller/src/go.sum
index 6b82a2d4..4d0847f3 100644
--- a/images/sds-health-watcher-controller/src/go.sum
+++ b/images/sds-health-watcher-controller/src/go.sum
@@ -18,6 +18,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240905123334-64f17b70f035 h1:2kluZX0T5gk8YgNRk2bzd+m/mSkNmcKKaDHd6sVHP8I=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240905123334-64f17b70f035/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
diff --git a/images/sds-health-watcher-controller/werf.inc.yaml b/images/sds-health-watcher-controller/werf.inc.yaml
index bea9b7be..882bbae1 100644
--- a/images/sds-health-watcher-controller/werf.inc.yaml
+++ b/images/sds-health-watcher-controller/werf.inc.yaml
@@ -1,4 +1,4 @@
-{{- $_ := set . "BASE_GOLANG" "registry.deckhouse.io/base_images/golang:1.22.8-alpine@sha256:54bb7313917c733191a079ccae2e52bd3b80664e46c7879efa06513d4221d804" }}
+{{- $_ := set . "BASE_GOLANG" "registry.deckhouse.io/base_images/golang:1.22.6-bullseye@sha256:260918a3795372a6d33225d361fe5349723be9667de865a23411b50fbcc76c5a" }}
 {{- $_ := set . "BASE_SCRATCH" "registry.deckhouse.io/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc" }}
 
 ---
diff --git a/images/sds-utils-installer/werf.inc.yaml b/images/sds-utils-installer/werf.inc.yaml
index fcf6039c..e105e6aa 100644
--- a/images/sds-utils-installer/werf.inc.yaml
+++ b/images/sds-utils-installer/werf.inc.yaml
@@ -1,4 +1,4 @@
-{{- $_ := set . "BASE_GOLANG" "registry.deckhouse.io/base_images/golang:1.22.8-alpine@sha256:54bb7313917c733191a079ccae2e52bd3b80664e46c7879efa06513d4221d804" }}
+{{- $_ := set . "BASE_GOLANG" "registry.deckhouse.io/base_images/golang:1.22.6-bullseye@sha256:260918a3795372a6d33225d361fe5349723be9667de865a23411b50fbcc76c5a" }}
 {{- $_ := set . "BASE_SCRATCH" "registry.deckhouse.ru/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc" }}
 {{- $_ := set . "BASE_ALPINE_DEV" "registry.deckhouse.ru/base_images/dev-alpine:3.16.3@sha256:c706fa83cc129079e430480369a3f062b8178cac9ec89266ebab753a574aca8e" }}
 {{- $_ := set . "BASE_ALT_DEV" "registry.deckhouse.ru/base_images/dev-alt:p10@sha256:76e6e163fa982f03468166203488b569e6d9fc10855d6a259c662706436cdcad" }}
diff --git a/templates/agent/daemonset.yaml b/templates/agent/daemonset.yaml
index a3dcc0e6..8f8af27a 100644
--- a/templates/agent/daemonset.yaml
+++ b/templates/agent/daemonset.yaml
@@ -97,7 +97,7 @@ spec:
             requests:
               {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }}
               {{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }}
-              {{- include "sds_utils_installer_resources" . | nindent 14 }}
+              {{- include "static_utils_copier_resources" . | nindent 14 }}
               {{- end }}
 {{- end }}
       containers: