diff --git a/images/agent/src/cmd/main.go b/images/agent/src/cmd/main.go
index 7a028a0f..2bf2f2ef 100644
--- a/images/agent/src/cmd/main.go
+++ b/images/agent/src/cmd/main.go
@@ -17,6 +17,11 @@ limitations under the License.
 package main
 
 import (
+	"context"
+	"fmt"
+	"os"
+	goruntime "runtime"
+
 	"agent/config"
 	"agent/pkg/cache"
 	"agent/pkg/controller"
@@ -24,13 +29,7 @@ import (
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/scanner"
-	"context"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"os"
-	goruntime "runtime"
-	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
-
 	v1 "k8s.io/api/core/v1"
 	sv1 "k8s.io/api/storage/v1"
 	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -39,6 +38,7 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 )
 
 var (
@@ -61,7 +61,7 @@ func main() {
 
 	log, err := logger.NewLogger(cfgParams.Loglevel)
 	if err != nil {
-		fmt.Println(fmt.Sprintf("unable to create NewLogger, err: %v", err))
+		fmt.Printf("unable to create NewLogger, err: %v\n", err)
 		os.Exit(1)
 	}
 
@@ -71,7 +71,7 @@ func main() {
 	log.Info("[main] CfgParams has been successfully created")
 	log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevel, cfgParams.Loglevel))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.NodeName, cfgParams.NodeName))
-	log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineId))
+	log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineID))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ScanInterval, cfgParams.BlockDeviceScanIntervalSec.String()))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ThrottleInterval, cfgParams.ThrottleIntervalSec.String()))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.CmdDeadlineDuration, cfgParams.CmdDeadlineDurationSec.String()))
diff --git a/images/agent/src/config/config.go b/images/agent/src/config/config.go
index 416832e7..a657915d 100644
--- a/images/agent/src/config/config.go
+++ b/images/agent/src/config/config.go
@@ -17,16 +17,16 @@ limitations under the License.
 package config
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
 	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
-	"strconv"
 	"strings"
 	"time"
+
+	"agent/internal"
+	"agent/pkg/logger"
 )
 
 const (
@@ -42,7 +42,7 @@ const (
 )
 
 type Options struct {
-	MachineId   string
+	MachineID   string
 	NodeName    string
 	Loglevel    logger.Verbosity
 	MetricsPort string
@@ -69,11 +69,11 @@ func NewConfig() (*Options, error) {
 		opts.Loglevel = logger.Verbosity(loglevel)
 	}
 
-	machId, err := getMachineId()
+	machID, err := getMachineID()
 	if err != nil {
 		return nil, fmt.Errorf("[NewConfig] unable to get %s, error: %w", MachineID, err)
 	}
-	opts.MachineId = machId
+	opts.MachineID = machID
 
 	opts.MetricsPort = os.Getenv(MetricsPort)
 	if opts.MetricsPort == "" {
@@ -127,7 +127,7 @@ func NewConfig() (*Options, error) {
 	return &opts, nil
 }
 
-func getMachineId() (string, error) {
+func getMachineID() (string, error) {
 	id := os.Getenv(MachineID)
 	if id == "" {
 		args := []string{"-m", "-u", "-i", "-n", "-p", "-t", "1", "cat", "/etc/machine-id"}
diff --git a/images/agent/src/config/config_test.go b/images/agent/src/config/config_test.go
index 19d7dfec..837ea0cb 100644
--- a/images/agent/src/config/config_test.go
+++ b/images/agent/src/config/config_test.go
@@ -18,16 +18,17 @@ package config
 
 import (
 	"fmt"
-	"github.com/stretchr/testify/assert"
 	"os"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestNewConfig(t *testing.T) {
 	t.Run("AllValuesSet_ReturnsNoError", func(t *testing.T) {
 		expNodeName := "test-node"
 		expMetricsPort := ":0000"
-		expMachineId := "test-id"
+		expMachineID := "test-id"
 
 		err := os.Setenv(NodeName, expNodeName)
 		if err != nil {
 			t.Error(err)
 		}
@@ -37,7 +38,10 @@ func TestNewConfig(t *testing.T) {
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
+		err = os.Setenv(MachineID, expMachineID)
+		if err != nil {
+			t.Error(err)
+		}
 		defer os.Clearenv()
 
 		opts, err := NewConfig()
@@ -45,12 +49,12 @@ func TestNewConfig(t *testing.T) {
 		if assert.NoError(t, err) {
 			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
+			assert.Equal(t, expMachineID, opts.MachineID)
 		}
 	})
 
 	t.Run("NodeNameNotSet_ReturnsError", func(t *testing.T) {
-		machineIdFile := "./host-root/etc/machine-id"
+		machineIDFile := "./host-root/etc/machine-id"
 		expMetricsPort := ":0000"
 		expErrorMsg := fmt.Sprintf("[NewConfig] required %s env variable is not specified", NodeName)
@@ -65,7 +69,7 @@ func TestNewConfig(t *testing.T) {
 			t.Error(err)
 		}
 
-		file, err := os.Create(machineIdFile)
+		file, err := os.Create(machineIDFile)
 		if err != nil {
 			t.Error(err)
 		}
@@ -85,7 +89,7 @@ func TestNewConfig(t *testing.T) {
 		assert.EqualError(t, err, expErrorMsg)
 	})
 
-	t.Run("MachineIdNotSet_ReturnsError", func(t *testing.T) {
+	t.Run("MachineIDNotSet_ReturnsError", func(t *testing.T) {
 		expMetricsPort := ":0000"
 		expNodeName := "test-node"
 		expErrorMsg := fmt.Sprintf("[NewConfig] unable to get %s, error: %s",
@@ -108,13 +112,13 @@ func TestNewConfig(t *testing.T) {
 	t.Run("MetricsPortNotSet_ReturnsDefaultPort", func(t *testing.T) {
 		expNodeName := "test-node"
 		expMetricsPort := ":4202"
-		expMachineId := "test-id"
+		expMachineID := "test-id"
 
 		err := os.Setenv(NodeName, expNodeName)
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
+		err = os.Setenv(MachineID, expMachineID)
 		if err != nil {
 			t.Error(err)
 		}
@@ -126,7 +130,7 @@ func TestNewConfig(t *testing.T) {
 		if assert.NoError(t, err) {
 			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
+			assert.Equal(t, expMachineID, opts.MachineID)
 		}
 	})
 }
diff --git a/images/agent/src/internal/type.go b/images/agent/src/internal/type.go
index 24db2ed7..172e1474 100644
--- a/images/agent/src/internal/type.go
+++ b/images/agent/src/internal/type.go
@@ -37,7 +37,7 @@ type BlockDeviceCandidate struct {
 	PkName    string
 	Type      string
 	FSType    string
-	MachineId string
+	MachineID string
 	PartUUID  string
 }
 
@@ -54,7 +54,7 @@ type LVMVolumeGroupCandidate struct {
 	StatusThinPools []LVMVGStatusThinPool
 	VGSize          resource.Quantity
 	VGFree          resource.Quantity
-	VGUuid          string
+	VGUUID          string
 	Nodes           map[string][]LVMVGDevice
 }
 
@@ -71,7 +71,7 @@ type LVMVGDevice struct {
 	Path        string
 	PVSize      resource.Quantity
 	DevSize     resource.Quantity
-	PVUuid      string
+	PVUUID      string
 	BlockDevice string
 }
 
@@ -127,7 +127,7 @@ type VGData struct {
 	VGShared string            `json:"vg_shared"`
 	VGSize   resource.Quantity `json:"vg_size"`
 	VGTags   string            `json:"vg_tags"`
-	VGUuid   string            `json:"vg_uuid"`
+	VGUUID   string            `json:"vg_uuid"`
 }
 
 type LVReport struct {
diff --git a/images/agent/src/pkg/cache/cache.go b/images/agent/src/pkg/cache/cache.go
index 78d3ec30..ea4962d4 100644
--- a/images/agent/src/pkg/cache/cache.go
+++ b/images/agent/src/pkg/cache/cache.go
@@ -1,10 +1,11 @@
 package cache
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
 	"bytes"
 	"fmt"
+
+	"agent/internal"
+	"agent/pkg/logger"
 )
 
 type Cache struct {
diff --git a/images/agent/src/pkg/cache/cache_test.go b/images/agent/src/pkg/cache/cache_test.go
index 7d54cdeb..727eac26 100644
--- a/images/agent/src/pkg/cache/cache_test.go
+++ b/images/agent/src/pkg/cache/cache_test.go
@@ -1,10 +1,11 @@
 package cache
 
 import (
-	"agent/internal"
 	"bytes"
-	"github.com/stretchr/testify/assert"
 	"testing"
+
+	"agent/internal"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestCache(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/block_device.go b/images/agent/src/pkg/controller/block_device.go
index 022ead02..ec34e916 100644
--- a/images/agent/src/pkg/controller/block_device.go
+++ b/images/agent/src/pkg/controller/block_device.go
@@ -17,21 +17,21 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"crypto/sha1"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"os"
 	"regexp"
 	"strings"
 	"time"
 
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -54,7 +54,7 @@ func RunBlockDeviceController(
 	cl := mgr.GetClient()
 
 	c, err := controller.New(BlockDeviceCtrlName, mgr, controller.Options{
-		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+		Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
 			log.Info("[RunBlockDeviceController] Reconciler starts BlockDevice resources reconciliation")
 
 			shouldRequeue := BlockDeviceReconcile(ctx, cl, log, metrics, cfg, sdsCache)
@@ -152,7 +152,7 @@ func hasBlockDeviceDiff(res v1alpha1.BlockDeviceStatus, candidate internal.Block
 		candidate.HotPlug != res.HotPlug ||
 		candidate.Type != res.Type ||
 		candidate.FSType != res.FsType ||
-		candidate.MachineId != res.MachineID
+		candidate.MachineID != res.MachineID
 }
 
 func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics) (map[string]v1alpha1.BlockDevice, error) {
@@ -160,10 +160,10 @@ func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitori
 	start := time.Now()
 	err := kc.List(ctx, listDevice)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "list").Inc()
 		return nil, fmt.Errorf("unable to kc.List, error: %w", err)
 	}
 
@@ -181,8 +181,8 @@ func RemoveDeprecatedAPIDevices(
 	metrics monitoring.Metrics,
 	candidates []internal.BlockDeviceCandidate,
 	apiBlockDevices map[string]v1alpha1.BlockDevice,
-	nodeName string) {
-
+	nodeName string,
+) {
 	actualCandidates := make(map[string]struct{}, len(candidates))
 	for _, candidate := range candidates {
 		actualCandidates[candidate.Name] = struct{}{}
@@ -260,7 +260,7 @@ func GetBlockDeviceCandidates(log logger.Logger, cfg config.Options, sdsCache *c
 			PkName:    device.PkName,
 			Type:      device.Type,
 			FSType:    device.FSType,
-			MachineId: cfg.MachineId,
+			MachineID: cfg.MachineID,
 			PartUUID:  device.PartUUID,
 		}
 
@@ -513,15 +513,15 @@ func UpdateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 		Model:     candidate.Model,
 		Rota:      candidate.Rota,
 		HotPlug:   candidate.HotPlug,
-		MachineID: candidate.MachineId,
+		MachineID: candidate.MachineID,
 	}
 
 	start := time.Now()
 	err := kc.Update(ctx, &blockDevice)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "update").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "update").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "update").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "update").Inc()
 		return err
 	}
 
@@ -549,16 +549,16 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 			Size:      *resource.NewQuantity(candidate.Size.Value(), resource.BinarySI),
 			Model:     candidate.Model,
 			Rota:      candidate.Rota,
-			MachineID: candidate.MachineId,
+			MachineID: candidate.MachineID,
 		},
 	}
 
 	start := time.Now()
 	err := kc.Create(ctx, device)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "create").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "create").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "create").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "create").Inc()
 		return nil, err
 	}
 	return device, nil
@@ -567,10 +567,10 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 func DeleteAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics, device *v1alpha1.BlockDevice) error {
 	start := time.Now()
 	err := kc.Delete(ctx, device)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "delete").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "delete").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "delete").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "delete").Inc()
 		return err
 	}
 	return nil
diff --git a/images/agent/src/pkg/controller/block_device_test.go b/images/agent/src/pkg/controller/block_device_test.go
index 2e85c470..84510986 100644
--- a/images/agent/src/pkg/controller/block_device_test.go
+++ b/images/agent/src/pkg/controller/block_device_test.go
@@ -17,30 +17,29 @@ limitations under the License.
 package controller
 
 import (
+	"bytes"
+	"fmt"
+	"testing"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"bytes"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"github.com/stretchr/testify/assert"
 	errors2 "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"testing"
-
-	"k8s.io/apimachinery/pkg/api/resource"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestBlockDeviceCtrl(t *testing.T) {
 	log, _ := logger.NewLogger("1")
 	cfg := config.Options{
 		NodeName:  "test-node",
-		MachineId: "test-id",
+		MachineID: "test-id",
 	}
 
 	t.Run("shouldDeleteBlockDevice", func(t *testing.T) {
@@ -121,7 +120,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 					Model:     "124124-adf",
 					Name:      goodName,
 					HotPlug:   false,
-					MachineId: "1245151241241",
+					MachineID: "1245151241241",
 				},
 			}
 
@@ -151,7 +150,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 
 		defer func() {
 			for _, bd := range bds {
-				cl.Delete(ctx, &bd)
+				_ = cl.Delete(ctx, &bd)
 			}
 		}()
 
@@ -212,7 +211,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 		assert.Equal(t, 3, len(candidates))
 		for i := range candidates {
 			assert.Equal(t, devices[i].Name, candidates[i].Path)
-			assert.Equal(t, cfg.MachineId, candidates[i].MachineId)
+			assert.Equal(t, cfg.MachineID, candidates[i].MachineID)
 			assert.Equal(t, cfg.NodeName, candidates[i].NodeName)
 		}
 	})
@@ -338,7 +337,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				PkName:    "testPKNAME",
 				Type:      "testTYPE",
 				FSType:    "testFS",
-				MachineId: "testMACHINE",
+				MachineID: "testMACHINE",
 			},
 			// diff state
 			{
@@ -360,7 +359,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				PkName:    "testPKNAME2",
 				Type:      "testTYPE2",
 				FSType:    "testFS2",
-				MachineId: "testMACHINE2",
+				MachineID: "testMACHINE2",
 			},
 		}
 		blockDevice := v1alpha1.BlockDevice{
@@ -450,8 +449,8 @@ func TestBlockDeviceCtrl(t *testing.T) {
 			candidateName := CreateCandidateName(*log, candidate, devices)
 			assert.Equal(t, "dev-98ca88ddaaddec43b1c4894756f4856244985511", candidateName, "device name generated incorrectly")
 		}
-	}
+	}
+
 	if assert.NoError(t, err) {
 		assert.Equal(t, 7, len(filteredDevices))
 	}
diff --git a/images/agent/src/pkg/controller/controller_reconcile_test.go b/images/agent/src/pkg/controller/controller_reconcile_test.go
index cad019d2..4e018c30 100644
--- a/images/agent/src/pkg/controller/controller_reconcile_test.go
+++ b/images/agent/src/pkg/controller/controller_reconcile_test.go
@@ -17,16 +17,16 @@ limitations under the License.
 package controller_test
 
 import (
+	"context"
+
 	"agent/internal"
 	"agent/pkg/controller"
 	"agent/pkg/monitoring"
-	"context"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 var _ = Describe("Storage Controller", func() {
@@ -54,7 +54,7 @@ var _ = Describe("Storage Controller", func() {
 		PkName:    "/dev/sda14",
 		Type:      "disk",
 		FSType:    "",
-		MachineId: "1234",
+		MachineID: "1234",
 	}
 	)
 
@@ -77,7 +77,7 @@ var _ = Describe("Storage Controller", func() {
 		Expect(blockDevice.Status.Model).To(Equal(candidate.Model))
 		Expect(blockDevice.Status.Type).To(Equal(candidate.Type))
 		Expect(blockDevice.Status.FsType).To(Equal(candidate.FSType))
-		Expect(blockDevice.Status.MachineID).To(Equal(candidate.MachineId))
+		Expect(blockDevice.Status.MachineID).To(Equal(candidate.MachineID))
 	})
 
 	It("GetAPIBlockDevices", func() {
@@ -111,7 +111,7 @@ var _ = Describe("Storage Controller", func() {
 			PkName:    "/dev/sda14",
 			Type:      "disk",
 			FSType:    "",
-			MachineId: "1234",
+			MachineID: "1234",
 		}
 
 		resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics)
diff --git a/images/agent/src/pkg/controller/controller_suite_test.go b/images/agent/src/pkg/controller/controller_suite_test.go
index a4f27c0d..9ebb5197 100644
--- a/images/agent/src/pkg/controller/controller_suite_test.go
+++ b/images/agent/src/pkg/controller/controller_suite_test.go
@@ -17,17 +17,15 @@ limitations under the License.
 package controller_test
 
 import (
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"testing"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
 func TestController(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
index eb53e209..2fd9b946 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
@@ -1,10 +1,13 @@
 package controller
 
 import (
-	"agent/internal"
-	"agent/pkg/kubutils"
 	"context"
 	"fmt"
+	"os"
+	"testing"
+
+	"agent/internal"
+	"agent/pkg/kubutils"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	v1 "k8s.io/api/core/v1"
 	sv1 "k8s.io/api/storage/v1"
@@ -14,9 +17,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	apiruntime "k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"os"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"testing"
 )
 
 var (
@@ -98,7 +99,6 @@ func BenchmarkRunThickLLVCreationSingleThread(b *testing.B) {
 				}
 			}
 		}
-
 	}
 
 	b.Logf("[TIME] LLV resources were configured for %f", lvCreatedTime)
@@ -165,7 +165,6 @@ func BenchmarkRunThinLLVCreationSingleThread(b *testing.B) {
 				}
 			}
 		}
-
 	}
 
 	b.Logf("All LLV were configured for %f. Ends the test", createdTime)
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
index bf5d2ec5..27edcb4b 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
@@ -1,20 +1,22 @@
 package controller
 
 import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -22,7 +24,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
-	"time"
 )
 
 const (
@@ -79,13 +80,13 @@ func RunLVMLogicalVolumeExtenderWatcherController(
 	}
 
 	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got a Create event for the LVMVolumeGroup %s", e.Object.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] added the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got an Update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}
 			q.Add(request)
@@ -124,7 +125,7 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m
 	log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name))
 	llvs, err := getAllLLVsWithPercentSize(ctx, cl, lvg.Name)
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources"))
+		log.Error(err, "[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources")
 		return true
 	}
 	log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got LLV resources for the LVMVolumeGroup %s", lvg.Name))
@@ -181,6 +182,12 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m
 		log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should be extended from %s to %s size", llv.Name, llv.Status.ActualSize.String(), llvRequestedSize.String()))
 		err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseResizing, "")
+		if err != nil {
+			log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name))
+			shouldRetry = true
+			continue
+		}
+
 		cmd, err := utils.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode)
 		if err != nil {
 			log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd))
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
index dcb55e1d..f7d1816e 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
@@ -1,21 +1,22 @@
 package controller
 
 import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/google/go-cmp/cmp"
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -139,7 +140,7 @@ func RunLVMLogicalVolumeWatcherController(
 				}
 			}
 			if shouldRequeue {
-				log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues were occured while reconciliation the LVMLogicalVolume %s. Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String()))
+				log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues occurred while reconciling the LVMLogicalVolume %s. Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String()))
 				return reconcile.Result{RequeueAfter: cfg.LLVRequeueIntervalSec}, nil
 			}
 
@@ -333,7 +334,7 @@ func reconcileLLVUpdateFunc(
 		log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name))
 		return false, err
 	}
-	log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] sucessfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String()))
+	log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String()))
 
 	if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) {
 		log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String()))
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
index 69ad055f..5e011183 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
@@ -1,18 +1,18 @@
 package controller
 
 import (
-	"agent/pkg/cache"
 	"context"
 	"fmt"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"strings"
 
 	"agent/internal"
+	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/strings/slices"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -37,11 +37,7 @@ func identifyReconcileFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.L
 }
 
 func shouldReconcileByDeleteFunc(llv *v1alpha1.LVMLogicalVolume) bool {
-	if llv.DeletionTimestamp == nil {
-		return false
-	}
-
-	return true
+	return llv.DeletionTimestamp != nil
 }
 
 func checkIfConditionIsTrue(lvg *v1alpha1.LvmVolumeGroup, conType string) bool {
@@ -128,7 +124,7 @@ func checkIfLVBelongsToLLV(llv *v1alpha1.LVMLogicalVolume, lv *internal.LVData)
 func updateLLVPhaseToCreatedIfNeeded(ctx context.Context, cl client.Client, llv *v1alpha1.LVMLogicalVolume, actualSize resource.Quantity) (bool, error) {
 	var contiguous *bool
 	if llv.Spec.Thick != nil {
-		if *llv.Spec.Thick.Contiguous == true {
+		if *llv.Spec.Thick.Contiguous {
 			contiguous = llv.Spec.Thick.Contiguous
 		}
 	}
@@ -208,11 +204,7 @@ func shouldReconcileByCreateFunc(sdsCache *cache.Cache, vgName string, llv *v1al
 	}
 
 	lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode)
-	if lv != nil {
-		return false
-	}
-
-	return true
+	return lv == nil
}
 
 func getFreeLVGSpaceForLLV(lvg *v1alpha1.LvmVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity {
@@ -303,10 +295,8 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol
 	if lv != nil {
 		if len(lv.LVAttr) == 0 {
 			reason.WriteString(fmt.Sprintf("LV %s was found on the node, but can't be validated due to its attributes is empty string. ", lv.LVName))
-		} else {
-			if !checkIfLVBelongsToLLV(llv, lv) {
-				reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it is doesnt match the one on the node.", lv.LVName))
-			}
+		} else if !checkIfLVBelongsToLLV(llv, lv) {
+			reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it does not match the one on the node.", lv.LVName))
 		}
 	}
 
@@ -317,7 +307,7 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol
 	return true, ""
 }
 
-func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error {
+func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, _ monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error {
 	if llv.Status != nil &&
 		llv.Status.Phase == phase &&
 		llv.Status.Reason == reason {
@@ -342,7 +332,7 @@ func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client,
 	return nil
 }
 
-func updateLVMLogicalVolumeSpec(ctx context.Context, metrics monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error {
+func updateLVMLogicalVolumeSpec(ctx context.Context, _ monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error {
 	return cl.Update(ctx, llv)
 }
 
@@ -352,11 +342,7 @@ func shouldReconcileByUpdateFunc(sdsCache *cache.Cache, vgName string, llv *v1al
 	}
 
 	lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode)
-	if lv == nil {
-		return false
-	}
-
-	return true
+	return lv != nil
 }
 
 func isContiguous(llv *v1alpha1.LVMLogicalVolume) bool {
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
index 395a8db2..7e70eb44 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
@@ -1,15 +1,15 @@
 package controller
 
 import (
+	"bytes"
+	"testing"
+
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"bytes"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"testing"
-
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/api/resource"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
index 502176b4..40026e16 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
@@ -17,20 +17,20 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"errors"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"strconv"
 	"strings"
 	"time"
 
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -54,12 +54,12 @@ func RunLVMVolumeGroupDiscoverController(
 	cl := mgr.GetClient()
 
 	c, err := controller.New(LVMVolumeGroupDiscoverCtrlName, mgr, controller.Options{
-		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+		Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
 			log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler starts LVMVolumeGroup resources reconciliation")
 
 			shouldRequeue := LVMVolumeGroupDiscoverReconcile(ctx, cl, metrics, log, cfg, sdsCache)
 			if shouldRequeue {
-				log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occured while run the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String()))
+				log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occurred while running the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String()))
 				return reconcile.Result{
 					RequeueAfter: cfg.VolumeGroupScanIntervalSec,
 				}, nil
@@ -161,7 +161,6 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met
 			}
 
 			log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LvmVolumeGroup, name: "%s"`, lvg.Name))
-
 		} else {
 			log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", lvg.Name))
 			lvm, err := CreateLVMVolumeGroupByCandidate(ctx, log, metrics, cl, candidate)
@@ -208,7 +207,6 @@ func filterLVGsByNode(
 	blockDevices map[string]v1alpha1.BlockDevice,
 	currentNode string,
 ) map[string]v1alpha1.LvmVolumeGroup {
-
 	filtered := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgs))
 	blockDevicesNodes := make(map[string]string, len(blockDevices))
 
@@ -278,14 +276,14 @@ func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LvmVolumeGroup, candi
 		log.Trace(fmt.Sprintf("Resource ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String()))
 	}
 	log.Trace(fmt.Sprintf(`VGSize, candidate: %s, lvg: %s`, candidate.VGSize.String(), lvg.Status.VGSize.String()))
-	log.Trace(fmt.Sprintf(`VGUuid, candidate: %s, lvg: %s`, candidate.VGUuid, lvg.Status.VGUuid))
+	log.Trace(fmt.Sprintf(`VGUUID, candidate: %s, lvg: %s`, candidate.VGUUID, lvg.Status.VGUuid))
 	log.Trace(fmt.Sprintf(`Nodes, candidate: %+v, lvg: %+v`, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes))
 
 	return candidate.AllocatedSize.Value() != lvg.Status.AllocatedSize.Value() ||
 		hasStatusPoolDiff(convertedStatusPools, lvg.Status.ThinPools) ||
 		candidate.VGSize.Value() != lvg.Status.VGSize.Value() ||
 		candidate.VGFree.Value() != lvg.Status.VGFree.Value() ||
-		candidate.VGUuid != lvg.Status.VGUuid ||
+		candidate.VGUUID != lvg.Status.VGUuid ||
 		hasStatusNodesDiff(log, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes)
 }
 
@@ -374,15 +372,12 @@ func ReconcileUnhealthyLVMVolumeGroups(
 		if candidateTp, exist := candidateTPs[thinPool.Name]; !exist {
 			log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, thinPool.Name))
 			messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. ", thinPool.Name))
-		} else {
+		} else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) &&
+			candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() {
 			// that means thin-pool is not 100%VG space
 			// use candidate VGSize as lvg.Status.VGSize might not be updated yet
-			if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) {
-				if candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() {
-					log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
-					messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
-				}
-			}
+			log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
+			messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
 		}
 	}
@@ -450,7 +445,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 	lvs, lvErrs := sdsCache.GetLVs()
 	var thinPools []internal.LVData
-	if lvs != nil && len(lvs) > 0 {
+	if len(lvs) > 0 { // Filter LV to get only thin pools as we do not support thick for now.
 		thinPools = getThinPools(lvs)
 	}
 
@@ -489,7 +484,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 			StatusThinPools: getStatusThinPools(log, sortedThinPools, sortedLVByThinPool, vg, lvIssues),
 			VGSize:          *resource.NewQuantity(vg.VGSize.Value(), resource.BinarySI),
 			VGFree:          *resource.NewQuantity(vg.VGFree.Value(), resource.BinarySI),
-			VGUuid:          vg.VGUuid,
+			VGUUID:          vg.VGUUID,
 			Nodes:           configureCandidateNodeDevices(sortedPVs, sortedBDs, vg, currentNode),
 		}
 
@@ -502,19 +497,19 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 func checkVGHealth(blockDevices map[string][]v1alpha1.BlockDevice, vgIssues map[string]string, pvIssues map[string][]string, lvIssues map[string]map[string]string, vg internal.VGData) (health, message string) {
 	issues := make([]string, 0, len(vgIssues)+len(pvIssues)+len(lvIssues)+1)
 
-	if bds, exist := blockDevices[vg.VGName+vg.VGUuid]; !exist || len(bds) == 0 {
-		issues = append(issues, fmt.Sprintf("[ERROR] Unable to get BlockDevice resources for VG, name: %s ; uuid: %s", vg.VGName, vg.VGUuid))
+	if bds, exist := blockDevices[vg.VGName+vg.VGUUID]; !exist || len(bds) == 0 {
+		issues = append(issues, fmt.Sprintf("[ERROR] Unable to get BlockDevice resources for VG, name: %s ; uuid: %s", vg.VGName, vg.VGUUID))
 	}
 
-	if vgIssue, exist := vgIssues[vg.VGName+vg.VGUuid]; exist {
+	if vgIssue, exist := vgIssues[vg.VGName+vg.VGUUID]; exist {
 		issues = append(issues, vgIssue)
 	}
 
-	if pvIssue, exist := pvIssues[vg.VGName+vg.VGUuid]; exist {
+	if pvIssue, exist := pvIssues[vg.VGName+vg.VGUUID]; exist {
 		issues = append(issues, strings.Join(pvIssue, ""))
 	}
 
-	if lvIssue, exist := lvIssues[vg.VGName+vg.VGUuid]; exist {
+	if lvIssue, exist := lvIssues[vg.VGName+vg.VGUUID]; exist {
 		for lvName, issue := range lvIssue {
 			issues = append(issues, fmt.Sprintf("%s: %s", lvName, issue))
 		}
@@ -553,10 +548,8 @@ func sortThinPoolIssuesByVG(log logger.Logger, lvs []internal.LVData) map[string
 		if err != nil {
 			log.Error(err, fmt.Sprintf(`[sortThinPoolIssuesByVG] unable to run lvs command for lv, name: "%s"`, lv.LVName))
-			//lvIssuesByVG[lv.VGName+lv.VGUuid] = append(lvIssuesByVG[lv.VGName+lv.VGUuid], err.Error())
 			lvIssuesByVG[lv.VGName+lv.VGUuid] = make(map[string]string, len(lvs))
 			lvIssuesByVG[lv.VGName+lv.VGUuid][lv.LVName] = err.Error()
-
 		}
 
 		if stdErr.Len() != 0 {
@@ -599,12 +592,12 @@ func sortVGIssuesByVG(log logger.Logger, vgs []internal.VGData) map[string]strin
 		log.Debug(fmt.Sprintf("[sortVGIssuesByVG] runs cmd: %s", cmd))
 		if err != nil {
 			log.Error(err, fmt.Sprintf(`[sortVGIssuesByVG] unable to run vgs command for vg, name: "%s"`, vg.VGName))
-			vgIssues[vg.VGName+vg.VGUuid] = err.Error()
+			vgIssues[vg.VGName+vg.VGUUID] = err.Error()
 		}
 
 		if stdErr.Len() != 0 {
 			log.Error(fmt.Errorf(stdErr.String()), fmt.Sprintf(`[sortVGIssuesByVG] vgs command for vg "%s" has stderr: `, vg.VGName))
-			vgIssues[vg.VGName+vg.VGUuid] = stdErr.String()
+			vgIssues[vg.VGName+vg.VGUUID] = stdErr.String()
 			stdErr.Reset()
 		}
 	}
@@ -627,7 +620,7 @@ func sortLVByThinPool(lvs []internal.LVData) map[string][]internal.LVData {
 func sortThinPoolsByVG(lvs []internal.LVData, vgs []internal.VGData) map[string][]internal.LVData {
 	result := make(map[string][]internal.LVData, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]internal.LVData, 0, len(lvs))
+		result[vg.VGName+vg.VGUUID] = make([]internal.LVData, 0, len(lvs))
 	}
 
 	for _, lv := range lvs {
@@ -642,7 +635,7 @@ func sortPVsByVG(pvs []internal.PVData, vgs []internal.VGData) map[string][]internal.PVData {
 	result := make(map[string][]internal.PVData, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]internal.PVData, 0, len(pvs))
+		result[vg.VGName+vg.VGUUID] = make([]internal.PVData, 0, len(pvs))
 	}
 
 	for _, pv := range pvs {
@@ -657,7 +650,7 @@ func sortPVsByVG(pvs []internal.PVData, vgs []internal.VGData) map[string][]inte
 func sortBlockDevicesByVG(bds map[string]v1alpha1.BlockDevice, vgs []internal.VGData) map[string][]v1alpha1.BlockDevice {
 	result := make(map[string][]v1alpha1.BlockDevice, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]v1alpha1.BlockDevice, 0, len(bds))
+		result[vg.VGName+vg.VGUUID] = make([]v1alpha1.BlockDevice, 0, len(bds))
 	}
 
 	for _, bd := range bds {
@@ -670,8 +663,8 @@ func sortBlockDevicesByVG(bds map[string]v1alpha1.BlockDevice, vgs []internal.VG
 }
 
 func configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[string][]v1alpha1.BlockDevice, vg internal.VGData, currentNode string) map[string][]internal.LVMVGDevice {
-	filteredPV := pvs[vg.VGName+vg.VGUuid]
-	filteredBds := bds[vg.VGName+vg.VGUuid]
+	filteredPV := pvs[vg.VGName+vg.VGUUID]
+	filteredBds := bds[vg.VGName+vg.VGUUID]
 	bdPathStatus := make(map[string]v1alpha1.BlockDevice, len(bds))
 	result := make(map[string][]internal.LVMVGDevice, len(filteredPV))
 
@@ -683,7 +676,7 @@ func configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[str
 		device := internal.LVMVGDevice{
 			Path:   pv.PVName,
 			PVSize: *resource.NewQuantity(pv.PVSize.Value(), resource.BinarySI),
-			PVUuid: pv.PVUuid,
+			PVUUID: pv.PVUuid,
 		}
 
 		if bd, exist := bdPathStatus[pv.PVName]; exist {
@@ -706,7 +699,7 @@ func getVgType(vg internal.VGData) string {
 }
 
 func getSpecThinPools(thinPools map[string][]internal.LVData, vg internal.VGData) map[string]resource.Quantity {
-	lvs := thinPools[vg.VGName+vg.VGUuid]
+	lvs := thinPools[vg.VGName+vg.VGUUID]
 	tps := make(map[string]resource.Quantity, len(lvs))
 
 	for _, lv := range lvs {
@@ -729,7 +722,7 @@ func getThinPools(lvs []internal.LVData) []internal.LVData {
 }
 
 func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]internal.LVData, vg internal.VGData, lvIssues map[string]map[string]string) []internal.LVMVGStatusThinPool {
-	tps := thinPools[vg.VGName+vg.VGUuid]
+	tps := thinPools[vg.VGName+vg.VGUUID]
 	result := make([]internal.LVMVGStatusThinPool, 0, len(tps))
 
 	for _, thinPool := range tps {
@@ -749,7 +742,7 @@ func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]int
 			Message: "",
 		}
 
-		if lverrs, exist := lvIssues[vg.VGName+vg.VGUuid][thinPool.LVName]; exist {
+		if lverrs, exist := lvIssues[vg.VGName+vg.VGUUID][thinPool.LVName]; exist {
 			tp.Ready = false
 			tp.Message = lverrs
 		}
@@ -795,7 +788,7 @@ func isThinPool(lv internal.LVData) bool {
 }
 
 func getBlockDevicesNames(bds map[string][]v1alpha1.BlockDevice, vg internal.VGData) []string {
-	sorted := bds[vg.VGName+vg.VGUuid]
+	sorted := bds[vg.VGName+vg.VGUUID]
 	names := make([]string, 0, len(sorted))
 
 	for _, bd := range sorted {
@@ -834,7 +827,7 @@ func CreateLVMVolumeGroupByCandidate(
 			Nodes:     convertLVMVGNodes(candidate.Nodes),
 			ThinPools: thinPools,
 			VGSize:    candidate.VGSize,
-			VGUuid:    candidate.VGUuid,
+			VGUuid:    candidate.VGUUID,
 			VGFree:    candidate.VGFree,
 		},
 	}
@@ -851,10 +844,10 @@ func CreateLVMVolumeGroupByCandidate(
 
 	start := time.Now()
 	err = kc.Create(ctx, lvmVolumeGroup)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
 		return nil, fmt.Errorf("unable to сreate LVMVolumeGroup, err: %w", err)
 	}
 
@@ -910,14 +903,14 @@ func UpdateLVMVolumeGroupByCandidate(
 	lvg.Status.ThinPools = thinPools
 	lvg.Status.VGSize = candidate.VGSize
 	lvg.Status.VGFree = candidate.VGFree
-	lvg.Status.VGUuid = candidate.VGUuid
+	lvg.Status.VGUuid = candidate.VGUUID
 
 	start := time.Now()
 	err = cl.Status().Update(ctx, lvg)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
 		return fmt.Errorf(`[UpdateLVMVolumeGroupByCandidate] unable to update LVMVolumeGroup, name: "%s", err: %w`, lvg.Name, err)
 	}
 
@@ -950,7 +943,7 @@ func convertLVMVGDevices(devices []internal.LVMVGDevice) []v1alpha1.LvmVolumeGro
 			BlockDevice: dev.BlockDevice,
 			DevSize:     dev.DevSize,
 			PVSize:      dev.PVSize,
-			PVUuid:      dev.PVUuid,
+			PVUuid:      dev.PVUUID,
 			Path:        dev.Path,
 		})
 	}
@@ -1020,7 +1013,7 @@ func getThinPoolSpaceWithAllocationLimit(actualSize resource.Quantity, allocatio
 	}
 
 	factor := float64(percent)
-	factor = factor / 100
+	factor /= 100
 
 	return *resource.NewQuantity(int64(float64(actualSize.Value())*factor), resource.BinarySI), nil
 }
@@ -1034,10 +1027,10 @@ func GetAPILVMVolumeGroups(ctx context.Context, kc kclient.Client, metrics monit
 	start := time.Now()
 	err := kc.List(ctx, lvgList)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
 		return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LvmVolumeGroups, err: %w", err)
 	}
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
index 2b0f3ead..8c5fed66 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
@@ -17,15 +17,15 @@ limitations under the License.
 package controller
 
 import (
+	"context"
+	"testing"
+
 	"agent/internal"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
-	"context"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	"testing"
-
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -73,15 +73,15 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("checkVGHealth_returns_Operational", func(t *testing.T) {
 		const (
 			vgName = "testVg"
-			vgUuid = "testUuid"
+			vgUUID = "testUuid"
 		)
 		bds := map[string][]v1alpha1.BlockDevice{
-			vgName + vgUuid: {{}},
+			vgName + vgUUID: {{}},
 		}
 		vgIssues := map[string]string{}
 		pvIssues := map[string][]string{}
 		lvIssues := map[string]map[string]string{}
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		health, _ := checkVGHealth(bds, vgIssues, pvIssues, lvIssues, vg)
 		assert.Equal(t, health, internal.LVMVGHealthOperational)
@@ -90,15 +90,15 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("checkVGHealth_returns_NonOperational", func(t *testing.T) {
 		const (
 			vgName = "testVg"
-			vgUuid = "testUuid"
+			vgUUID = "testUuid"
 		)
 		bds := map[string][]v1alpha1.BlockDevice{
-			vgName + vgUuid: {},
+			vgName + vgUUID: {},
 		}
 		vgIssues := map[string]string{}
 		pvIssues := map[string][]string{}
 		lvIssues := map[string]map[string]string{}
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		health, _ := checkVGHealth(bds, vgIssues, pvIssues, lvIssues, vg)
 		assert.Equal(t, health, internal.LVMVGHealthNonOperational)
@@ -125,37 +125,37 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortPVsByVG_returns_sorted_pvs", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		pvs := []internal.PVData{
 			{
 				PVName: "first",
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUuid: firstVgUUID,
 			},
 			{
 				PVName: "second",
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUuid: secondVgUUID,
 			},
 		}
 
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 
 		expected := map[string][]internal.PVData{
-			firstVgName + firstVgUuid:   {pvs[0]},
-			secondVgName + secondVgUuid: {pvs[1]},
+			firstVgName + firstVgUUID:   {pvs[0]},
+			secondVgName + secondVgUUID: {pvs[1]},
 		}
 
 		actual := sortPVsByVG(pvs, vgs)
@@ -165,18 +165,18 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortBlockDevicesByVG_returns_sorted_bds", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 
@@ -185,21 +185,21 @@
 				ObjectMeta: metav1.ObjectMeta{Name: "first"},
 				Status: v1alpha1.BlockDeviceStatus{
 					ActualVGNameOnTheNode: firstVgName,
-					VGUuid:                firstVgUuid,
+					VGUuid:                firstVgUUID,
 				},
 			},
 			"second": {
 				ObjectMeta: metav1.ObjectMeta{Name: "second"},
 				Status: v1alpha1.BlockDeviceStatus{
 					ActualVGNameOnTheNode: secondVgName,
-					VGUuid:                secondVgUuid,
+					VGUuid:                secondVgUUID,
 				},
 			},
 		}
 
 		expected := map[string][]v1alpha1.BlockDevice{
-			firstVgName + firstVgUuid:   {bds["first"]},
-			secondVgName + secondVgUuid: {bds["second"]},
+			firstVgName + firstVgUUID:   {bds["first"]},
+			secondVgName + secondVgUUID: {bds["second"]},
 		}
 
 		actual := sortBlockDevicesByVG(bds, vgs)
@@ -209,35 +209,35 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortLVsByVG_returns_sorted_LVs", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 		lvs := []internal.LVData{
 			{
 				LVName: "first",
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUuid: firstVgUUID,
 			},
 			{
 				LVName: "second",
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUuid: secondVgUUID,
 			},
 		}
 
 		expected := map[string][]internal.LVData{
-			firstVgName + firstVgUuid:   {lvs[0]},
-			secondVgName + secondVgUuid: {lvs[1]},
+			firstVgName + firstVgUUID:   {lvs[0]},
+			secondVgName + secondVgUUID: {lvs[1]},
 		}
 
 		actual := sortThinPoolsByVG(lvs, vgs)
@@ -247,16 +247,19 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("configureCandidateNodesDevices_returns_candidates_nodes", func(t *testing.T) {
 		const (
 			vgName   = "test_vg"
-			vgUuid   = "vg_uuid"
+			vgUUID   = "vg_uuid"
 			nodeName = "test_node"
 		)
 
 		vg := internal.VGData{
 			VGName: vgName,
-			VGUuid: vgUuid,
+			VGUUID: vgUUID,
 		}
 
 		size10G, err := resource.ParseQuantity("10G")
+		if err != nil {
+			t.Error(err)
+		}
 		size1G, err := resource.ParseQuantity("1G")
 		if err != nil {
 			t.Error(err)
 		}
@@ -268,13 +271,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				PVSize: size10G,
 				PVUuid: "pv_uuid1",
 				VGName: vgName,
-				VGUuid: vgUuid,
+				VGUuid: vgUUID,
 			},
 			{
 				PVName: "test_pv2",
 				PVSize: size1G,
 				PVUuid: "pv_uuid2",
-				VGUuid: vgUuid,
+				VGUuid: vgUUID,
 				VGName: vgName,
 			},
 		}
@@ -285,7 +288,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Status: v1alpha1.BlockDeviceStatus{
 					Path:                  "test_pv1",
 					Size:                  resource.MustParse("10G"),
-					VGUuid:                vgUuid,
+					VGUuid:                vgUUID,
 					ActualVGNameOnTheNode: vgName,
 				},
 			},
@@ -294,7 +297,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Status: v1alpha1.BlockDeviceStatus{
 					Path:                  "test_pv2",
 					Size:                  resource.MustParse("1G"),
-					VGUuid:                vgUuid,
+					VGUuid:                vgUUID,
 					ActualVGNameOnTheNode: vgName,
 				},
 			},
@@ -306,20 +309,20 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 					Path:        "test_pv1",
 					PVSize:      *resource.NewQuantity(size10G.Value(), resource.BinarySI),
 					DevSize:     *resource.NewQuantity(size10G.Value(), resource.BinarySI),
-					PVUuid:      "pv_uuid1",
+					PVUUID:      "pv_uuid1",
 					BlockDevice: "block_device1",
 				},
 				{
 					Path:        "test_pv2",
 					PVSize:      *resource.NewQuantity(size1G.Value(), resource.BinarySI),
 					DevSize:     *resource.NewQuantity(size1G.Value(), resource.BinarySI),
-					PVUuid:      "pv_uuid2",
+					PVUUID:      "pv_uuid2",
 					BlockDevice: "block_device2",
 				},
 			},
 		}
 
-		mp := map[string][]v1alpha1.BlockDevice{vgName + vgUuid: bds}
-		ar := map[string][]internal.PVData{vgName + vgUuid: pvs}
+		mp := map[string][]v1alpha1.BlockDevice{vgName + vgUUID: bds}
+		ar := map[string][]internal.PVData{vgName + vgUUID: pvs}
 
 		actual := configureCandidateNodeDevices(ar, mp, vg, nodeName)
 
@@ -347,7 +350,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 		vgs := []internal.VGData{
 			{
 				VGName: "firstVG",
-				VGUuid: "firstUUID",
+				VGUUID: "firstUUID",
 			},
 		}
 		actual := sortBlockDevicesByVG(bds, vgs)
@@ -378,19 +381,22 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("getSpecThinPools_returns_LVName_LVSize_map", func(t *testing.T) {
 		const (
 			vgName = "test_vg"
-			vgUuid = "test_uuid"
+			vgUUID = "test_uuid"
 		)
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		firstSize, err := resource.ParseQuantity("1G")
+		if err != nil {
+			t.Error(err)
+		}
 		secondSize, err := resource.ParseQuantity("2G")
 		if err != nil {
 			t.Error(err)
 		}
 
 		thinPools := map[string][]internal.LVData{
-			vgName + vgUuid: {
+			vgName + vgUUID: {
 				{
 					LVName: "first",
 					LVSize: firstSize,
@@ -419,10 +425,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Type    = "local"
 			Health  = internal.LVMVGHealthOperational
 			Message = "No problems detected"
-			VGUuid  = "test_uuid"
+			VGUUID  = "test_uuid"
 		)
 
 		size10G, err := resource.ParseQuantity("10G")
+		if err != nil {
+			t.Error(err)
+		}
 		size1G, err := resource.ParseQuantity("1G")
 		if err != nil {
 			t.Error(err)
 		}
@@ -448,7 +457,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 					Path:        "test/path",
 					PVSize:      size1G,
 					DevSize:     size1G,
-					PVUuid:      "test-pv-uuid",
+					PVUUID:      "test-pv-uuid",
 					BlockDevice: "test-device",
 				},
 			},
@@ -466,7 +475,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Message:         Message,
 			StatusThinPools: statusThinPools,
 			VGSize:          size10G,
-			VGUuid:          VGUuid,
+			VGUUID:          VGUUID,
 			Nodes:           nodes,
 		}
 
@@ -491,7 +500,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Nodes:     convertLVMVGNodes(nodes),
 			ThinPools: thinPools,
 			VGSize:    size10G,
-			VGUuid:    VGUuid,
+			VGUuid:    VGUUID,
 		},
 	}
 
@@ -718,7 +727,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("hasLVMVolumeGroupDiff", func(t *testing.T) {
 		t.Run("should_return_false", func(t *testing.T) {
 			size10G, err := resource.ParseQuantity("10G")
+			if err != nil {
+				t.Error(err)
+			}
 			size1G, err := resource.ParseQuantity("1G")
+			if err != nil {
+				t.Error(err)
+			}
 			size13G, err := resource.ParseQuantity("13G")
 			if err != nil {
 				t.Error(err)
 			}
@@ -755,7 +770,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 						Path:        "/test/ds",
 						PVSize:      size1G,
 						DevSize:     size13G,
-						PVUuid:      "testUUID",
+						PVUUID:      "testUUID",
 						BlockDevice: "something",
 					},
 				},
@@ -821,14 +836,14 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 						Path:        "/test/ds",
 						PVSize:      size1G,
 						DevSize:     size13G,
-						PVUuid:      "testUUID",
+						PVUUID:      "testUUID",
 						BlockDevice: "something",
 					},
 					{
 						Path:        "/test/ds2",
 						PVSize:      size1G,
 						DevSize:     size13G,
-						PVUuid:      "testUUID2",
+						PVUUID:      "testUUID2",
 						BlockDevice: "something2",
 					},
 				},
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_test.go b/images/agent/src/pkg/controller/lvm_volume_group_test.go
index cd8c4d31..f055f60f 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_test.go
@@ -18,11 +18,11 @@ package controller
 
 import (
 	"encoding/json"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"testing"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
index e80ea32b..6ffc8781 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
@@ -17,19 +17,18 @@ limitations under the License.
package controller import ( + "context" + "fmt" + "agent/config" "agent/internal" "agent/pkg/cache" "agent/pkg/logger" "agent/pkg/monitoring" "agent/pkg/utils" - "context" - "errors" - "fmt" "github.com/deckhouse/sds-node-configurator/api/v1alpha1" errors2 "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" @@ -143,9 +142,8 @@ func RunLVMVolumeGroupWatcherController( if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name)) return reconcile.Result{}, err - } else { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name)) } + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name)) return reconcile.Result{}, nil } @@ -193,7 +191,7 @@ func RunLVMVolumeGroupWatcherController( } err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup]{ - CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) { + CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) { log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc got a create event for the LVMVolumeGroup, name: %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} @@ -201,7 +199,7 @@ func RunLVMVolumeGroupWatcherController( log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) }, - UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) { + UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) { log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] UpdateFunc got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) if !shouldLVGWatcherReconcileUpdateEvent(log, e.ObjectOld, e.ObjectNew) { log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update event for the LVMVolumeGroup %s should not be reconciled as not target changed were made", e.ObjectNew.Name)) @@ -352,7 +350,7 @@ func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Lo log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to get VG %s for the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) found, vg := tryGetVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode) if !found { - err := errors.New(fmt.Sprintf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode)) + err := fmt.Errorf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode) log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error()) if err != nil { diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go index a87e907c..86ce8e25 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go +++ 
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
index a87e907c..86ce8e25 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
@@ -17,23 +17,23 @@ limitations under the License.

 package controller

 import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
 "agent/internal"
 "agent/pkg/cache"
 "agent/pkg/logger"
 "agent/pkg/monitoring"
 "agent/pkg/utils"
- "context"
- "errors"
- "fmt"
 "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 "k8s.io/apimachinery/pkg/api/resource"
 v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/utils/strings/slices"
- "reflect"
- "strconv"
- "strings"
- "time"
-
 "sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -51,10 +51,10 @@ func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logg
 if len(lvg.Status.Nodes) == 0 {
 start := time.Now()
 err := cl.Delete(ctx, lvg)
- metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
- metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
+ metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
+ metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
 if err != nil {
- metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
+ metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
 return err
 }
 log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name))
@@ -106,11 +106,7 @@ func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1a
 }

 func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LvmVolumeGroup) bool {
- if lvg.DeletionTimestamp != nil {
- return true
- }
-
- return false
+ return lvg.DeletionTimestamp != nil
 }

 func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, status v1.ConditionStatus, conType, reason, message string) error {
@@ -260,11 +256,7 @@ func validateSpecBlockDevices(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[str
 func checkIfLVGBelongsToNode(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, nodeName string) bool {
 bd := blockDevices[lvg.Spec.BlockDeviceNames[0]]
- if bd.Status.NodeName != nodeName {
- return false
- }
-
- return true
+ return bd.Status.NodeName == nodeName
 }

 func extractPathsFromBlockDevices(blockDevicesNames []string, blockDevices map[string]v1alpha1.BlockDevice) []string {
@@ -755,10 +747,10 @@ func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring
 err := cl.Get(ctx, client.ObjectKey{
 Name: name,
 }, obj)
- metrics.ApiMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start))
- metrics.ApiMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc()
+ metrics.APIMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start))
+ metrics.APIMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc()
 if err != nil {
- metrics.ApiMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc()
+ metrics.APIMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc()
 return nil, err
 }
 return obj, nil
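The DeleteLVMVolumeGroup and getLVMVolumeGroup hunks show the same instrumentation sequence around every Kubernetes API call: start a timer, observe the duration, count the execution, and count an error only when one occurred; the change itself is purely the Api to API rename. A hypothetical wrapper that captures that sequence in one place, assuming only the Metrics methods visible in this diff (withAPIMetrics is not part of this change):

    package controller

    import (
        "time"

        "agent/pkg/monitoring"
    )

    // withAPIMetrics times a client call, records duration and execution
    // count unconditionally, and bumps the error counter only on failure,
    // mirroring the inline pattern the hunks above rename.
    func withAPIMetrics(m monitoring.Metrics, ctrl, method string, call func() error) error {
        start := time.Now()
        err := call()
        m.APIMethodsDuration(ctrl, method).Observe(m.GetEstimatedTimeInSeconds(start))
        m.APIMethodsExecutionCount(ctrl, method).Inc()
        if err != nil {
            m.APIMethodsErrors(ctrl, method).Inc()
        }
        return err
    }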
@@ -811,7 +803,7 @@ func DeleteVGIfExist(log logger.Logger, metrics monitoring.Metrics, sdsCache *ca
 return nil
 }

-func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, VGName string, log logger.Logger) error {
+func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, vgName string, log logger.Logger) error {
 for _, pvPath := range extendPVs {
 start := time.Now()
 command, err := utils.CreatePV(pvPath)
@@ -826,7 +818,7 @@ func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, VGName stri
 }

 start := time.Now()
- command, err := utils.ExtendVG(VGName, extendPVs)
+ command, err := utils.ExtendVG(vgName, extendPVs)
 metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgextend").Observe(metrics.GetEstimatedTimeInSeconds(start))
 metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgextend").Inc()
 log.Debug(command)
diff --git a/images/agent/src/pkg/kubutils/kubernetes.go b/images/agent/src/pkg/kubutils/kubernetes.go
index ed232de9..bcd95a6c 100644
--- a/images/agent/src/pkg/kubutils/kubernetes.go
+++ b/images/agent/src/pkg/kubutils/kubernetes.go
@@ -18,6 +18,7 @@ package kubutils

 import (
 "fmt"
+
 "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/clientcmd"
 )
diff --git a/images/agent/src/pkg/logger/logger.go b/images/agent/src/pkg/logger/logger.go
index 34b94245..17cdfacb 100644
--- a/images/agent/src/pkg/logger/logger.go
+++ b/images/agent/src/pkg/logger/logger.go
@@ -17,11 +17,11 @@ limitations under the License.

 package logger

 import (
- "flag"
 "fmt"
+ "strconv"
+
 "github.com/go-logr/logr"
- "k8s.io/klog/v2"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/klog/v2/textlogger"
 )

 const (
@@ -50,13 +50,12 @@ type Logger struct {
 }

 func NewLogger(level Verbosity) (*Logger, error) {
- klog.InitFlags(nil)
- if err := flag.Set("v", string(level)); err != nil {
+ v, err := strconv.Atoi(string(level))
+ if err != nil {
 return nil, err
 }
- flag.Parse()

- log := klogr.New().WithCallDepth(1)
+ log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)

 return &Logger{log: log}, nil
 }
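The logger change is the only behavioral one in the patch: klogr.New is deprecated, and the old path configured verbosity by mutating global flag state (klog.InitFlags plus flag.Set), which is fragile when more than one component initializes logging. textlogger takes the verbosity as plain data instead. A minimal standalone sketch of the new construction, assuming the same string-backed Verbosity type; newLogr is an illustrative name:

    package logger

    import (
        "fmt"
        "strconv"

        "github.com/go-logr/logr"
        "k8s.io/klog/v2/textlogger"
    )

    // Verbosity mirrors the string-backed level type used by this package.
    type Verbosity string

    // newLogr builds a logr.Logger at the requested verbosity without the
    // global flag.Set/klog.InitFlags dance the old klogr constructor needed.
    func newLogr(level Verbosity) (logr.Logger, error) {
        v, err := strconv.Atoi(string(level))
        if err != nil {
            return logr.Logger{}, fmt.Errorf("invalid verbosity %q: %w", level, err)
        }
        cfg := textlogger.NewConfig(textlogger.Verbosity(v))
        return textlogger.NewLogger(cfg).WithCallDepth(1), nil
    }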
diff --git a/images/agent/src/pkg/monitoring/monitoring.go b/images/agent/src/pkg/monitoring/monitoring.go
index b0158c35..fa638a46 100644
--- a/images/agent/src/pkg/monitoring/monitoring.go
+++ b/images/agent/src/pkg/monitoring/monitoring.go
@@ -17,11 +17,12 @@ limitations under the License.

 package monitoring

 import (
+ "strings"
+ "time"
+
 "github.com/prometheus/client_golang/prometheus"
 "k8s.io/utils/clock"
 "sigs.k8s.io/controller-runtime/pkg/metrics"
- "strings"
- "time"
 )

 const (
@@ -133,15 +134,15 @@ func (m Metrics) UtilsCommandsErrorsCount(controllerName, command string) promet
 return utilsCommandsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(command))
 }

-func (m Metrics) ApiMethodsDuration(controllerName, method string) prometheus.Observer {
+func (m Metrics) APIMethodsDuration(controllerName, method string) prometheus.Observer {
 return apiMethodsDuration.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }

-func (m Metrics) ApiMethodsExecutionCount(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsExecutionCount(controllerName, method string) prometheus.Counter {
 return apiMethodsExecutionCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }

-func (m Metrics) ApiMethodsErrors(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsErrors(controllerName, method string) prometheus.Counter {
 return apiMethodsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
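The monitoring hunks are a pure rename to the API initialism; the collectors and label sets behind the methods are untouched, so the exported metric names and existing dashboards keep working. For context, this is roughly how such a labeled counter family is declared and fetched; the variable and metric names below are illustrative, not the module's actual ones:

    package main

    import (
        "strings"

        "github.com/prometheus/client_golang/prometheus"
    )

    var apiMethodsErrorsCount = prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "api_methods_errors_total",
        Help: "Total errors returned by Kubernetes API methods.",
    }, []string{"node", "controller", "method"})

    type Metrics struct{ node string }

    // APIMethodsErrors returns the counter child for one (node, controller,
    // method) label combination; WithLabelValues creates it on first use.
    func (m Metrics) APIMethodsErrors(controllerName, method string) prometheus.Counter {
        return apiMethodsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
    }

    func main() {
        prometheus.MustRegister(apiMethodsErrorsCount)
        Metrics{node: "node-1"}.APIMethodsErrors("watcher", "GET").Inc()
    }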
LVMVolumeGroups after a retry") - return }() } log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", controller.LVMVolumeGroupDiscoverCtrlName)) diff --git a/images/agent/src/pkg/utils/commands.go b/images/agent/src/pkg/utils/commands.go index e39806f4..371ddcc8 100644 --- a/images/agent/src/pkg/utils/commands.go +++ b/images/agent/src/pkg/utils/commands.go @@ -17,16 +17,16 @@ limitations under the License. package utils import ( - "agent/internal" "bufio" "bytes" "context" "encoding/json" "fmt" + golog "log" "os/exec" "regexp" - golog "log" + "agent/internal" ) func GetBlockDevices(ctx context.Context) ([]internal.Device, string, bytes.Buffer, error) { @@ -243,8 +243,8 @@ func CreateVGShared(vgName, lvmVolumeGroupName string, pvNames []string) (string return cmd.String(), nil } -func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) { - args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)} +func CreateThinPool(thinPoolName, vgName string, size int64) (string, error) { + args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)} extendedArgs := lvmStaticExtendedArgs(args) cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) @@ -257,8 +257,8 @@ func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) { return cmd.String(), nil } -func CreateThinPoolFullVGSpace(thinPoolName, VGName string) (string, error) { - args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)} +func CreateThinPoolFullVGSpace(thinPoolName, vgName string) (string, error) { + args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)} extendedArgs := lvmStaticExtendedArgs(args) cmd := exec.Command(internal.NSENTERCmd, extendedArgs...) @@ -476,12 +476,9 @@ func unmarshalPVs(out []byte) ([]internal.PVData, error) { return nil, err } - var pvs []internal.PVData - + pvs := make([]internal.PVData, 0, len(pvR.Report)) for _, rep := range pvR.Report { - for _, pv := range rep.PV { - pvs = append(pvs, pv) - } + pvs = append(pvs, rep.PV...) } return pvs, nil @@ -494,12 +491,9 @@ func unmarshalVGs(out []byte) ([]internal.VGData, error) { return nil, err } - var vgs []internal.VGData - + vgs := make([]internal.VGData, 0, len(vgR.Report)) for _, rep := range vgR.Report { - for _, vg := range rep.VG { - vgs = append(vgs, vg) - } + vgs = append(vgs, rep.VG...) } return vgs, nil @@ -512,27 +506,19 @@ func unmarshalLVs(out []byte) ([]internal.LVData, error) { return nil, err } - var lvs []internal.LVData - + lvs := make([]internal.LVData, 0, len(lvR.Report)) for _, rep := range lvR.Report { - for _, lv := range rep.LV { - lvs = append(lvs, lv) - } + lvs = append(lvs, rep.LV...) } return lvs, nil } -func extendArgs(args []string) []string { - nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"} - return append(nsenterArgs, args...) -} - func lvmStaticExtendedArgs(args []string) []string { nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"} lvmStaticBin := []string{"--", internal.LVMCmd} - result := append(nsenterArgs, lvmStaticBin...) - return append(result, args...) + nsenterArgs = append(nsenterArgs, lvmStaticBin...) + return append(nsenterArgs, args...) 
diff --git a/images/agent/src/pkg/utils/commands.go b/images/agent/src/pkg/utils/commands.go
index e39806f4..371ddcc8 100644
--- a/images/agent/src/pkg/utils/commands.go
+++ b/images/agent/src/pkg/utils/commands.go
@@ -17,16 +17,16 @@ limitations under the License.

 package utils

 import (
- "agent/internal"
 "bufio"
 "bytes"
 "context"
 "encoding/json"
 "fmt"
+ golog "log"
 "os/exec"
 "regexp"

- golog "log"
+ "agent/internal"
 )

 func GetBlockDevices(ctx context.Context) ([]internal.Device, string, bytes.Buffer, error) {
@@ -243,8 +243,8 @@ func CreateVGShared(vgName, lvmVolumeGroupName string, pvNames []string) (string
 return cmd.String(), nil
 }

-func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) {
- args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)}
+func CreateThinPool(thinPoolName, vgName string, size int64) (string, error) {
+ args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)}

 extendedArgs := lvmStaticExtendedArgs(args)
 cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
@@ -257,8 +257,8 @@ func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) {
 return cmd.String(), nil
 }

-func CreateThinPoolFullVGSpace(thinPoolName, VGName string) (string, error) {
- args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)}
+func CreateThinPoolFullVGSpace(thinPoolName, vgName string) (string, error) {
+ args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)}

 extendedArgs := lvmStaticExtendedArgs(args)
 cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
@@ -476,12 +476,9 @@ func unmarshalPVs(out []byte) ([]internal.PVData, error) {
 return nil, err
 }

- var pvs []internal.PVData
-
+ pvs := make([]internal.PVData, 0, len(pvR.Report))
 for _, rep := range pvR.Report {
- for _, pv := range rep.PV {
- pvs = append(pvs, pv)
- }
+ pvs = append(pvs, rep.PV...)
 }

 return pvs, nil
@@ -494,12 +491,9 @@ func unmarshalVGs(out []byte) ([]internal.VGData, error) {
 return nil, err
 }

- var vgs []internal.VGData
-
+ vgs := make([]internal.VGData, 0, len(vgR.Report))
 for _, rep := range vgR.Report {
- for _, vg := range rep.VG {
- vgs = append(vgs, vg)
- }
+ vgs = append(vgs, rep.VG...)
 }

 return vgs, nil
@@ -512,27 +506,19 @@ func unmarshalLVs(out []byte) ([]internal.LVData, error) {
 return nil, err
 }

- var lvs []internal.LVData
-
+ lvs := make([]internal.LVData, 0, len(lvR.Report))
 for _, rep := range lvR.Report {
- for _, lv := range rep.LV {
- lvs = append(lvs, lv)
- }
+ lvs = append(lvs, rep.LV...)
 }

 return lvs, nil
 }

-func extendArgs(args []string) []string {
- nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"}
- return append(nsenterArgs, args...)
-}
-
 func lvmStaticExtendedArgs(args []string) []string {
 nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"}
 lvmStaticBin := []string{"--", internal.LVMCmd}
- result := append(nsenterArgs, lvmStaticBin...)
- return append(result, args...)
+ nsenterArgs = append(nsenterArgs, lvmStaticBin...)
+ return append(nsenterArgs, args...)
 }

 // filterStdErr processes a bytes.Buffer containing stderr output and filters out specific
diff --git a/images/agent/src/pkg/utils/commands_test.go b/images/agent/src/pkg/utils/commands_test.go
index 5bae6ecf..c24123a0 100644
--- a/images/agent/src/pkg/utils/commands_test.go
+++ b/images/agent/src/pkg/utils/commands_test.go
@@ -17,12 +17,11 @@ limitations under the License.

 package utils

 import (
- "agent/internal"
 "testing"

- "k8s.io/apimachinery/pkg/api/resource"
-
+ "agent/internal"
 "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/api/resource"
 )

 func TestCommands(t *testing.T) {
@@ -216,7 +215,7 @@ func TestCommands(t *testing.T) {
 expectedVGs := internal.VG{VG: []internal.VGData{
 {
 VGName: "test-vg",
- VGUuid: "P14t8J-nfUE-hryT-LiTv-JdFD-Wqxg-R8taCa",
+ VGUUID: "P14t8J-nfUE-hryT-LiTv-JdFD-Wqxg-R8taCa",
 VGTags: "test-tag",
 VGSize: size2G,
 VGShared: "test-shared",
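The unmarshal refactors above replace an element-by-element inner loop with one variadic append and pre-size the destination slice. One nuance: len(pvR.Report) counts reports, not PVs, so the capacity hint is only a lower bound; that is still strictly better than growing from a nil slice. A runnable sketch of the pattern, with simplified stand-ins for the internal types and the lvm JSON report shape:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type pvData struct {
        PVName string `json:"pv_name"`
    }

    type pvReport struct {
        Report []struct {
            PV []pvData `json:"pv"`
        } `json:"report"`
    }

    func unmarshalPVs(out []byte) ([]pvData, error) {
        var pvR pvReport
        if err := json.Unmarshal(out, &pvR); err != nil {
            return nil, err
        }
        // Pre-sizing with len(pvR.Report) avoids some growth, but each
        // report may hold many PVs, so it is only a lower bound.
        pvs := make([]pvData, 0, len(pvR.Report))
        for _, rep := range pvR.Report {
            pvs = append(pvs, rep.PV...) // variadic append replaces the inner loop
        }
        return pvs, nil
    }

    func main() {
        out := []byte(`{"report":[{"pv":[{"pv_name":"/dev/sda"},{"pv_name":"/dev/sdb"}]}]}`)
        pvs, err := unmarshalPVs(out)
        fmt.Println(pvs, err)
    }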