diff --git a/images/agent/src/cmd/main.go b/images/agent/src/cmd/main.go index 1a8e81bb..dfceb535 100644 --- a/images/agent/src/cmd/main.go +++ b/images/agent/src/cmd/main.go @@ -27,7 +27,6 @@ import ( sv1 "k8s.io/api/storage/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" - apiruntime "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -37,6 +36,9 @@ import ( "agent/pkg/cache" "agent/pkg/controller" "agent/pkg/controller/bd" + "agent/pkg/controller/llv" + "agent/pkg/controller/llv_extender" + "agent/pkg/controller/lvg" "agent/pkg/kubutils" "agent/pkg/logger" "agent/pkg/monitoring" @@ -45,7 +47,7 @@ import ( ) var ( - resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{ + resourcesSchemeFuncs = []func(*runtime.Scheme) error{ v1alpha1.AddToScheme, clientgoscheme.AddToScheme, extv1.AddToScheme, @@ -112,7 +114,7 @@ func main() { metrics := monitoring.GetMetrics(cfgParams.NodeName) log.Info("[main] ReTag starts") - if err := utils.ReTag(ctx, *log, metrics, bd.Name); err != nil { + if err := utils.ReTag(ctx, *log, metrics, bd.DiscovererName); err != nil { log.Error(err, "[main] unable to run ReTag") } @@ -138,30 +140,91 @@ func main() { os.Exit(1) } - if _, err = controller.RunLVMVolumeGroupWatcherController(mgr, *cfgParams, *log, metrics, sdsCache); err != nil { - log.Error(err, "[main] unable to controller.RunLVMVolumeGroupWatcherController") + rediscoverLVGs, err := controller.AddDiscoverer( + mgr, + *log, + lvg.NewDiscoverer( + mgr.GetClient(), + *log, + metrics, + sdsCache, + lvg.DiscovererOptions{ + NodeName: cfgParams.NodeName, + VolumeGroupScanInterval: cfgParams.VolumeGroupScanIntervalSec, + }, + ), + ) + if err != nil { + log.Error(err, "[main] unable to controller.RunLVMVolumeGroupDiscoverController") os.Exit(1) } - lvgDiscoverCtrl, err := controller.RunLVMVolumeGroupDiscoverController(mgr, *cfgParams, *log, metrics, sdsCache) + err = controller.AddReconciler( + mgr, + *log, + lvg.NewReconciler( + mgr.GetClient(), + *log, + metrics, + sdsCache, + lvg.ReconcilerOptions{ + NodeName: cfgParams.NodeName, + VolumeGroupScanInterval: cfgParams.VolumeGroupScanIntervalSec, + BlockDeviceScanInterval: cfgParams.BlockDeviceScanIntervalSec, + }, + ), + ) if err != nil { - log.Error(err, "[main] unable to controller.RunLVMVolumeGroupDiscoverController") + log.Error(err, "[main] unable to controller.RunLVMVolumeGroupWatcherController") os.Exit(1) } go func() { - if err = scanner.RunScanner(ctx, *log, *cfgParams, sdsCache, rediscoverBlockDevices, lvgDiscoverCtrl); err != nil { + if err = scanner.RunScanner( + ctx, + *log, + *cfgParams, + sdsCache, + rediscoverBlockDevices, + rediscoverLVGs, + ); err != nil { log.Error(err, "[main] unable to run scanner") os.Exit(1) } }() - if _, err = controller.RunLVMLogicalVolumeWatcherController(mgr, *cfgParams, *log, metrics, sdsCache); err != nil { - log.Error(err, "[main] unable to controller.RunLVMLogicalVolumeWatcherController") + err = controller.AddReconciler( + mgr, + *log, + llv.NewReconciler( + mgr.GetClient(), + *log, + metrics, + sdsCache, + llv.ReconcilerOptions{ + NodeName: cfgParams.NodeName, + VolumeGroupScanInterval: cfgParams.VolumeGroupScanIntervalSec, + Loglevel: cfgParams.Loglevel, + LLVRequeueInterval: cfgParams.LLVRequeueIntervalSec, + }, + ), + ) + if err != nil { + log.Error(err, "[main] unable to controller.RunLVMVolumeGroupWatcherController") os.Exit(1) 
} - if err = controller.RunLVMLogicalVolumeExtenderWatcherController(mgr, *cfgParams, *log, metrics, sdsCache); err != nil { + err = controller.AddReconciler(mgr, *log, llv_extender.NewReconciler( + mgr.GetClient(), + *log, + metrics, + sdsCache, + llv_extender.ReconcilerOptions{ + NodeName: cfgParams.NodeName, + VolumeGroupScanInterval: cfgParams.VolumeGroupScanIntervalSec, + }, + )) + if err != nil { log.Error(err, "[main] unable to controller.RunLVMLogicalVolumeExtenderWatcherController") os.Exit(1) } diff --git a/images/agent/src/pkg/controller/bd/discoverer.go b/images/agent/src/pkg/controller/bd/discoverer.go index 9787adc7..d5bf1efe 100644 --- a/images/agent/src/pkg/controller/bd/discoverer.go +++ b/images/agent/src/pkg/controller/bd/discoverer.go @@ -4,6 +4,8 @@ import ( "agent/internal" "agent/pkg/cache" "agent/pkg/controller" + "agent/pkg/controller/clients" + "agent/pkg/controller/utils" "agent/pkg/logger" "agent/pkg/monitoring" "context" @@ -20,15 +22,15 @@ import ( "github.com/gosimple/slug" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" ) -const Name = "block-device-controller" +const DiscovererName = "block-device-controller" type Discoverer struct { cl client.Client log logger.Logger + bdCl *clients.BDClient metrics monitoring.Metrics sdsCache *cache.Cache opts Options @@ -50,6 +52,7 @@ func NewDiscoverer( return &Discoverer{ cl: cl, log: log, + bdCl: clients.NewBDClient(cl, metrics), metrics: metrics, sdsCache: sdsCache, opts: opts, @@ -57,7 +60,7 @@ func NewDiscoverer( } func (d *Discoverer) Name() string { - return Name + return DiscovererName } func (d *Discoverer) Discover(ctx context.Context) (controller.Result, error) { @@ -83,7 +86,7 @@ func (d *Discoverer) blockDeviceReconcile(ctx context.Context) bool { return false } - apiBlockDevices, err := d.getAPIBlockDevices(ctx, nil) + apiBlockDevices, err := d.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) if err != nil { d.log.Error(err, "[RunBlockDeviceController] unable to GetAPIBlockDevices") return true @@ -126,43 +129,12 @@ func (d *Discoverer) blockDeviceReconcile(ctx context.Context) bool { d.removeDeprecatedAPIDevices(ctx, candidates, apiBlockDevices) d.log.Info("[RunBlockDeviceController] END reconcile of block devices") - d.metrics.ReconcileDuration(Name).Observe(d.metrics.GetEstimatedTimeInSeconds(reconcileStart)) - d.metrics.ReconcilesCountTotal(Name).Inc() + d.metrics.ReconcileDuration(DiscovererName).Observe(d.metrics.GetEstimatedTimeInSeconds(reconcileStart)) + d.metrics.ReconcilesCountTotal(DiscovererName).Inc() return false } -// getAPIBlockDevices returns map of BlockDevice resources with BlockDevice as a key. You might specify a selector to get a subset or -// leave it as nil to get all the resources. 
-func (d *Discoverer) getAPIBlockDevices( - ctx context.Context, - selector *metav1.LabelSelector, -) (map[string]v1alpha1.BlockDevice, error) { - list := &v1alpha1.BlockDeviceList{} - s, err := metav1.LabelSelectorAsSelector(selector) - if err != nil { - return nil, err - } - if s == labels.Nothing() { - s = nil - } - start := time.Now() - err = d.cl.List(ctx, list, &client.ListOptions{LabelSelector: s}) - d.metrics.APIMethodsDuration(Name, "list").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) - d.metrics.APIMethodsExecutionCount(Name, "list").Inc() - if err != nil { - d.metrics.APIMethodsErrors(Name, "list").Inc() - return nil, err - } - - result := make(map[string]v1alpha1.BlockDevice, len(list.Items)) - for _, item := range list.Items { - result[item.Name] = item - } - - return result, nil -} - func (d *Discoverer) removeDeprecatedAPIDevices( ctx context.Context, candidates []internal.BlockDeviceCandidate, @@ -250,7 +222,7 @@ func (d *Discoverer) getBlockDeviceCandidates() []internal.BlockDeviceCandidate if pv.PVName == device.Name { d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] The device is a PV. Found PV name: %s", pv.PVName)) if candidate.FSType == internal.LVMFSType { - hasTag, lvmVGName := checkTag(pv.VGTags) + hasTag, lvmVGName := utils.CheckTag(pv.VGTags) if hasTag { d.log.Debug(fmt.Sprintf("[GetBlockDeviceCandidates] PV %s of BlockDevice %s has tag, fill the VG information", pv.PVName, candidate.Name)) candidate.PVUuid = pv.PVUuid @@ -398,10 +370,10 @@ func (d *Discoverer) updateAPIBlockDevice( start := time.Now() err := d.cl.Update(ctx, &blockDevice) - d.metrics.APIMethodsDuration(Name, "update").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) - d.metrics.APIMethodsExecutionCount(Name, "update").Inc() + d.metrics.APIMethodsDuration(DiscovererName, "update").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) + d.metrics.APIMethodsExecutionCount(DiscovererName, "update").Inc() if err != nil { - d.metrics.APIMethodsErrors(Name, "update").Inc() + d.metrics.APIMethodsErrors(DiscovererName, "update").Inc() return err } @@ -437,10 +409,10 @@ func (d *Discoverer) createAPIBlockDevice(ctx context.Context, candidate interna start := time.Now() err := d.cl.Create(ctx, blockDevice) - d.metrics.APIMethodsDuration(Name, "create").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) - d.metrics.APIMethodsExecutionCount(Name, "create").Inc() + d.metrics.APIMethodsDuration(DiscovererName, "create").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) + d.metrics.APIMethodsExecutionCount(DiscovererName, "create").Inc() if err != nil { - d.metrics.APIMethodsErrors(Name, "create").Inc() + d.metrics.APIMethodsErrors(DiscovererName, "create").Inc() return nil, err } return blockDevice, nil @@ -449,10 +421,10 @@ func (d *Discoverer) createAPIBlockDevice(ctx context.Context, candidate interna func (d *Discoverer) deleteAPIBlockDevice(ctx context.Context, device *v1alpha1.BlockDevice) error { start := time.Now() err := d.cl.Delete(ctx, device) - d.metrics.APIMethodsDuration(Name, "delete").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) - d.metrics.APIMethodsExecutionCount(Name, "delete").Inc() + d.metrics.APIMethodsDuration(DiscovererName, "delete").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) + d.metrics.APIMethodsExecutionCount(DiscovererName, "delete").Inc() if err != nil { - d.metrics.APIMethodsErrors(Name, "delete").Inc() + d.metrics.APIMethodsErrors(DiscovererName, "delete").Inc() return err } return nil @@ -577,22 +549,6 @@ func checkConsumable(device 
internal.Device) bool { return true } -func checkTag(tags string) (bool, string) { - if !strings.Contains(tags, internal.LVMTags[0]) { - return false, "" - } - - splitTags := strings.Split(tags, ",") - for _, tag := range splitTags { - if strings.HasPrefix(tag, "storage.deckhouse.io/lvmVolumeGroupName") { - kv := strings.Split(tag, "=") - return true, kv[1] - } - } - - return true, "" -} - func createUniqDeviceName(can internal.BlockDeviceCandidate) string { temp := fmt.Sprintf("%s%s%s%s%s", can.NodeName, can.Wwn, can.Model, can.Serial, can.PartUUID) s := fmt.Sprintf("dev-%x", sha1.Sum([]byte(temp))) diff --git a/images/agent/src/pkg/controller/bd/discoverer_suite_test.go b/images/agent/src/pkg/controller/bd/discoverer_suite_test.go index 77edac5c..f5c3c104 100644 --- a/images/agent/src/pkg/controller/bd/discoverer_suite_test.go +++ b/images/agent/src/pkg/controller/bd/discoverer_suite_test.go @@ -85,7 +85,7 @@ var _ = Describe("Storage Controller", func() { }) It("GetAPIBlockDevices", func() { - listDevice, err := r.getAPIBlockDevices(ctx, nil) + listDevice, err := r.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) Expect(err).NotTo(HaveOccurred()) Expect(listDevice).NotTo(BeNil()) Expect(len(listDevice)).To(Equal(1)) @@ -118,7 +118,7 @@ var _ = Describe("Storage Controller", func() { MachineID: "1234", } - resources, err := r.getAPIBlockDevices(ctx, nil) + resources, err := r.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) Expect(err).NotTo(HaveOccurred()) Expect(resources).NotTo(BeNil()) Expect(len(resources)).To(Equal(1)) @@ -130,7 +130,7 @@ var _ = Describe("Storage Controller", func() { err = r.updateAPIBlockDevice(ctx, oldResource, newCandidate) Expect(err).NotTo(HaveOccurred()) - resources, err = r.getAPIBlockDevices(ctx, nil) + resources, err = r.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) Expect(err).NotTo(HaveOccurred()) Expect(resources).NotTo(BeNil()) Expect(len(resources)).To(Equal(1)) @@ -150,7 +150,7 @@ var _ = Describe("Storage Controller", func() { }) Expect(err).NotTo(HaveOccurred()) - devices, err := r.getAPIBlockDevices(context.Background(), nil) + devices, err := r.bdCl.GetAPIBlockDevices(context.Background(), DiscovererName, nil) Expect(err).NotTo(HaveOccurred()) for name := range devices { Expect(name).NotTo(Equal(deviceName)) diff --git a/images/agent/src/pkg/controller/bd/discoverer_test.go b/images/agent/src/pkg/controller/bd/discoverer_test.go index fd3fa875..ad41d2f1 100644 --- a/images/agent/src/pkg/controller/bd/discoverer_test.go +++ b/images/agent/src/pkg/controller/bd/discoverer_test.go @@ -33,6 +33,7 @@ import ( "agent/internal" "agent/pkg/cache" + cutils "agent/pkg/controller/utils" "agent/pkg/logger" "agent/pkg/monitoring" "agent/pkg/test_utils" @@ -127,7 +128,7 @@ func TestBlockDeviceCtrl(t *testing.T) { }, } - actualBd, err := r.getAPIBlockDevices(ctx, lvg.Spec.BlockDeviceSelector) + actualBd, err := r.bdCl.GetAPIBlockDevices(ctx, DiscovererName, lvg.Spec.BlockDeviceSelector) if assert.NoError(t, err) { assert.Equal(t, 2, len(actualBd)) @@ -202,7 +203,7 @@ func TestBlockDeviceCtrl(t *testing.T) { }, } - actualBd, err := r.getAPIBlockDevices(ctx, lvg.Spec.BlockDeviceSelector) + actualBd, err := r.bdCl.GetAPIBlockDevices(ctx, DiscovererName, lvg.Spec.BlockDeviceSelector) if assert.NoError(t, err) { assert.Equal(t, 2, len(actualBd)) @@ -283,7 +284,7 @@ func TestBlockDeviceCtrl(t *testing.T) { }, } - actualBd, err := r.getAPIBlockDevices(ctx, lvg.Spec.BlockDeviceSelector) + actualBd, err := r.bdCl.GetAPIBlockDevices(ctx, DiscovererName, 
lvg.Spec.BlockDeviceSelector) if assert.NoError(t, err) { assert.Equal(t, 2, len(actualBd)) _, ok := actualBd[name1] @@ -538,7 +539,7 @@ func TestBlockDeviceCtrl(t *testing.T) { expectedName := "testName" tags := fmt.Sprintf("storage.deckhouse.io/enabled=true,storage.deckhouse.io/lvmVolumeGroupName=%s", expectedName) - shouldBeTrue, actualName := checkTag(tags) + shouldBeTrue, actualName := cutils.CheckTag(tags) if assert.True(t, shouldBeTrue) { assert.Equal(t, expectedName, actualName) } @@ -547,7 +548,7 @@ func TestBlockDeviceCtrl(t *testing.T) { t.Run("Haven't tag_Returns false and empty", func(t *testing.T) { tags := "someWeirdTags=oMGwtFIsThis" - shouldBeFalse, actualName := checkTag(tags) + shouldBeFalse, actualName := cutils.CheckTag(tags) if assert.False(t, shouldBeFalse) { assert.Equal(t, "", actualName) } diff --git a/images/agent/src/pkg/controller/clients/bd_client.go b/images/agent/src/pkg/controller/clients/bd_client.go new file mode 100644 index 00000000..bbcd360b --- /dev/null +++ b/images/agent/src/pkg/controller/clients/bd_client.go @@ -0,0 +1,56 @@ +package clients + +import ( + "agent/pkg/monitoring" + "context" + "time" + + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type BDClient struct { + cl client.Client + metrics monitoring.Metrics +} + +func NewBDClient(cl client.Client, metrics monitoring.Metrics) *BDClient { + return &BDClient{ + cl: cl, + metrics: metrics, + } +} + +// GetAPIBlockDevices returns map of BlockDevice resources with BlockDevice as a key. You might specify a selector to get a subset or +// leave it as nil to get all the resources. +func (bdCl *BDClient) GetAPIBlockDevices( + ctx context.Context, + controllerName string, + selector *metav1.LabelSelector, +) (map[string]v1alpha1.BlockDevice, error) { + list := &v1alpha1.BlockDeviceList{} + s, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + if s == labels.Nothing() { + s = nil + } + start := time.Now() + err = bdCl.cl.List(ctx, list, &client.ListOptions{LabelSelector: s}) + bdCl.metrics.APIMethodsDuration(controllerName, "list").Observe(bdCl.metrics.GetEstimatedTimeInSeconds(start)) + bdCl.metrics.APIMethodsExecutionCount(controllerName, "list").Inc() + if err != nil { + bdCl.metrics.APIMethodsErrors(controllerName, "list").Inc() + return nil, err + } + + result := make(map[string]v1alpha1.BlockDevice, len(list.Items)) + for _, item := range list.Items { + result[item.Name] = item + } + + return result, nil +} diff --git a/images/agent/src/pkg/controller/clients/llv_client.go b/images/agent/src/pkg/controller/clients/llv_client.go new file mode 100644 index 00000000..6672eff3 --- /dev/null +++ b/images/agent/src/pkg/controller/clients/llv_client.go @@ -0,0 +1,93 @@ +package clients + +import ( + cutils "agent/pkg/controller/utils" + "agent/pkg/logger" + "context" + "fmt" + + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type LLVClient struct { + cl client.Client + log logger.Logger +} + +func NewLLVClient( + cl client.Client, + log logger.Logger, +) *LLVClient { + return &LLVClient{ + cl: cl, + log: log, + } +} + +func (llvCl *LLVClient) UpdatePhaseIfNeeded( + ctx context.Context, + llv *v1alpha1.LVMLogicalVolume, + phase string, + reason string, +) error { + if llv.Status != nil && + llv.Status.Phase == phase 
&& + llv.Status.Reason == reason { + llvCl.log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] no need to update the LVMLogicalVolume %s phase and reason", llv.Name)) + return nil + } + + if llv.Status == nil { + llv.Status = new(v1alpha1.LVMLogicalVolumeStatus) + } + + llv.Status.Phase = phase + llv.Status.Reason = reason + + llvCl.log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] tries to update the LVMLogicalVolume %s status with phase: %s, reason: %s", llv.Name, phase, reason)) + err := llvCl.cl.Status().Update(ctx, llv) + if err != nil { + return err + } + + llvCl.log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] updated LVMLogicalVolume %s status.phase to %s and reason to %s", llv.Name, phase, reason)) + return nil +} + +func (llvCl *LLVClient) UpdatePhaseToCreatedIfNeeded( + ctx context.Context, + llv *v1alpha1.LVMLogicalVolume, + actualSize resource.Quantity, +) error { + var contiguous *bool + if llv.Spec.Thick != nil { + if *llv.Spec.Thick.Contiguous { + contiguous = llv.Spec.Thick.Contiguous + } + } + + updateNeeded := llv.Status.Phase != cutils.LLVStatusPhaseCreated || + llv.Status.ActualSize.Value() != actualSize.Value() || + llv.Status.Reason != "" || + llv.Status.Contiguous != contiguous + + if !updateNeeded { + llvCl.log.Info(fmt.Sprintf("[UpdatePhaseToCreatedIfNeeded] no need to update the LVMLogicalVolume %s", llv.Name)) + return nil + } + + llv.Status.Phase = cutils.LLVStatusPhaseCreated + llv.Status.Reason = "" + llv.Status.ActualSize = actualSize + llv.Status.Contiguous = contiguous + err := llvCl.cl.Status().Update(ctx, llv) + if err != nil { + llvCl.log.Error(err, fmt.Sprintf("[UpdatePhaseToCreatedIfNeeded] unable to update the LVMLogicalVolume %s", llv.Name)) + return err + } + + llvCl.log.Info(fmt.Sprintf("[UpdatePhaseToCreatedIfNeeded] the LVMLogicalVolume %s was successfully updated", llv.Name)) + return nil +} diff --git a/images/agent/src/pkg/controller/clients/lvg_client.go b/images/agent/src/pkg/controller/clients/lvg_client.go new file mode 100644 index 00000000..eef57dda --- /dev/null +++ b/images/agent/src/pkg/controller/clients/lvg_client.go @@ -0,0 +1,141 @@ +package clients + +import ( + "agent/pkg/logger" + "agent/pkg/monitoring" + "context" + "fmt" + "time" + + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type LVGClient struct { + cl client.Client + log logger.Logger + currentNodeName, controllerName string + metrics monitoring.Metrics +} + +func NewLVGClient( + cl client.Client, + log logger.Logger, + metrics monitoring.Metrics, + currentNodeName string, + controllerName string, +) *LVGClient { + return &LVGClient{ + cl: cl, + log: log, + metrics: metrics, + currentNodeName: currentNodeName, + controllerName: controllerName, + } +} + +func (lvgCl *LVGClient) GetLVMVolumeGroup(ctx context.Context, name string) (*v1alpha1.LVMVolumeGroup, error) { + obj := &v1alpha1.LVMVolumeGroup{} + start := time.Now() + err := lvgCl.cl.Get(ctx, client.ObjectKey{ + Name: name, + }, obj) + lvgCl.metrics.APIMethodsDuration(lvgCl.controllerName, "get").Observe(lvgCl.metrics.GetEstimatedTimeInSeconds(start)) + lvgCl.metrics.APIMethodsExecutionCount(lvgCl.controllerName, "get").Inc() + if err != nil { + lvgCl.metrics.APIMethodsErrors(lvgCl.controllerName, "get").Inc() + return nil, err + } + return obj, nil +} + +func (lvgCl *LVGClient) UpdateLVGConditionIfNeeded( + ctx context.Context, + lvg *v1alpha1.LVMVolumeGroup, + status 
v1.ConditionStatus, + conType, reason, message string, +) error { + exist := false + index := 0 + newCondition := v1.Condition{ + Type: conType, + Status: status, + ObservedGeneration: lvg.Generation, + LastTransitionTime: v1.NewTime(time.Now()), + Reason: reason, + Message: message, + } + + if lvg.Status.Conditions == nil { + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] the LVMVolumeGroup %s conditions is nil. Initialize them", lvg.Name)) + lvg.Status.Conditions = make([]v1.Condition, 0, 5) + } + + if len(lvg.Status.Conditions) > 0 { + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] there are some conditions in the LVMVolumeGroup %s. Tries to find a condition %s", lvg.Name, conType)) + for i, c := range lvg.Status.Conditions { + if c.Type == conType { + if checkIfEqualConditions(c, newCondition) { + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no need to update condition %s in the LVMVolumeGroup %s as new and old condition states are the same", conType, lvg.Name)) + return nil + } + + index = i + exist = true + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was found in the LVMVolumeGroup %s at the index %d", conType, lvg.Name, i)) + } + } + + if !exist { + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was not found. Append it in the end of the LVMVolumeGroup %s conditions", conType, lvg.Name)) + lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) + } else { + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] insert the condition %s status %s reason %s message %s at index %d of the LVMVolumeGroup %s conditions", conType, status, reason, message, index, lvg.Name)) + lvg.Status.Conditions[index] = newCondition + } + } else { + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no conditions were found in the LVMVolumeGroup %s. Append the condition %s in the end", lvg.Name, conType)) + lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) + } + + lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] tries to update the condition type %s status %s reason %s message %s of the LVMVolumeGroup %s", conType, status, reason, message, lvg.Name)) + return lvgCl.cl.Status().Update(ctx, lvg) +} + +func (lvgCl *LVGClient) DeleteLVMVolumeGroup( + ctx context.Context, + lvg *v1alpha1.LVMVolumeGroup, +) error { + lvgCl.log.Debug(fmt.Sprintf(`[DeleteLVMVolumeGroup] Node "%s" does not belong to VG "%s". It will be removed from LVM resource, name "%s"'`, lvgCl.currentNodeName, lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + for i, node := range lvg.Status.Nodes { + if node.Name == lvgCl.currentNodeName { + // delete node + lvg.Status.Nodes = append(lvg.Status.Nodes[:i], lvg.Status.Nodes[i+1:]...) + lvgCl.log.Info(fmt.Sprintf(`[DeleteLVMVolumeGroup] deleted node "%s" from LVMVolumeGroup "%s"`, node.Name, lvg.Name)) + } + } + + // If current LVMVolumeGroup has no nodes left, delete it. 
+ if len(lvg.Status.Nodes) == 0 { + start := time.Now() + err := lvgCl.cl.Delete(ctx, lvg) + lvgCl.metrics.APIMethodsDuration(lvgCl.controllerName, "delete").Observe(lvgCl.metrics.GetEstimatedTimeInSeconds(start)) + lvgCl.metrics.APIMethodsExecutionCount(lvgCl.controllerName, "delete").Inc() + if err != nil { + lvgCl.metrics.APIMethodsErrors(lvgCl.controllerName, "delete").Inc() + return err + } + lvgCl.log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name)) + } + + return nil +} + +func checkIfEqualConditions(first, second v1.Condition) bool { + return first.Type == second.Type && + first.Status == second.Status && + first.Reason == second.Reason && + first.Message == second.Message && + first.ObservedGeneration == second.ObservedGeneration +} diff --git a/images/agent/src/pkg/controller/const.go b/images/agent/src/pkg/controller/const.go deleted file mode 100644 index a21f8ba8..00000000 --- a/images/agent/src/pkg/controller/const.go +++ /dev/null @@ -1,11 +0,0 @@ -package controller - -const ( - CreateReconcile reconcileType = "Create" - UpdateReconcile reconcileType = "Update" - DeleteReconcile reconcileType = "Delete" -) - -type ( - reconcileType string -) diff --git a/images/agent/src/pkg/controller/controller.go b/images/agent/src/pkg/controller/controller.go index 568ea4ba..dfb6b1d0 100644 --- a/images/agent/src/pkg/controller/controller.go +++ b/images/agent/src/pkg/controller/controller.go @@ -34,6 +34,7 @@ type Named interface { type Reconciler[T client.Object] interface { Named + MaxConcurrentReconciles() int ShouldReconcileUpdate(objectOld T, objectNew T) bool Reconcile(context.Context, ReconcileRequest[T]) (Result, error) } @@ -65,7 +66,8 @@ func AddReconciler[T client.Object]( reconciler.Name(), mgr, controller.Options{ - Reconciler: makeReconcileDispatcher(mgr, log, reconciler), + Reconciler: makeReconcileDispatcher(mgr, log, reconciler), + MaxConcurrentReconciles: reconciler.MaxConcurrentReconciles(), }, ) if err != nil { @@ -138,13 +140,11 @@ func AddDiscoverer( func makeDiscovererDispatcher(log logger.Logger, discoverer Discoverer) reconcile.Func { return reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[DiscovererDispatcher] Discoverer starts")) + log.Info(fmt.Sprintf("[DiscovererDispatcher] %s discoverer starts", discoverer.Name())) result, err := discoverer.Discover(ctx) - return reconcile.Result{ - RequeueAfter: result.RequeueAfter, - }, err + return reconcile.Result{RequeueAfter: result.RequeueAfter}, err }) } @@ -155,14 +155,9 @@ func makeReconcileDispatcher[T client.Object]( ) reconcile.TypedReconciler[reconcile.Request] { cl := mgr.GetClient() return reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - // load object being reconciled log.Info(fmt.Sprintf("[ReconcileDispatcher] Reconciler starts to reconcile the request %s", req.NamespacedName.String())) - if req.Name == "" { - - } - var obj T if err := cl.Get(ctx, req.NamespacedName, obj); err != nil { if errors.IsNotFound(err) { diff --git a/images/agent/src/pkg/controller/llv/reconciler.go b/images/agent/src/pkg/controller/llv/reconciler.go new file mode 100644 index 00000000..e9b132b3 --- /dev/null +++ b/images/agent/src/pkg/controller/llv/reconciler.go @@ -0,0 +1,709 @@ +package llv + +import ( + "agent/internal" + "agent/pkg/cache" + "agent/pkg/controller" + "agent/pkg/controller/clients" + cutils "agent/pkg/controller/utils" + "agent/pkg/logger" + "agent/pkg/monitoring" + 
"agent/pkg/utils" + "context" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" + "github.com/google/go-cmp/cmp" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/strings/slices" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ReconcilerName = "lvm-logical-volume-watcher-controller" + +// TODO: remove +const ( + CreateReconcile reconcileType = "Create" + UpdateReconcile reconcileType = "Update" + DeleteReconcile reconcileType = "Delete" +) + +type ( + reconcileType string +) + +type Reconciler struct { + cl client.Client + log logger.Logger + lvgCl *clients.LVGClient + llvCl *clients.LLVClient + metrics monitoring.Metrics + sdsCache *cache.Cache + opts ReconcilerOptions +} + +type ReconcilerOptions struct { + NodeName string + Loglevel logger.Verbosity + VolumeGroupScanInterval time.Duration + LLVRequeueInterval time.Duration +} + +func NewReconciler( + cl client.Client, + log logger.Logger, + metrics monitoring.Metrics, + sdsCache *cache.Cache, + opts ReconcilerOptions, +) *Reconciler { + return &Reconciler{ + cl: cl, + log: log, + lvgCl: clients.NewLVGClient( + cl, + log, + metrics, + opts.NodeName, + ReconcilerName, + ), + llvCl: clients.NewLLVClient( + cl, log, + ), + metrics: metrics, + sdsCache: sdsCache, + opts: opts, + } +} + +// Name implements controller.Reconciler. +func (r *Reconciler) Name() string { + return ReconcilerName +} + +func (r *Reconciler) MaxConcurrentReconciles() int { + return 10 +} + +// ShouldReconcileUpdate implements controller.Reconciler. +func (r *Reconciler) ShouldReconcileUpdate(objectOld *v1alpha1.LVMLogicalVolume, objectNew *v1alpha1.LVMLogicalVolume) bool { + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got an update event for the LVMLogicalVolume: %s", objectNew.GetName())) + + // TODO: Figure out how to log it in our logger. + if r.opts.Loglevel == "4" { + fmt.Println("==============START DIFF==================") + fmt.Println(cmp.Diff(objectOld, objectNew)) + fmt.Println("==============END DIFF==================") + } + + if reflect.DeepEqual(objectOld.Spec, objectNew.Spec) && objectNew.DeletionTimestamp == nil { + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] no target changes were made for the LVMLogicalVolume %s. No need to reconcile the request", objectNew.Name)) + return false + } + + return true +} + +// Reconcile implements controller.Reconciler. +func (r *Reconciler) Reconcile( + ctx context.Context, + req controller.ReconcileRequest[*v1alpha1.LVMLogicalVolume], +) (controller.Result, error) { + llv := req.Object + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] Reconciler starts reconciliation of the LVMLogicalVolume: %s", llv.Name)) + + lvg, err := r.lvgCl.GetLVMVolumeGroup(ctx, llv.Spec.LVMVolumeGroupName) + if err != nil { + if k8serr.IsNotFound(err) { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMVolumeGroup %s not found for LVMLogicalVolume %s. 
Retry in %s", llv.Spec.LVMVolumeGroupName, llv.Name, r.opts.VolumeGroupScanInterval.String())) + err = r.llvCl.UpdatePhaseIfNeeded( + ctx, + llv, + cutils.LLVStatusPhaseFailed, + fmt.Sprintf("LVMVolumeGroup %s not found", llv.Spec.LVMVolumeGroupName), + ) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) + return controller.Result{}, err + } + + return controller.Result{ + RequeueAfter: r.opts.VolumeGroupScanInterval, + }, nil + } + + err = r.llvCl.UpdatePhaseIfNeeded( + ctx, + llv, + cutils.LLVStatusPhaseFailed, + fmt.Sprintf("Unable to get selected LVMVolumeGroup, err: %s", err.Error()), + ) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) + } + return controller.Result{}, err + } + + if !cutils.LVGBelongsToNode(lvg, r.opts.NodeName) { + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMVolumeGroup %s of the LVMLogicalVolume %s does not belongs to the current node: %s. Reconciliation stopped", lvg.Name, llv.Name, r.opts.NodeName)) + return controller.Result{}, nil + } + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMVolumeGroup %s of the LVMLogicalVolume %s belongs to the current node: %s. Reconciliation continues", lvg.Name, llv.Name, r.opts.NodeName)) + + // this case prevents the unexpected behavior when the controller runs up with existing LVMLogicalVolumes + if vgs, _ := r.sdsCache.GetVGs(); len(vgs) == 0 { + r.log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to reconcile the request as no VG was found in the cache. Retry in %s", r.opts.VolumeGroupScanInterval.String())) + return controller.Result{RequeueAfter: r.opts.VolumeGroupScanInterval}, nil + } + + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + added, err := r.addLLVFinalizerIfNotExist(ctx, llv) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) + return controller.Result{}, err + } + if added { + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] successfully added the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + } else { + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] no need to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + } + + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts to validate the LVMLogicalVolume %s", llv.Name)) + valid, reason := r.validateLVMLogicalVolume(llv, lvg) + if !valid { + r.log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMLogicalVolume %s is not valid, reason: %s", llv.Name, reason)) + err = r.llvCl.UpdatePhaseIfNeeded(ctx, llv, cutils.LLVStatusPhaseFailed, reason) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) + return controller.Result{}, err + } + + return controller.Result{}, nil + } + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] successfully validated the LVMLogicalVolume %s", llv.Name)) + + shouldRequeue, err := r.ReconcileLVMLogicalVolume(ctx, llv, lvg) + if err != nil { + r.log.Error(err, fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] an error occurred while reconciling the LVMLogicalVolume: %s", llv.Name)) + updErr := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, 
cutils.LLVStatusPhaseFailed, err.Error()) + if updErr != nil { + r.log.Error(updErr, fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to update the LVMLogicalVolume %s", llv.Name)) + return controller.Result{}, updErr + } + } + if shouldRequeue { + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues occurred while reconciling the LVMLogicalVolume %s. Requeue the request in %s", llv.Name, r.opts.LLVRequeueInterval.String())) + return controller.Result{RequeueAfter: r.opts.LLVRequeueInterval}, nil + } + + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] successfully ended reconciliation of the LVMLogicalVolume %s", llv.Name)) + return controller.Result{}, nil +} + +func (r *Reconciler) ReconcileLVMLogicalVolume(ctx context.Context, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts the reconciliation for the LVMLogicalVolume %s", llv.Name)) + + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to identify the reconciliation type for the LVMLogicalVolume %s", llv.Name)) + r.log.Trace(fmt.Sprintf("[ReconcileLVMLogicalVolume] %+v", llv)) + + switch r.identifyReconcileFunc(lvg.Spec.ActualVGNameOnTheNode, llv) { + case CreateReconcile: + return r.reconcileLLVCreateFunc(ctx, llv, lvg) + case UpdateReconcile: + return r.reconcileLLVUpdateFunc(ctx, llv, lvg) + case DeleteReconcile: + return r.reconcileLLVDeleteFunc(ctx, llv, lvg) + default: + r.log.Info(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s has completed configuration and should not be reconciled", llv.Name)) + if llv.Status.Phase != cutils.LLVStatusPhaseCreated { + r.log.Warning(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s should not be reconciled but has an unexpected phase: %s.
Setting the phase to %s", llv.Name, llv.Status.Phase, cutils.LLVStatusPhaseCreated)) + err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, cutils.LLVStatusPhaseCreated, "") + if err != nil { + return true, err + } + } + } + + return false, nil +} + +func (r *Reconciler) reconcileLLVCreateFunc( + ctx context.Context, + llv *v1alpha1.LVMLogicalVolume, + lvg *v1alpha1.LVMVolumeGroup, +) (bool, error) { + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) + + // this check prevents infinite resource updating after retries + if llv.Status == nil { + err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, cutils.LLVStatusPhasePending, "") + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) + return true, err + } + } + llvRequestSize, err := cutils.GetLLVRequestedSize(llv, lvg) + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) + return false, err + } + + freeSpace := cutils.GetFreeLVGSpaceForLLV(lvg, llv) + r.log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s type: %s requested size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, llvRequestSize.String(), freeSpace.String())) + + if !utils.AreSizesEqualWithinDelta(llvRequestSize, freeSpace, internal.ResizeDelta) { + if freeSpace.Value() < llvRequestSize.Value()+internal.ResizeDelta.Value() { + err = errors.New("not enough space") + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than the actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) + + // we return true cause the user might manage LVMVolumeGroup free space without changing the LLV + return true, err + } + } + + var cmd string + switch llv.Spec.Type { + case cutils.Thick: + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s will be created in VG %s with size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llvRequestSize.String())) + cmd, err = utils.CreateThickLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value(), isContiguous(llv)) + case cutils.Thin: + if llv.Spec.Source == nil { + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) + cmd, err = utils.CreateThinLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Thin.PoolName, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value()) + } else { + // volume is a clone + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] Snapshot (for source %s) LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.Source.Name, llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) + + var sourceVgName, sourceVolumeName string + if llv.Spec.Source.Kind == "LVMLogicalVolume" { + sourceLlv := &v1alpha1.LVMLogicalVolume{} + if err = r.cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceLlv); err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to find source LVMLogicalVolume %s", llv.Spec.Source.Name)) + return false, err + } + + sourceVolumeName = 
sourceLlv.Spec.ActualLVNameOnTheNode + sourceVgName = sourceLlv.Spec.LVMVolumeGroupName + + // TODO snapshots: validate source llv + } else if llv.Spec.Source.Kind == "LVMLogicalVolumeSnapshot" { + sourceSnapshot := &v1alpha1.LVMLogicalVolumeSnapshot{} + if err = r.cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceSnapshot); err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to find source LVMLogicalVolumeSnapshot %s", llv.Spec.Source.Name)) + return false, err + } + sourceVolumeName = sourceSnapshot.Spec.ActualLVNameOnTheNode + sourceVgName = sourceSnapshot.Spec.LVMVolumeGroupName + // TODO snapshots: validate source snapshot + } else { + return false, fmt.Errorf("source kind is not supported: %s", llv.Spec.Source.Kind) + } + cmd, err = utils.CreateThinLogicalVolumeSnapshot(llv.Spec.ActualLVNameOnTheNode, sourceVgName, sourceVolumeName) + + } + } + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] runs cmd: %s", cmd)) + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to create a %s LogicalVolume for the LVMLogicalVolume %s", llv.Spec.Type, llv.Name)) + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully created LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) + + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] adds the LV %s to the cache", llv.Spec.ActualLVNameOnTheNode)) + r.sdsCache.AddLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] tries to get the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) + actualSize := r.getLVActualSize(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + if actualSize.Value() == 0 { + r.log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] unable to get actual size for LV %s in VG %s (likely LV was not found in the cache), retry...", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + return true, nil + } + r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] successfully got the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) + r.log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s in VG: %s has actual size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String())) + + if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, llv, actualSize); err != nil { + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully ended the reconciliation for the LVMLogicalVolume %s", llv.Name)) + return false, nil +} + +func (r *Reconciler) reconcileLLVUpdateFunc( + ctx context.Context, + llv *v1alpha1.LVMLogicalVolume, + lvg *v1alpha1.LVMVolumeGroup, +) (bool, error) { + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) + + // status might be nil if a user creates the resource with LV name which matches existing LV on the node + if llv.Status == nil { + err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, cutils.LLVStatusPhasePending, "") + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) + return true, err + } + } + + // it needs to get current LV size from the node as status might be nil + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size before the extension", llv.Name)) + actualSize := r.getLVActualSize(lvg.Spec.ActualVGNameOnTheNode, 
llv.Spec.ActualLVNameOnTheNode) + if actualSize.Value() == 0 { + r.log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s has zero size (likely LV was not updated in the cache) ", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + return true, nil + } + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size %s before the extension", llv.Name, actualSize.String())) + + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to count the LVMLogicalVolume %s requested size", llv.Name)) + llvRequestSize, err := cutils.GetLLVRequestedSize(llv, lvg) + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) + return false, err + } + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String())) + + if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) { + r.log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String())) + + if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, llv, actualSize); err != nil { + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) + + return false, nil + } + + extendingSize := subtractQuantity(llvRequestSize, actualSize) + r.log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has extending size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, extendingSize.String())) + if extendingSize.Value() < 0 { + err = fmt.Errorf("specified LV size %dB is less than actual one on the node %dB", llvRequestSize.Value(), actualSize.Value()) + r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to extend the LVMLogicalVolume %s", llv.Name)) + return false, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s should be resized", llv.Name)) + // this check prevents infinite resource updates after retry + if llv.Status.Phase != cutils.LLVStatusPhaseFailed { + err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, cutils.LLVStatusPhaseResizing, "") + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) + return true, err + } + } + + freeSpace := cutils.GetFreeLVGSpaceForLLV(lvg, llv) + r.log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s, type: %s, extending size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, extendingSize.String(), freeSpace.String())) + + if !utils.AreSizesEqualWithinDelta(freeSpace, extendingSize, internal.ResizeDelta) { + if freeSpace.Value() < extendingSize.Value()+internal.ResizeDelta.Value() { + err = errors.New("not enough space") + r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) + + // returns true cause a user might manage LVG free space without changing the LLV + return true, err + } + } + + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s will be extended with size: %s", 
llv.Spec.ActualLVNameOnTheNode, llv.Name, llvRequestSize.String())) + cmd, err := utils.ExtendLV(llvRequestSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] runs cmd: %s", cmd)) + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to ExtendLV, name: %s, type: %s", llv.Spec.ActualLVNameOnTheNode, llv.Spec.Type)) + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully extended LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) + + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size after the extension", llv.Name)) + newActualSize := r.getLVActualSize(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + + // this case might be triggered if sds cache will not update lv state in time + if newActualSize.Value() == actualSize.Value() { + r.log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s was extended but cache is not updated yet. It will be retried", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + return true, nil + } + + r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size after the extension", llv.Name)) + r.log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s actual size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, newActualSize.String())) + + // need this here as a user might create the LLV with existing LV + if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, llv, newActualSize); err != nil { + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) + return false, nil +} + +func (r *Reconciler) reconcileLLVDeleteFunc( + ctx context.Context, + llv *v1alpha1.LVMLogicalVolume, + lvg *v1alpha1.LVMVolumeGroup, +) (bool, error) { + r.log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) + + // The controller won't remove the LLV resource and LV volume while the resource has any other finalizers.
+ if len(llv.Finalizers) != 0 { + if len(llv.Finalizers) > 1 || + llv.Finalizers[0] != internal.SdsNodeConfiguratorFinalizer { + r.log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the LVMLogicalVolume %s for now as it has other finalizers", llv.Name)) + return false, nil + } + } + + err := r.deleteLVIfNeeded(lvg.Spec.ActualVGNameOnTheNode, llv) + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully deleted the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + + err = r.removeLLVFinalizersIfExist(ctx, llv) + if err != nil { + r.log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to remove finalizers from the LVMLogicalVolume %s", llv.Name)) + return true, err + } + + r.log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) + return false, nil +} + +func (r *Reconciler) identifyReconcileFunc(vgName string, llv *v1alpha1.LVMLogicalVolume) reconcileType { + should := r.shouldReconcileByCreateFunc(vgName, llv) + if should { + return CreateReconcile + } + + should = r.shouldReconcileByUpdateFunc(vgName, llv) + if should { + return UpdateReconcile + } + + should = shouldReconcileByDeleteFunc(llv) + if should { + return DeleteReconcile + } + + return "" +} + +func shouldReconcileByDeleteFunc(llv *v1alpha1.LVMLogicalVolume) bool { + return llv.DeletionTimestamp != nil +} + +//nolint:unparam +func checkIfConditionIsTrue(lvg *v1alpha1.LVMVolumeGroup, conType string) bool { + // this check prevents infinite resource updating after a retry + for _, c := range lvg.Status.Conditions { + if c.Type == conType && c.Status == v1.ConditionTrue { + return true + } + } + + return false +} + +func (r *Reconciler) removeLLVFinalizersIfExist( + ctx context.Context, + llv *v1alpha1.LVMLogicalVolume, +) error { + var removed bool + for i, f := range llv.Finalizers { + if f == internal.SdsNodeConfiguratorFinalizer { + llv.Finalizers = append(llv.Finalizers[:i], llv.Finalizers[i+1:]...)
+ removed = true + r.log.Debug(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + break + } + } + + if removed { + r.log.Trace(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + err := r.updateLVMLogicalVolumeSpec(ctx, llv) + if err != nil { + r.log.Error(err, fmt.Sprintf("[updateLVMLogicalVolumeSpec] unable to update the LVMLogicalVolume %s", llv.Name)) + return err + } + } + + return nil +} + +func checkIfLVBelongsToLLV(llv *v1alpha1.LVMLogicalVolume, lv *internal.LVData) bool { + switch llv.Spec.Type { + case cutils.Thin: + if lv.PoolName != llv.Spec.Thin.PoolName { + return false + } + case cutils.Thick: + contiguous := string(lv.LVAttr[2]) == "c" + if string(lv.LVAttr[0]) != "-" || + contiguous != isContiguous(llv) { + return false + } + } + + return true +} + +func (r *Reconciler) deleteLVIfNeeded(vgName string, llv *v1alpha1.LVMLogicalVolume) error { + lv := r.sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) + if lv == nil || !lv.Exist { + r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) + return nil + } + + // this case prevents unexpected deletion of a same-name LV which does not actually belong to our LLV + if !checkIfLVBelongsToLLV(llv, &lv.Data) { + r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] no need to delete LV %s as it doesn't belong to the LVMLogicalVolume %s", lv.Data.LVName, llv.Name)) + return nil + } + + cmd, err := utils.RemoveLV(vgName, llv.Spec.ActualLVNameOnTheNode) + r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] runs cmd: %s", cmd)) + if err != nil { + r.log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to remove LV %s from VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) + return err + } + + r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] mark LV %s in the cache as removed", lv.Data.LVName)) + r.sdsCache.MarkLVAsRemoved(lv.Data.VGName, lv.Data.LVName) + + return nil +} + +func (r *Reconciler) getLVActualSize(vgName, lvName string) resource.Quantity { + lv := r.sdsCache.FindLV(vgName, lvName) + if lv == nil { + return resource.Quantity{} + } + + result := resource.NewQuantity(lv.Data.LVSize.Value(), resource.BinarySI) + + return *result +} + +func (r *Reconciler) addLLVFinalizerIfNotExist(ctx context.Context, llv *v1alpha1.LVMLogicalVolume) (bool, error) { + if slices.Contains(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer) { + return false, nil + } + + llv.Finalizers = append(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer) + + r.log.Trace(fmt.Sprintf("[addLLVFinalizerIfNotExist] added finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + err := r.updateLVMLogicalVolumeSpec(ctx, llv) + if err != nil { + return false, err + } + + return true, nil +} + +func (r *Reconciler) shouldReconcileByCreateFunc(vgName string, llv *v1alpha1.LVMLogicalVolume) bool { + if llv.DeletionTimestamp != nil { + return false + } + + lv := r.sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) + return lv == nil +} + +func subtractQuantity(currentQuantity, quantityToSubtract resource.Quantity) resource.Quantity { + resultingQuantity := currentQuantity.DeepCopy() + resultingQuantity.Sub(quantityToSubtract) + return resultingQuantity +} + +func (r *Reconciler) validateLVMLogicalVolume(llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, string) { + if
llv.DeletionTimestamp != nil { + // as the configuration doesn't matter if we want to delete it + return true, "" + } + + reason := strings.Builder{} + + if len(llv.Spec.ActualLVNameOnTheNode) == 0 { + reason.WriteString("No LV name specified. ") + } + + llvRequestedSize, err := cutils.GetLLVRequestedSize(llv, lvg) + if err != nil { + reason.WriteString(err.Error()) + } + + if llvRequestedSize.Value() == 0 { + reason.WriteString("Zero size for LV. ") + } + + if llv.Status != nil { + if llvRequestedSize.Value()+internal.ResizeDelta.Value() < llv.Status.ActualSize.Value() { + reason.WriteString("Desired LV size is less than actual one. ") + } + } + + switch llv.Spec.Type { + case cutils.Thin: + if llv.Spec.Thin == nil { + reason.WriteString("No thin pool specified. ") + break + } + + exist := false + for _, tp := range lvg.Status.ThinPools { + if tp.Name == llv.Spec.Thin.PoolName { + exist = true + break + } + } + + if !exist { + reason.WriteString("Selected thin pool does not exist in selected LVMVolumeGroup. ") + } + case cutils.Thick: + if llv.Spec.Thin != nil { + reason.WriteString("Thin pool specified for Thick LV. ") + } + } + + // if a specified Thick LV name matches the existing Thin one + lv := r.sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + if lv != nil && + len(lv.Data.LVAttr) != 0 && !checkIfLVBelongsToLLV(llv, &lv.Data) { + reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it doesn't match the one on the node.", lv.Data.LVName)) + } + + if reason.Len() > 0 { + return false, reason.String() + } + + return true, "" +} + +func (r *Reconciler) updateLVMLogicalVolumeSpec(ctx context.Context, llv *v1alpha1.LVMLogicalVolume) error { + return r.cl.Update(ctx, llv) +} + +func (r *Reconciler) shouldReconcileByUpdateFunc(vgName string, llv *v1alpha1.LVMLogicalVolume) bool { + if llv.DeletionTimestamp != nil { + return false + } + + lv := r.sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) + return lv != nil && lv.Exist +} + +func isContiguous(llv *v1alpha1.LVMLogicalVolume) bool { + if llv.Spec.Thick == nil { + return false + } + + return *llv.Spec.Thick.Contiguous +} diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go b/images/agent/src/pkg/controller/llv/reconciler_test.go similarity index 77% rename from images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go rename to images/agent/src/pkg/controller/llv/reconciler_test.go index 4bb0f453..123e178e 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go +++ b/images/agent/src/pkg/controller/llv/reconciler_test.go @@ -1,4 +1,4 @@ -package controller +package llv import ( "bytes" @@ -13,18 +13,17 @@ import ( "agent/internal" "agent/pkg/cache" + cutils "agent/pkg/controller/utils" "agent/pkg/logger" "agent/pkg/monitoring" + "agent/pkg/test_utils" "agent/pkg/utils" ) func TestLVMLogicaVolumeWatcher(t *testing.T) { var ( - cl = NewFakeClient() - log = logger.Logger{} - metrics = monitoring.Metrics{} - vgName = "test-vg" - ctx = context.Background() + vgName = "test-vg" + ctx = context.Background() ) t.Run("subtractQuantity_returns_correct_value", func(t *testing.T) { @@ -42,7 +41,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{}, Spec: v1alpha1.LVMLogicalVolumeSpec{ - Type: Thin, + Type: cutils.Thin, Thin: &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: poolName}, }, } @@ -56,7 +55,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { llv :=
&v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{}, Spec: v1alpha1.LVMLogicalVolumeSpec{ - Type: Thin, + Type: cutils.Thin, Thin: &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: poolName}, }, } @@ -69,7 +68,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{}, Spec: v1alpha1.LVMLogicalVolumeSpec{ - Type: Thick, + Type: cutils.Thick, }, } lv := &internal.LVData{LVAttr: "-wi-a-----"} @@ -81,7 +80,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{}, Spec: v1alpha1.LVMLogicalVolumeSpec{ - Type: Thick, + Type: cutils.Thick, }, } lv1 := &internal.LVData{LVAttr: "Vwi-a-----"} @@ -98,18 +97,20 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { t.Run("thick_all_good_returns_true", func(t *testing.T) { const lvgName = "test-lvg" + r := setupReconciler() + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, }, } - err := cl.Create(ctx, lvg) + err := r.cl.Create(ctx, lvg) if err != nil { t.Error(err) } else { defer func() { - err = cl.Delete(ctx, lvg) + err = r.cl.Delete(ctx, lvg) if err != nil { t.Error(err) } @@ -119,41 +120,42 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: "test-lv", - Type: Thick, + Type: cutils.Thick, Size: "10M", LVMVolumeGroupName: lvgName, }, } - v, r := validateLVMLogicalVolume(&cache.Cache{}, llv, lvg) + v, reason := r.validateLVMLogicalVolume(llv, lvg) if assert.True(t, v) { - assert.Equal(t, 0, len(r)) + assert.Equal(t, 0, len(reason)) } }) t.Run("thick_all_bad_returns_false", func(t *testing.T) { lvName := "test-lv" + r := setupReconciler() + llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: lvName, - Type: Thick, + Type: cutils.Thick, Size: "0M", LVMVolumeGroupName: "some-lvg", Thin: &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: "some-lvg"}, }, } - sdsCache := cache.New() - sdsCache.StoreLVs([]internal.LVData{ + r.sdsCache.StoreLVs([]internal.LVData{ { LVName: lvName, }, }, bytes.Buffer{}) - v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LVMVolumeGroup{}) + v, reason := r.validateLVMLogicalVolume(llv, &v1alpha1.LVMVolumeGroup{}) if assert.False(t, v) { - assert.Equal(t, "Zero size for LV. Thin pool specified for Thick LV. ", r) + assert.Equal(t, "Zero size for LV. Thin pool specified for Thick LV. 
", reason) } }) @@ -163,6 +165,8 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { tpName = "test-tp" ) + r := setupReconciler() + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, @@ -180,39 +184,41 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: "test-lv", - Type: Thin, + Type: cutils.Thin, Size: "10M", LVMVolumeGroupName: lvgName, Thin: &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: tpName}, }, } - v, r := validateLVMLogicalVolume(cache.New(), llv, lvg) + v, reason := r.validateLVMLogicalVolume(llv, lvg) if assert.True(t, v) { - assert.Equal(t, 0, len(r)) + assert.Equal(t, 0, len(reason)) } }) t.Run("thin_all_bad_returns_false", func(t *testing.T) { + + r := setupReconciler() + llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: "", - Type: Thin, + Type: cutils.Thin, Size: "0M", LVMVolumeGroupName: "some-lvg", }, } - sdsCache := cache.New() - sdsCache.StoreLVs([]internal.LVData{ + r.sdsCache.StoreLVs([]internal.LVData{ { LVName: "test-lv", }, }, bytes.Buffer{}) - v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LVMVolumeGroup{}) + v, reason := r.validateLVMLogicalVolume(llv, &v1alpha1.LVMVolumeGroup{}) if assert.False(t, v) { - assert.Equal(t, "No LV name specified. Zero size for LV. No thin pool specified. ", r) + assert.Equal(t, "No LV name specified. Zero size for LV. No thin pool specified. ", reason) } }) }) @@ -227,7 +233,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { AllocationLimit: internal.AllocationLimitDefaultValue, } - free, err := getThinPoolAvailableSpace(tp.ActualSize, tp.AllocatedSize, tp.AllocationLimit) + free, err := cutils.GetThinPoolAvailableSpace(tp.ActualSize, tp.AllocatedSize, tp.AllocationLimit) if err != nil { t.Error(err) } @@ -251,57 +257,61 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { } t.Run("returns_true", func(t *testing.T) { - belongs := belongsToNode(lvg, nodeName) + belongs := cutils.LVGBelongsToNode(lvg, nodeName) assert.True(t, belongs) }) t.Run("returns_false", func(t *testing.T) { - belongs := belongsToNode(lvg, "other_node") + belongs := cutils.LVGBelongsToNode(lvg, "other_node") assert.False(t, belongs) }) }) t.Run("identifyReconcileFunc", func(t *testing.T) { t.Run("returns_create", func(t *testing.T) { + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{} - actual := identifyReconcileFunc(cache.New(), vgName, llv) + actual := r.identifyReconcileFunc(vgName, llv) assert.Equal(t, CreateReconcile, actual) }) t.Run("returns_update", func(t *testing.T) { lvName := "test-lv" + r := setupReconciler() + llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: lvName, }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - sdsCache := cache.New() - sdsCache.StoreLVs([]internal.LVData{ + r.sdsCache.StoreLVs([]internal.LVData{ { LVName: lvName, VGName: vgName, }, }, bytes.Buffer{}) - actual := identifyReconcileFunc(sdsCache, vgName, llv) + actual := r.identifyReconcileFunc(vgName, llv) assert.Equal(t, UpdateReconcile, actual) }) t.Run("returns_delete", func(t *testing.T) { + r := setupReconciler() + llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &v1.Time{}}, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - actual := identifyReconcileFunc(cache.New(), vgName, 
llv) + actual := r.identifyReconcileFunc(vgName, llv) assert.Equal(t, DeleteReconcile, actual) }) @@ -309,42 +319,45 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { t.Run("shouldReconcileByCreateFunc", func(t *testing.T) { t.Run("if_lv_is_not_created_returns_true", func(t *testing.T) { + r := setupReconciler() + lvName := "test-lv" llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: lvName, }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - should := shouldReconcileByCreateFunc(cache.New(), vgName, llv) + should := r.shouldReconcileByCreateFunc(vgName, llv) assert.True(t, should) }) t.Run("if_lv_is_created_returns_false", func(t *testing.T) { + r := setupReconciler() lvName := "test-lv" llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: lvName, }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - sdsCache := cache.New() - sdsCache.StoreLVs([]internal.LVData{ + r.sdsCache.StoreLVs([]internal.LVData{ { LVName: lvName, VGName: vgName, }, }, bytes.Buffer{}) - should := shouldReconcileByCreateFunc(sdsCache, vgName, llv) + should := r.shouldReconcileByCreateFunc(vgName, llv) assert.False(t, should) }) t.Run("if_deletion_timestamp_is_not_nil_returns_false", func(t *testing.T) { + r := setupReconciler() lvName := "test-lv" llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &v1.Time{}}, @@ -352,66 +365,60 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { ActualLVNameOnTheNode: lvName, }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - sdsCache := cache.New() - sdsCache.StoreLVs([]internal.LVData{ - { - LVName: lvName, - VGName: vgName, - }, - }, bytes.Buffer{}) - - should := shouldReconcileByCreateFunc(cache.New(), vgName, llv) + should := r.shouldReconcileByCreateFunc(vgName, llv) assert.False(t, should) }) }) t.Run("shouldReconcileByUpdateFunc", func(t *testing.T) { t.Run("if_deletion_timestamp_is_not_nill_returns_false", func(t *testing.T) { + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ DeletionTimestamp: &v1.Time{}, }, } - should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv) + should := r.shouldReconcileByUpdateFunc(vgName, llv) assert.False(t, should) }) t.Run("if_lv_exists_returns_true", func(t *testing.T) { + r := setupReconciler() lvName := "test-lv" llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: lvName, }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - sdsCache := cache.New() - sdsCache.StoreLVs([]internal.LVData{ + r.sdsCache.StoreLVs([]internal.LVData{ { LVName: lvName, VGName: vgName, }, }, bytes.Buffer{}) - should := shouldReconcileByUpdateFunc(sdsCache, vgName, llv) + should := r.shouldReconcileByUpdateFunc(vgName, llv) assert.True(t, should) }) t.Run("if_lv_does_not_exist_returns_false", func(t *testing.T) { + r := setupReconciler() lvName := "test-lv" llv := &v1alpha1.LVMLogicalVolume{ Spec: v1alpha1.LVMLogicalVolumeSpec{ ActualLVNameOnTheNode: lvName, }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, }, } - should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv) + should := r.shouldReconcileByUpdateFunc(vgName, 
llv) assert.False(t, should) }) }) @@ -438,38 +445,39 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { t.Run("updateLVMLogicalVolumePhaseIfNeeded", func(t *testing.T) { const reason = "test_reason" + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ Name: "test", }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhaseCreated, + Phase: cutils.LLVStatusPhaseCreated, Reason: "", }, } - err := cl.Create(ctx, llv) + err := r.cl.Create(ctx, llv) if err != nil { t.Error(err) return } defer func() { - err = cl.Delete(ctx, llv) + err = r.cl.Delete(ctx, llv) if err != nil { t.Error(err) } }() - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, reason) + err = r.llvCl.UpdatePhaseIfNeeded(ctx, llv, cutils.LLVStatusPhaseFailed, reason) if assert.NoError(t, err) { newLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, Namespace: "", }, newLLV) - assert.Equal(t, newLLV.Status.Phase, LLVStatusPhaseFailed) + assert.Equal(t, newLLV.Status.Phase, cutils.LLVStatusPhaseFailed) assert.Equal(t, newLLV.Status.Reason, reason) } }) @@ -479,6 +487,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { const ( name = "test-name1" ) + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ Name: name, @@ -486,25 +495,25 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { }, } - err := cl.Create(ctx, llv) + err := r.cl.Create(ctx, llv) if err != nil { t.Error(err) return } defer func() { - err = cl.Delete(ctx, llv) + err = r.cl.Delete(ctx, llv) if err != nil { t.Error(err) } }() - added, err := addLLVFinalizerIfNotExist(ctx, cl, log, metrics, llv) + added, err := r.addLLVFinalizerIfNotExist(ctx, llv) if assert.NoError(t, err) { assert.True(t, added) newLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, Namespace: "", }, newLLV) @@ -517,6 +526,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { const ( name = "test-name2" ) + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ Name: name, @@ -524,25 +534,25 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { }, } - err := cl.Create(ctx, llv) + err := r.cl.Create(ctx, llv) if err != nil { t.Error(err) return } defer func() { - err = cl.Delete(ctx, llv) + err = r.cl.Delete(ctx, llv) if err != nil { t.Error(err) } }() - added, err := addLLVFinalizerIfNotExist(ctx, cl, log, metrics, llv) + added, err := r.addLLVFinalizerIfNotExist(ctx, llv) if assert.NoError(t, err) { assert.False(t, added) newLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, Namespace: "", }, newLLV) @@ -560,6 +570,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { oldSize = resource.NewQuantity(100000000, resource.BinarySI) newSize = resource.NewQuantity(200000000, resource.BinarySI) ) + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, @@ -570,27 +581,27 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { Size: oldSize.String(), }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhasePending, + Phase: cutils.LLVStatusPhasePending, Reason: "", ActualSize: *oldSize, }, } - err := cl.Create(ctx, llv) + err := r.cl.Create(ctx, llv) if err != nil { t.Error(err) return } defer func() { - err = cl.Delete(ctx, llv) + err = r.cl.Delete(ctx, llv) if err != nil { 
t.Error(err) } }() oldLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, }, oldLLV) if err != nil { @@ -599,18 +610,18 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { } if assert.NotNil(t, oldLLV) { - assert.Equal(t, LLVStatusPhasePending, oldLLV.Status.Phase) + assert.Equal(t, cutils.LLVStatusPhasePending, oldLLV.Status.Phase) assert.Equal(t, oldSize.Value(), oldLLV.Status.ActualSize.Value()) } oldLLV.Spec.Size = newSize.String() - oldLLV.Status.Phase = LLVStatusPhaseCreated + oldLLV.Status.Phase = cutils.LLVStatusPhaseCreated oldLLV.Status.ActualSize = *newSize - err = updateLVMLogicalVolumeSpec(ctx, metrics, cl, oldLLV) + err = r.updateLVMLogicalVolumeSpec(ctx, oldLLV) if assert.NoError(t, err) { newLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, }, newLLV) if err != nil { @@ -618,7 +629,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { return } - assert.Equal(t, LLVStatusPhasePending, newLLV.Status.Phase) + assert.Equal(t, cutils.LLVStatusPhasePending, newLLV.Status.Phase) assert.Equal(t, oldSize.Value(), newLLV.Status.ActualSize.Value()) } }) @@ -631,6 +642,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { oldSize = resource.NewQuantity(100000000, resource.BinarySI) newSize = resource.NewQuantity(200000000, resource.BinarySI) ) + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, @@ -641,27 +653,27 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { Size: oldSize.String(), }, Status: &v1alpha1.LVMLogicalVolumeStatus{ - Phase: LLVStatusPhasePending, + Phase: cutils.LLVStatusPhasePending, Reason: "", ActualSize: *oldSize, }, } - err := cl.Create(ctx, llv) + err := r.cl.Create(ctx, llv) if err != nil { t.Error(err) return } defer func() { - err = cl.Delete(ctx, llv) + err = r.cl.Delete(ctx, llv) if err != nil { t.Error(err) } }() oldLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, }, oldLLV) if err != nil { @@ -670,17 +682,16 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { } if assert.NotNil(t, oldLLV) { - assert.Equal(t, LLVStatusPhasePending, oldLLV.Status.Phase) + assert.Equal(t, cutils.LLVStatusPhasePending, oldLLV.Status.Phase) assert.Equal(t, oldSize.Value(), oldLLV.Status.ActualSize.Value()) } oldLLV.Spec.Size = newSize.String() - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, oldLLV, *newSize) + err = r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, oldLLV, *newSize) if assert.NoError(t, err) { - assert.True(t, updated) newLLV := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, }, newLLV) if err != nil { @@ -689,33 +700,34 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { } assert.Equal(t, oldSize.String(), newLLV.Spec.Size) - assert.Equal(t, LLVStatusPhaseCreated, newLLV.Status.Phase) + assert.Equal(t, cutils.LLVStatusPhaseCreated, newLLV.Status.Phase) assert.Equal(t, newSize.Value(), newLLV.Status.ActualSize.Value()) } }) t.Run("removeLLVFinalizersIfExist", func(t *testing.T) { + r := setupReconciler() llv := &v1alpha1.LVMLogicalVolume{ ObjectMeta: v1.ObjectMeta{ Name: "test-name", Finalizers: []string{internal.SdsNodeConfiguratorFinalizer}, }, } - err := cl.Create(ctx, llv) + err := r.cl.Create(ctx, llv) if err != nil { t.Error(err) return } defer func() { - err = cl.Delete(ctx, llv) + err = 
r.cl.Delete(ctx, llv) if err != nil { t.Error(err) } }() llvWithFinalizer := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, }, llvWithFinalizer) if err != nil { @@ -725,10 +737,10 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { assert.Contains(t, llvWithFinalizer.Finalizers, internal.SdsNodeConfiguratorFinalizer) - err = removeLLVFinalizersIfExist(ctx, cl, metrics, log, llv) + err = r.removeLLVFinalizersIfExist(ctx, llv) if assert.NoError(t, err) { llvNoFinalizer := &v1alpha1.LVMLogicalVolume{} - err = cl.Get(ctx, client.ObjectKey{ + err = r.cl.Get(ctx, client.ObjectKey{ Name: llv.Name, }, llvNoFinalizer) if err != nil { @@ -764,3 +776,11 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { }) }) } + +func setupReconciler() *Reconciler { + cl := test_utils.NewFakeClient() + log := logger.Logger{} + metrics := monitoring.Metrics{} + + return NewReconciler(cl, log, metrics, cache.New(), ReconcilerOptions{}) +} diff --git a/images/agent/src/pkg/controller/llv_extender/reconciler.go b/images/agent/src/pkg/controller/llv_extender/reconciler.go new file mode 100644 index 00000000..4c0d3e3d --- /dev/null +++ b/images/agent/src/pkg/controller/llv_extender/reconciler.go @@ -0,0 +1,256 @@ +package llv_extender + +import ( + "agent/internal" + "agent/pkg/cache" + "agent/pkg/controller" + "agent/pkg/controller/clients" + cutils "agent/pkg/controller/utils" + "agent/pkg/logger" + "agent/pkg/monitoring" + "agent/pkg/utils" + "context" + "errors" + "fmt" + "reflect" + "time" + + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ReconcilerName = "lvm-logical-volume-extender-controller" + +type Reconciler struct { + cl client.Client + log logger.Logger + lvgCl *clients.LVGClient + llvCl *clients.LLVClient + metrics monitoring.Metrics + sdsCache *cache.Cache + opts ReconcilerOptions +} + +type ReconcilerOptions struct { + NodeName string + VolumeGroupScanInterval time.Duration +} + +func NewReconciler( + cl client.Client, + log logger.Logger, + metrics monitoring.Metrics, + sdsCache *cache.Cache, + opts ReconcilerOptions, +) controller.Reconciler[*v1alpha1.LVMVolumeGroup] { + return &Reconciler{ + cl: cl, + log: log, + lvgCl: clients.NewLVGClient( + cl, + log, + metrics, + opts.NodeName, + ReconcilerName, + ), + llvCl: clients.NewLLVClient(cl, log), + metrics: metrics, + sdsCache: sdsCache, + opts: opts, + } +} + +// Name implements controller.Reconciler. +func (r *Reconciler) Name() string { + return ReconcilerName +} + +// MaxConcurrentReconciles implements controller.Reconciler. +func (r *Reconciler) MaxConcurrentReconciles() int { + return 1 +} + +// ShouldReconcileUpdate implements controller.Reconciler. +func (r *Reconciler) ShouldReconcileUpdate(objectOld *v1alpha1.LVMVolumeGroup, objectNew *v1alpha1.LVMVolumeGroup) bool { + return true +} + +// Reconcile implements controller.Reconciler. 
+func (r *Reconciler) Reconcile( + ctx context.Context, + req controller.ReconcileRequest[*v1alpha1.LVMVolumeGroup], +) (controller.Result, error) { + + lvg := req.Object + + if !r.shouldLLVExtenderReconcileEvent(lvg) { + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] no need to reconcile a request for the LVMVolumeGroup %s", lvg.Name)) + return controller.Result{}, nil + } + + shouldRequeue := r.ReconcileLVMLogicalVolumeExtension(ctx, lvg) + if shouldRequeue { + r.log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] Reconciler needs a retry for the LVMVolumeGroup %s. Retry in %s", lvg.Name, r.opts.VolumeGroupScanInterval.String())) + return controller.Result{ + RequeueAfter: r.opts.VolumeGroupScanInterval, + }, nil + } + + r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] successfully reconciled LVMLogicalVolumes for the LVMVolumeGroup %s", lvg.Name)) + return controller.Result{}, nil + +} + +func (r *Reconciler) shouldLLVExtenderReconcileEvent(newLVG *v1alpha1.LVMVolumeGroup) bool { + // for new LVMVolumeGroups + if reflect.DeepEqual(newLVG.Status, v1alpha1.LVMVolumeGroupStatus{}) { + r.log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status is not initialized yet", newLVG.Name)) + return false + } + + if !cutils.LVGBelongsToNode(newLVG, r.opts.NodeName) { + r.log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as it does not belong to the node %s", newLVG.Name, r.opts.NodeName)) + return false + } + + if newLVG.Status.Phase != internal.PhaseReady { + r.log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status.Phase is not Ready", newLVG.Name)) + return false + } + + return true +} + +func (r *Reconciler) ReconcileLVMLogicalVolumeExtension( + ctx context.Context, + lvg *v1alpha1.LVMVolumeGroup, +) bool { + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name)) + llvs, err := r.getAllLLVsWithPercentSize(ctx, lvg.Name) + if err != nil { + r.log.Error(err, "[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources") + return true + } + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got LLV resources for the LVMVolumeGroup %s", lvg.Name)) + + if len(llvs) == 0 { + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] no LVMLogicalVolumes with percent size were found for the LVMVolumeGroup %s", lvg.Name)) + return false + } + + shouldRetry := false + for _, llv := range llvs { + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] starts to reconcile the LVMLogicalVolume %s", llv.Name)) + llvRequestedSize, err := cutils.GetLLVRequestedSize(&llv, lvg) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to get requested size of the LVMLogicalVolume %s", llv.Name)) + shouldRetry = true + continue + } + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got the requested size of the LVMLogicalVolume %s, size: %s", llv.Name, llvRequestedSize.String())) + + lv := r.sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + if lv == nil { + err = fmt.Errorf("lv %s not found", llv.Spec.ActualLVNameOnTheNode) + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to find LV %s of the 
LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, cutils.LLVStatusPhaseFailed, err.Error()) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + } + shouldRetry = true + continue + } + + if utils.AreSizesEqualWithinDelta(llvRequestedSize, lv.Data.LVSize, internal.ResizeDelta) { + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should not be extended", llv.Name)) + continue + } + + if llvRequestedSize.Value() < lv.Data.LVSize.Value() { + r.log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s requested size %s is less than actual one on the node %s", llv.Name, llvRequestedSize.String(), lv.Data.LVSize.String())) + continue + } + + freeSpace := cutils.GetFreeLVGSpaceForLLV(lvg, &llv) + if llvRequestedSize.Value()+internal.ResizeDelta.Value() > freeSpace.Value() { + err = errors.New("not enough space") + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend the LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, cutils.LLVStatusPhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + shouldRetry = true + } + continue + } + + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should be extended from %s to %s size", llv.Name, llv.Status.ActualSize.String(), llvRequestedSize.String())) + err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, cutils.LLVStatusPhaseResizing, "") + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + shouldRetry = true + continue + } + + cmd, err := utils.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd)) + err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, cutils.LLVStatusPhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) + if err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + } + shouldRetry = true + continue + } + r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s has been successfully extended", llv.Name)) + + var ( + maxAttempts = 5 + currentAttempts = 0 + ) + for currentAttempts < maxAttempts { + lv = r.sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) + if utils.AreSizesEqualWithinDelta(lv.Data.LVSize, llvRequestedSize, internal.ResizeDelta) { + r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s of the LVMLogicalVolume %s was successfully updated in the cache", lv.Data.LVName, llv.Name)) + break + } + + r.log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s size of the LVMLogicalVolume %s was not yet updated in the cache, retry...", lv.Data.LVName, llv.Name)) + currentAttempts++ + time.Sleep(1 * time.Second) + } + + if currentAttempts == maxAttempts { + err = fmt.Errorf("LV %s is not updated in the cache", lv.Data.LVName) + r.log.Error(err, 
fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to resize the LVMLogicalVolume %s", llv.Name)) + shouldRetry = true + + if err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, cutils.LLVStatusPhaseFailed, err.Error()); err != nil { + r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + } + continue + } + + if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, &llv, lv.Data.LVSize); err != nil { + shouldRetry = true + continue + } + } + return shouldRetry +} + +func (r *Reconciler) getAllLLVsWithPercentSize(ctx context.Context, lvgName string) ([]v1alpha1.LVMLogicalVolume, error) { + llvList := &v1alpha1.LVMLogicalVolumeList{} + err := r.cl.List(ctx, llvList) + if err != nil { + return nil, err + } + + result := make([]v1alpha1.LVMLogicalVolume, 0, len(llvList.Items)) + for _, llv := range llvList.Items { + if llv.Spec.LVMVolumeGroupName == lvgName && cutils.IsPercentSize(llv.Spec.Size) { + result = append(result, llv) + } + } + + return result, nil +} diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvg/discoverer.go similarity index 66% rename from images/agent/src/pkg/controller/lvm_volume_group_discover.go rename to images/agent/src/pkg/controller/lvg/discoverer.go index fa00a3db..ff91ef79 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go +++ b/images/agent/src/pkg/controller/lvg/discoverer.go @@ -1,22 +1,14 @@ -/* -Copyright 2023 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller +package lvg import ( + "agent/internal" + "agent/pkg/cache" + "agent/pkg/controller" + "agent/pkg/controller/clients" + cutils "agent/pkg/controller/utils" + "agent/pkg/logger" + "agent/pkg/monitoring" + "agent/pkg/utils" "context" "errors" "fmt" @@ -24,274 +16,211 @@ import ( "strings" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "agent/config" - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" ) -const ( - LVMVolumeGroupDiscoverCtrlName = "lvm-volume-group-discover-controller" -) +const DiscovererName = "lvm-volume-group-discover-controller" -func RunLVMVolumeGroupDiscoverController( - mgr manager.Manager, - cfg config.Options, +type Discoverer struct { + cl client.Client + log logger.Logger + lvgCl *clients.LVGClient + bdCl *clients.BDClient + metrics monitoring.Metrics + sdsCache *cache.Cache + opts DiscovererOptions +} + +type DiscovererOptions struct { + NodeName string + VolumeGroupScanInterval time.Duration +} + +func NewDiscoverer( + cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, -) (controller.Controller, error) { - cl := mgr.GetClient() - - c, err := controller.New(LVMVolumeGroupDiscoverCtrlName, mgr, controller.Options{ - Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { - log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler starts LVMVolumeGroup resources reconciliation") - - shouldRequeue := LVMVolumeGroupDiscoverReconcile(ctx, cl, metrics, log, cfg, sdsCache) - if shouldRequeue { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occurred while run the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String())) - return reconcile.Result{ - RequeueAfter: cfg.VolumeGroupScanIntervalSec, - }, nil - } - log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler successfully ended LVMVolumeGroup resources reconciliation") - return reconcile.Result{}, nil - }), - }) - - if err != nil { - log.Error(err, fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] unable to create controller: "%s"`, LVMVolumeGroupDiscoverCtrlName)) - return nil, err + opts DiscovererOptions, +) *Discoverer { + return &Discoverer{ + cl: cl, + log: log, + lvgCl: clients.NewLVGClient(cl, log, metrics, opts.NodeName, DiscovererName), + bdCl: clients.NewBDClient(cl, metrics), + metrics: metrics, + sdsCache: sdsCache, + opts: opts, } +} + +func (d *Discoverer) Name() string { + return DiscovererName +} - return c, err +func (d *Discoverer) Discover(ctx context.Context) (controller.Result, error) { + d.log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler starts LVMVolumeGroup resources reconciliation") + shouldRequeue := d.LVMVolumeGroupDiscoverReconcile(ctx) + if shouldRequeue { + d.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occurred while run the Reconciler func, retry in %s", d.opts.VolumeGroupScanInterval.String())) + return controller.Result{ + RequeueAfter: d.opts.VolumeGroupScanInterval, + }, nil + } + d.log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler successfully ended 
LVMVolumeGroup resources reconciliation") + return controller.Result{}, nil } -func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl client.Client, metrics monitoring.Metrics, log logger.Logger, cfg config.Options, sdsCache *cache.Cache) bool { +func (d *Discoverer) LVMVolumeGroupDiscoverReconcile(ctx context.Context) bool { reconcileStart := time.Now() - log.Info("[RunLVMVolumeGroupDiscoverController] starts the reconciliation") + d.log.Info("[RunLVMVolumeGroupDiscoverController] starts the reconciliation") - currentLVMVGs, err := GetAPILVMVolumeGroups(ctx, cl, metrics) + currentLVMVGs, err := d.GetAPILVMVolumeGroups(ctx) if err != nil { - log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to run GetAPILVMVolumeGroups") + d.log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to run GetAPILVMVolumeGroups") return true } if len(currentLVMVGs) == 0 { - log.Debug("[RunLVMVolumeGroupDiscoverController] no current LVMVolumeGroups found") + d.log.Debug("[RunLVMVolumeGroupDiscoverController] no current LVMVolumeGroups found") } - blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil) + blockDevices, err := d.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) if err != nil { - log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to GetAPIBlockDevices") + d.log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to GetAPIBlockDevices") for _, lvg := range currentLVMVGs { - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, internal.TypeVGReady, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error())) + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionFalse, internal.TypeVGReady, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error())) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) } } return true } if len(blockDevices) == 0 { - log.Info("[RunLVMVolumeGroupDiscoverController] no BlockDevices were found") + d.log.Info("[RunLVMVolumeGroupDiscoverController] no BlockDevices were found") return false } - filteredLVGs := filterLVGsByNode(currentLVMVGs, cfg.NodeName) + filteredLVGs := filterLVGsByNode(currentLVMVGs, d.opts.NodeName) - log.Debug("[RunLVMVolumeGroupDiscoverController] tries to get LVMVolumeGroup candidates") - candidates, err := GetLVMVolumeGroupCandidates(log, sdsCache, blockDevices, cfg.NodeName) + d.log.Debug("[RunLVMVolumeGroupDiscoverController] tries to get LVMVolumeGroup candidates") + candidates, err := d.GetLVMVolumeGroupCandidates(blockDevices) if err != nil { - log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to run GetLVMVolumeGroupCandidates") + d.log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to run GetLVMVolumeGroupCandidates") for _, lvg := range filteredLVGs { - log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] turn LVMVolumeGroup %s to non operational. LVG struct: %+v ", lvg.Name, lvg)) - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, internal.TypeVGReady, "DataConfigurationFailed", fmt.Sprintf("unable to configure data, err: %s", err.Error())) + d.log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] turn LVMVolumeGroup %s to non operational. 
LVG struct: %+v ", lvg.Name, lvg)) + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionFalse, internal.TypeVGReady, "DataConfigurationFailed", fmt.Sprintf("unable to configure data, err: %s", err.Error())) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) } } return true } - log.Debug("[RunLVMVolumeGroupDiscoverController] successfully got LVMVolumeGroup candidates") + d.log.Debug("[RunLVMVolumeGroupDiscoverController] successfully got LVMVolumeGroup candidates") if len(candidates) == 0 { - log.Debug("[RunLVMVolumeGroupDiscoverController] no candidates were found on the node") + d.log.Debug("[RunLVMVolumeGroupDiscoverController] no candidates were found on the node") } - candidates, err = ReconcileUnhealthyLVMVolumeGroups(ctx, cl, log, candidates, filteredLVGs) + candidates, err = d.ReconcileUnhealthyLVMVolumeGroups(ctx, candidates, filteredLVGs) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error has occurred while clearing the LVMVolumeGroups resources. Requeue the request in %s", cfg.VolumeGroupScanIntervalSec.String())) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error has occurred while clearing the LVMVolumeGroups resources. Requeue the request in %s", d.opts.VolumeGroupScanInterval.String())) return true } shouldRequeue := false for _, candidate := range candidates { if lvg, exist := filteredLVGs[candidate.ActualVGNameOnTheNode]; exist { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is already exist. Tries to update it", lvg.Name)) - log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] candidate: %+v", candidate)) - log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] lvg: %+v", lvg)) + d.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is already exist. 
Tries to update it", lvg.Name)) + d.log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] candidate: %+v", candidate)) + d.log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] lvg: %+v", lvg)) - if !hasLVMVolumeGroupDiff(log, lvg, candidate) { - log.Debug(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] no data to update for LVMVolumeGroup, name: "%s"`, lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") + if !hasLVMVolumeGroupDiff(d.log, lvg, candidate) { + d.log.Debug(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] no data to update for LVMVolumeGroup, name: "%s"`, lvg.Name)) + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) shouldRequeue = true } continue } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s should be updated", lvg.Name)) - if err = UpdateLVMVolumeGroupByCandidate(ctx, cl, metrics, log, &lvg, candidate); err != nil { - log.Error(err, fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] unable to update LVMVolumeGroup, name: "%s". Requeue the request in %s`, - lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) + d.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s should be updated", lvg.Name)) + if err = d.UpdateLVMVolumeGroupByCandidate(ctx, &lvg, candidate); err != nil { + d.log.Error(err, fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] unable to update LVMVolumeGroup, name: "%s". Requeue the request in %s`, + lvg.Name, d.opts.VolumeGroupScanInterval.String())) shouldRequeue = true continue } - log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LVMVolumeGroup, name: "%s"`, lvg.Name)) + d.log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LVMVolumeGroup, name: "%s"`, lvg.Name)) } else { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", candidate.LVMVGName)) - createdLvg, err := CreateLVMVolumeGroupByCandidate(ctx, log, metrics, cl, candidate, cfg.NodeName) + d.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", candidate.LVMVGName)) + createdLvg, err := d.CreateLVMVolumeGroupByCandidate(ctx, candidate) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to CreateLVMVolumeGroupByCandidate %s. Requeue the request in %s", candidate.LVMVGName, cfg.VolumeGroupScanIntervalSec.String())) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to CreateLVMVolumeGroupByCandidate %s. 
Requeue the request in %s", candidate.LVMVGName, d.opts.VolumeGroupScanInterval.String())) shouldRequeue = true continue } - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGConfigurationApplied, "Success", "all configuration has been applied") + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionTrue, internal.TypeVGConfigurationApplied, "Success", "all configuration has been applied") if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, createdLvg.Name)) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, createdLvg.Name)) shouldRequeue = true continue } - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, createdLvg.Name)) + d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, createdLvg.Name)) shouldRequeue = true continue } - log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] created new APILVMVolumeGroup, name: "%s"`, createdLvg.Name)) + d.log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] created new APILVMVolumeGroup, name: "%s"`, createdLvg.Name)) } } if shouldRequeue { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] some problems have been occurred while iterating the lvmvolumegroup resources. Retry the reconcile in %s", cfg.VolumeGroupScanIntervalSec.String())) + d.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] some problems have been occurred while iterating the lvmvolumegroup resources. 
Retry the reconcile in %s", d.opts.VolumeGroupScanInterval.String())) return true } - log.Info("[RunLVMVolumeGroupDiscoverController] END discovery loop") - metrics.ReconcileDuration(LVMVolumeGroupDiscoverCtrlName).Observe(metrics.GetEstimatedTimeInSeconds(reconcileStart)) - metrics.ReconcilesCountTotal(LVMVolumeGroupDiscoverCtrlName).Inc() + d.log.Info("[RunLVMVolumeGroupDiscoverController] END discovery loop") + d.metrics.ReconcileDuration(DiscovererName).Observe(d.metrics.GetEstimatedTimeInSeconds(reconcileStart)) + d.metrics.ReconcilesCountTotal(DiscovererName).Inc() return false } -func filterLVGsByNode(lvgs map[string]v1alpha1.LVMVolumeGroup, currentNode string) map[string]v1alpha1.LVMVolumeGroup { - filtered := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgs)) - for _, lvg := range lvgs { - if lvg.Spec.Local.NodeName == currentNode { - filtered[lvg.Spec.ActualVGNameOnTheNode] = lvg - } - } - - return filtered -} +func (d *Discoverer) GetAPILVMVolumeGroups(ctx context.Context) (map[string]v1alpha1.LVMVolumeGroup, error) { + lvgList := &v1alpha1.LVMVolumeGroupList{} -func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LVMVolumeGroup, candidate internal.LVMVolumeGroupCandidate) bool { - convertedStatusPools, err := convertStatusThinPools(lvg, candidate.StatusThinPools) + start := time.Now() + err := d.cl.List(ctx, lvgList) + d.metrics.APIMethodsDuration(DiscovererName, "list").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) + d.metrics.APIMethodsExecutionCount(DiscovererName, "list").Inc() if err != nil { - log.Error(err, fmt.Sprintf("[hasLVMVolumeGroupDiff] unable to identify candidate difference for the LVMVolumeGroup %s", lvg.Name)) - return false - } - log.Trace(fmt.Sprintf(`AllocatedSize, candidate: %s, lvg: %s`, candidate.AllocatedSize.String(), lvg.Status.AllocatedSize.String())) - log.Trace(fmt.Sprintf(`ThinPools, candidate: %+v, lvg: %+v`, convertedStatusPools, lvg.Status.ThinPools)) - for _, tp := range convertedStatusPools { - log.Trace(fmt.Sprintf("Candidate ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String())) - } - for _, tp := range lvg.Status.ThinPools { - log.Trace(fmt.Sprintf("Resource ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String())) - } - log.Trace(fmt.Sprintf(`VGSize, candidate: %s, lvg: %s`, candidate.VGSize.String(), lvg.Status.VGSize.String())) - log.Trace(fmt.Sprintf(`VGUUID, candidate: %s, lvg: %s`, candidate.VGUUID, lvg.Status.VGUuid)) - log.Trace(fmt.Sprintf(`Nodes, candidate: %+v, lvg: %+v`, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes)) - - return candidate.AllocatedSize.Value() != lvg.Status.AllocatedSize.Value() || - hasStatusPoolDiff(convertedStatusPools, lvg.Status.ThinPools) || - candidate.VGSize.Value() != lvg.Status.VGSize.Value() || - candidate.VGFree.Value() != lvg.Status.VGFree.Value() || - candidate.VGUUID != lvg.Status.VGUuid || - hasStatusNodesDiff(log, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes) -} - -func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LVMVolumeGroupNode) bool { - if len(first) != len(second) { - return true - } - - for i := range first { - if first[i].Name != second[i].Name { - return true - } - - if len(first[i].Devices) != len(second[i].Devices) { - return true - } - - for j := range first[i].Devices { - log.Trace(fmt.Sprintf("[hasStatusNodesDiff] first Device: name %s, PVSize %s, DevSize %s", first[i].Devices[j].BlockDevice, first[i].Devices[j].PVSize.String(), 
first[i].Devices[j].DevSize.String())) - log.Trace(fmt.Sprintf("[hasStatusNodesDiff] second Device: name %s, PVSize %s, DevSize %s", second[i].Devices[j].BlockDevice, second[i].Devices[j].PVSize.String(), second[i].Devices[j].DevSize.String())) - if first[i].Devices[j].BlockDevice != second[i].Devices[j].BlockDevice || - first[i].Devices[j].Path != second[i].Devices[j].Path || - first[i].Devices[j].PVUuid != second[i].Devices[j].PVUuid || - first[i].Devices[j].PVSize.Value() != second[i].Devices[j].PVSize.Value() || - first[i].Devices[j].DevSize.Value() != second[i].Devices[j].DevSize.Value() { - return true - } - } - } - - return false -} - -func hasStatusPoolDiff(first, second []v1alpha1.LVMVolumeGroupThinPoolStatus) bool { - if len(first) != len(second) { - return true + d.metrics.APIMethodsErrors(DiscovererName, "list").Inc() + return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LVMVolumeGroups, err: %w", err) } - for i := range first { - if first[i].Name != second[i].Name || - first[i].UsedSize.Value() != second[i].UsedSize.Value() || - first[i].ActualSize.Value() != second[i].ActualSize.Value() || - first[i].AllocatedSize.Value() != second[i].AllocatedSize.Value() || - first[i].Ready != second[i].Ready || - first[i].Message != second[i].Message || - first[i].AvailableSpace.Value() != second[i].AvailableSpace.Value() { - return true - } + lvgs := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgList.Items)) + for _, lvg := range lvgList.Items { + lvgs[lvg.Name] = lvg } - return false + return lvgs, nil } // ReconcileUnhealthyLVMVolumeGroups turns LVMVolumeGroup resources without VG or ThinPools to NotReady. -func ReconcileUnhealthyLVMVolumeGroups( +func (d *Discoverer) ReconcileUnhealthyLVMVolumeGroups( ctx context.Context, - cl client.Client, - log logger.Logger, candidates []internal.LVMVolumeGroupCandidate, lvgs map[string]v1alpha1.LVMVolumeGroup, ) ([]internal.LVMVolumeGroupCandidate, error) { @@ -308,7 +237,7 @@ func ReconcileUnhealthyLVMVolumeGroups( messageBldr := strings.Builder{} candidate, exist := candidateMap[lvg.Spec.ActualVGNameOnTheNode] if !exist { - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its VG %s", lvg.Name, lvg.Spec.ActualVGNameOnTheNode)) + d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its VG %s", lvg.Name, lvg.Spec.ActualVGNameOnTheNode)) messageBldr.WriteString(fmt.Sprintf("Unable to find VG %s (it should be created with special tag %s). ", lvg.Spec.ActualVGNameOnTheNode, internal.LVMTags[0])) } else { // candidate exists, check thin pools @@ -320,27 +249,27 @@ func ReconcileUnhealthyLVMVolumeGroups( // take thin-pools from status instead of spec to prevent miss never-created ones for i, statusTp := range lvg.Status.ThinPools { if candidateTp, exist := candidateTPs[statusTp.Name]; !exist { - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, statusTp.Name)) + d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, statusTp.Name)) messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. 
", statusTp.Name)) lvg.Status.ThinPools[i].Ready = false } else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, statusTp.ActualSize, internal.ResizeDelta) && candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < statusTp.ActualSize.Value() { // that means thin-pool is not 100%VG space // use candidate VGSize as lvg.Status.VGSize might not be updated yet - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) + d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) } } } if messageBldr.Len() > 0 { - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, internal.TypeVGReady, internal.ReasonScanFailed, messageBldr.String()) + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionFalse, internal.TypeVGReady, internal.ReasonScanFailed, messageBldr.String()) if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] unable to update the LVMVolumeGroup %s", lvg.Name)) + d.log.Error(err, fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] unable to update the LVMVolumeGroup %s", lvg.Name)) return nil, err } - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s and its data obejct will be removed from the reconcile due to unhealthy states", lvg.Name)) + d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s and its data obejct will be removed from the reconcile due to unhealthy states", lvg.Name)) vgNamesToSkip[candidate.ActualVGNameOnTheNode] = struct{}{} } } @@ -348,14 +277,14 @@ func ReconcileUnhealthyLVMVolumeGroups( for _, lvg := range lvgs { if _, shouldSkip := vgNamesToSkip[lvg.Spec.ActualVGNameOnTheNode]; shouldSkip { - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] remove the LVMVolumeGroup %s from the reconcile", lvg.Name)) + d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] remove the LVMVolumeGroup %s from the reconcile", lvg.Name)) delete(lvgs, lvg.Spec.ActualVGNameOnTheNode) } } for i, c := range candidates { if _, shouldSkip := vgNamesToSkip[c.ActualVGNameOnTheNode]; shouldSkip { - log.Debug(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] remove the data object for VG %s from the reconcile", c.ActualVGNameOnTheNode)) + d.log.Debug(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] remove the data object for VG %s from the reconcile", c.ActualVGNameOnTheNode)) candidates = append(candidates[:i], candidates[i+1:]...) 
} } @@ -363,8 +292,8 @@ func ReconcileUnhealthyLVMVolumeGroups( return candidates, nil } -func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds map[string]v1alpha1.BlockDevice, currentNode string) ([]internal.LVMVolumeGroupCandidate, error) { - vgs, vgErrs := sdsCache.GetVGs() +func (d *Discoverer) GetLVMVolumeGroupCandidates(bds map[string]v1alpha1.BlockDevice) ([]internal.LVMVolumeGroupCandidate, error) { + vgs, vgErrs := d.sdsCache.GetVGs() vgWithTag := filterVGByTag(vgs, internal.LVMTags) candidates := make([]internal.LVMVolumeGroupCandidate, 0, len(vgWithTag)) @@ -376,25 +305,25 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m // If vgErrs is not empty, that means we have some problems on vgs, so we need to identify unhealthy vgs. var vgIssues map[string]string if vgErrs.Len() != 0 { - log.Warning("[GetLVMVolumeGroupCandidates] some errors have been occurred while executing vgs command") - vgIssues = sortVGIssuesByVG(log, vgWithTag) + d.log.Warning("[GetLVMVolumeGroupCandidates] some errors occurred while executing the vgs command") + vgIssues = sortVGIssuesByVG(d.log, vgWithTag) } - pvs, pvErrs := sdsCache.GetPVs() + pvs, pvErrs := d.sdsCache.GetPVs() if len(pvs) == 0 { err := errors.New("no PV found") - log.Error(err, "[GetLVMVolumeGroupCandidates] no PV was found, but VG with tags are not empty") + d.log.Error(err, "[GetLVMVolumeGroupCandidates] no PV was found, but VGs with tags are not empty") return nil, err } // If pvErrs is not empty, that means we have some problems on vgs, so we need to identify unhealthy vgs. var pvIssues map[string][]string if pvErrs.Len() != 0 { - log.Warning("[GetLVMVolumeGroupCandidates] some errors have been occurred while executing pvs command") - pvIssues = sortPVIssuesByVG(log, pvs) + d.log.Warning("[GetLVMVolumeGroupCandidates] some errors occurred while executing the pvs command") + pvIssues = sortPVIssuesByVG(d.log, pvs) } - lvs, lvErrs := sdsCache.GetLVs() + lvs, lvErrs := d.sdsCache.GetLVs() var thinPools []internal.LVData if len(lvs) > 0 { // Filter LV to get only thin pools as we do not support thick for now. @@ -404,15 +333,15 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m // If lvErrs is not empty, that means we have some problems on vgs, so we need to identify unhealthy vgs. var lvIssues map[string]map[string]string if lvErrs.Len() != 0 { - log.Warning("[GetLVMVolumeGroupCandidates] some errors have been occurred while executing lvs command") - lvIssues = sortThinPoolIssuesByVG(log, thinPools) + d.log.Warning("[GetLVMVolumeGroupCandidates] some errors occurred while executing the lvs command") + lvIssues = sortThinPoolIssuesByVG(d.log, thinPools) } // Sort PV,BlockDevices and LV by VG to fill needed information for LVMVolumeGroup resource further. 
sortedPVs := sortPVsByVG(pvs, vgWithTag) sortedBDs := sortBlockDevicesByVG(bds, vgWithTag) - log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] BlockDevices: %+v", bds)) - log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] Sorted BlockDevices: %+v", sortedBDs)) + d.log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] BlockDevices: %+v", bds)) + d.log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] Sorted BlockDevices: %+v", sortedBDs)) sortedThinPools := sortThinPoolsByVG(thinPools, vgWithTag) sortedLVByThinPool := sortLVByThinPool(lvs) @@ -430,11 +359,11 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m AllocatedSize: *resource.NewQuantity(allocateSize.Value(), resource.BinarySI), Health: health, Message: message, - StatusThinPools: getStatusThinPools(log, sortedThinPools, sortedLVByThinPool, vg, lvIssues), + StatusThinPools: getStatusThinPools(d.log, sortedThinPools, sortedLVByThinPool, vg, lvIssues), VGSize: *resource.NewQuantity(vg.VGSize.Value(), resource.BinarySI), VGFree: *resource.NewQuantity(vg.VGFree.Value(), resource.BinarySI), VGUUID: vg.VGUUID, - Nodes: configureCandidateNodeDevices(sortedPVs, sortedBDs, vg, currentNode), + Nodes: configureCandidateNodeDevices(sortedPVs, sortedBDs, vg, d.opts.NodeName), } candidates = append(candidates, candidate) @@ -443,10 +372,123 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m return candidates, nil } -func getVGAllocatedSize(vg internal.VGData) resource.Quantity { - allocatedSize := vg.VGSize - allocatedSize.Sub(vg.VGFree) - return allocatedSize +func (d *Discoverer) CreateLVMVolumeGroupByCandidate( + ctx context.Context, + candidate internal.LVMVolumeGroupCandidate, +) (*v1alpha1.LVMVolumeGroup, error) { + thinPools, err := convertStatusThinPools(v1alpha1.LVMVolumeGroup{}, candidate.StatusThinPools) + if err != nil { + return nil, err + } + + lvmVolumeGroup := &v1alpha1.LVMVolumeGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: candidate.LVMVGName, + OwnerReferences: []metav1.OwnerReference{}, + Finalizers: candidate.Finalizers, + }, + Spec: v1alpha1.LVMVolumeGroupSpec{ + ActualVGNameOnTheNode: candidate.ActualVGNameOnTheNode, + BlockDeviceSelector: configureBlockDeviceSelector(candidate), + ThinPools: convertSpecThinPools(candidate.SpecThinPools), + Type: candidate.Type, + Local: v1alpha1.LVMVolumeGroupLocalSpec{NodeName: d.opts.NodeName}, + }, + Status: v1alpha1.LVMVolumeGroupStatus{ + AllocatedSize: candidate.AllocatedSize, + Nodes: convertLVMVGNodes(candidate.Nodes), + ThinPools: thinPools, + VGSize: candidate.VGSize, + VGUuid: candidate.VGUUID, + VGFree: candidate.VGFree, + }, + } + + for _, node := range candidate.Nodes { + for _, dev := range node { + i := len(dev.BlockDevice) + if i == 0 { + d.log.Warning("The attempt to create the LVG resource failed because it was not possible to find a BlockDevice for it.") + return lvmVolumeGroup, nil + } + } + } + + start := time.Now() + err = d.cl.Create(ctx, lvmVolumeGroup) + d.metrics.APIMethodsDuration(DiscovererName, "create").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) + d.metrics.APIMethodsExecutionCount(DiscovererName, "create").Inc() + if err != nil { + d.metrics.APIMethodsErrors(DiscovererName, "create").Inc() + return nil, fmt.Errorf("unable to сreate LVMVolumeGroup, err: %w", err) + } + + return lvmVolumeGroup, nil +} + +func (d *Discoverer) UpdateLVMVolumeGroupByCandidate( + ctx context.Context, + lvg *v1alpha1.LVMVolumeGroup, + candidate internal.LVMVolumeGroupCandidate, +) error { + // Check if VG 
has some problems + if candidate.Health == NonOperational { + d.log.Warning(fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] candidate for LVMVolumeGroup %s has NonOperational health, message %s. Update the VGReady condition to False", lvg.Name, candidate.Message)) + updErr := d.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, metav1.ConditionFalse, internal.TypeVGReady, internal.ReasonScanFailed, candidate.Message) + if updErr != nil { + d.log.Error(updErr, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + } + return updErr + } + + // The resource.Status.Nodes can not be just re-written, it needs to be updated directly by a node. + // We take all current resources nodes and convert them to map for better performance further. + resourceNodes := make(map[string][]v1alpha1.LVMVolumeGroupDevice, len(lvg.Status.Nodes)) + for _, node := range lvg.Status.Nodes { + resourceNodes[node.Name] = node.Devices + } + + // Now we take our candidate's nodes, match them with resource's ones and upgrade devices for matched resource node. + for candidateNode, devices := range candidate.Nodes { + if _, match := resourceNodes[candidateNode]; match { + resourceNodes[candidateNode] = convertLVMVGDevices(devices) + } + } + + // Now we take resource's nodes, match them with our map and fill with new info. + for i, node := range lvg.Status.Nodes { + if devices, match := resourceNodes[node.Name]; match { + lvg.Status.Nodes[i].Devices = devices + } + } + thinPools, err := convertStatusThinPools(*lvg, candidate.StatusThinPools) + if err != nil { + d.log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to convert status thin pools for the LVMVolumeGroup %s", lvg.Name)) + return err + } + + lvg.Status.AllocatedSize = candidate.AllocatedSize + lvg.Status.Nodes = convertLVMVGNodes(candidate.Nodes) + lvg.Status.ThinPools = thinPools + lvg.Status.VGSize = candidate.VGSize + lvg.Status.VGFree = candidate.VGFree + lvg.Status.VGUuid = candidate.VGUUID + + start := time.Now() + err = d.cl.Status().Update(ctx, lvg) + d.metrics.APIMethodsDuration(DiscovererName, "update").Observe(d.metrics.GetEstimatedTimeInSeconds(start)) + d.metrics.APIMethodsExecutionCount(DiscovererName, "update").Inc() + if err != nil { + d.metrics.APIMethodsErrors(DiscovererName, "update").Inc() + return fmt.Errorf(`[UpdateLVMVolumeGroupByCandidate] unable to update LVMVolumeGroup, name: "%s", err: %w`, lvg.Name, err) + } + + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") + if err != nil { + d.log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + } + + return err } func checkVGHealth(blockDevices map[string][]v1alpha1.BlockDevice, vgIssues map[string]string, pvIssues map[string][]string, lvIssues map[string]map[string]string, vg internal.VGData) (health, message string) { @@ -738,10 +780,6 @@ func getThinPoolUsedSize(lv internal.LVData) (*resource.Quantity, error) { return resource.NewQuantity(int64(tmp), resource.BinarySI), nil } -func isThinPool(lv internal.LVData) bool { - return string(lv.LVAttr[0]) == "t" -} - func getBlockDevicesNames(bds map[string][]v1alpha1.BlockDevice, vg internal.VGData) []string { sorted := bds[vg.VGName+vg.VGUUID] names := make([]string, 0, len(sorted)) @@ -753,130 +791,91 @@ func getBlockDevicesNames(bds map[string][]v1alpha1.BlockDevice, vg 
internal.VGD return names } -func CreateLVMVolumeGroupByCandidate( - ctx context.Context, - log logger.Logger, - metrics monitoring.Metrics, - cl client.Client, - candidate internal.LVMVolumeGroupCandidate, - nodeName string, -) (*v1alpha1.LVMVolumeGroup, error) { - thinPools, err := convertStatusThinPools(v1alpha1.LVMVolumeGroup{}, candidate.StatusThinPools) - if err != nil { - return nil, err - } - - lvmVolumeGroup := &v1alpha1.LVMVolumeGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: candidate.LVMVGName, - OwnerReferences: []metav1.OwnerReference{}, - Finalizers: candidate.Finalizers, - }, - Spec: v1alpha1.LVMVolumeGroupSpec{ - ActualVGNameOnTheNode: candidate.ActualVGNameOnTheNode, - BlockDeviceSelector: configureBlockDeviceSelector(candidate), - ThinPools: convertSpecThinPools(candidate.SpecThinPools), - Type: candidate.Type, - Local: v1alpha1.LVMVolumeGroupLocalSpec{NodeName: nodeName}, - }, - Status: v1alpha1.LVMVolumeGroupStatus{ - AllocatedSize: candidate.AllocatedSize, - Nodes: convertLVMVGNodes(candidate.Nodes), - ThinPools: thinPools, - VGSize: candidate.VGSize, - VGUuid: candidate.VGUUID, - VGFree: candidate.VGFree, - }, - } - - for _, node := range candidate.Nodes { - for _, d := range node { - i := len(d.BlockDevice) - if i == 0 { - log.Warning("The attempt to create the LVG resource failed because it was not possible to find a BlockDevice for it.") - return lvmVolumeGroup, nil - } +func filterLVGsByNode(lvgs map[string]v1alpha1.LVMVolumeGroup, currentNode string) map[string]v1alpha1.LVMVolumeGroup { + filtered := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgs)) + for _, lvg := range lvgs { + if lvg.Spec.Local.NodeName == currentNode { + filtered[lvg.Spec.ActualVGNameOnTheNode] = lvg } } - start := time.Now() - err = cl.Create(ctx, lvmVolumeGroup) - metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc() + return filtered +} + +func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LVMVolumeGroup, candidate internal.LVMVolumeGroupCandidate) bool { + convertedStatusPools, err := convertStatusThinPools(lvg, candidate.StatusThinPools) if err != nil { - metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc() - return nil, fmt.Errorf("unable to сreate LVMVolumeGroup, err: %w", err) + log.Error(err, fmt.Sprintf("[hasLVMVolumeGroupDiff] unable to identify candidate difference for the LVMVolumeGroup %s", lvg.Name)) + return false + } + log.Trace(fmt.Sprintf(`AllocatedSize, candidate: %s, lvg: %s`, candidate.AllocatedSize.String(), lvg.Status.AllocatedSize.String())) + log.Trace(fmt.Sprintf(`ThinPools, candidate: %+v, lvg: %+v`, convertedStatusPools, lvg.Status.ThinPools)) + for _, tp := range convertedStatusPools { + log.Trace(fmt.Sprintf("Candidate ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String())) } + for _, tp := range lvg.Status.ThinPools { + log.Trace(fmt.Sprintf("Resource ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String())) + } + log.Trace(fmt.Sprintf(`VGSize, candidate: %s, lvg: %s`, candidate.VGSize.String(), lvg.Status.VGSize.String())) + log.Trace(fmt.Sprintf(`VGUUID, candidate: %s, lvg: %s`, candidate.VGUUID, lvg.Status.VGUuid)) + log.Trace(fmt.Sprintf(`Nodes, candidate: %+v, lvg: %+v`, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes)) - return lvmVolumeGroup, nil + return 
candidate.AllocatedSize.Value() != lvg.Status.AllocatedSize.Value() || + hasStatusPoolDiff(convertedStatusPools, lvg.Status.ThinPools) || + candidate.VGSize.Value() != lvg.Status.VGSize.Value() || + candidate.VGFree.Value() != lvg.Status.VGFree.Value() || + candidate.VGUUID != lvg.Status.VGUuid || + hasStatusNodesDiff(log, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes) } -func UpdateLVMVolumeGroupByCandidate( - ctx context.Context, - cl client.Client, - metrics monitoring.Metrics, - log logger.Logger, - lvg *v1alpha1.LVMVolumeGroup, - candidate internal.LVMVolumeGroupCandidate, -) error { - // Check if VG has some problems - if candidate.Health == NonOperational { - log.Warning(fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] candidate for LVMVolumeGroup %s has NonOperational health, message %s. Update the VGReady condition to False", lvg.Name, candidate.Message)) - updErr := updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionFalse, internal.TypeVGReady, internal.ReasonScanFailed, candidate.Message) - if updErr != nil { - log.Error(updErr, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) - } - return updErr +func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LVMVolumeGroupNode) bool { + if len(first) != len(second) { + return true } - // The resource.Status.Nodes can not be just re-written, it needs to be updated directly by a node. - // We take all current resources nodes and convert them to map for better performance further. - resourceNodes := make(map[string][]v1alpha1.LVMVolumeGroupDevice, len(lvg.Status.Nodes)) - for _, node := range lvg.Status.Nodes { - resourceNodes[node.Name] = node.Devices - } + for i := range first { + if first[i].Name != second[i].Name { + return true + } - // Now we take our candidate's nodes, match them with resource's ones and upgrade devices for matched resource node. - for candidateNode, devices := range candidate.Nodes { - if _, match := resourceNodes[candidateNode]; match { - resourceNodes[candidateNode] = convertLVMVGDevices(devices) + if len(first[i].Devices) != len(second[i].Devices) { + return true } - } - // Now we take resource's nodes, match them with our map and fill with new info. 
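Reviewer note (illustrative, not part of the diff): the helpers introduced here — filterLVGsByNode, hasLVMVolumeGroupDiff and the Discoverer's Create/UpdateLVMVolumeGroupByCandidate methods — are meant to be wired together by the discovery loop, which sits outside this hunk. A minimal sketch of that wiring, under the assumption that the loop already has the collected candidates (the method name reconcileCandidates and its arguments are hypothetical):

func (d *Discoverer) reconcileCandidates(
	ctx context.Context,
	lvgs map[string]v1alpha1.LVMVolumeGroup,
	candidates []internal.LVMVolumeGroupCandidate,
) error {
	// keep only the resources owned by this node, keyed by the actual VG name
	filtered := filterLVGsByNode(lvgs, d.opts.NodeName)
	for _, candidate := range candidates {
		if lvg, exist := filtered[candidate.ActualVGNameOnTheNode]; exist {
			if !hasLVMVolumeGroupDiff(d.log, lvg, candidate) {
				continue // nothing changed for this VG, skip the status update
			}
			if err := d.UpdateLVMVolumeGroupByCandidate(ctx, &lvg, candidate); err != nil {
				return err
			}
			continue
		}
		// no resource exists for this VG yet, create it from the candidate
		if _, err := d.CreateLVMVolumeGroupByCandidate(ctx, candidate); err != nil {
			return err
		}
	}
	return nil
}

Comparing resource.Quantity fields through Value() (as hasLVMVolumeGroupDiff and hasStatusPoolDiff do) rather than with a deep equality check avoids spurious diffs caused only by differing string representations of the same byte count.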
- for i, node := range lvg.Status.Nodes { - if devices, match := resourceNodes[node.Name]; match { - lvg.Status.Nodes[i].Devices = devices + for j := range first[i].Devices { + log.Trace(fmt.Sprintf("[hasStatusNodesDiff] first Device: name %s, PVSize %s, DevSize %s", first[i].Devices[j].BlockDevice, first[i].Devices[j].PVSize.String(), first[i].Devices[j].DevSize.String())) + log.Trace(fmt.Sprintf("[hasStatusNodesDiff] second Device: name %s, PVSize %s, DevSize %s", second[i].Devices[j].BlockDevice, second[i].Devices[j].PVSize.String(), second[i].Devices[j].DevSize.String())) + if first[i].Devices[j].BlockDevice != second[i].Devices[j].BlockDevice || + first[i].Devices[j].Path != second[i].Devices[j].Path || + first[i].Devices[j].PVUuid != second[i].Devices[j].PVUuid || + first[i].Devices[j].PVSize.Value() != second[i].Devices[j].PVSize.Value() || + first[i].Devices[j].DevSize.Value() != second[i].Devices[j].DevSize.Value() { + return true + } } } - thinPools, err := convertStatusThinPools(*lvg, candidate.StatusThinPools) - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to convert status thin pools for the LVMVolumeGroup %s", lvg.Name)) - return err - } - lvg.Status.AllocatedSize = candidate.AllocatedSize - lvg.Status.Nodes = convertLVMVGNodes(candidate.Nodes) - lvg.Status.ThinPools = thinPools - lvg.Status.VGSize = candidate.VGSize - lvg.Status.VGFree = candidate.VGFree - lvg.Status.VGUuid = candidate.VGUUID + return false +} - start := time.Now() - err = cl.Status().Update(ctx, lvg) - metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc() - if err != nil { - metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc() - return fmt.Errorf(`[UpdateLVMVolumeGroupByCandidate] unable to update LVMVolumeGroup, name: "%s", err: %w`, lvg.Name, err) +func hasStatusPoolDiff(first, second []v1alpha1.LVMVolumeGroupThinPoolStatus) bool { + if len(first) != len(second) { + return true } - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + for i := range first { + if first[i].Name != second[i].Name || + first[i].UsedSize.Value() != second[i].UsedSize.Value() || + first[i].ActualSize.Value() != second[i].ActualSize.Value() || + first[i].AllocatedSize.Value() != second[i].AllocatedSize.Value() || + first[i].Ready != second[i].Ready || + first[i].Message != second[i].Message || + first[i].AvailableSpace.Value() != second[i].AvailableSpace.Value() { + return true + } } - return err + return false } func configureBlockDeviceSelector(candidate internal.LVMVolumeGroupCandidate) *metav1.LabelSelector { @@ -946,7 +945,7 @@ func convertStatusThinPools(lvg v1alpha1.LVMVolumeGroup, thinPools []internal.LV limit = internal.AllocationLimitDefaultValue } - freeSpace, err := getThinPoolAvailableSpace(tp.ActualSize, tp.AllocatedSize, limit) + freeSpace, err := cutils.GetThinPoolAvailableSpace(tp.ActualSize, tp.AllocatedSize, limit) if err != nil { return nil, err } @@ -966,52 +965,10 @@ func convertStatusThinPools(lvg v1alpha1.LVMVolumeGroup, thinPools []internal.LV return result, nil } -func getThinPoolAvailableSpace(actualSize, allocatedSize resource.Quantity, 
allocationLimit string) (resource.Quantity, error) { - totalSize, err := getThinPoolSpaceWithAllocationLimit(actualSize, allocationLimit) - if err != nil { - return resource.Quantity{}, err - } - - return *resource.NewQuantity(totalSize.Value()-allocatedSize.Value(), resource.BinarySI), nil -} - -func getThinPoolSpaceWithAllocationLimit(actualSize resource.Quantity, allocationLimit string) (resource.Quantity, error) { - limits := strings.Split(allocationLimit, "%") - percent, err := strconv.Atoi(limits[0]) - if err != nil { - return resource.Quantity{}, err - } - - factor := float64(percent) - factor /= 100 - - return *resource.NewQuantity(int64(float64(actualSize.Value())*factor), resource.BinarySI), nil -} - func generateLVMVGName() string { return "vg-" + string(uuid.NewUUID()) } -func GetAPILVMVolumeGroups(ctx context.Context, kc client.Client, metrics monitoring.Metrics) (map[string]v1alpha1.LVMVolumeGroup, error) { - lvgList := &v1alpha1.LVMVolumeGroupList{} - - start := time.Now() - err := kc.List(ctx, lvgList) - metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc() - if err != nil { - metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc() - return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LVMVolumeGroups, err: %w", err) - } - - lvgs := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgList.Items)) - for _, lvg := range lvgList.Items { - lvgs[lvg.Name] = lvg - } - - return lvgs, nil -} - func filterVGByTag(vgs []internal.VGData, tag []string) []internal.VGData { filtered := make([]internal.VGData, 0, len(vgs)) diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go b/images/agent/src/pkg/controller/lvg/discoverer_test.go similarity index 93% rename from images/agent/src/pkg/controller/lvm_volume_group_discover_test.go rename to images/agent/src/pkg/controller/lvg/discoverer_test.go index 07e419a4..b1f6bdb5 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go +++ b/images/agent/src/pkg/controller/lvg/discoverer_test.go @@ -1,3 +1,5 @@ +package lvg + /* Copyright 2023 Flant JSC @@ -14,8 +16,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controller - import ( "context" "testing" @@ -24,21 +24,17 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "agent/internal" + "agent/pkg/cache" "agent/pkg/logger" "agent/pkg/monitoring" + "agent/pkg/test_utils" ) func TestLVMVolumeGroupDiscover(t *testing.T) { - var ( - ctx = context.Background() - cl = NewFakeClient() - log = logger.Logger{} - ) + ctx := context.Background() t.Run("getThinPools_returns_only_thinPools", func(t *testing.T) { lvs := []internal.LVData{ @@ -430,11 +426,12 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { NodeName = "test-node" ) + d := setupDiscoverer(&DiscovererOptions{NodeName: NodeName}) + size10G := resource.MustParse("10G") size1G := resource.MustParse("1G") var ( - testMetrics = monitoring.GetMetrics("") blockDevicesNames = []string{"first", "second"} specThinPools = map[string]resource.Quantity{"first": size10G} statusThinPools = []internal.LVMVGStatusThinPool{ @@ -498,7 +495,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { }, } - created, err := CreateLVMVolumeGroupByCandidate(ctx, log, testMetrics, cl, candidate, NodeName) + created, err := d.CreateLVMVolumeGroupByCandidate(ctx, candidate) if assert.NoError(t, err) { assert.Equal(t, &expected, created) } @@ -509,24 +506,26 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { LVMVGName = "test_lvm-1" ) + d := setupDiscoverer(nil) + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, }, } - err := cl.Create(ctx, lvg) + err := d.cl.Create(ctx, lvg) if err != nil { t.Error(err) } defer func() { - err = cl.Delete(ctx, lvg) + err = d.cl.Delete(ctx, lvg) if err != nil { t.Error(err) } }() - actual, err := GetAPILVMVolumeGroups(ctx, cl, monitoring.GetMetrics("test-node")) + actual, err := d.GetAPILVMVolumeGroups(ctx) if assert.NoError(t, err) { _, ok := actual[LVMVGName] assert.True(t, ok) @@ -538,27 +537,27 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { LVMVGName = "test_lvm-2" ) - metrics := monitoring.GetMetrics("test-node") + d := setupDiscoverer(nil) lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, }, } - err := cl.Create(ctx, lvg) + err := d.cl.Create(ctx, lvg) if err != nil { t.Error(err) } - actual, err := GetAPILVMVolumeGroups(ctx, cl, metrics) + actual, err := d.GetAPILVMVolumeGroups(ctx) if assert.NoError(t, err) { _, ok := actual[LVMVGName] assert.True(t, ok) } - err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, "test-node") + err = d.lvgCl.DeleteLVMVolumeGroup(ctx, lvg) if assert.NoError(t, err) { - actual, err = GetAPILVMVolumeGroups(ctx, cl, metrics) + actual, err = d.GetAPILVMVolumeGroups(ctx) if err != nil { t.Error(err) } @@ -572,19 +571,19 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { LVMVGName = "test_lvm_x" ) - metrics := monitoring.GetMetrics("test-node") + d := setupDiscoverer(nil) lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, }, } - err := cl.Create(ctx, lvg) + err := d.cl.Create(ctx, lvg) if err != nil { t.Error(err) } - actual, err := GetAPILVMVolumeGroups(ctx, cl, metrics) + actual, err := d.GetAPILVMVolumeGroups(ctx) if assert.NoError(t, err) { createdLvg, ok := actual[LVMVGName] assert.True(t, ok) @@ -593,9 +592,9 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { LVMVGName: LVMVGName, AllocatedSize: *resource.NewQuantity(1000, 
resource.BinarySI), } - err = UpdateLVMVolumeGroupByCandidate(ctx, cl, metrics, log, &createdLvg, candidate) + err = d.UpdateLVMVolumeGroupByCandidate(ctx, &createdLvg, candidate) if assert.NoError(t, err) { - updated, err := GetAPILVMVolumeGroups(ctx, cl, metrics) + updated, err := d.GetAPILVMVolumeGroups(ctx) if assert.NoError(t, err) { updatedLvg, ok := updated[LVMVGName] assert.True(t, ok) @@ -847,20 +846,23 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { reason = "test-reason" message = "test-message" ) + + d := setupDiscoverer(nil) + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: lvgName, }, } - err := cl.Create(ctx, lvg) + err := d.cl.Create(ctx, lvg) if err != nil { t.Error(err) } - err = updateLVGConditionIfNeeded(ctx, cl, logger.Logger{}, lvg, metav1.ConditionTrue, conType, reason, message) + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, metav1.ConditionTrue, conType, reason, message) if assert.NoError(t, err) { - err = cl.Get(ctx, client.ObjectKey{ + err = d.cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, lvg) if err != nil { @@ -876,12 +878,14 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { }) } -func NewFakeClient() client.WithWatch { - s := scheme.Scheme - _ = metav1.AddMetaToScheme(s) - _ = v1alpha1.AddToScheme(s) - builder := fake.NewClientBuilder().WithScheme(s).WithStatusSubresource(&v1alpha1.LVMVolumeGroup{}).WithStatusSubresource(&v1alpha1.LVMLogicalVolume{}) +func setupDiscoverer(opts *DiscovererOptions) *Discoverer { + cl := test_utils.NewFakeClient(&v1alpha1.LVMVolumeGroup{}, &v1alpha1.LVMLogicalVolume{}) + log := logger.Logger{} + metrics := monitoring.GetMetrics("") + if opts == nil { + opts = &DiscovererOptions{NodeName: "test_node"} + } + sdsCache := cache.New() - cl := builder.Build() - return cl + return NewDiscoverer(cl, log, metrics, sdsCache, *opts) } diff --git a/images/agent/src/pkg/controller/lvg/reconciler.go b/images/agent/src/pkg/controller/lvg/reconciler.go index 17e27ed9..49070fc6 100644 --- a/images/agent/src/pkg/controller/lvg/reconciler.go +++ b/images/agent/src/pkg/controller/lvg/reconciler.go @@ -4,6 +4,8 @@ import ( "agent/internal" "agent/pkg/cache" "agent/pkg/controller" + "agent/pkg/controller/clients" + cutils "agent/pkg/controller/utils" "agent/pkg/logger" "agent/pkg/monitoring" "agent/pkg/utils" @@ -12,18 +14,16 @@ import ( "fmt" "reflect" "slices" - "strconv" "strings" "time" - "github.com/cloudflare/cfssl/log" "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) -const Name = "lvm-volume-group-watcher-controller" +const ReconcilerName = "lvm-volume-group-watcher-controller" const LVGMetadateNameLabelKey = "kubernetes.io/metadata.name" const ( @@ -39,18 +39,31 @@ const ( LVMVolumeGroupTag = "storage.deckhouse.io/lvmVolumeGroupName" ) +// TODO: remove +const ( + CreateReconcile reconcileType = "Create" + UpdateReconcile reconcileType = "Update" + DeleteReconcile reconcileType = "Delete" +) + +type ( + reconcileType string +) + type Reconciler struct { cl client.Client log logger.Logger + lvgCl *clients.LVGClient + bdCl *clients.BDClient metrics monitoring.Metrics sdsCache *cache.Cache - opts Options + opts ReconcilerOptions } -type Options struct { - NodeName string - BlockDeviceScanIntervalSec time.Duration - VolumeGroupScanIntervalSec time.Duration +type ReconcilerOptions struct { + NodeName string + BlockDeviceScanInterval time.Duration + 
VolumeGroupScanInterval time.Duration } func NewReconciler( @@ -58,11 +71,19 @@ func NewReconciler( log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, - opts Options, + opts ReconcilerOptions, ) *Reconciler { return &Reconciler{ - cl: cl, - log: log, + cl: cl, + log: log, + lvgCl: clients.NewLVGClient( + cl, + log, + metrics, + opts.NodeName, + ReconcilerName, + ), + bdCl: clients.NewBDClient(cl, metrics), metrics: metrics, sdsCache: sdsCache, opts: opts, @@ -70,9 +91,19 @@ func NewReconciler( } func (r *Reconciler) Name() string { - return Name + return ReconcilerName +} + +func (r *Reconciler) MaxConcurrentReconciles() int { + return 1 +} + +// ShouldReconcileUpdate implements controller.Reconciler. +func (r *Reconciler) ShouldReconcileUpdate(objectOld *v1alpha1.LVMVolumeGroup, objectNew *v1alpha1.LVMVolumeGroup) bool { + return r.shouldLVGWatcherReconcileUpdateEvent(objectOld, objectNew) } +// Reconcile implements controller.Reconciler. func (r *Reconciler) Reconcile(ctx context.Context, request controller.ReconcileRequest[*v1alpha1.LVMVolumeGroup]) (controller.Result, error) { r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler starts to reconcile the request %s", request.Object.Name)) @@ -120,24 +151,38 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile } r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to get block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels)) - blockDevices, err := r.getAPIBlockDevices(ctx, lvg.Spec.BlockDeviceSelector) + blockDevices, err := r.bdCl.GetAPIBlockDevices(ctx, ReconcilerName, lvg.Spec.BlockDeviceSelector) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get BlockDevices. Retry in %s", r.opts.BlockDeviceScanIntervalSec.String())) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error())) + r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get BlockDevices. Retry in %s", r.opts.BlockDeviceScanInterval.String())) + err = r.lvgCl.UpdateLVGConditionIfNeeded( + ctx, + lvg, + v1.ConditionFalse, + internal.TypeVGConfigurationApplied, + "NoBlockDevices", + fmt.Sprintf("unable to get block devices resources, err: %s", err.Error()), + ) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.opts.BlockDeviceScanIntervalSec.String())) + r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. 
Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.opts.BlockDeviceScanInterval.String())) } - return controller.Result{RequeueAfter: r.opts.BlockDeviceScanIntervalSec}, nil + return controller.Result{RequeueAfter: r.opts.BlockDeviceScanInterval}, nil } r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully got block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels)) valid, reason := validateSpecBlockDevices(lvg, blockDevices) if !valid { r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupController] validation failed for the LVMVolumeGroup %s, reason: %s", lvg.Name, reason)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) + err = r.lvgCl.UpdateLVGConditionIfNeeded( + ctx, + lvg, + v1.ConditionFalse, + internal.TypeVGConfigurationApplied, + internal.ReasonValidationFailed, + reason, + ) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) + r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.opts.VolumeGroupScanInterval.String())) return controller.Result{}, err } @@ -145,7 +190,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile } r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully validated BlockDevices of the LVMVolumeGroup %s", lvg.Name)) - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, cfg.NodeName)) + r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, r.opts.NodeName)) added, err = r.addLVGLabelIfNeeded(ctx, lvg, LVGMetadateNameLabelKey, lvg.Name) if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) @@ -162,13 +207,20 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile bds, _ := r.sdsCache.GetDevices() if len(bds) == 0 { r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no block devices in the cache, add the LVMVolumeGroup %s to requeue", lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "CacheEmpty", "unable to apply configuration due to the cache's state") + err = r.lvgCl.UpdateLVGConditionIfNeeded( + ctx, + lvg, + v1.ConditionFalse, + internal.TypeVGConfigurationApplied, + "CacheEmpty", + "unable to apply configuration due to the cache's state", + ) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) + r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. 
Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.opts.VolumeGroupScanInterval.String())) } return controller.Result{ - RequeueAfter: r.opts.VolumeGroupScanIntervalSec, + RequeueAfter: r.opts.VolumeGroupScanInterval, }, nil } @@ -185,15 +237,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile } if shouldRequeue { - r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s event will be requeued in %s", lvg.Name, r.opts.VolumeGroupScanIntervalSec.String())) + r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s event will be requeued in %s", lvg.Name, r.opts.VolumeGroupScanInterval.String())) return controller.Result{ - RequeueAfter: r.opts.VolumeGroupScanIntervalSec, + RequeueAfter: r.opts.VolumeGroupScanInterval, }, nil } r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler successfully reconciled the LVMVolumeGroup %s", lvg.Name)) return controller.Result{}, nil - } func (r *Reconciler) runEventReconcile( @@ -201,7 +252,7 @@ func (r *Reconciler) runEventReconcile( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, error) { - recType := r.identifyLVGReconcileFunc(lvg, sdsCache) + recType := r.identifyLVGReconcileFunc(lvg) switch recType { case CreateReconcile: @@ -226,7 +277,7 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L // this check prevents the LVMVolumeGroup resource's infinity updating after a retry for _, c := range lvg.Status.Conditions { if c.Type == internal.TypeVGConfigurationApplied && c.Reason != internal.ReasonTerminating { - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, "trying to delete VG") + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, "trying to delete VG") if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -238,7 +289,14 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L _, exist := lvg.Annotations[deletionProtectionAnnotation] if exist { r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] the LVMVolumeGroup %s has a deletion timestamp but also has a deletion protection annotation %s. Remove it to proceed the delete operation", lvg.Name, deletionProtectionAnnotation)) - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, fmt.Sprintf("to delete the LVG remove the annotation %s", deletionProtectionAnnotation)) + err := r.lvgCl.UpdateLVGConditionIfNeeded( + ctx, + lvg, + v1.ConditionFalse, + internal.TypeVGConfigurationApplied, + internal.ReasonTerminating, + fmt.Sprintf("to delete the LVG remove the annotation %s", deletionProtectionAnnotation), + ) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -253,7 +311,7 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L err := fmt.Errorf("VG %s uses LVs: %v. 
Delete used LVs first", lvg.Spec.ActualVGNameOnTheNode, usedLVs) r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to reconcile LVG %s", lvg.Name)) r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status False to the LVMVolumeGroup %s due to LV does exist", internal.TypeVGConfigurationApplied, lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -266,7 +324,7 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L err := r.deleteVGIfExist(lvg.Spec.ActualVGNameOnTheNode) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete VG %s", lvg.Spec.ActualVGNameOnTheNode)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -278,7 +336,7 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L removed, err := r.removeLVGFinalizerIfExist(ctx, lvg) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to remove a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -291,7 +349,7 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] no need to remove a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) } - err = r.deleteLVMVolumeGroup(ctx, lvg, r.opts.NodeName) + err = r.lvgCl.DeleteLVMVolumeGroup(ctx, lvg) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete the LVMVolumeGroup %s", lvg.Name)) return true, err @@ -313,7 +371,7 @@ func (r *Reconciler) reconcileLVGUpdateFunc( valid, reason := r.validateLVGForUpdateFunc(lvg, blockDevices) if !valid { r.log.Warning(fmt.Sprintf("[reconcileLVGUpdateFunc] the LVMVolumeGroup %s is not valid", lvg.Name)) - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a 
condition %s reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, lvg.Name)) } @@ -323,11 +381,11 @@ func (r *Reconciler) reconcileLVGUpdateFunc( r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully validated the LVMVolumeGroup %s", lvg.Name)) r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to get VG %s for the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - found, vg := tryGetVG(r.sdsCache, lvg.Spec.ActualVGNameOnTheNode) + found, vg := r.tryGetVG(lvg.Spec.ActualVGNameOnTheNode) if !found { err := fmt.Errorf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode) r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error()) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error()) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -339,7 +397,7 @@ func (r *Reconciler) reconcileLVGUpdateFunc( updated, err := r.updateVGTagIfNeeded(ctx, lvg, vg) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to update VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGUpdateFailed", fmt.Sprintf("unable to update VG tag, err: %s", err.Error())) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGUpdateFailed", fmt.Sprintf("unable to update VG tag, err: %s", err.Error())) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -357,7 +415,7 @@ func (r *Reconciler) reconcileLVGUpdateFunc( err = r.resizePVIfNeeded(ctx, lvg) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to resize PV of the LVMVolumeGroup %s", lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "PVResizeFailed", fmt.Sprintf("unable to resize PV, err: %s", err.Error())) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "PVResizeFailed", fmt.Sprintf("unable to resize PV, err: %s", err.Error())) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -369,7 +427,7 @@ func (r *Reconciler) reconcileLVGUpdateFunc( err = r.extendVGIfNeeded(ctx, lvg, vg, pvs, blockDevices) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to extend VG of the LVMVolumeGroup %s", lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGExtendFailed", fmt.Sprintf("unable to extend VG, err: %s", err.Error())) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGExtendFailed", fmt.Sprintf("unable to extend VG, err: %s", err.Error())) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", 
internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -384,7 +442,7 @@ func (r *Reconciler) reconcileLVGUpdateFunc( err = r.reconcileThinPoolsIfNeeded(ctx, lvg, vg, lvs) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile thin-pools of the LVMVolumeGroup %s", lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolReconcileFailed", fmt.Sprintf("unable to reconcile thin-pools, err: %s", err.Error())) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolReconcileFailed", fmt.Sprintf("unable to reconcile thin-pools, err: %s", err.Error())) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -394,7 +452,7 @@ func (r *Reconciler) reconcileLVGUpdateFunc( } r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "Applied", "configuration has been applied") + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "Applied", "configuration has been applied") if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -423,7 +481,7 @@ func (r *Reconciler) reconcileLVGCreateFunc( if !exist { r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonCreating, "trying to apply the configuration") + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonCreating, "trying to apply the configuration") if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -434,7 +492,7 @@ func (r *Reconciler) reconcileLVGCreateFunc( valid, reason := r.validateLVGForCreateFunc(lvg, blockDevices) if !valid { r.log.Warning(fmt.Sprintf("[reconcileLVGCreateFunc] validation fails for the LVMVolumeGroup %s", lvg.Name)) - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -447,7 +505,7 @@ func (r *Reconciler) reconcileLVGCreateFunc( err := r.createVGComplex(lvg, blockDevices) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to create VG for the LVMVolumeGroup %s", lvg.Name)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGCreationFailed", fmt.Sprintf("unable to create VG, err: %s", err.Error())) + err = 
r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGCreationFailed", fmt.Sprintf("unable to create VG, err: %s", err.Error())) if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -460,7 +518,7 @@ func (r *Reconciler) reconcileLVGCreateFunc( for _, tp := range lvg.Spec.ThinPools { vgSize := countVGSizeByBlockDevices(blockDevices) - tpRequestedSize, err := getRequestedSizeFromString(tp.Size, vgSize) + tpRequestedSize, err := cutils.GetRequestedSizeFromString(tp.Size, vgSize) if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to get thin-pool %s requested size of the LVMVolumeGroup %s", tp.Name, lvg.Name)) return false, err @@ -476,7 +534,7 @@ func (r *Reconciler) reconcileLVGCreateFunc( } if err != nil { r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to create thin-pool %s of the LVMVolumeGroup %s, cmd: %s", tp.Name, lvg.Name, cmd)) - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolCreationFailed", fmt.Sprintf("unable to create thin-pool, err: %s", err.Error())) + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolCreationFailed", fmt.Sprintf("unable to create thin-pool, err: %s", err.Error())) if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) } @@ -487,7 +545,7 @@ func (r *Reconciler) reconcileLVGCreateFunc( r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] successfully created thin-pools for the LVMVolumeGroup %s", lvg.Name)) } - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "Success", "all configuration has been applied") + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "Success", "all configuration has been applied") if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) return true, err @@ -496,42 +554,6 @@ func (r *Reconciler) reconcileLVGCreateFunc( return false, nil } -func (r *Reconciler) deleteLVMVolumeGroup(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup, currentNode string) error { - r.log.Debug(fmt.Sprintf(`[DeleteLVMVolumeGroup] Node "%s" does not belong to VG "%s". It will be removed from LVM resource, name "%s"'`, currentNode, lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - for i, node := range lvg.Status.Nodes { - if node.Name == currentNode { - // delete node - lvg.Status.Nodes = append(lvg.Status.Nodes[:i], lvg.Status.Nodes[i+1:]...) - r.log.Info(fmt.Sprintf(`[DeleteLVMVolumeGroup] deleted node "%s" from LVMVolumeGroup "%s"`, node.Name, lvg.Name)) - } - } - - // If current LVMVolumeGroup has no nodes left, delete it. 
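Reviewer note (illustrative, not part of the diff): the inline deleteLVMVolumeGroup being removed here is replaced by r.lvgCl.DeleteLVMVolumeGroup, and thin-pool sizing now goes through cutils.GetRequestedSizeFromString, which keeps the semantics of the local helper removed further down: a plain quantity such as "10Gi" is parsed as-is, while a percentage is resolved against the passed VG size. A minimal usage sketch with made-up sizes:

	vgSize := resource.MustParse("100Gi")
	absolute, err := cutils.GetRequestedSizeFromString("10Gi", vgSize) // parsed directly; vgSize is not used
	if err != nil {
		return err
	}
	relative, err := cutils.GetRequestedSizeFromString("50%", vgSize) // half of vgSize, i.e. 50Gi
	if err != nil {
		return err
	}
	_ = absolute
	_ = relative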
- if len(lvg.Status.Nodes) == 0 { - start := time.Now() - err := r.cl.Delete(ctx, lvg) - r.metrics.APIMethodsDuration(Name, "delete").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.APIMethodsExecutionCount(Name, "delete").Inc() - if err != nil { - r.metrics.APIMethodsErrors(Name, "delete").Inc() - return err - } - r.log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name)) - } - - return nil -} - -func checkIfVGExist(vgName string, vgs []internal.VGData) bool { - for _, vg := range vgs { - if vg.VGName == vgName { - return true - } - } - - return false -} - func (r *Reconciler) shouldUpdateLVGLabels(lvg *v1alpha1.LVMVolumeGroup, labelKey, labelValue string) bool { if lvg.Labels == nil { r.log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has no labels.", lvg.Name)) @@ -563,7 +585,7 @@ func (r *Reconciler) shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG *v1alph return true } - if r.shouldUpdateLVGLabels(log, newLVG, LVGMetadateNameLabelKey, newLVG.Name) { + if r.shouldUpdateLVGLabels(newLVG, LVGMetadateNameLabelKey, newLVG.Name) { r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup's %s labels have been changed", newLVG.Name)) return true } @@ -576,7 +598,7 @@ func (r *Reconciler) shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG *v1alph for _, c := range newLVG.Status.Conditions { if c.Type == internal.TypeVGConfigurationApplied { if c.Reason == internal.ReasonUpdating || c.Reason == internal.ReasonCreating { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should not be reconciled as the LVMVolumeGroup %s reconciliation still in progress", newLVG.Name)) + r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should not be reconciled as the LVMVolumeGroup %s reconciliation still in progress", newLVG.Name)) return false } } @@ -585,7 +607,7 @@ func (r *Reconciler) shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG *v1alph for _, n := range newLVG.Status.Nodes { for _, d := range n.Devices { if !utils.AreSizesEqualWithinDelta(d.PVSize, d.DevSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s PV size is different to device size", newLVG.Name)) + r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s PV size is different to device size", newLVG.Name)) return true } } @@ -594,71 +616,6 @@ func (r *Reconciler) shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG *v1alph return false } -func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LVMVolumeGroup) bool { - return lvg.DeletionTimestamp != nil -} - -func (r *Reconciler) updateLVGConditionIfNeeded( - ctx context.Context, - lvg *v1alpha1.LVMVolumeGroup, - status v1.ConditionStatus, - conType, reason, message string, -) error { - exist := false - index := 0 - newCondition := v1.Condition{ - Type: conType, - Status: status, - ObservedGeneration: lvg.Generation, - LastTransitionTime: v1.NewTime(time.Now()), - Reason: reason, - Message: message, - } - - if lvg.Status.Conditions == nil { - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] the LVMVolumeGroup %s conditions is nil. 
Initialize them", lvg.Name)) - lvg.Status.Conditions = make([]v1.Condition, 0, 5) - } - - if len(lvg.Status.Conditions) > 0 { - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] there are some conditions in the LVMVolumeGroup %s. Tries to find a condition %s", lvg.Name, conType)) - for i, c := range lvg.Status.Conditions { - if c.Type == conType { - if checkIfEqualConditions(c, newCondition) { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no need to update condition %s in the LVMVolumeGroup %s as new and old condition states are the same", conType, lvg.Name)) - return nil - } - - index = i - exist = true - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was found in the LVMVolumeGroup %s at the index %d", conType, lvg.Name, i)) - } - } - - if !exist { - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was not found. Append it in the end of the LVMVolumeGroup %s conditions", conType, lvg.Name)) - lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) - } else { - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] insert the condition %s status %s reason %s message %s at index %d of the LVMVolumeGroup %s conditions", conType, status, reason, message, index, lvg.Name)) - lvg.Status.Conditions[index] = newCondition - } - } else { - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no conditions were found in the LVMVolumeGroup %s. Append the condition %s in the end", lvg.Name, conType)) - lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) - } - - r.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] tries to update the condition type %s status %s reason %s message %s of the LVMVolumeGroup %s", conType, status, reason, message, lvg.Name)) - return r.cl.Status().Update(ctx, lvg) -} - -func checkIfEqualConditions(first, second v1.Condition) bool { - return first.Type == second.Type && - first.Status == second.Status && - first.Reason == second.Reason && - first.Message == second.Message && - first.ObservedGeneration == second.ObservedGeneration -} - func (r *Reconciler) addLVGFinalizerIfNotExist(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { if slices.Contains(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) { return false, nil @@ -692,7 +649,7 @@ func (r *Reconciler) syncThinPoolsAllocationLimit(ctx context.Context, lvg *v1al updated = true lvg.Status.ThinPools[i].AllocationLimit = specLimits - space, err = getThinPoolAvailableSpace(lvg.Status.ThinPools[i].ActualSize, lvg.Status.ThinPools[i].AllocatedSize, specLimits) + space, err = cutils.GetThinPoolAvailableSpace(lvg.Status.ThinPools[i].ActualSize, lvg.Status.ThinPools[i].AllocatedSize, specLimits) if err != nil { r.log.Error(err, fmt.Sprintf("[syncThinPoolsAllocationLimit] unable to get thin pool %s available space", lvg.Status.ThinPools[i].Name)) return err @@ -720,56 +677,6 @@ func (r *Reconciler) syncThinPoolsAllocationLimit(ctx context.Context, lvg *v1al return nil } -func validateSpecBlockDevices(lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { - if len(blockDevices) == 0 { - return false, "none of specified BlockDevices were found" - } - - if len(lvg.Status.Nodes) > 0 { - lostBdNames := make([]string, 0, len(lvg.Status.Nodes[0].Devices)) - for _, n := range lvg.Status.Nodes { - for _, d := range n.Devices { - if _, found := blockDevices[d.BlockDevice]; !found { - lostBdNames = append(lostBdNames, d.BlockDevice) - } - } - } - - // that means some of the used BlockDevices no longer 
match the blockDeviceSelector - if len(lostBdNames) > 0 { - return false, fmt.Sprintf("these BlockDevices no longer match the blockDeviceSelector: %s", strings.Join(lostBdNames, ",")) - } - } - - for _, me := range lvg.Spec.BlockDeviceSelector.MatchExpressions { - if me.Key == internal.MetadataNameLabelKey { - if len(me.Values) != len(blockDevices) { - missedBds := make([]string, 0, len(me.Values)) - for _, bdName := range me.Values { - if _, exist := blockDevices[bdName]; !exist { - missedBds = append(missedBds, bdName) - } - } - - return false, fmt.Sprintf("unable to find specified BlockDevices: %s", strings.Join(missedBds, ",")) - } - } - } - - bdFromOtherNode := make([]string, 0, len(blockDevices)) - for _, bd := range blockDevices { - if bd.Status.NodeName != lvg.Spec.Local.NodeName { - bdFromOtherNode = append(bdFromOtherNode, bd.Name) - } - } - - if len(bdFromOtherNode) != 0 { - return false, fmt.Sprintf("block devices %s have different node names from LVMVolumeGroup Local.NodeName", strings.Join(bdFromOtherNode, ",")) - } - - return true, "" -} - func (r *Reconciler) deleteLVGIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { if lvg.DeletionTimestamp == nil { return false, nil @@ -778,7 +685,7 @@ func (r *Reconciler) deleteLVGIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVol vgs, _ := r.sdsCache.GetVGs() if !checkIfVGExist(lvg.Spec.ActualVGNameOnTheNode, vgs) { r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] VG %s was not yet created for the LVMVolumeGroup %s and the resource is marked as deleting. Delete the resource", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - removed, err := removeLVGFinalizerIfExist(ctx, lvg) + removed, err := r.removeLVGFinalizerIfExist(ctx, lvg) if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) return false, err @@ -790,7 +697,7 @@ func (r *Reconciler) deleteLVGIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVol r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) } - err = r.deleteLVMVolumeGroup(ctx, lvg, r.opts.NodeName) + err = r.lvgCl.DeleteLVMVolumeGroup(ctx, lvg) if err != nil { r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name)) return false, err @@ -801,53 +708,6 @@ func (r *Reconciler) deleteLVGIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVol return false, nil } -func checkIfLVGBelongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool { - return lvg.Spec.Local.NodeName == nodeName -} - -func extractPathsFromBlockDevices(targetDevices []string, blockDevices map[string]v1alpha1.BlockDevice) []string { - var paths []string - if len(targetDevices) > 0 { - paths = make([]string, 0, len(targetDevices)) - for _, bdName := range targetDevices { - bd := blockDevices[bdName] - paths = append(paths, bd.Status.Path) - } - } else { - paths = make([]string, 0, len(blockDevices)) - for _, bd := range blockDevices { - paths = append(paths, bd.Status.Path) - } - } - - return paths -} - -func getRequestedSizeFromString(size string, targetSpace resource.Quantity) (resource.Quantity, error) { - switch isPercentSize(size) { - case true: - strPercent := strings.Split(size, "%")[0] - percent, err := strconv.Atoi(strPercent) - if err != nil { - return resource.Quantity{}, err - } - lvSize := 
targetSpace.Value() * int64(percent) / 100 - return *resource.NewQuantity(lvSize, resource.BinarySI), nil - case false: - return resource.ParseQuantity(size) - } - - return resource.Quantity{}, nil -} - -func countVGSizeByBlockDevices(blockDevices map[string]v1alpha1.BlockDevice) resource.Quantity { - var totalVGSize int64 - for _, bd := range blockDevices { - totalVGSize += bd.Status.Size.Value() - } - return *resource.NewQuantity(totalVGSize, resource.BinarySI) -} - func (r *Reconciler) validateLVGForCreateFunc( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, @@ -876,7 +736,7 @@ func (r *Reconciler) validateLVGForCreateFunc( var totalThinPoolSize int64 for _, tp := range lvg.Spec.ThinPools { - tpRequestedSize, err := getRequestedSizeFromString(tp.Size, totalVGSize) + tpRequestedSize, err := cutils.GetRequestedSizeFromString(tp.Size, totalVGSize) if err != nil { reason.WriteString(err.Error()) continue @@ -991,7 +851,7 @@ func (r *Reconciler) validateLVGForUpdateFunc( newTotalVGSize := resource.NewQuantity(vg.VGSize.Value()+additionBlockDeviceSpace, resource.BinarySI) for _, specTp := range lvg.Spec.ThinPools { // might be a case when Thin-pool is already created, but is not shown in status - tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, *newTotalVGSize) + tpRequestedSize, err := cutils.GetRequestedSizeFromString(specTp.Size, *newTotalVGSize) if err != nil { reason.WriteString(err.Error()) continue @@ -1083,6 +943,10 @@ func (r *Reconciler) shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LVMVolumeGroup return vg != nil } +func (r *Reconciler) shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LVMVolumeGroup) bool { + return lvg.DeletionTimestamp != nil +} + func (r *Reconciler) reconcileThinPoolsIfNeeded( ctx context.Context, lvg *v1alpha1.LVMVolumeGroup, @@ -1098,7 +962,7 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( errs := strings.Builder{} for _, specTp := range lvg.Spec.ThinPools { - tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, lvg.Status.VGSize) + tpRequestedSize, err := cutils.GetRequestedSizeFromString(specTp.Size, lvg.Status.VGSize) if err != nil { r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to get requested thin-pool %s size of the LVMVolumeGroup %s", specTp.Name, lvg.Name)) return err @@ -1107,7 +971,7 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( if actualTp, exist := actualThinPools[specTp.Name]; !exist { r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s is not created yet. 
Create it", specTp.Name, lvg.Name)) if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) return err @@ -1123,10 +987,10 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s will be created with size %s", specTp.Name, lvg.Name, tpRequestedSize.String())) cmd, err = utils.CreateThinPool(specTp.Name, vg.VGName, tpRequestedSize.Value()) } - r.metrics.UtilsCommandsDuration(Name, "lvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "lvcreate").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "lvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "lvcreate").Inc() if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "lvcreate").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "lvcreate").Inc() r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to create thin-pool %s of the LVMVolumeGroup %s, cmd: %s", specTp.Name, lvg.Name, cmd)) errs.WriteString(fmt.Sprintf("unable to create thin-pool %s, err: %s. ", specTp.Name, err.Error())) continue @@ -1140,9 +1004,9 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( continue } - log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] the LVMVolumeGroup %s requested thin pool %s size is more than actual one. Resize it", lvg.Name, tpRequestedSize.String())) + r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] the LVMVolumeGroup %s requested thin pool %s size is more than actual one. 
Resize it", lvg.Name, tpRequestedSize.String())) if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err = r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) return err @@ -1175,7 +1039,7 @@ func (r *Reconciler) resizePVIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVolu for _, d := range n.Devices { if d.DevSize.Value()-d.PVSize.Value() > internal.ResizeDelta.Value() { if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) return err @@ -1186,10 +1050,10 @@ func (r *Reconciler) resizePVIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVolu start := time.Now() cmd, err := utils.ResizePV(d.Path) - r.metrics.UtilsCommandsDuration(Name, "pvresize").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "pvresize") + r.metrics.UtilsCommandsDuration(ReconcilerName, "pvresize").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvresize") if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "pvresize").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvresize").Inc() r.log.Error(err, fmt.Sprintf("[ResizePVIfNeeded] unable to resize PV %s of BlockDevice %s of LVMVolumeGroup %s, cmd: %s", d.Path, d.BlockDevice, lvg.Name, cmd)) errs.WriteString(fmt.Sprintf("unable to resize PV %s, err: %s. 
", d.Path, err.Error())) continue @@ -1241,7 +1105,7 @@ func (r *Reconciler) extendVGIfNeeded( } if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) return err @@ -1260,8 +1124,8 @@ func (r *Reconciler) extendVGIfNeeded( return nil } -func tryGetVG(sdsCache *cache.Cache, vgName string) (bool, internal.VGData) { - vgs, _ := sdsCache.GetVGs() +func (r *Reconciler) tryGetVG(vgName string) (bool, internal.VGData) { + vgs, _ := r.sdsCache.GetVGs() for _, vg := range vgs { if vg.VGName == vgName { return true, vg @@ -1303,42 +1167,27 @@ func (r *Reconciler) getLVForVG(vgName string) []string { return usedLVs } -func (r *Reconciler) getLVMVolumeGroup(ctx context.Context, name string) (*v1alpha1.LVMVolumeGroup, error) { - obj := &v1alpha1.LVMVolumeGroup{} - start := time.Now() - err := r.cl.Get(ctx, client.ObjectKey{ - Name: name, - }, obj) - r.metrics.APIMethodsDuration(Name, "get").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.APIMethodsExecutionCount(Name, "get").Inc() - if err != nil { - r.metrics.APIMethodsErrors(Name, "get").Inc() - return nil, err - } - return obj, nil -} - func (r *Reconciler) deleteVGIfExist(vgName string) error { vgs, _ := r.sdsCache.GetVGs() if !checkIfVGExist(vgName, vgs) { - log.Debug(fmt.Sprintf("[DeleteVGIfExist] no VG %s found, nothing to delete", vgName)) + r.log.Debug(fmt.Sprintf("[DeleteVGIfExist] no VG %s found, nothing to delete", vgName)) return nil } pvs, _ := r.sdsCache.GetPVs() if len(pvs) == 0 { err := errors.New("no any PV found") - log.Error(err, fmt.Sprintf("[DeleteVGIfExist] no any PV was found while deleting VG %s", vgName)) + r.log.Error(err, fmt.Sprintf("[DeleteVGIfExist] no any PV was found while deleting VG %s", vgName)) return err } start := time.Now() command, err := utils.RemoveVG(vgName) - r.metrics.UtilsCommandsDuration(Name, "vgremove").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "vgremove").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "vgremove").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgremove").Inc() r.log.Debug(command) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "vgremove").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgremove").Inc() r.log.Error(err, "RemoveVG "+command) return err } @@ -1352,11 +1201,11 @@ func (r *Reconciler) deleteVGIfExist(vgName string) error { start = time.Now() command, err = utils.RemovePV(pvsToRemove) - r.metrics.UtilsCommandsDuration(Name, "pvremove").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "pvremove").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "pvremove").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvremove").Inc() r.log.Debug(command) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "pvremove").Inc() + 
r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvremove").Inc() r.log.Error(err, "RemovePV "+command) return err } @@ -1369,11 +1218,11 @@ func (r *Reconciler) extendVGComplex(extendPVs []string, vgName string) error { for _, pvPath := range extendPVs { start := time.Now() command, err := utils.CreatePV(pvPath) - r.metrics.UtilsCommandsDuration(Name, "pvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "pvcreate").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "pvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvcreate").Inc() r.log.Debug(command) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "pvcreate").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvcreate").Inc() r.log.Error(err, "CreatePV ") return err } @@ -1381,11 +1230,11 @@ func (r *Reconciler) extendVGComplex(extendPVs []string, vgName string) error { start := time.Now() command, err := utils.ExtendVG(vgName, extendPVs) - r.metrics.UtilsCommandsDuration(Name, "vgextend").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "vgextend").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "vgextend").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgextend").Inc() r.log.Debug(command) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "vgextend").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgextend").Inc() r.log.Error(err, "ExtendVG ") return err } @@ -1399,11 +1248,11 @@ func (r *Reconciler) createVGComplex(lvg *v1alpha1.LVMVolumeGroup, blockDevices for _, path := range paths { start := time.Now() command, err := utils.CreatePV(path) - r.metrics.UtilsCommandsDuration(Name, "pvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "pvcreate").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "pvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvcreate").Inc() r.log.Debug(command) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "pvcreate").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvcreate").Inc() r.log.Error(err, fmt.Sprintf("[CreateVGComplex] unable to create PV by path %s", path)) return err } @@ -1415,22 +1264,22 @@ func (r *Reconciler) createVGComplex(lvg *v1alpha1.LVMVolumeGroup, blockDevices case Local: start := time.Now() cmd, err := utils.CreateVGLocal(lvg.Spec.ActualVGNameOnTheNode, lvg.Name, paths) - r.metrics.UtilsCommandsDuration(Name, "vgcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "vgcreate").Inc() - log.Debug(cmd) + r.metrics.UtilsCommandsDuration(ReconcilerName, "vgcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgcreate").Inc() + r.log.Debug(cmd) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "vgcreate").Inc() - log.Error(err, "error CreateVGLocal") + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgcreate").Inc() + r.log.Error(err, "error CreateVGLocal") return err } case Shared: start := time.Now() cmd, err := utils.CreateVGShared(lvg.Spec.ActualVGNameOnTheNode, lvg.Name, paths) - r.metrics.UtilsCommandsDuration(Name, "vgcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - 
r.metrics.UtilsCommandsExecutionCount(Name, "vgcreate").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "vgcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgcreate").Inc() r.log.Debug(cmd) if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "vgcreate").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgcreate").Inc() r.log.Error(err, "error CreateVGShared") return err } @@ -1446,35 +1295,35 @@ func (r *Reconciler) updateVGTagIfNeeded( lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData, ) (bool, error) { - found, tagName := checkTag(vg.VGTags) + found, tagName := cutils.CheckTag(vg.VGTags) if found && lvg.Name != tagName { if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := r.updateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") + err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) + r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) return false, err } } start := time.Now() cmd, err := utils.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", LVMVolumeGroupTag, tagName)) - r.metrics.UtilsCommandsDuration(Name, "vgchange").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "vgchange").Inc() - log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) + r.metrics.UtilsCommandsDuration(ReconcilerName, "vgchange").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgchange").Inc() + r.log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to delete LVMVolumeGroupTag: %s=%s, vg: %s", LVMVolumeGroupTag, tagName, vg.VGName)) - r.metrics.UtilsCommandsErrorsCount(Name, "vgchange").Inc() + r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to delete LVMVolumeGroupTag: %s=%s, vg: %s", LVMVolumeGroupTag, tagName, vg.VGName)) + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgchange").Inc() return false, err } start = time.Now() cmd, err = utils.VGChangeAddTag(vg.VGName, fmt.Sprintf("%s=%s", LVMVolumeGroupTag, lvg.Name)) - r.metrics.UtilsCommandsDuration(Name, "vgchange").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "vgchange").Inc() - log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) + r.metrics.UtilsCommandsDuration(ReconcilerName, "vgchange").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgchange").Inc() + r.log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add LVMVolumeGroupTag: %s=%s, vg: %s", LVMVolumeGroupTag, lvg.Name, vg.VGName)) - r.metrics.UtilsCommandsErrorsCount(Name, "vgchange").Inc() + r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add 
LVMVolumeGroupTag: %s=%s, vg: %s", LVMVolumeGroupTag, lvg.Name, vg.VGName)) + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgchange").Inc() return false, err } @@ -1486,7 +1335,7 @@ func (r *Reconciler) updateVGTagIfNeeded( func (r *Reconciler) extendThinPool(lvg *v1alpha1.LVMVolumeGroup, specThinPool v1alpha1.LVMVolumeGroupThinPoolSpec) error { volumeGroupFreeSpaceBytes := lvg.Status.VGSize.Value() - lvg.Status.AllocatedSize.Value() - tpRequestedSize, err := getRequestedSizeFromString(specThinPool.Size, lvg.Status.VGSize) + tpRequestedSize, err := cutils.GetRequestedSizeFromString(specThinPool.Size, lvg.Status.VGSize) if err != nil { return err } @@ -1506,10 +1355,10 @@ func (r *Reconciler) extendThinPool(lvg *v1alpha1.LVMVolumeGroup, specThinPool v r.log.Debug(fmt.Sprintf("[ExtendThinPool] thin-pool %s of the LVMVolumeGroup %s will be extend to size %s", specThinPool.Name, lvg.Name, tpRequestedSize.String())) cmd, err = utils.ExtendLV(tpRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, specThinPool.Name) } - r.metrics.UtilsCommandsDuration(Name, "lvextend").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) - r.metrics.UtilsCommandsExecutionCount(Name, "lvextend").Inc() + r.metrics.UtilsCommandsDuration(ReconcilerName, "lvextend").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) + r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "lvextend").Inc() if err != nil { - r.metrics.UtilsCommandsErrorsCount(Name, "lvextend").Inc() + r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "lvextend").Inc() r.log.Error(err, fmt.Sprintf("[ExtendThinPool] unable to extend LV, name: %s, cmd: %s", specThinPool.Name, cmd)) return err } @@ -1534,3 +1383,93 @@ func (r *Reconciler) addLVGLabelIfNeeded(ctx context.Context, lvg *v1alpha1.LVMV return true, nil } + +func checkIfVGExist(vgName string, vgs []internal.VGData) bool { + for _, vg := range vgs { + if vg.VGName == vgName { + return true + } + } + + return false +} + +func validateSpecBlockDevices(lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { + if len(blockDevices) == 0 { + return false, "none of specified BlockDevices were found" + } + + if len(lvg.Status.Nodes) > 0 { + lostBdNames := make([]string, 0, len(lvg.Status.Nodes[0].Devices)) + for _, n := range lvg.Status.Nodes { + for _, d := range n.Devices { + if _, found := blockDevices[d.BlockDevice]; !found { + lostBdNames = append(lostBdNames, d.BlockDevice) + } + } + } + + // that means some of the used BlockDevices no longer match the blockDeviceSelector + if len(lostBdNames) > 0 { + return false, fmt.Sprintf("these BlockDevices no longer match the blockDeviceSelector: %s", strings.Join(lostBdNames, ",")) + } + } + + for _, me := range lvg.Spec.BlockDeviceSelector.MatchExpressions { + if me.Key == internal.MetadataNameLabelKey { + if len(me.Values) != len(blockDevices) { + missedBds := make([]string, 0, len(me.Values)) + for _, bdName := range me.Values { + if _, exist := blockDevices[bdName]; !exist { + missedBds = append(missedBds, bdName) + } + } + + return false, fmt.Sprintf("unable to find specified BlockDevices: %s", strings.Join(missedBds, ",")) + } + } + } + + bdFromOtherNode := make([]string, 0, len(blockDevices)) + for _, bd := range blockDevices { + if bd.Status.NodeName != lvg.Spec.Local.NodeName { + bdFromOtherNode = append(bdFromOtherNode, bd.Name) + } + } + + if len(bdFromOtherNode) != 0 { + return false, fmt.Sprintf("block devices %s have different node names from LVMVolumeGroup Local.NodeName", 
strings.Join(bdFromOtherNode, ",")) + } + + return true, "" +} + +func checkIfLVGBelongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool { + return lvg.Spec.Local.NodeName == nodeName +} + +func extractPathsFromBlockDevices(targetDevices []string, blockDevices map[string]v1alpha1.BlockDevice) []string { + var paths []string + if len(targetDevices) > 0 { + paths = make([]string, 0, len(targetDevices)) + for _, bdName := range targetDevices { + bd := blockDevices[bdName] + paths = append(paths, bd.Status.Path) + } + } else { + paths = make([]string, 0, len(blockDevices)) + for _, bd := range blockDevices { + paths = append(paths, bd.Status.Path) + } + } + + return paths +} + +func countVGSizeByBlockDevices(blockDevices map[string]v1alpha1.BlockDevice) resource.Quantity { + var totalVGSize int64 + for _, bd := range blockDevices { + totalVGSize += bd.Status.Size.Value() + } + return *resource.NewQuantity(totalVGSize, resource.BinarySI) +} diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go b/images/agent/src/pkg/controller/lvg/reconciler_test.go similarity index 90% rename from images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go rename to images/agent/src/pkg/controller/lvg/reconciler_test.go index 9d62a24a..95dbed00 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go +++ b/images/agent/src/pkg/controller/lvg/reconciler_test.go @@ -1,4 +1,4 @@ -package controller +package lvg import ( "bytes" @@ -10,21 +10,26 @@ import ( "github.com/stretchr/testify/assert" errors2 "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" "agent/internal" "agent/pkg/cache" + cutils "agent/pkg/controller/utils" "agent/pkg/logger" "agent/pkg/monitoring" + "agent/pkg/test_utils" ) func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { - cl := NewFakeClient() + cl := test_utils.NewFakeClient() ctx := context.Background() log := logger.Logger{} metrics := monitoring.GetMetrics("") + ch := cache.New() + + r := NewReconciler(cl, log, metrics, ch, ReconcilerOptions{}) t.Run("validateLVGForUpdateFunc", func(t *testing.T) { t.Run("without_thin_pools_returns_true", func(t *testing.T) { @@ -76,10 +81,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() ch.StorePVs(pvs, bytes.Buffer{}) - valid, reason := validateLVGForUpdateFunc(log, ch, lvg, bds) + valid, reason := r.validateLVGForUpdateFunc(lvg, bds) if assert.True(t, valid) { assert.Equal(t, "", reason) } @@ -134,11 +138,10 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() - ch.StorePVs(pvs, bytes.Buffer{}) + r.sdsCache.StorePVs(pvs, bytes.Buffer{}) // new block device is not consumable - valid, _ := validateLVGForUpdateFunc(log, ch, lvg, bds) + valid, _ := r.validateLVGForUpdateFunc(lvg, bds) assert.False(t, valid) }) @@ -203,11 +206,10 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() - ch.StorePVs(pvs, bytes.Buffer{}) - ch.StoreVGs(vgs, bytes.Buffer{}) + r.sdsCache.StorePVs(pvs, bytes.Buffer{}) + r.sdsCache.StoreVGs(vgs, bytes.Buffer{}) - valid, reason := validateLVGForUpdateFunc(log, ch, lvg, bds) + valid, reason := r.validateLVGForUpdateFunc(lvg, bds) if assert.True(t, valid) { assert.Equal(t, "", reason) } @@ -274,11 +276,10 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() - ch.StorePVs(pvs, 
bytes.Buffer{}) - ch.StoreVGs(vgs, bytes.Buffer{}) + r.sdsCache.StorePVs(pvs, bytes.Buffer{}) + r.sdsCache.StoreVGs(vgs, bytes.Buffer{}) - valid, _ := validateLVGForUpdateFunc(log, ch, lvg, bds) + valid, _ := r.validateLVGForUpdateFunc(lvg, bds) assert.False(t, valid) }) }) @@ -313,7 +314,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { Spec: v1alpha1.LVMVolumeGroupSpec{}, } - valid, reason := validateLVGForCreateFunc(log, lvg, bds) + valid, reason := r.validateLVGForCreateFunc(lvg, bds) if assert.True(t, valid) { assert.Equal(t, "", reason) } @@ -338,7 +339,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { Spec: v1alpha1.LVMVolumeGroupSpec{}, } - valid, _ := validateLVGForCreateFunc(log, lvg, bds) + valid, _ := r.validateLVGForCreateFunc(lvg, bds) assert.False(t, valid) }) @@ -377,7 +378,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - valid, reason := validateLVGForCreateFunc(log, lvg, bds) + valid, reason := r.validateLVGForCreateFunc(lvg, bds) if assert.True(t, valid) { assert.Equal(t, "", reason) } @@ -418,7 +419,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - valid, _ := validateLVGForCreateFunc(log, lvg, bds) + valid, _ := r.validateLVGForCreateFunc(lvg, bds) assert.False(t, valid) }) }) @@ -432,9 +433,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() - - actual := identifyLVGReconcileFunc(lvg, ch) + actual := r.identifyLVGReconcileFunc(lvg) assert.Equal(t, CreateReconcile, actual) }) @@ -451,10 +450,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() - ch.StoreVGs(vgs, bytes.Buffer{}) + r.sdsCache.StoreVGs(vgs, bytes.Buffer{}) - actual := identifyLVGReconcileFunc(lvg, ch) + actual := r.identifyLVGReconcileFunc(lvg) assert.Equal(t, UpdateReconcile, actual) }) @@ -472,10 +470,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } - ch := cache.New() - ch.StoreVGs(vgs, bytes.Buffer{}) + r.sdsCache.StoreVGs(vgs, bytes.Buffer{}) - actual := identifyLVGReconcileFunc(lvg, ch) + actual := r.identifyLVGReconcileFunc(lvg) assert.Equal(t, DeleteReconcile, actual) }) }) @@ -484,7 +481,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("not_exist_no_remove", func(t *testing.T) { lvg := &v1alpha1.LVMVolumeGroup{} - removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) + removed, err := r.removeLVGFinalizerIfExist(ctx, lvg) if err != nil { t.Error(err) } @@ -510,7 +507,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } }() - removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) + removed, err := r.removeLVGFinalizerIfExist(ctx, lvg) if err != nil { t.Error(err) } @@ -550,7 +547,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { ch.StoreLVs(lvs, bytes.Buffer{}) expected := []string{firstLV} - actual := getLVForVG(ch, vgName) + actual := r.getLVForVG(vgName) assert.ElementsMatch(t, expected, actual) }) @@ -587,7 +584,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("getRequestedSizeFromString", func(t *testing.T) { t.Run("for_percent_size", func(t *testing.T) { - actual, err := getRequestedSizeFromString("50%", resource.MustParse("10G")) + actual, err := cutils.GetRequestedSizeFromString("50%", resource.MustParse("10G")) if err != nil { t.Error(err) } @@ -597,7 +594,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }) t.Run("for_number_size", func(t *testing.T) { - actual, err := getRequestedSizeFromString("5G", resource.MustParse("10G")) + actual, err := cutils.GetRequestedSizeFromString("5G", resource.MustParse("10G")) if err != nil { 
t.Error(err) } @@ -899,7 +896,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } }() - err = syncThinPoolsAllocationLimit(ctx, cl, log, lvg) + err = r.syncThinPoolsAllocationLimit(ctx, lvg) if err != nil { t.Error(err) } @@ -933,7 +930,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } }() - added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg) + added, err := r.addLVGFinalizerIfNotExist(ctx, lvg) if err != nil { t.Error(err) } @@ -970,7 +967,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } }() - added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg) + added, err := r.addLVGFinalizerIfNotExist(ctx, lvg) if err != nil { t.Error(err) } @@ -1012,7 +1009,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Error(err) } - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, badReason, "") + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, badReason, "") if err != nil { t.Error(err) } @@ -1055,7 +1052,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Error(err) } - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "", "") + err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "", "") if err != nil { t.Error(err) } @@ -1069,14 +1066,14 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { lvg := &v1alpha1.LVMVolumeGroup{} lvg.DeletionTimestamp = &v1.Time{} - assert.True(t, shouldReconcileLVGByDeleteFunc(lvg)) + assert.True(t, r.shouldReconcileLVGByDeleteFunc(lvg)) }) t.Run("returns_false", func(t *testing.T) { lvg := &v1alpha1.LVMVolumeGroup{} lvg.DeletionTimestamp = nil - assert.False(t, shouldReconcileLVGByDeleteFunc(lvg)) + assert.False(t, r.shouldReconcileLVGByDeleteFunc(lvg)) }) }) @@ -1085,7 +1082,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { oldLVG := &v1alpha1.LVMVolumeGroup{} newLVG := &v1alpha1.LVMVolumeGroup{} newLVG.DeletionTimestamp = &v1.Time{} - assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) + assert.True(t, r.shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG)) }) t.Run("spec_is_diff_returns_true", func(t *testing.T) { @@ -1093,7 +1090,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { newLVG := &v1alpha1.LVMVolumeGroup{} oldLVG.Spec.BlockDeviceSelector = &v1.LabelSelector{MatchLabels: map[string]string{"first": "second"}} newLVG.Spec.BlockDeviceSelector = &v1.LabelSelector{MatchLabels: map[string]string{"second": "second"}} - assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) + assert.True(t, r.shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG)) }) t.Run("condition_vg_configuration_applied_is_updating_returns_false", func(t *testing.T) { @@ -1107,7 +1104,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { Reason: internal.ReasonUpdating, }, } - assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) + assert.False(t, r.shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG)) }) t.Run("condition_vg_configuration_applied_is_creating_returns_false", func(t *testing.T) { @@ -1121,7 +1118,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } newLVG.Labels = map[string]string{LVGMetadateNameLabelKey: newLVG.Name} - assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) + assert.False(t, r.shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG)) }) t.Run("label_is_not_the_same_returns_true", func(t *testing.T) { @@ -1135,7 
+1132,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, } newLVG.Labels = map[string]string{LVGMetadateNameLabelKey: "some-other-name"} - assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) + assert.True(t, r.shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG)) }) t.Run("dev_size_and_pv_size_are_diff_returns_true", func(t *testing.T) { @@ -1153,25 +1150,25 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { Name: "some-node", }, } - assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) + assert.True(t, r.shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG)) }) }) t.Run("shouldUpdateLVGLabels", func(t *testing.T) { t.Run("labels_nil_returns_true", func(t *testing.T) { lvg := &v1alpha1.LVMVolumeGroup{} - assert.True(t, shouldUpdateLVGLabels(log, lvg, "key", "value")) + assert.True(t, r.shouldUpdateLVGLabels(lvg, "key", "value")) }) t.Run("no_such_label_returns_true", func(t *testing.T) { lvg := &v1alpha1.LVMVolumeGroup{} lvg.Labels = map[string]string{"key": "value"} - assert.True(t, shouldUpdateLVGLabels(log, lvg, "other-key", "value")) + assert.True(t, r.shouldUpdateLVGLabels(lvg, "other-key", "value")) }) t.Run("key_exists_other_value_returns_true", func(t *testing.T) { const key = "key" lvg := &v1alpha1.LVMVolumeGroup{} lvg.Labels = map[string]string{key: "value"} - assert.True(t, shouldUpdateLVGLabels(log, lvg, key, "other-value")) + assert.True(t, r.shouldUpdateLVGLabels(lvg, key, "other-value")) }) t.Run("all_good_returns_false", func(t *testing.T) { const ( @@ -1180,7 +1177,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { ) lvg := &v1alpha1.LVMVolumeGroup{} lvg.Labels = map[string]string{key: value} - assert.False(t, shouldUpdateLVGLabels(log, lvg, key, value)) + assert.False(t, r.shouldUpdateLVGLabels(lvg, key, value)) }) }) @@ -1241,7 +1238,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } assert.Equal(t, lvgName, lvgCheck.Name) - err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvgToDelete, nodeName) + err = r.lvgCl.DeleteLVMVolumeGroup(ctx, lvgToDelete) if err != nil { t.Error(err) } @@ -1275,7 +1272,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - actual, err := getLVMVolumeGroup(ctx, cl, metrics, name) + actual, err := r.lvgCl.GetLVMVolumeGroup(ctx, name) if assert.NoError(t, err) { assert.NotNil(t, actual) assert.Equal(t, name, actual.Name) @@ -1302,7 +1299,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }() } - actual, err := getLVMVolumeGroup(ctx, cl, metrics, "another-name") + actual, err := r.lvgCl.GetLVMVolumeGroup(ctx, "another-name") if assert.EqualError(t, err, "lvmvolumegroups.storage.deckhouse.io \"another-name\" not found") { assert.Nil(t, actual) diff --git a/images/agent/src/pkg/controller/lvg/utils.go b/images/agent/src/pkg/controller/lvg/utils.go new file mode 100644 index 00000000..99e56a10 --- /dev/null +++ b/images/agent/src/pkg/controller/lvg/utils.go @@ -0,0 +1,30 @@ +package lvg + +import ( + "agent/internal" + + "github.com/deckhouse/sds-node-configurator/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func checkIfConditionIsTrue(lvg *v1alpha1.LVMVolumeGroup, conType string) bool { + // this check prevents infinite resource updating after a retry + for _, c := range lvg.Status.Conditions { + if c.Type == conType && c.Status == v1.ConditionTrue { + return true + } + } + + return false +} + +func isThinPool(lv internal.LVData) bool { + return string(lv.LVAttr[0]) == "t" +} + +func getVGAllocatedSize(vg 
internal.VGData) resource.Quantity { + allocatedSize := vg.VGSize + allocatedSize.Sub(vg.VGFree) + return allocatedSize +} diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go deleted file mode 100644 index 904f9d56..00000000 --- a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go +++ /dev/null @@ -1,263 +0,0 @@ -package controller - -import ( - "context" - "errors" - "fmt" - "reflect" - "time" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - k8serr "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "agent/config" - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" -) - -const ( - LVMLogicalVolumeExtenderCtrlName = "lvm-logical-volume-extender-controller" -) - -func RunLVMLogicalVolumeExtenderWatcherController( - mgr manager.Manager, - cfg config.Options, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, -) error { - cl := mgr.GetClient() - mgrCache := mgr.GetCache() - - c, err := controller.New(LVMLogicalVolumeExtenderCtrlName, mgr, controller.Options{ - Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] starts the reconciliation for the LVMVolumeGroup %s", request.NamespacedName.String())) - - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] tries to get the LVMVolumeGroup %s", request.Name)) - lvg, err := getLVMVolumeGroup(ctx, cl, metrics, request.Name) - if err != nil { - if k8serr.IsNotFound(err) { - log.Error(err, fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] LVMVolumeGroup %s not found (probably was deleted). Stop the reconcile", request.Name)) - return reconcile.Result{}, nil - } - - log.Error(err, fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] unable to get the LVMVolumeGroup %s", request.Name)) - return reconcile.Result{}, err - } - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] successfully got the LVMVolumeGroup %s", request.Name)) - - if !shouldLLVExtenderReconcileEvent(log, lvg, cfg.NodeName) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] no need to reconcile a request for the LVMVolumeGroup %s", lvg.Name)) - return reconcile.Result{}, nil - } - - shouldRequeue := ReconcileLVMLogicalVolumeExtension(ctx, cl, metrics, log, sdsCache, lvg) - if shouldRequeue { - log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] Reconciler needs a retry for the LVMVolumeGroup %s. 
Retry in %s", lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) - return reconcile.Result{ - RequeueAfter: cfg.VolumeGroupScanIntervalSec, - }, nil - } - - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] successfully reconciled LVMLogicalVolumes for the LVMVolumeGroup %s", lvg.Name)) - return reconcile.Result{}, nil - }), - }) - if err != nil { - log.Error(err, "[RunLVMLogicalVolumeExtenderWatcherController] unable to create a controller") - return err - } - - err = c.Watch(source.Kind(mgrCache, &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got a Create event for the LVMVolumeGroup %s", e.Object.GetName())) - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} - q.Add(request) - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] added the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) - }, - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got an Update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} - q.Add(request) - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] added the LVMVolumeGroup %s to the Reconcilers queue", e.ObjectNew.GetName())) - }, - })) - if err != nil { - log.Error(err, "[RunLVMLogicalVolumeExtenderWatcherController] unable to watch the events") - return err - } - - return nil -} - -func shouldLLVExtenderReconcileEvent(log logger.Logger, newLVG *v1alpha1.LVMVolumeGroup, nodeName string) bool { - // for new LVMVolumeGroups - if reflect.DeepEqual(newLVG.Status, v1alpha1.LVMVolumeGroupStatus{}) { - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status is not initialized yet", newLVG.Name)) - return false - } - - if !belongsToNode(newLVG, nodeName) { - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as it does not belong to the node %s", newLVG.Name, nodeName)) - return false - } - - if newLVG.Status.Phase != internal.PhaseReady { - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status.Phase is not Ready", newLVG.Name)) - return false - } - - return true -} - -func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, metrics monitoring.Metrics, log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup) bool { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name)) - llvs, err := getAllLLVsWithPercentSize(ctx, cl, lvg.Name) - if err != nil { - log.Error(err, "[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources") - return true - } - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got LLV resources for the LVMVolumeGroup %s", lvg.Name)) - - if len(llvs) == 0 { - 
log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] no LVMLogicalVolumes with percent size were found for the LVMVolumeGroup %s", lvg.Name)) - return false - } - - shouldRetry := false - for _, llv := range llvs { - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] starts to reconcile the LVMLogicalVolume %s", llv.Name)) - llvRequestedSize, err := getLLVRequestedSize(&llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to get requested size of the LVMLogicalVolume %s", llv.Name)) - shouldRetry = true - continue - } - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got the requested size of the LVMLogicalVolume %s, size: %s", llv.Name, llvRequestedSize.String())) - - lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if lv == nil { - err = fmt.Errorf("lv %s not found", llv.Spec.ActualLVNameOnTheNode) - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to find LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, err.Error()) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) - } - shouldRetry = true - continue - } - - if utils.AreSizesEqualWithinDelta(llvRequestedSize, lv.Data.LVSize, internal.ResizeDelta) { - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should not be extended", llv.Name)) - continue - } - - if llvRequestedSize.Value() < lv.Data.LVSize.Value() { - log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s requested size %s is less than actual one on the node %s", llv.Name, llvRequestedSize.String(), lv.Data.LVSize.String())) - continue - } - - freeSpace := getFreeLVGSpaceForLLV(lvg, &llv) - if llvRequestedSize.Value()+internal.ResizeDelta.Value() > freeSpace.Value() { - err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend the LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) - shouldRetry = true - } - continue - } - - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should be extended from %s to %s size", llv.Name, llv.Status.ActualSize.String(), llvRequestedSize.String())) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseResizing, "") - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) - shouldRetry = true - continue - } - - cmd, err := utils.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) - if err != nil { - log.Error(err, 
fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) - } - shouldRetry = true - continue - } - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s has been successfully extended", llv.Name)) - - var ( - maxAttempts = 5 - currentAttempts = 0 - ) - for currentAttempts < maxAttempts { - lv = sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if utils.AreSizesEqualWithinDelta(lv.Data.LVSize, llvRequestedSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s of the LVMLogicalVolume %s was successfully updated in the cache", lv.Data.LVName, llv.Name)) - break - } - - log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s size of the LVMLogicalVolume %s was not yet updated in the cache, retry...", lv.Data.LVName, llv.Name)) - currentAttempts++ - time.Sleep(1 * time.Second) - } - - if currentAttempts == maxAttempts { - err = fmt.Errorf("LV %s is not updated in the cache", lv.Data.LVName) - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to resize the LVMLogicalVolume %s", llv.Name)) - shouldRetry = true - - if err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, err.Error()); err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) - } - continue - } - - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, &llv, lv.Data.LVSize) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) - shouldRetry = true - continue - } - - if updated { - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s was successfully updated", llv.Name)) - } else { - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] no need to update the LVMLogicalVolume %s", llv.Name)) - } - } - - return shouldRetry -} - -func getAllLLVsWithPercentSize(ctx context.Context, cl client.Client, lvgName string) ([]v1alpha1.LVMLogicalVolume, error) { - llvList := &v1alpha1.LVMLogicalVolumeList{} - err := cl.List(ctx, llvList) - if err != nil { - return nil, err - } - - result := make([]v1alpha1.LVMLogicalVolume, 0, len(llvList.Items)) - for _, llv := range llvList.Items { - if llv.Spec.LVMVolumeGroupName == lvgName && isPercentSize(llv.Spec.Size) { - result = append(result, llv) - } - } - - return result, nil -} diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_snapshot_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_snapshot_watcher.go deleted file mode 100644 index 8dea5c32..00000000 --- a/images/agent/src/pkg/controller/lvm_logical_volume_snapshot_watcher.go +++ /dev/null @@ -1,490 +0,0 @@ -package controller - -import ( - "agent/config" - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" - "context" - "errors" - "fmt" - "reflect" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - "github.com/google/go-cmp/cmp" - k8serr "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - 
"sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - lvmLogicalVolumeSnapshotWatcherCtrlName = "lvm-logical-volume-snapshot-watcher-controller" -) - -func RunLVMLogicalVolumeSnapshotWatcherController( - mgr manager.Manager, - cfg config.Options, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, -) (controller.Controller, error) { - cl := mgr.GetClient() - - c, err := controller.New(lvmLogicalVolumeSnapshotWatcherCtrlName, mgr, controller.Options{ - Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] Reconciler starts reconciliation of the LVMLogicalVolume: %s", request.Name)) - - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] tries to get the LVMLogicalVolume %s", request.Name)) - llv := &v1alpha1.LVMLogicalVolume{} - err := cl.Get(ctx, request.NamespacedName, llv) - if err != nil { - if k8serr.IsNotFound(err) { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMLogicalVolume %s not found. Object has probably been deleted", request.NamespacedName)) - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - lvg, err := getLVMVolumeGroup(ctx, cl, metrics, llv.Spec.LVMVolumeGroupName) - if err != nil { - if k8serr.IsNotFound(err) { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMVolumeGroup %s not found for LVMLogicalVolume %s. Retry in %s", llv.Spec.LVMVolumeGroupName, llv.Name, cfg.VolumeGroupScanIntervalSec.String())) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, fmt.Sprintf("LVMVolumeGroup %s not found", llv.Spec.LVMVolumeGroupName)) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, err - } - - return reconcile.Result{ - RequeueAfter: cfg.VolumeGroupScanIntervalSec, - }, nil - } - - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, fmt.Sprintf("Unable to get selected LVMVolumeGroup, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - } - return reconcile.Result{}, err - } - - if !belongsToNode(lvg, cfg.NodeName) { - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMVolumeGroup %s of the LVMLogicalVolume %s does not belongs to the current node: %s. Reconciliation stopped", lvg.Name, llv.Name, cfg.NodeName)) - return reconcile.Result{}, nil - } - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMVolumeGroup %s of the LVMLogicalVolume %s belongs to the current node: %s. Reconciliation continues", lvg.Name, llv.Name, cfg.NodeName)) - - // this case prevents the unexpected behavior when the controller runs up with existing LVMLogicalVolumes - if vgs, _ := sdsCache.GetVGs(); len(vgs) == 0 { - log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to reconcile the request as no VG was found in the cache. 
Retry in %s", cfg.VolumeGroupScanIntervalSec.String())) - return reconcile.Result{RequeueAfter: cfg.VolumeGroupScanIntervalSec}, nil - } - - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - added, err := addLLVFinalizerIfNotExist(ctx, cl, log, metrics, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, err - } - if added { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] successfully added the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - } else { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] no need to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - } - - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts to validate the LVMLogicalVolume %s", llv.Name)) - valid, reason := validateLVMLogicalVolume(sdsCache, llv, lvg) - if !valid { - log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMLogicalVolume %s is not valid, reason: %s", llv.Name, reason)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, reason) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil - } - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] successfully validated the LVMLogicalVolume %s", llv.Name)) - - shouldRequeue, err := ReconcileLVMLogicalVolume(ctx, cl, log, metrics, sdsCache, llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] an error occurred while reconciling the LVMLogicalVolume: %s", request.Name)) - updErr := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, err.Error()) - if updErr != nil { - log.Error(updErr, fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, updErr - } - } - if shouldRequeue { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues were occurred while reconciliation the LVMLogicalVolume %s. 
Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String())) - return reconcile.Result{RequeueAfter: cfg.LLVRequeueIntervalSec}, nil - } - - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] successfully ended reconciliation of the LVMLogicalVolume %s", request.Name)) - return reconcile.Result{}, nil - }), - MaxConcurrentReconciles: 10, - }) - - if err != nil { - log.Error(err, "[RunLVMLogicalVolumeWatcherController] unable to create controller") - return nil, err - } - - err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMLogicalVolume{}, handler.TypedFuncs[*v1alpha1.LVMLogicalVolume, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got a create event for the LVMLogicalVolume: %s", e.Object.GetName())) - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} - q.Add(request) - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] added the request of the LVMLogicalVolume %s to Reconciler", e.Object.GetName())) - }, - - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got an update event for the LVMLogicalVolume: %s", e.ObjectNew.GetName())) - // TODO: Figure out how to log it in our logger. - if cfg.Loglevel == "4" { - fmt.Println("==============START DIFF==================") - fmt.Println(cmp.Diff(e.ObjectOld, e.ObjectNew)) - fmt.Println("==============END DIFF==================") - } - - if reflect.DeepEqual(e.ObjectOld.Spec, e.ObjectNew.Spec) && e.ObjectNew.DeletionTimestamp == nil { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] no target changes were made for the LVMLogicalVolume %s. 
No need to reconcile the request", e.ObjectNew.Name)) - return - } - - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.Namespace, Name: e.ObjectNew.Name}} - q.Add(request) - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] added the request of the LVMLogicalVolume %s to Reconciler", e.ObjectNew.GetName())) - }, - })) - - if err != nil { - log.Error(err, "[RunLVMLogicalVolumeWatcherController] the controller is unable to watch") - return nil, err - } - - return c, err -} - -func ReconcileLVMLogicalVolumeSnapshot( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts the reconciliation for the LVMLogicalVolume %s", llv.Name)) - - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to identify the reconciliation type for the LVMLogicalVolume %s", llv.Name)) - log.Trace(fmt.Sprintf("[ReconcileLVMLogicalVolume] %+v", llv)) - - switch identifyReconcileFunc(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv) { - case CreateReconcile: - return reconcileLLVCreateFunc(ctx, cl, log, metrics, sdsCache, llv, lvg) - case UpdateReconcile: - return reconcileLLVUpdateFunc(ctx, cl, log, metrics, sdsCache, llv, lvg) - case DeleteReconcile: - return reconcileLLVDeleteFunc(ctx, cl, log, metrics, sdsCache, llv, lvg) - default: - log.Info(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s has compeleted configuration and should not be reconciled", llv.Name)) - if llv.Status.Phase != LLVStatusPhaseCreated { - log.Warning(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s should not be reconciled but has an unexpected phase: %s. 
Setting the phase to %s", llv.Name, llv.Status.Phase, LLVStatusPhaseCreated)) - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseCreated, "") - if err != nil { - return true, err - } - } - } - - return false, nil -} - -func reconcileLLVSCreateFunc( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - - // this check prevents infinite resource updating after retries - if llv.Status == nil { - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhasePending, "") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - } - llvRequestSize, err := getLLVRequestedSize(llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) - return false, err - } - - freeSpace := getFreeLVGSpaceForLLV(lvg, llv) - log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s type: %s requested size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, llvRequestSize.String(), freeSpace.String())) - - if !utils.AreSizesEqualWithinDelta(llvRequestSize, freeSpace, internal.ResizeDelta) { - if freeSpace.Value() < llvRequestSize.Value()+internal.ResizeDelta.Value() { - err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than the actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) - - // we return true cause the user might manage LVMVolumeGroup free space without changing the LLV - return true, err - } - } - - var cmd string - switch llv.Spec.Type { - case Thick: - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s will be created in VG %s with size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llvRequestSize.String())) - cmd, err = utils.CreateThickLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value(), isContiguous(llv)) - case Thin: - if llv.Spec.Source == nil { - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) - cmd, err = utils.CreateThinLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Thin.PoolName, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value()) - } else { - // volume is a clone - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] Snapshot (for source %s) LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.Source.Name, llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) - - var sourceVgName, sourceVolumeName string - if llv.Spec.Source.Kind == "LVMLogicalVolume" { - sourceLlv := &v1alpha1.LVMLogicalVolume{} - if err = cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceLlv); err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to find source LVMLogicalVolume %s", llv.Spec.Source.Name)) - return false, 
err - } - - sourceVolumeName = sourceLlv.Spec.ActualLVNameOnTheNode - sourceVgName = sourceLlv.Spec.LVMVolumeGroupName - - // TODO snapshots: validate source llv - } else if llv.Spec.Source.Kind == "LVMLogicalVolumeSnapshot" { - sourceSnapshot := &v1alpha1.LVMLogicalVolumeSnapshot{} - if err = cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceSnapshot); err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to find source LVMLogicalVolumeSnapshot %s", llv.Spec.Source.Name)) - return false, err - } - sourceVolumeName = sourceSnapshot.Spec.ActualLVNameOnTheNode - sourceVgName = sourceSnapshot.Spec.LVMVolumeGroupName - // TODO snapshots: validate source snapshot - } else { - return false, fmt.Errorf("source kind is not supported: %s", llv.Spec.Source.Kind) - } - cmd, err = utils.CreateThinLogicalVolumeSnapshot(llv.Spec.ActualLVNameOnTheNode, sourceVgName, sourceVolumeName) - - } - } - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] runs cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to create a %s LogicalVolume for the LVMLogicalVolume %s", llv.Spec.Type, llv.Name)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully created LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) - - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] adds the LV %s to the cache", llv.Spec.ActualLVNameOnTheNode)) - sdsCache.AddLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] tries to get the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) - actualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if actualSize.Value() == 0 { - log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] unable to get actual size for LV %s in VG %s (likely LV was not found in the cache), retry...", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) - return true, nil - } - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] successfully got the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) - log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s in VG: %s has actual size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String())) - - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, actualSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - - if updated { - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully updated the LVMLogicalVolume %s status phase to Created", llv.Name)) - } else { - log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] LVMLogicalVolume %s status phase was not updated to Created due to the resource has already have the same phase", llv.Name)) - } - - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully ended the reconciliation for the LVMLogicalVolume %s", llv.Name)) - return false, nil -} - -func reconcileLLVSUpdateFunc( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - - // status might be nil if a user creates the resource with LV name which matches existing 
LV on the node - if llv.Status == nil { - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhasePending, "") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - } - - // it needs to get current LV size from the node as status might be nil - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size before the extension", llv.Name)) - actualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if actualSize.Value() == 0 { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s has zero size (likely LV was not updated in the cache) ", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - return true, nil - } - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size %s before the extension", llv.Name, actualSize.String())) - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to count the LVMLogicalVolume %s requested size", llv.Name)) - llvRequestSize, err := getLLVRequestedSize(llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) - return false, err - } - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String())) - - if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String())) - - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, actualSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - - if updated { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully updated the LVMLogicalVolume %s status phase to Created", llv.Name)) - } else { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] no need to update the LVMLogicalVolume %s status phase to Created", llv.Name)) - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) - - return false, nil - } - - extendingSize := subtractQuantity(llvRequestSize, actualSize) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has extending size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, extendingSize.String())) - if extendingSize.Value() < 0 { - err = fmt.Errorf("specified LV size %dB is less than actual one on the node %dB", llvRequestSize.Value(), actualSize.Value()) - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to extend the LVMLogicalVolume %s", llv.Name)) - return false, err - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s should be resized", llv.Name)) - // this check prevents infinite resource updates after retry - if llv.Status.Phase != Failed { - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseResizing, "") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - } - - freeSpace := getFreeLVGSpaceForLLV(lvg, llv) - 
log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s, type: %s, extending size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, extendingSize.String(), freeSpace.String())) - - if !utils.AreSizesEqualWithinDelta(freeSpace, extendingSize, internal.ResizeDelta) { - if freeSpace.Value() < extendingSize.Value()+internal.ResizeDelta.Value() { - err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) - - // returns true cause a user might manage LVG free space without changing the LLV - return true, err - } - } - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s will be extended with size: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llvRequestSize.String())) - cmd, err := utils.ExtendLV(llvRequestSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] runs cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to ExtendLV, name: %s, type: %s", llv.Spec.ActualLVNameOnTheNode, llv.Spec.Type)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully extended LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size after the extension", llv.Name)) - newActualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - - // this case might be triggered if sds cache will not update lv state in time - if newActualSize.Value() == actualSize.Value() { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s was extended but cache is not updated yet. 
It will be retried", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - return true, nil - } - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size before the extension", llv.Name)) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s actual size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, newActualSize.String())) - - // need this here as a user might create the LLV with existing LV - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, newActualSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - - if updated { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully updated the LVMLogicalVolume %s status phase to Created", llv.Name)) - } else { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] no need to update the LVMLogicalVolume %s status phase to Created", llv.Name)) - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) - return false, nil -} - -func reconcileLLVSDeleteFunc( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - - // The controller won't remove the LLV resource and LV volume till the resource has any other finalizer. - if len(llv.Finalizers) != 0 { - if len(llv.Finalizers) > 1 || - llv.Finalizers[0] != internal.SdsNodeConfiguratorFinalizer { - log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete LVMLogicalVolume %s for now due to it has any other finalizer", llv.Name)) - return false, nil - } - } - - err := deleteLVIfNeeded(log, sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully deleted the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) - - err = removeLLVFinalizersIfExist(ctx, cl, metrics, log, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to remove finalizers from the LVMVolumeGroup %s", llv.Name)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) - return false, nil -} diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go deleted file mode 100644 index 56cf530e..00000000 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go +++ /dev/null @@ -1,491 +0,0 @@ -package controller - -import ( - "context" - "errors" - "fmt" - "reflect" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - "github.com/google/go-cmp/cmp" - k8serr "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - 
"sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "agent/config" - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" -) - -const ( - Thick = "Thick" - Thin = "Thin" - - lvmLogicalVolumeWatcherCtrlName = "lvm-logical-volume-watcher-controller" - - LLVStatusPhaseCreated = "Created" - LLVStatusPhasePending = "Pending" - LLVStatusPhaseResizing = "Resizing" - LLVStatusPhaseFailed = "Failed" -) - -func RunLVMLogicalVolumeWatcherController( - mgr manager.Manager, - cfg config.Options, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, -) (controller.Controller, error) { - cl := mgr.GetClient() - - c, err := controller.New(lvmLogicalVolumeWatcherCtrlName, mgr, controller.Options{ - Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] Reconciler starts reconciliation of the LVMLogicalVolume: %s", request.Name)) - - log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] tries to get the LVMLogicalVolume %s", request.Name)) - llv := &v1alpha1.LVMLogicalVolume{} - err := cl.Get(ctx, request.NamespacedName, llv) - if err != nil { - if k8serr.IsNotFound(err) { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMLogicalVolume %s not found. Object has probably been deleted", request.NamespacedName)) - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - lvg, err := getLVMVolumeGroup(ctx, cl, metrics, llv.Spec.LVMVolumeGroupName) - if err != nil { - if k8serr.IsNotFound(err) { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMVolumeGroup %s not found for LVMLogicalVolume %s. Retry in %s", llv.Spec.LVMVolumeGroupName, llv.Name, cfg.VolumeGroupScanIntervalSec.String())) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, fmt.Sprintf("LVMVolumeGroup %s not found", llv.Spec.LVMVolumeGroupName)) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, err - } - - return reconcile.Result{ - RequeueAfter: cfg.VolumeGroupScanIntervalSec, - }, nil - } - - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, fmt.Sprintf("Unable to get selected LVMVolumeGroup, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - } - return reconcile.Result{}, err - } - - if !belongsToNode(lvg, cfg.NodeName) { - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMVolumeGroup %s of the LVMLogicalVolume %s does not belongs to the current node: %s. Reconciliation stopped", lvg.Name, llv.Name, cfg.NodeName)) - return reconcile.Result{}, nil - } - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMVolumeGroup %s of the LVMLogicalVolume %s belongs to the current node: %s. Reconciliation continues", lvg.Name, llv.Name, cfg.NodeName)) - - // this case prevents the unexpected behavior when the controller runs up with existing LVMLogicalVolumes - if vgs, _ := sdsCache.GetVGs(); len(vgs) == 0 { - log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to reconcile the request as no VG was found in the cache. 
Retry in %s", cfg.VolumeGroupScanIntervalSec.String())) - return reconcile.Result{RequeueAfter: cfg.VolumeGroupScanIntervalSec}, nil - } - - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - added, err := addLLVFinalizerIfNotExist(ctx, cl, log, metrics, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, err - } - if added { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] successfully added the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - } else { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] no need to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - } - - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts to validate the LVMLogicalVolume %s", llv.Name)) - valid, reason := validateLVMLogicalVolume(sdsCache, llv, lvg) - if !valid { - log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolume] the LVMLogicalVolume %s is not valid, reason: %s", llv.Name, reason)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, reason) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil - } - log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolume] successfully validated the LVMLogicalVolume %s", llv.Name)) - - shouldRequeue, err := ReconcileLVMLogicalVolume(ctx, cl, log, metrics, sdsCache, llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] an error occurred while reconciling the LVMLogicalVolume: %s", request.Name)) - updErr := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, err.Error()) - if updErr != nil { - log.Error(updErr, fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to update the LVMLogicalVolume %s", llv.Name)) - return reconcile.Result{}, updErr - } - } - if shouldRequeue { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues were occurred while reconciliation the LVMLogicalVolume %s. 
Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String())) - return reconcile.Result{RequeueAfter: cfg.LLVRequeueIntervalSec}, nil - } - - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] successfully ended reconciliation of the LVMLogicalVolume %s", request.Name)) - return reconcile.Result{}, nil - }), - MaxConcurrentReconciles: 10, - }) - - if err != nil { - log.Error(err, "[RunLVMLogicalVolumeWatcherController] unable to create controller") - return nil, err - } - - err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMLogicalVolume{}, handler.TypedFuncs[*v1alpha1.LVMLogicalVolume, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got a create event for the LVMLogicalVolume: %s", e.Object.GetName())) - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} - q.Add(request) - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] added the request of the LVMLogicalVolume %s to Reconciler", e.Object.GetName())) - }, - - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got an update event for the LVMLogicalVolume: %s", e.ObjectNew.GetName())) - // TODO: Figure out how to log it in our logger. - if cfg.Loglevel == "4" { - fmt.Println("==============START DIFF==================") - fmt.Println(cmp.Diff(e.ObjectOld, e.ObjectNew)) - fmt.Println("==============END DIFF==================") - } - - if reflect.DeepEqual(e.ObjectOld.Spec, e.ObjectNew.Spec) && e.ObjectNew.DeletionTimestamp == nil { - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] no target changes were made for the LVMLogicalVolume %s. 
No need to reconcile the request", e.ObjectNew.Name)) - return - } - - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.Namespace, Name: e.ObjectNew.Name}} - q.Add(request) - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] added the request of the LVMLogicalVolume %s to Reconciler", e.ObjectNew.GetName())) - }, - })) - - if err != nil { - log.Error(err, "[RunLVMLogicalVolumeWatcherController] the controller is unable to watch") - return nil, err - } - - return c, err -} - -func ReconcileLVMLogicalVolume(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts the reconciliation for the LVMLogicalVolume %s", llv.Name)) - - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to identify the reconciliation type for the LVMLogicalVolume %s", llv.Name)) - log.Trace(fmt.Sprintf("[ReconcileLVMLogicalVolume] %+v", llv)) - - switch identifyReconcileFunc(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv) { - case CreateReconcile: - return reconcileLLVCreateFunc(ctx, cl, log, metrics, sdsCache, llv, lvg) - case UpdateReconcile: - return reconcileLLVUpdateFunc(ctx, cl, log, metrics, sdsCache, llv, lvg) - case DeleteReconcile: - return reconcileLLVDeleteFunc(ctx, cl, log, metrics, sdsCache, llv, lvg) - default: - log.Info(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s has completed configuration and should not be reconciled", llv.Name)) - if llv.Status.Phase != LLVStatusPhaseCreated { - log.Warning(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s should not be reconciled but has an unexpected phase: %s. 
Setting the phase to %s", llv.Name, llv.Status.Phase, LLVStatusPhaseCreated)) - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseCreated, "") - if err != nil { - return true, err - } - } - } - - return false, nil -} - -func reconcileLLVCreateFunc( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - - // this check prevents infinite resource updating after retries - if llv.Status == nil { - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhasePending, "") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - } - llvRequestSize, err := getLLVRequestedSize(llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) - return false, err - } - - freeSpace := getFreeLVGSpaceForLLV(lvg, llv) - log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s type: %s requested size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, llvRequestSize.String(), freeSpace.String())) - - if !utils.AreSizesEqualWithinDelta(llvRequestSize, freeSpace, internal.ResizeDelta) { - if freeSpace.Value() < llvRequestSize.Value()+internal.ResizeDelta.Value() { - err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than the actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) - - // we return true cause the user might manage LVMVolumeGroup free space without changing the LLV - return true, err - } - } - - var cmd string - switch llv.Spec.Type { - case Thick: - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s will be created in VG %s with size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llvRequestSize.String())) - cmd, err = utils.CreateThickLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value(), isContiguous(llv)) - case Thin: - if llv.Spec.Source == nil { - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) - cmd, err = utils.CreateThinLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Thin.PoolName, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value()) - } else { - // volume is a clone - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] Snapshot (for source %s) LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.Source.Name, llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) - - var sourceVgName, sourceVolumeName string - if llv.Spec.Source.Kind == "LVMLogicalVolume" { - sourceLlv := &v1alpha1.LVMLogicalVolume{} - if err = cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceLlv); err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to find source LVMLogicalVolume %s", llv.Spec.Source.Name)) - return false, err 
- } - - sourceVolumeName = sourceLlv.Spec.ActualLVNameOnTheNode - sourceVgName = sourceLlv.Spec.LVMVolumeGroupName - - // TODO snapshots: validate source llv - } else if llv.Spec.Source.Kind == "LVMLogicalVolumeSnapshot" { - sourceSnapshot := &v1alpha1.LVMLogicalVolumeSnapshot{} - if err = cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceSnapshot); err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to find source LVMLogicalVolumeSnapshot %s", llv.Spec.Source.Name)) - return false, err - } - sourceVolumeName = sourceSnapshot.Spec.ActualLVNameOnTheNode - sourceVgName = sourceSnapshot.Spec.LVMVolumeGroupName - // TODO snapshots: validate source snapshot - } else { - return false, fmt.Errorf("source kind is not supported: %s", llv.Spec.Source.Kind) - } - cmd, err = utils.CreateThinLogicalVolumeSnapshot(llv.Spec.ActualLVNameOnTheNode, sourceVgName, sourceVolumeName) - - } - } - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] runs cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to create a %s LogicalVolume for the LVMLogicalVolume %s", llv.Spec.Type, llv.Name)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully created LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) - - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] adds the LV %s to the cache", llv.Spec.ActualLVNameOnTheNode)) - sdsCache.AddLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] tries to get the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) - actualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if actualSize.Value() == 0 { - log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] unable to get actual size for LV %s in VG %s (likely LV was not found in the cache), retry...", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) - return true, nil - } - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] successfully got the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) - log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s in VG: %s has actual size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String())) - - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, actualSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - - if updated { - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully updated the LVMLogicalVolume %s status phase to Created", llv.Name)) - } else { - log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] LVMLogicalVolume %s status phase was not updated to Created due to the resource has already have the same phase", llv.Name)) - } - - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully ended the reconciliation for the LVMLogicalVolume %s", llv.Name)) - return false, nil -} - -func reconcileLLVUpdateFunc( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - - // status might be nil if a user creates the resource with LV name which matches existing LV on 
the node - if llv.Status == nil { - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhasePending, "") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - } - - // it needs to get current LV size from the node as status might be nil - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size before the extension", llv.Name)) - actualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if actualSize.Value() == 0 { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s has zero size (likely LV was not updated in the cache) ", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - return true, nil - } - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size %s before the extension", llv.Name, actualSize.String())) - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to count the LVMLogicalVolume %s requested size", llv.Name)) - llvRequestSize, err := getLLVRequestedSize(llv, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) - return false, err - } - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String())) - - if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String())) - - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, actualSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - - if updated { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully updated the LVMLogicalVolume %s status phase to Created", llv.Name)) - } else { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] no need to update the LVMLogicalVolume %s status phase to Created", llv.Name)) - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) - - return false, nil - } - - extendingSize := subtractQuantity(llvRequestSize, actualSize) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has extending size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, extendingSize.String())) - if extendingSize.Value() < 0 { - err = fmt.Errorf("specified LV size %dB is less than actual one on the node %dB", llvRequestSize.Value(), actualSize.Value()) - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to extend the LVMLogicalVolume %s", llv.Name)) - return false, err - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s should be resized", llv.Name)) - // this check prevents infinite resource updates after retry - if llv.Status.Phase != Failed { - err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseResizing, "") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - } - - freeSpace := getFreeLVGSpaceForLLV(lvg, llv) - 
log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s, type: %s, extending size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, extendingSize.String(), freeSpace.String())) - - if !utils.AreSizesEqualWithinDelta(freeSpace, extendingSize, internal.ResizeDelta) { - if freeSpace.Value() < extendingSize.Value()+internal.ResizeDelta.Value() { - err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) - - // returns true cause a user might manage LVG free space without changing the LLV - return true, err - } - } - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s will be extended with size: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llvRequestSize.String())) - cmd, err := utils.ExtendLV(llvRequestSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] runs cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to ExtendLV, name: %s, type: %s", llv.Spec.ActualLVNameOnTheNode, llv.Spec.Type)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully extended LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size after the extension", llv.Name)) - newActualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - - // this case might be triggered if sds cache will not update lv state in time - if newActualSize.Value() == actualSize.Value() { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s was extended but cache is not updated yet. 
It will be retried", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - return true, nil - } - - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size before the extension", llv.Name)) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s actual size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, newActualSize.String())) - - // need this here as a user might create the LLV with existing LV - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, newActualSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) - return true, err - } - - if updated { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully updated the LVMLogicalVolume %s status phase to Created", llv.Name)) - } else { - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] no need to update the LVMLogicalVolume %s status phase to Created", llv.Name)) - } - - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) - return false, nil -} - -func reconcileLLVDeleteFunc( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LVMVolumeGroup, -) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - - // The controller won't remove the LLV resource and LV volume till the resource has any other finalizer. - if len(llv.Finalizers) != 0 { - if len(llv.Finalizers) > 1 || - llv.Finalizers[0] != internal.SdsNodeConfiguratorFinalizer { - log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete LVMLogicalVolume %s for now due to it has any other finalizer", llv.Name)) - return false, nil - } - } - - err := deleteLVIfNeeded(log, sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully deleted the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) - - err = removeLLVFinalizersIfExist(ctx, cl, metrics, log, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to remove finalizers from the LVMVolumeGroup %s", llv.Name)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) - return false, nil -} diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go deleted file mode 100644 index b14ca251..00000000 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go +++ /dev/null @@ -1,356 +0,0 @@ -package controller - -import ( - "context" - "fmt" - "strings" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/strings/slices" - "sigs.k8s.io/controller-runtime/pkg/client" - - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" -) - -func identifyReconcileFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.LVMLogicalVolume) reconcileType { - should := 
shouldReconcileByCreateFunc(sdsCache, vgName, llv) - if should { - return CreateReconcile - } - - should = shouldReconcileByUpdateFunc(sdsCache, vgName, llv) - if should { - return UpdateReconcile - } - - should = shouldReconcileByDeleteFunc(llv) - if should { - return DeleteReconcile - } - - return "" -} - -func shouldReconcileByDeleteFunc(llv *v1alpha1.LVMLogicalVolume) bool { - return llv.DeletionTimestamp != nil -} - -//nolint:unparam -func checkIfConditionIsTrue(lvg *v1alpha1.LVMVolumeGroup, conType string) bool { - // this check prevents infinite resource updating after a retry - for _, c := range lvg.Status.Conditions { - if c.Type == conType && c.Status == v1.ConditionTrue { - return true - } - } - - return false -} - -func isPercentSize(size string) bool { - return strings.Contains(size, "%") -} - -func getLLVRequestedSize(llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (resource.Quantity, error) { - switch llv.Spec.Type { - case Thick: - return getRequestedSizeFromString(llv.Spec.Size, lvg.Status.VGSize) - case Thin: - for _, tp := range lvg.Status.ThinPools { - if tp.Name == llv.Spec.Thin.PoolName { - totalSize, err := getThinPoolSpaceWithAllocationLimit(tp.ActualSize, tp.AllocationLimit) - if err != nil { - return resource.Quantity{}, err - } - - return getRequestedSizeFromString(llv.Spec.Size, totalSize) - } - } - } - - return resource.Quantity{}, nil -} - -func removeLLVFinalizersIfExist( - ctx context.Context, - cl client.Client, - metrics monitoring.Metrics, - log logger.Logger, - llv *v1alpha1.LVMLogicalVolume, -) error { - var removed bool - for i, f := range llv.Finalizers { - if f == internal.SdsNodeConfiguratorFinalizer { - llv.Finalizers = append(llv.Finalizers[:i], llv.Finalizers[i+1:]...) - removed = true - log.Debug(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - break - } - } - - if removed { - log.Trace(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - err := updateLVMLogicalVolumeSpec(ctx, metrics, cl, llv) - if err != nil { - log.Error(err, fmt.Sprintf("[updateLVMLogicalVolumeSpec] unable to update the LVMVolumeGroup %s", llv.Name)) - return err - } - } - - return nil -} - -func checkIfLVBelongsToLLV(llv *v1alpha1.LVMLogicalVolume, lv *internal.LVData) bool { - switch llv.Spec.Type { - case Thin: - if lv.PoolName != llv.Spec.Thin.PoolName { - return false - } - case Thick: - contiguous := string(lv.LVAttr[2]) == "c" - if string(lv.LVAttr[0]) != "-" || - contiguous != isContiguous(llv) { - return false - } - } - - return true -} - -func updateLLVPhaseToCreatedIfNeeded(ctx context.Context, cl client.Client, llv *v1alpha1.LVMLogicalVolume, actualSize resource.Quantity) (bool, error) { - var contiguous *bool - if llv.Spec.Thick != nil { - if *llv.Spec.Thick.Contiguous { - contiguous = llv.Spec.Thick.Contiguous - } - } - - if llv.Status.Phase != LLVStatusPhaseCreated || - llv.Status.ActualSize.Value() != actualSize.Value() || - llv.Status.Reason != "" || - llv.Status.Contiguous != contiguous { - llv.Status.Phase = LLVStatusPhaseCreated - llv.Status.Reason = "" - llv.Status.ActualSize = actualSize - llv.Status.Contiguous = contiguous - err := cl.Status().Update(ctx, llv) - if err != nil { - return false, err - } - - return true, err - } - - return false, nil -} - -func deleteLVIfNeeded(log logger.Logger, sdsCache *cache.Cache, vgName string, llv 
*v1alpha1.LVMLogicalVolume) error { - lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) - if lv == nil || !lv.Exist { - log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) - return nil - } - - // this case prevents unexpected same-name LV deletions which does not actually belong to our LLV - if !checkIfLVBelongsToLLV(llv, &lv.Data) { - log.Warning(fmt.Sprintf("[deleteLVIfNeeded] no need to delete LV %s as it doesnt belong to LVMLogicalVolume %s", lv.Data.LVName, llv.Name)) - return nil - } - - cmd, err := utils.RemoveLV(vgName, llv.Spec.ActualLVNameOnTheNode) - log.Debug(fmt.Sprintf("[deleteLVIfNeeded] runs cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to remove LV %s from VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) - return err - } - - log.Debug(fmt.Sprintf("[deleteLVIfNeeded] mark LV %s in the cache as removed", lv.Data.LVName)) - sdsCache.MarkLVAsRemoved(lv.Data.VGName, lv.Data.LVName) - - return nil -} - -func getLVActualSize(sdsCache *cache.Cache, vgName, lvName string) resource.Quantity { - lv := sdsCache.FindLV(vgName, lvName) - if lv == nil { - return resource.Quantity{} - } - - result := resource.NewQuantity(lv.Data.LVSize.Value(), resource.BinarySI) - - return *result -} - -func addLLVFinalizerIfNotExist(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume) (bool, error) { - if slices.Contains(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer) { - return false, nil - } - - llv.Finalizers = append(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer) - - log.Trace(fmt.Sprintf("[addLLVFinalizerIfNotExist] added finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) - err := updateLVMLogicalVolumeSpec(ctx, metrics, cl, llv) - if err != nil { - return false, err - } - - return true, nil -} - -func shouldReconcileByCreateFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.LVMLogicalVolume) bool { - if llv.DeletionTimestamp != nil { - return false - } - - lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) - return lv == nil -} - -func getFreeLVGSpaceForLLV(lvg *v1alpha1.LVMVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity { - switch llv.Spec.Type { - case Thick: - return lvg.Status.VGFree - case Thin: - for _, tp := range lvg.Status.ThinPools { - if tp.Name == llv.Spec.Thin.PoolName { - return tp.AvailableSpace - } - } - } - - return resource.Quantity{} -} - -func subtractQuantity(currentQuantity, quantityToSubtract resource.Quantity) resource.Quantity { - resultingQuantity := currentQuantity.DeepCopy() - resultingQuantity.Sub(quantityToSubtract) - return resultingQuantity -} - -func belongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool { - var belongs bool - for _, node := range lvg.Status.Nodes { - if node.Name == nodeName { - belongs = true - } - } - - return belongs -} - -func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, string) { - if llv.DeletionTimestamp != nil { - // as the configuration doesn't matter if we want to delete it - return true, "" - } - - reason := strings.Builder{} - - if len(llv.Spec.ActualLVNameOnTheNode) == 0 { - reason.WriteString("No LV name specified. 
") - } - - llvRequestedSize, err := getLLVRequestedSize(llv, lvg) - if err != nil { - reason.WriteString(err.Error()) - } - - if llvRequestedSize.Value() == 0 { - reason.WriteString("Zero size for LV. ") - } - - if llv.Status != nil { - if llvRequestedSize.Value()+internal.ResizeDelta.Value() < llv.Status.ActualSize.Value() { - reason.WriteString("Desired LV size is less than actual one. ") - } - } - - switch llv.Spec.Type { - case Thin: - if llv.Spec.Thin == nil { - reason.WriteString("No thin pool specified. ") - break - } - - exist := false - for _, tp := range lvg.Status.ThinPools { - if tp.Name == llv.Spec.Thin.PoolName { - exist = true - break - } - } - - if !exist { - reason.WriteString("Selected thin pool does not exist in selected LVMVolumeGroup. ") - } - case Thick: - if llv.Spec.Thin != nil { - reason.WriteString("Thin pool specified for Thick LV. ") - } - } - - // if a specified Thick LV name matches the existing Thin one - lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if lv != nil && - len(lv.Data.LVAttr) != 0 && !checkIfLVBelongsToLLV(llv, &lv.Data) { - reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it is doesnt match the one on the node.", lv.Data.LVName)) - } - - if reason.Len() > 0 { - return false, reason.String() - } - - return true, "" -} - -func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, _ monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error { - if llv.Status != nil && - llv.Status.Phase == phase && - llv.Status.Reason == reason { - log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] no need to update the LVMLogicalVolume %s phase and reason", llv.Name)) - return nil - } - - if llv.Status == nil { - llv.Status = new(v1alpha1.LVMLogicalVolumeStatus) - } - - llv.Status.Phase = phase - llv.Status.Reason = reason - - log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] tries to update the LVMLogicalVolume %s status with phase: %s, reason: %s", llv.Name, phase, reason)) - err := cl.Status().Update(ctx, llv) - if err != nil { - return err - } - - log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] updated LVMLogicalVolume %s status.phase to %s and reason to %s", llv.Name, phase, reason)) - return nil -} - -func updateLVMLogicalVolumeSpec(ctx context.Context, _ monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error { - return cl.Update(ctx, llv) -} - -func shouldReconcileByUpdateFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.LVMLogicalVolume) bool { - if llv.DeletionTimestamp != nil { - return false - } - - lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) - return lv != nil && lv.Exist -} - -func isContiguous(llv *v1alpha1.LVMLogicalVolume) bool { - if llv.Spec.Thick == nil { - return false - } - - return *llv.Spec.Thick.Contiguous -} diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go deleted file mode 100644 index f1bc33e4..00000000 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go +++ /dev/null @@ -1,528 +0,0 @@ -/* -Copyright 2023 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "agent/config" - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" -) - -const ( - LVMVolumeGroupWatcherCtrlName = "lvm-volume-group-watcher-controller" - LVGMetadateNameLabelKey = "kubernetes.io/metadata.name" -) - -func RunLVMVolumeGroupWatcherController( - mgr manager.Manager, - cfg config.Options, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, -) (controller.Controller, error) { - cl := mgr.GetClient() - mgrCache := mgr.GetCache() - - c, err := controller.New(LVMVolumeGroupWatcherCtrlName, mgr, controller.Options{ - Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler starts to reconcile the request %s", request.NamespacedName.String())) - - lvg := &v1alpha1.LVMVolumeGroup{} - err := cl.Get(ctx, request.NamespacedName, lvg) - if err != nil { - if errors.IsNotFound(err) { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] seems like the LVMVolumeGroup was deleted as unable to get it, err: %s. Stop to reconcile", err.Error())) - return reconcile.Result{}, nil - } - - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get a LVMVolumeGroup by NamespacedName %s", request.NamespacedName.String())) - return reconcile.Result{}, err - } - - if lvg.Name == "" { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] seems like the LVMVolumeGroup for the request %s was deleted. Reconcile retrying will stop.", request.Name)) - return reconcile.Result{}, nil - } - - belongs := checkIfLVGBelongsToNode(lvg, cfg.NodeName) - if !belongs { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s does not belong to the node %s", lvg.Name, cfg.NodeName)) - return reconcile.Result{}, nil - } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s belongs to the node %s. 
Starts to reconcile", lvg.Name, cfg.NodeName)) - - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add the finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add the finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - return reconcile.Result{}, err - } - - if added { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully added a finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add a finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } - - // this case handles the situation when a user decides to remove LVMVolumeGroup resource without created VG - deleted, err := deleteLVGIfNeeded(ctx, cl, log, metrics, cfg, sdsCache, lvg) - if err != nil { - return reconcile.Result{}, err - } - - if deleted { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s was deleted, stop the reconciliation", lvg.Name)) - return reconcile.Result{}, nil - } - - if _, exist := lvg.Labels[internal.LVGUpdateTriggerLabel]; exist { - delete(lvg.Labels, internal.LVGUpdateTriggerLabel) - err = cl.Update(ctx, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to update the LVMVolumeGroup %s", lvg.Name)) - return reconcile.Result{}, err - } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the label %s from the LVMVolumeGroup %s", internal.LVGUpdateTriggerLabel, lvg.Name)) - } - - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to get block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels)) - blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get BlockDevices. Retry in %s", cfg.BlockDeviceScanIntervalSec.String())) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.BlockDeviceScanIntervalSec.String())) - } - - return reconcile.Result{RequeueAfter: cfg.BlockDeviceScanIntervalSec}, nil - } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully got block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels)) - - valid, reason := validateSpecBlockDevices(lvg, blockDevices) - if !valid { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupController] validation failed for the LVMVolumeGroup %s, reason: %s", lvg.Name, reason)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. 
Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil - } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully validated BlockDevices of the LVMVolumeGroup %s", lvg.Name)) - - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, cfg.NodeName)) - added, err = addLVGLabelIfNeeded(ctx, cl, log, lvg, LVGMetadateNameLabelKey, lvg.Name) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) - return reconcile.Result{}, err - } - - if added { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully added label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) - } - - // We do this after BlockDevices validation and node belonging check to prevent multiple updates by all agents pods - bds, _ := sdsCache.GetDevices() - if len(bds) == 0 { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no block devices in the cache, add the LVMVolumeGroup %s to requeue", lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "CacheEmpty", "unable to apply configuration due to the cache's state") - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) - } - - return reconcile.Result{ - RequeueAfter: cfg.VolumeGroupScanIntervalSec, - }, nil - } - - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to sync status and spec thin-pool AllicationLimit fields for the LVMVolumeGroup %s", lvg.Name)) - err = syncThinPoolsAllocationLimit(ctx, cl, log, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to sync status and spec thin-pool AllocationLimit fields for the LVMVolumeGroup %s", lvg.Name)) - return reconcile.Result{}, err - } - - shouldRequeue, err := runEventReconcile(ctx, cl, log, metrics, sdsCache, cfg, lvg, blockDevices) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) - } - - if shouldRequeue { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s event will be requeued in %s", lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) - return reconcile.Result{ - RequeueAfter: cfg.VolumeGroupScanIntervalSec, - }, nil - } - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler successfully reconciled the LVMVolumeGroup %s", lvg.Name)) - - return reconcile.Result{}, nil - }), - }) - - if err != nil { - log.Error(err, "[RunLVMVolumeGroupWatcherController] Unable to create controller RunLVMVolumeGroupWatcherController") - return nil, err - } - - err = c.Watch(source.Kind(mgrCache, &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - 
log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc got a create event for the LVMVolumeGroup, name: %s", e.Object.GetName())) - - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} - q.Add(request) - - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) - }, - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] UpdateFunc got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) - if !shouldLVGWatcherReconcileUpdateEvent(log, e.ObjectOld, e.ObjectNew) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update event for the LVMVolumeGroup %s should not be reconciled as not target changed were made", e.ObjectNew.Name)) - return - } - - request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} - q.Add(request) - - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] updateFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.ObjectNew.Name)) - }, - })) - - if err != nil { - log.Error(err, "[RunLVMVolumeGroupWatcherController] error Watch controller RunLVMVolumeGroupWatcherController") - return nil, err - } - return c, err -} - -func runEventReconcile( - ctx context.Context, - cl client.Client, - log logger.Logger, - metrics monitoring.Metrics, - sdsCache *cache.Cache, - cfg config.Options, - lvg *v1alpha1.LVMVolumeGroup, - blockDevices map[string]v1alpha1.BlockDevice, -) (bool, error) { - recType := identifyLVGReconcileFunc(lvg, sdsCache) - - switch recType { - case CreateReconcile: - log.Info(fmt.Sprintf("[runEventReconcile] CreateReconcile starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) - return reconcileLVGCreateFunc(ctx, cl, log, metrics, lvg, blockDevices) - case UpdateReconcile: - log.Info(fmt.Sprintf("[runEventReconcile] UpdateReconcile starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) - return reconcileLVGUpdateFunc(ctx, cl, log, metrics, sdsCache, lvg, blockDevices) - case DeleteReconcile: - log.Info(fmt.Sprintf("[runEventReconcile] DeleteReconcile starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) - return reconcileLVGDeleteFunc(ctx, cl, log, metrics, sdsCache, cfg, lvg) - default: - log.Info(fmt.Sprintf("[runEventReconcile] no need to reconcile the LVMVolumeGroup %s", lvg.Name)) - } - return false, nil -} - -func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, cfg config.Options, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status false to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - - // this check prevents the LVMVolumeGroup resource's infinity updating after a retry - for _, c := range lvg.Status.Conditions { - if c.Type == internal.TypeVGConfigurationApplied && c.Reason != internal.ReasonTerminating { - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, "trying to delete 
VG") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - break - } - } - - _, exist := lvg.Annotations[deletionProtectionAnnotation] - if exist { - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] the LVMVolumeGroup %s has a deletion timestamp but also has a deletion protection annotation %s. Remove it to proceed the delete operation", lvg.Name, deletionProtectionAnnotation)) - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, fmt.Sprintf("to delete the LVG remove the annotation %s", deletionProtectionAnnotation)) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - - return false, nil - } - - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] check if VG %s of the LVMVolumeGroup %s uses LVs", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - usedLVs := getLVForVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode) - if len(usedLVs) > 0 { - err := fmt.Errorf("VG %s uses LVs: %v. Delete used LVs first", lvg.Spec.ActualVGNameOnTheNode, usedLVs) - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to reconcile LVG %s", lvg.Name)) - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status False to the LVMVolumeGroup %s due to LV does exist", internal.TypeVGConfigurationApplied, lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - - return true, nil - } - - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] VG %s of the LVMVolumeGroup %s does not use any LV. 
Start to delete the VG", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - err := DeleteVGIfExist(log, metrics, sdsCache, lvg.Spec.ActualVGNameOnTheNode) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete VG %s", lvg.Spec.ActualVGNameOnTheNode)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - - return true, err - } - - removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to remove a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - return true, err - } - - if removed { - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] successfully removed a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] no need to remove a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } - - err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, cfg.NodeName) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete the LVMVolumeGroup %s", lvg.Name)) - return true, err - } - - log.Info(fmt.Sprintf("[reconcileLVGDeleteFunc] successfully reconciled VG %s of the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - return false, nil -} - -func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) - - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name)) - pvs, _ := sdsCache.GetPVs() - valid, reason := validateLVGForUpdateFunc(log, sdsCache, lvg, blockDevices) - if !valid { - log.Warning(fmt.Sprintf("[reconcileLVGUpdateFunc] the LVMVolumeGroup %s is not valid", lvg.Name)) - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, lvg.Name)) - } - - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully validated the LVMVolumeGroup %s", lvg.Name)) - - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to get VG %s for the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - found, vg := tryGetVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode) - if !found { - err := fmt.Errorf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode) - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile 
the LVMVolumeGroup %s", lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error()) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] VG %s found for the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to check and update VG %s tag %s", lvg.Spec.ActualVGNameOnTheNode, internal.LVMTags[0])) - updated, err := UpdateVGTagIfNeeded(ctx, cl, log, metrics, lvg, vg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to update VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGUpdateFailed", fmt.Sprintf("unable to update VG tag, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - - return true, err - } - - if updated { - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully updated VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] no need to update VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - } - - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to resize PV of the LVMVolumeGroup %s", lvg.Name)) - err = ResizePVIfNeeded(ctx, cl, log, metrics, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to resize PV of the LVMVolumeGroup %s", lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "PVResizeFailed", fmt.Sprintf("unable to resize PV, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully ended the resize operation for PV of the LVMVolumeGroup %s", lvg.Name)) - - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to extend VG %s of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - err = ExtendVGIfNeeded(ctx, cl, log, metrics, lvg, vg, pvs, blockDevices) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to extend VG of the LVMVolumeGroup %s", lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGExtendFailed", fmt.Sprintf("unable to extend VG, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully ended the extend operation for VG of the LVMVolumeGroup %s", lvg.Name)) - - if lvg.Spec.ThinPools != nil { - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to reconcile thin-pools of the LVMVolumeGroup %s", lvg.Name)) - lvs, _ := sdsCache.GetLVs() - err = ReconcileThinPoolsIfNeeded(ctx, cl, log, metrics, lvg, vg, lvs) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to 
reconcile thin-pools of the LVMVolumeGroup %s", lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolReconcileFailed", fmt.Sprintf("unable to reconcile thin-pools, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully reconciled thin-pools operation of the LVMVolumeGroup %s", lvg.Name)) - } - - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "Applied", "configuration has been applied") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully added a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - log.Info(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully reconciled the LVMVolumeGroup %s", lvg.Name)) - - return false, nil -} - -func reconcileLVGCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) - - // this check prevents the LVMVolumeGroup resource's infinity updating after a retry - exist := false - for _, c := range lvg.Status.Conditions { - if c.Type == internal.TypeVGConfigurationApplied { - exist = true - break - } - } - - if !exist { - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonCreating, "trying to apply the configuration") - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - } - - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name)) - valid, reason := validateLVGForCreateFunc(log, lvg, blockDevices) - if !valid { - log.Warning(fmt.Sprintf("[reconcileLVGCreateFunc] validation fails for the LVMVolumeGroup %s", lvg.Name)) - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - - return true, err - } - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] successfully validated the LVMVolumeGroup %s", lvg.Name)) - - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to create VG for the LVMVolumeGroup %s", lvg.Name)) - err := CreateVGComplex(metrics, log, lvg, blockDevices) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to create VG for the 
LVMVolumeGroup %s", lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGCreationFailed", fmt.Sprintf("unable to create VG, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - return true, err - } - log.Info(fmt.Sprintf("[reconcileLVGCreateFunc] successfully created VG for the LVMVolumeGroup %s", lvg.Name)) - - if lvg.Spec.ThinPools != nil { - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] the LVMVolumeGroup %s has thin-pools. Tries to create them", lvg.Name)) - - for _, tp := range lvg.Spec.ThinPools { - vgSize := countVGSizeByBlockDevices(blockDevices) - tpRequestedSize, err := getRequestedSizeFromString(tp.Size, vgSize) - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to get thin-pool %s requested size of the LVMVolumeGroup %s", tp.Name, lvg.Name)) - return false, err - } - - var cmd string - if utils.AreSizesEqualWithinDelta(tpRequestedSize, vgSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] Thin-pool %s of the LVMVolumeGroup %s will be created with full VG space size", tp.Name, lvg.Name)) - cmd, err = utils.CreateThinPoolFullVGSpace(tp.Name, lvg.Spec.ActualVGNameOnTheNode) - } else { - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] Thin-pool %s of the LVMVolumeGroup %s will be created with size %s", tp.Name, lvg.Name, tpRequestedSize.String())) - cmd, err = utils.CreateThinPool(tp.Name, lvg.Spec.ActualVGNameOnTheNode, tpRequestedSize.Value()) - } - if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to create thin-pool %s of the LVMVolumeGroup %s, cmd: %s", tp.Name, lvg.Name, cmd)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolCreationFailed", fmt.Sprintf("unable to create thin-pool, err: %s", err.Error())) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - } - - return true, err - } - } - log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] successfully created thin-pools for the LVMVolumeGroup %s", lvg.Name)) - } - - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "Success", "all configuration has been applied") - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - return true, err - } - - return false, nil -} diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_constants.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_constants.go deleted file mode 100644 index 060dd1b6..00000000 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_constants.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2023 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -const ( - Local = "Local" - Shared = "Shared" - - Failed = "Failed" - - NonOperational = "NonOperational" - - deletionProtectionAnnotation = "storage.deckhouse.io/deletion-protection" - - LVMVolumeGroupTag = "storage.deckhouse.io/lvmVolumeGroupName" -) diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go deleted file mode 100644 index 85751f06..00000000 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go +++ /dev/null @@ -1,1053 +0,0 @@ -/* -Copyright 2023 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/strings/slices" - "sigs.k8s.io/controller-runtime/pkg/client" - - "agent/config" - "agent/internal" - "agent/pkg/cache" - "agent/pkg/logger" - "agent/pkg/monitoring" - "agent/pkg/utils" -) - -func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, currentNode string) error { - log.Debug(fmt.Sprintf(`[DeleteLVMVolumeGroup] Node "%s" does not belong to VG "%s". It will be removed from LVM resource, name "%s"'`, currentNode, lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - for i, node := range lvg.Status.Nodes { - if node.Name == currentNode { - // delete node - lvg.Status.Nodes = append(lvg.Status.Nodes[:i], lvg.Status.Nodes[i+1:]...) - log.Info(fmt.Sprintf(`[DeleteLVMVolumeGroup] deleted node "%s" from LVMVolumeGroup "%s"`, node.Name, lvg.Name)) - } - } - - // If current LVMVolumeGroup has no nodes left, delete it. 
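- // The API delete below runs only when Status.Nodes is empty and is wrapped with duration, execution-count and error metrics.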
- if len(lvg.Status.Nodes) == 0 { - start := time.Now() - err := cl.Delete(ctx, lvg) - metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc() - if err != nil { - metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc() - return err - } - log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name)) - } - - return nil -} - -func checkIfVGExist(vgName string, vgs []internal.VGData) bool { - for _, vg := range vgs { - if vg.VGName == vgName { - return true - } - } - - return false -} - -func shouldUpdateLVGLabels(log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, labelKey, labelValue string) bool { - if lvg.Labels == nil { - log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has no labels.", lvg.Name)) - return true - } - - val, exist := lvg.Labels[labelKey] - if !exist { - log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has no label %s.", lvg.Name, labelKey)) - return true - } - - if val != labelValue { - log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has label %s but the value is incorrect - %s (should be %s)", lvg.Name, labelKey, val, labelValue)) - return true - } - - return false -} - -func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1alpha1.LVMVolumeGroup) bool { - if newLVG.DeletionTimestamp != nil { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s has deletionTimestamp", newLVG.Name)) - return true - } - - if _, exist := newLVG.Labels[internal.LVGUpdateTriggerLabel]; exist { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s has the label %s", newLVG.Name, internal.LVGUpdateTriggerLabel)) - return true - } - - if shouldUpdateLVGLabels(log, newLVG, LVGMetadateNameLabelKey, newLVG.Name) { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup's %s labels have been changed", newLVG.Name)) - return true - } - - if !reflect.DeepEqual(oldLVG.Spec, newLVG.Spec) { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s configuration has been changed", newLVG.Name)) - return true - } - - for _, c := range newLVG.Status.Conditions { - if c.Type == internal.TypeVGConfigurationApplied { - if c.Reason == internal.ReasonUpdating || c.Reason == internal.ReasonCreating { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should not be reconciled as the LVMVolumeGroup %s reconciliation still in progress", newLVG.Name)) - return false - } - } - } - - for _, n := range newLVG.Status.Nodes { - for _, d := range n.Devices { - if !utils.AreSizesEqualWithinDelta(d.PVSize, d.DevSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s PV size is different to device size", newLVG.Name)) - return true - } - } - } - - return false -} - -func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LVMVolumeGroup) bool { - return lvg.DeletionTimestamp != nil -} - -func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, status v1.ConditionStatus, conType, reason, message string) error { - exist := 
false - index := 0 - newCondition := v1.Condition{ - Type: conType, - Status: status, - ObservedGeneration: lvg.Generation, - LastTransitionTime: v1.NewTime(time.Now()), - Reason: reason, - Message: message, - } - - if lvg.Status.Conditions == nil { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] the LVMVolumeGroup %s conditions is nil. Initialize them", lvg.Name)) - lvg.Status.Conditions = make([]v1.Condition, 0, 5) - } - - if len(lvg.Status.Conditions) > 0 { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] there are some conditions in the LVMVolumeGroup %s. Tries to find a condition %s", lvg.Name, conType)) - for i, c := range lvg.Status.Conditions { - if c.Type == conType { - if checkIfEqualConditions(c, newCondition) { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no need to update condition %s in the LVMVolumeGroup %s as new and old condition states are the same", conType, lvg.Name)) - return nil - } - - index = i - exist = true - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was found in the LVMVolumeGroup %s at the index %d", conType, lvg.Name, i)) - } - } - - if !exist { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was not found. Append it in the end of the LVMVolumeGroup %s conditions", conType, lvg.Name)) - lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) - } else { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] insert the condition %s status %s reason %s message %s at index %d of the LVMVolumeGroup %s conditions", conType, status, reason, message, index, lvg.Name)) - lvg.Status.Conditions[index] = newCondition - } - } else { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no conditions were found in the LVMVolumeGroup %s. Append the condition %s in the end", lvg.Name, conType)) - lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) - } - - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] tries to update the condition type %s status %s reason %s message %s of the LVMVolumeGroup %s", conType, status, reason, message, lvg.Name)) - return cl.Status().Update(ctx, lvg) -} - -func checkIfEqualConditions(first, second v1.Condition) bool { - return first.Type == second.Type && - first.Status == second.Status && - first.Reason == second.Reason && - first.Message == second.Message && - first.ObservedGeneration == second.ObservedGeneration -} - -func addLVGFinalizerIfNotExist(ctx context.Context, cl client.Client, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - if slices.Contains(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) { - return false, nil - } - - lvg.Finalizers = append(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) - err := cl.Update(ctx, lvg) - if err != nil { - return false, err - } - - return true, nil -} - -func syncThinPoolsAllocationLimit(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup) error { - updated := false - - tpSpecLimits := make(map[string]string, len(lvg.Spec.ThinPools)) - for _, tp := range lvg.Spec.ThinPools { - tpSpecLimits[tp.Name] = tp.AllocationLimit - } - - var ( - space resource.Quantity - err error - ) - for i := range lvg.Status.ThinPools { - if specLimits, matched := tpSpecLimits[lvg.Status.ThinPools[i].Name]; matched { - if lvg.Status.ThinPools[i].AllocationLimit != specLimits { - log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] thin-pool %s status AllocationLimit: %s of the LVMVolumeGroup %s should be updated by spec one: %s", lvg.Status.ThinPools[i].Name, 
lvg.Status.ThinPools[i].AllocationLimit, lvg.Name, specLimits)) - updated = true - lvg.Status.ThinPools[i].AllocationLimit = specLimits - - space, err = getThinPoolAvailableSpace(lvg.Status.ThinPools[i].ActualSize, lvg.Status.ThinPools[i].AllocatedSize, specLimits) - if err != nil { - log.Error(err, fmt.Sprintf("[syncThinPoolsAllocationLimit] unable to get thin pool %s available space", lvg.Status.ThinPools[i].Name)) - return err - } - log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] successfully got a new available space %s of the thin-pool %s", space.String(), lvg.Status.ThinPools[i].Name)) - lvg.Status.ThinPools[i].AvailableSpace = space - } - } else { - log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] status thin-pool %s of the LVMVolumeGroup %s was not found as used in spec", lvg.Status.ThinPools[i].Name, lvg.Name)) - } - } - - if updated { - fmt.Printf("%+v", lvg.Status.ThinPools) - log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] tries to update the LVMVolumeGroup %s", lvg.Name)) - err = cl.Status().Update(ctx, lvg) - if err != nil { - return err - } - log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] successfully updated the LVMVolumeGroup %s", lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] every status thin-pool AllocationLimit value is synced with spec one for the LVMVolumeGroup %s", lvg.Name)) - } - - return nil -} - -func validateSpecBlockDevices(lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { - if len(blockDevices) == 0 { - return false, "none of specified BlockDevices were found" - } - - if len(lvg.Status.Nodes) > 0 { - lostBdNames := make([]string, 0, len(lvg.Status.Nodes[0].Devices)) - for _, n := range lvg.Status.Nodes { - for _, d := range n.Devices { - if _, found := blockDevices[d.BlockDevice]; !found { - lostBdNames = append(lostBdNames, d.BlockDevice) - } - } - } - - // that means some of the used BlockDevices no longer match the blockDeviceSelector - if len(lostBdNames) > 0 { - return false, fmt.Sprintf("these BlockDevices no longer match the blockDeviceSelector: %s", strings.Join(lostBdNames, ",")) - } - } - - for _, me := range lvg.Spec.BlockDeviceSelector.MatchExpressions { - if me.Key == internal.MetadataNameLabelKey { - if len(me.Values) != len(blockDevices) { - missedBds := make([]string, 0, len(me.Values)) - for _, bdName := range me.Values { - if _, exist := blockDevices[bdName]; !exist { - missedBds = append(missedBds, bdName) - } - } - - return false, fmt.Sprintf("unable to find specified BlockDevices: %s", strings.Join(missedBds, ",")) - } - } - } - - bdFromOtherNode := make([]string, 0, len(blockDevices)) - for _, bd := range blockDevices { - if bd.Status.NodeName != lvg.Spec.Local.NodeName { - bdFromOtherNode = append(bdFromOtherNode, bd.Name) - } - } - - if len(bdFromOtherNode) != 0 { - return false, fmt.Sprintf("block devices %s have different node names from LVMVolumeGroup Local.NodeName", strings.Join(bdFromOtherNode, ",")) - } - - return true, "" -} - -func deleteLVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, cfg config.Options, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - if lvg.DeletionTimestamp == nil { - return false, nil - } - - vgs, _ := sdsCache.GetVGs() - if !checkIfVGExist(lvg.Spec.ActualVGNameOnTheNode, vgs) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] VG %s was not yet created for the LVMVolumeGroup %s and the resource is marked as deleting. 
Delete the resource", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - return false, err - } - - if removed { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } - - err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, cfg.NodeName) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name)) - return false, err - } - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name)) - return true, nil - } - return false, nil -} - -func checkIfLVGBelongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool { - return lvg.Spec.Local.NodeName == nodeName -} - -func extractPathsFromBlockDevices(targetDevices []string, blockDevices map[string]v1alpha1.BlockDevice) []string { - var paths []string - if len(targetDevices) > 0 { - paths = make([]string, 0, len(targetDevices)) - for _, bdName := range targetDevices { - bd := blockDevices[bdName] - paths = append(paths, bd.Status.Path) - } - } else { - paths = make([]string, 0, len(blockDevices)) - for _, bd := range blockDevices { - paths = append(paths, bd.Status.Path) - } - } - - return paths -} - -func getRequestedSizeFromString(size string, targetSpace resource.Quantity) (resource.Quantity, error) { - switch isPercentSize(size) { - case true: - strPercent := strings.Split(size, "%")[0] - percent, err := strconv.Atoi(strPercent) - if err != nil { - return resource.Quantity{}, err - } - lvSize := targetSpace.Value() * int64(percent) / 100 - return *resource.NewQuantity(lvSize, resource.BinarySI), nil - case false: - return resource.ParseQuantity(size) - } - - return resource.Quantity{}, nil -} - -func countVGSizeByBlockDevices(blockDevices map[string]v1alpha1.BlockDevice) resource.Quantity { - var totalVGSize int64 - for _, bd := range blockDevices { - totalVGSize += bd.Status.Size.Value() - } - return *resource.NewQuantity(totalVGSize, resource.BinarySI) -} - -func validateLVGForCreateFunc(log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { - reason := strings.Builder{} - - log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] check if every selected BlockDevice of the LVMVolumeGroup %s is consumable", lvg.Name)) - // totalVGSize needs to count if there is enough space for requested thin-pools - totalVGSize := countVGSizeByBlockDevices(blockDevices) - for _, bd := range blockDevices { - if !bd.Status.Consumable { - log.Warning(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice %s is not consumable", bd.Name)) - log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice name: %s, status: %+v", bd.Name, bd.Status)) - reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. 
", bd.Name)) - } - } - - if reason.Len() == 0 { - log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] all BlockDevices of the LVMVolumeGroup %s are consumable", lvg.Name)) - } - - if lvg.Spec.ThinPools != nil { - log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] the LVMVolumeGroup %s has thin-pools. Validate if VG size has enough space for the thin-pools", lvg.Name)) - log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] the LVMVolumeGroup %s has thin-pools %v", lvg.Name, lvg.Spec.ThinPools)) - log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] total LVMVolumeGroup %s size: %s", lvg.Name, totalVGSize.String())) - - var totalThinPoolSize int64 - for _, tp := range lvg.Spec.ThinPools { - tpRequestedSize, err := getRequestedSizeFromString(tp.Size, totalVGSize) - if err != nil { - reason.WriteString(err.Error()) - continue - } - - if tpRequestedSize.Value() == 0 { - reason.WriteString(fmt.Sprintf("Thin-pool %s has zero size. ", tp.Name)) - continue - } - - // means a user want a thin-pool with 100%FREE size - if utils.AreSizesEqualWithinDelta(tpRequestedSize, totalVGSize, internal.ResizeDelta) { - if len(lvg.Spec.ThinPools) > 1 { - reason.WriteString(fmt.Sprintf("Thin-pool %s requested size of full VG space, but there is any other thin-pool. ", tp.Name)) - } - } - - totalThinPoolSize += tpRequestedSize.Value() - } - log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] LVMVolumeGroup %s thin-pools requested space: %d", lvg.Name, totalThinPoolSize)) - - if totalThinPoolSize != totalVGSize.Value() && totalThinPoolSize+internal.ResizeDelta.Value() >= totalVGSize.Value() { - log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] total thin pool size: %s, total vg size: %s", resource.NewQuantity(totalThinPoolSize, resource.BinarySI).String(), totalVGSize.String())) - log.Warning(fmt.Sprintf("[validateLVGForCreateFunc] requested thin pool size is more than VG total size for the LVMVolumeGroup %s", lvg.Name)) - reason.WriteString(fmt.Sprintf("Required space for thin-pools %d is more than VG size %d.", totalThinPoolSize, totalVGSize.Value())) - } - } - - if reason.Len() != 0 { - return false, reason.String() - } - - return true, "" -} - -func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { - reason := strings.Builder{} - pvs, _ := sdsCache.GetPVs() - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] check if every new BlockDevice of the LVMVolumeGroup %s is comsumable", lvg.Name)) - actualPVPaths := make(map[string]struct{}, len(pvs)) - for _, pv := range pvs { - actualPVPaths[pv.PVName] = struct{}{} - } - - //TODO: add a check if BlockDevice size got less than PV size - - // Check if added BlockDevices are consumable - // additionBlockDeviceSpace value is needed to count if VG will have enough space for thin-pools - var additionBlockDeviceSpace int64 - for _, bd := range blockDevices { - if _, found := actualPVPaths[bd.Status.Path]; !found { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] unable to find the PV %s for BlockDevice %s. Check if the BlockDevice is already used", bd.Status.Path, bd.Name)) - for _, n := range lvg.Status.Nodes { - for _, d := range n.Devices { - if d.BlockDevice == bd.Name { - log.Warning(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s misses the PV %s. That might be because the corresponding device was removed from the node. 
Unable to validate BlockDevices", bd.Name, bd.Status.Path)) - reason.WriteString(fmt.Sprintf("BlockDevice %s misses the PV %s (that might be because the device was removed from the node). ", bd.Name, bd.Status.Path)) - } - - if reason.Len() == 0 { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s does not miss a PV", d.BlockDevice)) - } - } - } - - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] PV %s for BlockDevice %s of the LVMVolumeGroup %s is not created yet, check if the BlockDevice is consumable", bd.Status.Path, bd.Name, lvg.Name)) - if reason.Len() > 0 { - log.Debug("[validateLVGForUpdateFunc] some BlockDevices misses its PVs, unable to check if they are consumable") - continue - } - - if !bd.Status.Consumable { - reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. ", bd.Name)) - continue - } - - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s is consumable", bd.Name)) - additionBlockDeviceSpace += bd.Status.Size.Value() - } - } - - if lvg.Spec.ThinPools != nil { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s has thin-pools. Validate them", lvg.Name)) - actualThinPools := make(map[string]internal.LVData, len(lvg.Spec.ThinPools)) - for _, tp := range lvg.Spec.ThinPools { - lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, tp.Name) - if lv != nil { - if !isThinPool(lv.Data) { - reason.WriteString(fmt.Sprintf("LV %s is already created on the node and it is not a thin-pool", lv.Data.LVName)) - continue - } - - actualThinPools[lv.Data.LVName] = lv.Data - } - } - - // check if added thin-pools has valid requested size - var ( - addingThinPoolSize int64 - hasFullThinPool = false - ) - - vg := sdsCache.FindVG(lvg.Spec.ActualVGNameOnTheNode) - if vg == nil { - reason.WriteString(fmt.Sprintf("Missed VG %s in the cache", lvg.Spec.ActualVGNameOnTheNode)) - return false, reason.String() - } - - newTotalVGSize := resource.NewQuantity(vg.VGSize.Value()+additionBlockDeviceSpace, resource.BinarySI) - for _, specTp := range lvg.Spec.ThinPools { - // might be a case when Thin-pool is already created, but is not shown in status - tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, *newTotalVGSize) - if err != nil { - reason.WriteString(err.Error()) - continue - } - - if tpRequestedSize.Value() == 0 { - reason.WriteString(fmt.Sprintf("Thin-pool %s has zero size. ", specTp.Name)) - continue - } - - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s thin-pool %s requested size %s, Status VG size %s", lvg.Name, specTp.Name, tpRequestedSize.String(), lvg.Status.VGSize.String())) - switch utils.AreSizesEqualWithinDelta(tpRequestedSize, *newTotalVGSize, internal.ResizeDelta) { - // means a user wants 100% of VG space - case true: - hasFullThinPool = true - if len(lvg.Spec.ThinPools) > 1 { - // as if a user wants thin-pool with 100%VG size, there might be only one thin-pool - reason.WriteString(fmt.Sprintf("Thin-pool %s requests size of full VG space, but there are any other thin-pools. 
", specTp.Name)) - } - case false: - if actualThinPool, created := actualThinPools[specTp.Name]; !created { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] thin-pool %s of the LVMVolumeGroup %s is not yet created, adds its requested size", specTp.Name, lvg.Name)) - addingThinPoolSize += tpRequestedSize.Value() - } else { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] thin-pool %s of the LVMVolumeGroup %s is already created, check its requested size", specTp.Name, lvg.Name)) - if tpRequestedSize.Value()+internal.ResizeDelta.Value() < actualThinPool.LVSize.Value() { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s Spec.ThinPool %s size %s is less than Status one: %s", lvg.Name, specTp.Name, tpRequestedSize.String(), actualThinPool.LVSize.String())) - reason.WriteString(fmt.Sprintf("Requested Spec.ThinPool %s size %s is less than actual one %s. ", specTp.Name, tpRequestedSize.String(), actualThinPool.LVSize.String())) - continue - } - - thinPoolSizeDiff := tpRequestedSize.Value() - actualThinPool.LVSize.Value() - if thinPoolSizeDiff > internal.ResizeDelta.Value() { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s Spec.ThinPool %s size %s more than Status one: %s", lvg.Name, specTp.Name, tpRequestedSize.String(), actualThinPool.LVSize.String())) - addingThinPoolSize += thinPoolSizeDiff - } - } - } - } - - if !hasFullThinPool { - allocatedSize := getVGAllocatedSize(*vg) - totalFreeSpace := newTotalVGSize.Value() - allocatedSize.Value() - log.Trace(fmt.Sprintf("[validateLVGForUpdateFunc] new LVMVolumeGroup %s thin-pools requested %d size, additional BlockDevices space %d, total: %d", lvg.Name, addingThinPoolSize, additionBlockDeviceSpace, totalFreeSpace)) - if addingThinPoolSize != 0 && addingThinPoolSize+internal.ResizeDelta.Value() > totalFreeSpace { - reason.WriteString("Added thin-pools requested sizes are more than allowed free space in VG.") - } - } - } - - if reason.Len() != 0 { - return false, reason.String() - } - - return true, "" -} - -func identifyLVGReconcileFunc(lvg *v1alpha1.LVMVolumeGroup, sdsCache *cache.Cache) reconcileType { - if shouldReconcileLVGByCreateFunc(lvg, sdsCache) { - return CreateReconcile - } - - if shouldReconcileLVGByUpdateFunc(lvg, sdsCache) { - return UpdateReconcile - } - - if shouldReconcileLVGByDeleteFunc(lvg) { - return DeleteReconcile - } - - return "none" -} - -func shouldReconcileLVGByCreateFunc(lvg *v1alpha1.LVMVolumeGroup, ch *cache.Cache) bool { - if lvg.DeletionTimestamp != nil { - return false - } - - vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode) - return vg == nil -} - -func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LVMVolumeGroup, ch *cache.Cache) bool { - if lvg.DeletionTimestamp != nil { - return false - } - - vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode) - return vg != nil -} - -func ReconcileThinPoolsIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData, lvs []internal.LVData) error { - actualThinPools := make(map[string]internal.LVData, len(lvs)) - for _, lv := range lvs { - if string(lv.LVAttr[0]) == "t" { - actualThinPools[lv.LVName] = lv - } - } - - errs := strings.Builder{} - for _, specTp := range lvg.Spec.ThinPools { - tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, lvg.Status.VGSize) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to get requested thin-pool %s size of the LVMVolumeGroup %s", specTp.Name, lvg.Name)) - return err - 
} - - if actualTp, exist := actualThinPools[specTp.Name]; !exist { - log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s is not created yet. Create it", specTp.Name, lvg.Name)) - if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) - return err - } - } - - var cmd string - start := time.Now() - if utils.AreSizesEqualWithinDelta(tpRequestedSize, lvg.Status.VGSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s will be created with size 100FREE", specTp.Name, lvg.Name)) - cmd, err = utils.CreateThinPoolFullVGSpace(specTp.Name, vg.VGName) - } else { - log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s will be created with size %s", specTp.Name, lvg.Name, tpRequestedSize.String())) - cmd, err = utils.CreateThinPool(specTp.Name, vg.VGName, tpRequestedSize.Value()) - } - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "lvcreate").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "lvcreate").Inc() - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "lvcreate").Inc() - log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to create thin-pool %s of the LVMVolumeGroup %s, cmd: %s", specTp.Name, lvg.Name, cmd)) - errs.WriteString(fmt.Sprintf("unable to create thin-pool %s, err: %s. ", specTp.Name, err.Error())) - continue - } - - log.Info(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s has been successfully created", specTp.Name, lvg.Name)) - } else { - // thin-pool exists - if utils.AreSizesEqualWithinDelta(tpRequestedSize, actualTp.LVSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] the LVMVolumeGroup %s requested thin pool %s size is equal to actual one", lvg.Name, tpRequestedSize.String())) - continue - } - - log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] the LVMVolumeGroup %s requested thin pool %s size is more than actual one. Resize it", lvg.Name, tpRequestedSize.String())) - if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) - return err - } - } - err = ExtendThinPool(log, metrics, lvg, specTp) - if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to resize thin-pool %s of the LVMVolumeGroup %s", specTp.Name, lvg.Name)) - errs.WriteString(fmt.Sprintf("unable to resize thin-pool %s, err: %s. 
", specTp.Name, err.Error())) - continue - } - } - } - - if errs.Len() != 0 { - return errors.New(errs.String()) - } - - return nil -} - -func ResizePVIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup) error { - if len(lvg.Status.Nodes) == 0 { - log.Warning(fmt.Sprintf("[ResizePVIfNeeded] the LVMVolumeGroup %s nodes are empty. Wait for the next update", lvg.Name)) - return nil - } - - errs := strings.Builder{} - for _, n := range lvg.Status.Nodes { - for _, d := range n.Devices { - if d.DevSize.Value()-d.PVSize.Value() > internal.ResizeDelta.Value() { - if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) - return err - } - } - - log.Debug(fmt.Sprintf("[ResizePVIfNeeded] the LVMVolumeGroup %s BlockDevice %s PVSize is less than actual device size. Resize PV", lvg.Name, d.BlockDevice)) - - start := time.Now() - cmd, err := utils.ResizePV(d.Path) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "pvresize").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "pvresize") - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "pvresize").Inc() - log.Error(err, fmt.Sprintf("[ResizePVIfNeeded] unable to resize PV %s of BlockDevice %s of LVMVolumeGroup %s, cmd: %s", d.Path, d.BlockDevice, lvg.Name, cmd)) - errs.WriteString(fmt.Sprintf("unable to resize PV %s, err: %s. 
", d.Path, err.Error())) - continue - } - - log.Info(fmt.Sprintf("[ResizePVIfNeeded] successfully resized PV %s of BlockDevice %s of LVMVolumeGroup %s", d.Path, d.BlockDevice, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[ResizePVIfNeeded] no need to resize PV %s of BlockDevice %s of the LVMVolumeGroup %s", d.Path, d.BlockDevice, lvg.Name)) - } - } - } - - if errs.Len() != 0 { - return errors.New(errs.String()) - } - - return nil -} - -func ExtendVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData, pvs []internal.PVData, blockDevices map[string]v1alpha1.BlockDevice) error { - for _, n := range lvg.Status.Nodes { - for _, d := range n.Devices { - log.Trace(fmt.Sprintf("[ExtendVGIfNeeded] the LVMVolumeGroup %s status block device: %s", lvg.Name, d.BlockDevice)) - } - } - - pvsMap := make(map[string]struct{}, len(pvs)) - for _, pv := range pvs { - pvsMap[pv.PVName] = struct{}{} - } - - devicesToExtend := make([]string, 0, len(blockDevices)) - for _, bd := range blockDevices { - if _, exist := pvsMap[bd.Status.Path]; !exist { - log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] the BlockDevice %s of LVMVolumeGroup %s Spec is not counted as used", bd.Name, lvg.Name)) - devicesToExtend = append(devicesToExtend, bd.Name) - } - } - - if len(devicesToExtend) == 0 { - log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] VG %s of the LVMVolumeGroup %s should not be extended", vg.VGName, lvg.Name)) - return nil - } - - if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) - return err - } - } - - log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] VG %s should be extended as there are some BlockDevices were added to Spec field of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - paths := extractPathsFromBlockDevices(devicesToExtend, blockDevices) - err := ExtendVGComplex(metrics, paths, vg.VGName, log) - if err != nil { - log.Error(err, fmt.Sprintf("[ExtendVGIfNeeded] unable to extend VG %s of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) - return err - } - log.Info(fmt.Sprintf("[ExtendVGIfNeeded] VG %s of the LVMVolumeGroup %s was extended", vg.VGName, lvg.Name)) - - return nil -} - -func tryGetVG(sdsCache *cache.Cache, vgName string) (bool, internal.VGData) { - vgs, _ := sdsCache.GetVGs() - for _, vg := range vgs { - if vg.VGName == vgName { - return true, vg - } - } - - return false, internal.VGData{} -} - -func removeLVGFinalizerIfExist(ctx context.Context, cl client.Client, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - if !slices.Contains(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) { - return false, nil - } - - for i := range lvg.Finalizers { - if lvg.Finalizers[i] == internal.SdsNodeConfiguratorFinalizer { - lvg.Finalizers = append(lvg.Finalizers[:i], lvg.Finalizers[i+1:]...) 
- break - } - } - - err := cl.Update(ctx, lvg) - if err != nil { - return false, err - } - - return true, nil -} - -func getLVForVG(ch *cache.Cache, vgName string) []string { - lvs, _ := ch.GetLVs() - usedLVs := make([]string, 0, len(lvs)) - for _, lv := range lvs { - if lv.VGName == vgName { - usedLVs = append(usedLVs, lv.LVName) - } - } - - return usedLVs -} - -func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, name string) (*v1alpha1.LVMVolumeGroup, error) { - obj := &v1alpha1.LVMVolumeGroup{} - start := time.Now() - err := cl.Get(ctx, client.ObjectKey{ - Name: name, - }, obj) - metrics.APIMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.APIMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc() - if err != nil { - metrics.APIMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc() - return nil, err - } - return obj, nil -} - -func DeleteVGIfExist(log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, vgName string) error { - vgs, _ := sdsCache.GetVGs() - if !checkIfVGExist(vgName, vgs) { - log.Debug(fmt.Sprintf("[DeleteVGIfExist] no VG %s found, nothing to delete", vgName)) - return nil - } - - pvs, _ := sdsCache.GetPVs() - if len(pvs) == 0 { - err := errors.New("no any PV found") - log.Error(err, fmt.Sprintf("[DeleteVGIfExist] no any PV was found while deleting VG %s", vgName)) - return err - } - - start := time.Now() - command, err := utils.RemoveVG(vgName) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgremove").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgremove").Inc() - log.Debug(command) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgremove").Inc() - log.Error(err, "RemoveVG "+command) - return err - } - log.Debug(fmt.Sprintf("[DeleteVGIfExist] VG %s was successfully deleted from the node", vgName)) - var pvsToRemove []string - for _, pv := range pvs { - if pv.VGName == vgName { - pvsToRemove = append(pvsToRemove, pv.PVName) - } - } - - start = time.Now() - command, err = utils.RemovePV(pvsToRemove) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "pvremove").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "pvremove").Inc() - log.Debug(command) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "pvremove").Inc() - log.Error(err, "RemovePV "+command) - return err - } - log.Debug(fmt.Sprintf("[DeleteVGIfExist] successfully delete PVs of VG %s from the node", vgName)) - - return nil -} - -func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, vgName string, log logger.Logger) error { - for _, pvPath := range extendPVs { - start := time.Now() - command, err := utils.CreatePV(pvPath) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "pvcreate").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "pvcreate").Inc() - log.Debug(command) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "pvcreate").Inc() - log.Error(err, "CreatePV ") - return err - } - } - - start := time.Now() - command, err := utils.ExtendVG(vgName, extendPVs) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgextend").Observe(metrics.GetEstimatedTimeInSeconds(start)) - 
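// Duration and execution-count metrics are recorded for every vgextend attempt; the error counter below is bumped only when the command fails. -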
metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgextend").Inc() - log.Debug(command) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgextend").Inc() - log.Error(err, "ExtendVG ") - return err - } - return nil -} - -func CreateVGComplex(metrics monitoring.Metrics, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) error { - paths := extractPathsFromBlockDevices(nil, blockDevices) - - log.Trace(fmt.Sprintf("[CreateVGComplex] LVMVolumeGroup %s devices paths %v", lvg.Name, paths)) - for _, path := range paths { - start := time.Now() - command, err := utils.CreatePV(path) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "pvcreate").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "pvcreate").Inc() - log.Debug(command) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "pvcreate").Inc() - log.Error(err, fmt.Sprintf("[CreateVGComplex] unable to create PV by path %s", path)) - return err - } - } - - log.Debug(fmt.Sprintf("[CreateVGComplex] successfully created all PVs for the LVMVolumeGroup %s", lvg.Name)) - log.Debug(fmt.Sprintf("[CreateVGComplex] the LVMVolumeGroup %s type is %s", lvg.Name, lvg.Spec.Type)) - switch lvg.Spec.Type { - case Local: - start := time.Now() - cmd, err := utils.CreateVGLocal(lvg.Spec.ActualVGNameOnTheNode, lvg.Name, paths) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgcreate").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgcreate").Inc() - log.Debug(cmd) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgcreate").Inc() - log.Error(err, "error CreateVGLocal") - return err - } - case Shared: - start := time.Now() - cmd, err := utils.CreateVGShared(lvg.Spec.ActualVGNameOnTheNode, lvg.Name, paths) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgcreate").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgcreate").Inc() - log.Debug(cmd) - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgcreate").Inc() - log.Error(err, "error CreateVGShared") - return err - } - } - - log.Debug(fmt.Sprintf("[CreateVGComplex] successfully create VG %s of the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - - return nil -} - -func UpdateVGTagIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData) (bool, error) { - found, tagName := CheckTag(vg.VGTags) - if found && lvg.Name != tagName { - if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { - err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) - return false, err - } - } - - start := time.Now() - cmd, err := utils.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", LVMVolumeGroupTag, tagName)) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) - 
metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgchange").Inc() - log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to delete LVMVolumeGroupTag: %s=%s, vg: %s", LVMVolumeGroupTag, tagName, vg.VGName)) - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgchange").Inc() - return false, err - } - - start = time.Now() - cmd, err = utils.VGChangeAddTag(vg.VGName, fmt.Sprintf("%s=%s", LVMVolumeGroupTag, lvg.Name)) - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgchange").Inc() - log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) - if err != nil { - log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add LVMVolumeGroupTag: %s=%s, vg: %s", LVMVolumeGroupTag, lvg.Name, vg.VGName)) - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "vgchange").Inc() - return false, err - } - - return true, nil - } - - return false, nil -} - -func ExtendThinPool(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, specThinPool v1alpha1.LVMVolumeGroupThinPoolSpec) error { - volumeGroupFreeSpaceBytes := lvg.Status.VGSize.Value() - lvg.Status.AllocatedSize.Value() - tpRequestedSize, err := getRequestedSizeFromString(specThinPool.Size, lvg.Status.VGSize) - if err != nil { - return err - } - - log.Trace(fmt.Sprintf("[ExtendThinPool] volumeGroupSize = %s", lvg.Status.VGSize.String())) - log.Trace(fmt.Sprintf("[ExtendThinPool] volumeGroupAllocatedSize = %s", lvg.Status.AllocatedSize.String())) - log.Trace(fmt.Sprintf("[ExtendThinPool] volumeGroupFreeSpaceBytes = %d", volumeGroupFreeSpaceBytes)) - - log.Info(fmt.Sprintf("[ExtendThinPool] start resizing thin pool: %s; with new size: %s", specThinPool.Name, tpRequestedSize.String())) - - var cmd string - start := time.Now() - if utils.AreSizesEqualWithinDelta(tpRequestedSize, lvg.Status.VGSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[ExtendThinPool] thin-pool %s of the LVMVolumeGroup %s will be extend to size 100VG", specThinPool.Name, lvg.Name)) - cmd, err = utils.ExtendLVFullVGSpace(lvg.Spec.ActualVGNameOnTheNode, specThinPool.Name) - } else { - log.Debug(fmt.Sprintf("[ExtendThinPool] thin-pool %s of the LVMVolumeGroup %s will be extend to size %s", specThinPool.Name, lvg.Name, tpRequestedSize.String())) - cmd, err = utils.ExtendLV(tpRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, specThinPool.Name) - } - metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "lvextend").Observe(metrics.GetEstimatedTimeInSeconds(start)) - metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "lvextend").Inc() - if err != nil { - metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "lvextend").Inc() - log.Error(err, fmt.Sprintf("[ExtendThinPool] unable to extend LV, name: %s, cmd: %s", specThinPool.Name, cmd)) - return err - } - - return nil -} - -func addLVGLabelIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, labelKey, labelValue string) (bool, error) { - if !shouldUpdateLVGLabels(log, lvg, labelKey, labelValue) { - return false, nil - } - - if lvg.Labels == nil { - lvg.Labels = make(map[string]string) - } - - lvg.Labels[labelKey] = labelValue - err := cl.Update(ctx, lvg) - if err != nil { - return false, err - } - - return true, nil -} diff --git 
new file mode 100644
index 00000000..ae256a8a
--- /dev/null
+++ b/images/agent/src/pkg/controller/utils/utils.go
@@ -0,0 +1,122 @@
+package utils
+
+import (
+	"agent/internal"
+	"strconv"
+	"strings"
+
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	Thick = "Thick"
+	Thin  = "Thin"
+
+	LLVStatusPhaseCreated  = "Created"
+	LLVStatusPhasePending  = "Pending"
+	LLVStatusPhaseResizing = "Resizing"
+	LLVStatusPhaseFailed   = "Failed"
+)
+
+func IsPercentSize(size string) bool {
+	return strings.Contains(size, "%")
+}
+
+func CheckTag(tags string) (bool, string) {
+	if !strings.Contains(tags, internal.LVMTags[0]) {
+		return false, ""
+	}
+
+	splitTags := strings.Split(tags, ",")
+	for _, tag := range splitTags {
+		if strings.HasPrefix(tag, "storage.deckhouse.io/lvmVolumeGroupName") {
+			kv := strings.Split(tag, "=")
+			return true, kv[1]
+		}
+	}
+
+	return true, ""
+}
+
+func GetRequestedSizeFromString(size string, targetSpace resource.Quantity) (resource.Quantity, error) {
+	if IsPercentSize(size) {
+		strPercent := strings.Split(size, "%")[0]
+		percent, err := strconv.Atoi(strPercent)
+		if err != nil {
+			return resource.Quantity{}, err
+		}
+		lvSize := targetSpace.Value() * int64(percent) / 100
+		return *resource.NewQuantity(lvSize, resource.BinarySI), nil
+	} else {
+		return resource.ParseQuantity(size)
+	}
+}
+
+func GetThinPoolAvailableSpace(actualSize, allocatedSize resource.Quantity, allocationLimit string) (resource.Quantity, error) {
+	totalSize, err := GetThinPoolSpaceWithAllocationLimit(actualSize, allocationLimit)
+	if err != nil {
+		return resource.Quantity{}, err
+	}
+
+	return *resource.NewQuantity(totalSize.Value()-allocatedSize.Value(), resource.BinarySI), nil
+}
+
+func GetThinPoolSpaceWithAllocationLimit(actualSize resource.Quantity, allocationLimit string) (resource.Quantity, error) {
+	limits := strings.Split(allocationLimit, "%")
+	percent, err := strconv.Atoi(limits[0])
+	if err != nil {
+		return resource.Quantity{}, err
+	}
+
+	factor := float64(percent)
+	factor /= 100
+
+	return *resource.NewQuantity(int64(float64(actualSize.Value())*factor), resource.BinarySI), nil
+}
+
+func GetLLVRequestedSize(llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (resource.Quantity, error) {
+	switch llv.Spec.Type {
+	case Thick:
+		return GetRequestedSizeFromString(llv.Spec.Size, lvg.Status.VGSize)
+	case Thin:
+		for _, tp := range lvg.Status.ThinPools {
+			if tp.Name == llv.Spec.Thin.PoolName {
+				totalSize, err := GetThinPoolSpaceWithAllocationLimit(tp.ActualSize, tp.AllocationLimit)
+				if err != nil {
+					return resource.Quantity{}, err
+				}
+
+				return GetRequestedSizeFromString(llv.Spec.Size, totalSize)
+			}
+		}
+	}
+
+	return resource.Quantity{}, nil
+}
+
+func LVGBelongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool {
+	var belongs bool
+	for _, node := range lvg.Status.Nodes {
+		if node.Name == nodeName {
+			belongs = true
+		}
+	}
+
+	return belongs
+}
+
+func GetFreeLVGSpaceForLLV(lvg *v1alpha1.LVMVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity {
+	switch llv.Spec.Type {
+	case Thick:
+		return lvg.Status.VGFree
+	case Thin:
+		for _, tp := range lvg.Status.ThinPools {
+			if tp.Name == llv.Spec.Thin.PoolName {
+				return tp.AvailableSpace
+			}
+		}
+	}
+
+	return resource.Quantity{}
+}
diff --git a/images/agent/src/pkg/scanner/scanner.go b/images/agent/src/pkg/scanner/scanner.go
index f77bc572..0a9f9bca 100644
--- a/images/agent/src/pkg/scanner/scanner.go
+++ b/images/agent/src/pkg/scanner/scanner.go
@@ -9,14 +9,13 @@ import (
 
 	"github.com/pilebones/go-udev/netlink"
 	"k8s.io/utils/clock"
-	kubeCtrl "sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/controller"
 	"agent/pkg/controller/bd"
+	"agent/pkg/controller/lvg"
 	"agent/pkg/logger"
 	"agent/pkg/throttler"
 	"agent/pkg/utils"
@@ -28,7 +27,7 @@ func RunScanner(
 	cfg config.Options,
 	sdsCache *cache.Cache,
 	bdCtrl func(context.Context) (controller.Result, error),
-	lvgDiscoverCtrl kubeCtrl.Controller,
+	lvgDiscoverCtrl func(context.Context) (controller.Result, error),
 ) error {
 
 	log.Info("[RunScanner] starts the work")
@@ -126,12 +125,12 @@ func runControllersReconcile(
 	ctx context.Context,
 	log logger.Logger,
 	bdCtrl func(context.Context) (controller.Result, error),
-	lvgDiscoverCtrl kubeCtrl.Controller,
+	lvgDiscoverCtrl func(context.Context) (controller.Result, error),
 ) error {
-	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", bd.Name))
+	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", bd.DiscovererName))
 	bdRes, err := bdCtrl(ctx)
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", bd.Name))
+		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", bd.DiscovererName))
 		return err
 	}
 
@@ -147,12 +146,12 @@ func runControllersReconcile(
 		}()
 	}
 
-	log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", bd.Name))
+	log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", bd.DiscovererName))
 
-	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
-	lvgRes, err := lvgDiscoverCtrl.Reconcile(ctx, reconcile.Request{})
+	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", lvg.DiscovererName))
+	lvgRes, err := lvgDiscoverCtrl(ctx)
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
+		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", lvg.DiscovererName))
 		return err
 	}
 	if lvgRes.RequeueAfter > 0 {
@@ -160,13 +159,13 @@
 			for lvgRes.RequeueAfter > 0 {
 				log.Warning(fmt.Sprintf("[runControllersReconcile] LVMVolumeGroups reconcile needs a retry in %s", lvgRes.RequeueAfter.String()))
 				time.Sleep(lvgRes.RequeueAfter)
-				lvgRes, err = lvgDiscoverCtrl.Reconcile(ctx, reconcile.Request{})
+				lvgRes, err = lvgDiscoverCtrl(ctx)
 			}
 			log.Info("[runControllersReconcile] successfully reconciled LVMVolumeGroups after a retry")
 		}()
 	}
 
-	log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", controller.LVMVolumeGroupDiscoverCtrlName))
+	log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", lvg.DiscovererName))
 
 	return nil
 }
diff --git a/images/agent/src/pkg/test_utils/fake_client.go b/images/agent/src/pkg/test_utils/fake_client.go
index d66aefb6..adfd11ed 100644
--- a/images/agent/src/pkg/test_utils/fake_client.go
+++ b/images/agent/src/pkg/test_utils/fake_client.go
@@ -8,13 +8,14 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
-func NewFakeClient() client.WithWatch {
+func NewFakeClient(statusSubresources ...client.Object) client.WithWatch {
 	s := scheme.Scheme
 	_ = metav1.AddMetaToScheme(s)
 	_ = v1alpha1.AddToScheme(s)
-	builder := fake.NewClientBuilder().WithScheme(s)
-
-	cl := builder.Build()
-	return cl
+	return fake.
+		NewClientBuilder().
+		WithScheme(s).
+		WithStatusSubresource(statusSubresources...).
+		Build()
 }
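
Note (not part of the patch): a minimal, hypothetical sketch of how the size helpers added in pkg/controller/utils above could be exercised. The import path follows the agent module layout used elsewhere in this diff; the values and the program itself are illustrative only.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	"agent/pkg/controller/utils"
)

func main() {
	vgSize := resource.MustParse("10Gi")

	// A percent value is resolved against the supplied target space: 50% of 10Gi is 5Gi.
	half, err := utils.GetRequestedSizeFromString("50%", vgSize)
	if err != nil {
		panic(err)
	}
	fmt.Println(half.String()) // 5Gi

	// A plain value is parsed as an ordinary resource quantity.
	fixed, err := utils.GetRequestedSizeFromString("2Gi", vgSize)
	if err != nil {
		panic(err)
	}
	fmt.Println(fixed.String()) // 2Gi

	// A thin pool with a "150%" allocation limit exposes 1.5x its actual size,
	// so a 10Gi pool reports 15Gi of allocatable space.
	poolSize := resource.MustParse("10Gi")
	total, err := utils.GetThinPoolSpaceWithAllocationLimit(poolSize, "150%")
	if err != nil {
		panic(err)
	}
	fmt.Println(total.String()) // 15Gi
}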
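
Note (not part of the patch): because the scanner now takes the LVMVolumeGroup discoverer as a plain func(context.Context) (controller.Result, error), any caller can drive it with a simple retry loop. The helper and stub below are hypothetical and only mirror the requeue handling shown in scanner.go above.

package main

import (
	"context"
	"fmt"
	"time"

	"agent/pkg/controller"
)

// rerunUntilSettled is a hypothetical helper: it keeps invoking a discoverer
// function for as long as the returned result asks for a requeue.
func rerunUntilSettled(ctx context.Context, discover func(context.Context) (controller.Result, error)) error {
	for {
		res, err := discover(ctx)
		if err != nil {
			return err
		}
		if res.RequeueAfter == 0 {
			return nil
		}
		time.Sleep(res.RequeueAfter)
	}
}

func main() {
	// A stub discoverer that asks for one retry and then settles.
	calls := 0
	stub := func(_ context.Context) (controller.Result, error) {
		calls++
		if calls == 1 {
			return controller.Result{RequeueAfter: 10 * time.Millisecond}, nil
		}
		return controller.Result{}, nil
	}

	if err := rerunUntilSettled(context.Background(), stub); err != nil {
		fmt.Println("discovery failed:", err)
	}
}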
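
Note (not part of the patch): a minimal, hypothetical test sketch of the extended NewFakeClient helper; registering a type as a status subresource makes the fake client keep spec and status updates separate, as a real API server would. The Status.Phase assignment is assumed purely for illustration.

package test_utils_test

import (
	"context"
	"testing"

	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"agent/pkg/test_utils"
)

func TestStatusSubresourceIsHonoured(t *testing.T) {
	// Register LVMVolumeGroup as a status subresource on the fake client.
	cl := test_utils.NewFakeClient(&v1alpha1.LVMVolumeGroup{})

	lvg := &v1alpha1.LVMVolumeGroup{ObjectMeta: metav1.ObjectMeta{Name: "vg-1"}}
	if err := cl.Create(context.Background(), lvg); err != nil {
		t.Fatal(err)
	}

	lvg.Status.Phase = "Ready" // assumed status field, shown only to exercise the status client
	if err := cl.Status().Update(context.Background(), lvg); err != nil {
		t.Fatal(err)
	}
}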