From 5dbc40146ee3afa5150f41fb119d41afd4386d10 Mon Sep 17 00:00:00 2001 From: Viktor Kramarenko Date: Thu, 22 Aug 2024 19:04:31 +0300 Subject: [PATCH] refactored cache Signed-off-by: Viktor Kramarenko --- images/agent/src/pkg/cache/cache.go | 82 +++++++++++++++---- .../agent/src/pkg/controller/block_device.go | 4 +- .../lvm_logical_volume_extender_watcher.go | 28 +++---- .../controller/lvm_logical_volume_watcher.go | 56 ++++++------- .../lvm_logical_volume_watcher_func.go | 36 ++++---- .../lvm_logical_volume_watcher_test.go | 4 +- .../controller/lvm_volume_group_discover.go | 12 +-- .../controller/lvm_volume_group_watcher.go | 4 +- .../lvm_volume_group_watcher_func.go | 8 +- images/agent/src/pkg/scanner/scanner.go | 2 +- 10 files changed, 142 insertions(+), 94 deletions(-) diff --git a/images/agent/src/pkg/cache/cache.go b/images/agent/src/pkg/cache/cache.go index ac937347..4172edce 100644 --- a/images/agent/src/pkg/cache/cache.go +++ b/images/agent/src/pkg/cache/cache.go @@ -1,23 +1,31 @@ package cache import ( - "agent/internal" - "agent/pkg/logger" "bytes" "fmt" + "sync" + + "agent/internal" + "agent/pkg/logger" ) type Cache struct { + m sync.RWMutex devices []internal.Device deviceErrs bytes.Buffer pvs []internal.PVData pvsErrs bytes.Buffer vgs []internal.VGData vgsErrs bytes.Buffer - lvs []internal.LVData + lvs map[string]*LVData lvsErrs bytes.Buffer } +type LVData struct { + Data internal.LVData + Exist bool +} + func New() *Cache { return &Cache{} } @@ -59,21 +67,55 @@ func (c *Cache) GetVGs() ([]internal.VGData, bytes.Buffer) { } func (c *Cache) StoreLVs(lvs []internal.LVData, stdErr bytes.Buffer) { - c.lvs = lvs + lvsOnNode := make(map[string]internal.LVData, len(lvs)) + for _, lv := range lvs { + lvsOnNode[c.configureLVKey(lv.VGName, lv.LVName)] = lv + } + + c.m.Lock() + defer c.m.Unlock() + if c.lvs == nil { + c.lvs = make(map[string]*LVData, len(lvs)) + } + + for _, lv := range lvsOnNode { + k := c.configureLVKey(lv.VGName, lv.LVName) + if cachedLV, 
exist := c.lvs[k]; !exist || cachedLV.Exist {
+			c.lvs[k] = &LVData{Data: lv, Exist: true}
+		}
+	}
+
+	for key, lv := range c.lvs {
+		if lv.Exist {
+			continue
+		}
+
+		if _, exist := lvsOnNode[key]; !exist {
+			delete(c.lvs, key)
+		}
+	}
+
 	c.lvsErrs = stdErr
 }
 
 func (c *Cache) GetLVs() ([]internal.LVData, bytes.Buffer) {
 	dst := make([]internal.LVData, len(c.lvs))
-	copy(dst, c.lvs)
+	dst = dst[:0]
+	c.m.RLock()
+	defer c.m.RUnlock()
+	for _, lv := range c.lvs {
+		dst = append(dst, lv.Data)
+	}
 	return dst, c.lvsErrs
 }
 
-func (c *Cache) FindLV(vgName, lvName string) *internal.LVData {
+func (c *Cache) FindLV(vgName, lvName string) *LVData {
+	c.m.RLock()
+	defer c.m.RUnlock()
 	for _, lv := range c.lvs {
-		if lv.VGName == vgName && lv.LVName == lvName {
-			return &lv
+		if lv.Data.VGName == vgName && lv.Data.LVName == lvName {
+			return lv
 		}
 	}
@@ -81,15 +123,20 @@ func (c *Cache) AddLV(vgName, lvName string) {
-	c.lvs = append(c.lvs, internal.LVData{VGName: vgName, LVName: lvName})
+	c.m.Lock()
+	defer c.m.Unlock()
+	if c.lvs == nil { c.lvs = make(map[string]*LVData) }
+	c.lvs[c.configureLVKey(vgName, lvName)] = &LVData{
+		Data: internal.LVData{VGName: vgName, LVName: lvName},
+		Exist: true,
+	}
 }
 
 func (c *Cache) RemoveLV(vgName, lvName string) {
-	for i, lv := range c.lvs {
-		if lv.VGName == vgName && lv.LVName == lvName {
-			c.lvs = append(c.lvs[:i], c.lvs[i+1:]...)
-		}
-	}
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if lv, exist := c.lvs[c.configureLVKey(vgName, lvName)]; exist { lv.Exist = false }
 }
 
 func (c *Cache) FindVG(vgName string) *internal.VGData {
@@ -126,11 +172,17 @@ func (c *Cache) PrintTheCache(log logger.Logger) {
 	log.Cache(c.vgsErrs.String())
 	log.Cache("[VGs ENDS]")
 	log.Cache("[LVs BEGIN]")
+	c.m.RLock()
 	for _, lv := range c.lvs {
-		log.Cache(fmt.Sprintf(" LV Name: %s, VG name: %s, size: %s, tags: %s, attr: %s, pool: %s", lv.LVName, lv.VGName, lv.LVSize.String(), lv.LvTags, lv.LVAttr, lv.PoolName))
+		log.Cache(fmt.Sprintf(" LV Name: %s, VG name: %s, size: %s, tags: %s, attr: %s, pool: %s", lv.Data.LVName, lv.Data.VGName, lv.Data.LVSize.String(), lv.Data.LvTags, lv.Data.LVAttr, lv.Data.PoolName))
 	}
+	c.m.RUnlock()
 	log.Cache("[ERRS]")
 	log.Cache(c.lvsErrs.String())
 	log.Cache("[LVs ENDS]")
 	log.Cache("*****************CACHE ENDS*****************")
 }
+
+func (c *Cache) configureLVKey(vgName, lvName string) string {
+	return fmt.Sprintf("%s/%s", vgName, lvName)
+}
diff --git a/images/agent/src/pkg/controller/block_device.go b/images/agent/src/pkg/controller/block_device.go
index 8f616d15..dc87ba76 100644
--- a/images/agent/src/pkg/controller/block_device.go
+++ b/images/agent/src/pkg/controller/block_device.go
@@ -579,7 +579,7 @@ func DeleteAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 
 func ReTag(ctx context.Context, log logger.Logger, metrics monitoring.Metrics) error {
 	// thin pool
-	log.Debug("[ReTag] start re-tagging LV")
+	log.Debug("[ReTag] start re-tagging Data")
 	start := time.Now()
 	lvs, cmdStr, _, err := utils.GetAllLVs(ctx)
 	metrics.UtilsCommandsDuration(BlockDeviceCtrlName, "lvs").Observe(metrics.GetEstimatedTimeInSeconds(start))
@@ -623,7 +623,7 @@ func ReTag(ctx context.Context, log logger.Logger, metrics monitoring.Metrics) e
 			}
 		}
 	}
-	log.Debug("[ReTag] end re-tagging LV")
+	log.Debug("[ReTag] end re-tagging Data")
 
 	log.Debug("[ReTag] start re-tagging LVM")
 	start = time.Now()
diff --git
a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go index 078ac6fe..535459c6 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go @@ -149,8 +149,8 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if lv == nil { - err = fmt.Errorf("LV %s not found", llv.Spec.ActualLVNameOnTheNode) - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to find LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + err = fmt.Errorf("Data %s not found", llv.Spec.ActualLVNameOnTheNode) + log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to find Data %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, err.Error()) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) @@ -159,21 +159,21 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m continue } - if utils.AreSizesEqualWithinDelta(llvRequestedSize, lv.LVSize, internal.ResizeDelta) { + if utils.AreSizesEqualWithinDelta(llvRequestedSize, lv.Data.LVSize, internal.ResizeDelta) { log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should not be extended", llv.Name)) continue } - if llvRequestedSize.Value() < lv.LVSize.Value() { - log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s requested size %s is less than actual one on the node %s", llv.Name, llvRequestedSize.String(), lv.LVSize.String())) + if llvRequestedSize.Value() < lv.Data.LVSize.Value() { + 
log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s requested size %s is less than actual one on the node %s", llv.Name, llvRequestedSize.String(), lv.Data.LVSize.String())) continue } freeSpace := getFreeLVGSpaceForLLV(lvg, &llv) if llvRequestedSize.Value()+internal.ResizeDelta.Value() > freeSpace.Value() { err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend the LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) + log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend the Data %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, fmt.Sprintf("unable to extend Data, err: %s", err.Error())) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) shouldRetry = true @@ -191,8 +191,8 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m cmd, err := utils.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if err != nil { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd)) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) + log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend Data %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd)) + err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, 
LLVStatusPhaseFailed, fmt.Sprintf("unable to extend Data, err: %s", err.Error())) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) } @@ -207,18 +207,18 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m ) for currentAttempts < maxAttempts { lv = sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - if utils.AreSizesEqualWithinDelta(lv.LVSize, llvRequestedSize, internal.ResizeDelta) { - log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s of the LVMLogicalVolume %s was successfully updated in the cache", lv.LVName, llv.Name)) + if utils.AreSizesEqualWithinDelta(lv.Data.LVSize, llvRequestedSize, internal.ResizeDelta) { + log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] Data %s of the LVMLogicalVolume %s was successfully updated in the cache", lv.Data.LVName, llv.Name)) break } - log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s size of the LVMLogicalVolume %s was not yet updated in the cache, retry...", lv.LVName, llv.Name)) + log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] Data %s size of the LVMLogicalVolume %s was not yet updated in the cache, retry...", lv.Data.LVName, llv.Name)) currentAttempts++ time.Sleep(1 * time.Second) } if currentAttempts == maxAttempts { - err = fmt.Errorf("LV %s is not updated in the cache", lv.LVName) + err = fmt.Errorf("Data %s is not updated in the cache", lv.Data.LVName) log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to resize the LVMLogicalVolume %s", llv.Name)) shouldRetry = true @@ -228,7 +228,7 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m continue } - updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, &llv, lv.LVSize) + updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, &llv, lv.Data.LVSize) if err != nil { log.Error(err, 
fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) shouldRetry = true diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go index 2d4c0f98..4a09b983 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go @@ -145,10 +145,6 @@ func RunLVMLogicalVolumeWatcherController( return reconcile.Result{RequeueAfter: cfg.LLVRequeueIntervalSec}, nil } - // создаем LV и пытаемся получить его размер на 113278 строчке I0819 12:43:10.563596 - // при этом у нас кэш начинает заполняться в I0819 12:43:09.486602 и заканчивает в I0819 12:43:22.070604, а потом только в I0819 12:43:22.081861 - // при этом получаем ретрай в I0819 12:43:15.563851 (и на мо - log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] successfully ended reconciliation of the LVMLogicalVolume %s", request.Name)) return reconcile.Result{}, nil }), @@ -249,12 +245,12 @@ func reconcileLLVCreateFunc( } freeSpace := getFreeLVGSpaceForLLV(lvg, llv) - log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s type: %s requested size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, llvRequestSize.String(), freeSpace.String())) + log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LVMLogicalVolume %s, Data: %s, VG: %s type: %s requested size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, llvRequestSize.String(), freeSpace.String())) if !utils.AreSizesEqualWithinDelta(llvRequestSize, freeSpace, internal.ResizeDelta) { if freeSpace.Value() < llvRequestSize.Value()+internal.ResizeDelta.Value() { err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more 
than the actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) + log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] the Data %s requested size %s of the LVMLogicalVolume %s is more than the actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) // we return true cause the user might manage LVMVolumeGroup free space without changing the LLV return true, err @@ -264,10 +260,10 @@ func reconcileLLVCreateFunc( var cmd string switch llv.Spec.Type { case Thick: - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s will be created in VG %s with size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llvRequestSize.String())) + log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] Data %s will be created in VG %s with size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llvRequestSize.String())) cmd, err = utils.CreateThickLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value(), isContiguous(llv)) case Thin: - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s of the LVMLogicalVolume %s will be create in Thin-pool %s with size %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) + log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] Data %s of the LVMLogicalVolume %s will be create in Thin-pool %s with size %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) cmd, err = utils.CreateThinLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Thin.PoolName, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value()) } log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] runs cmd: %s", cmd)) @@ -276,18 +272,18 @@ func reconcileLLVCreateFunc( return true, err } - log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully created LV %s in VG %s for LVMLogicalVolume resource with name: %s", 
llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) + log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully created Data %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] adds the LV %s to the cache", llv.Spec.ActualLVNameOnTheNode)) + log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] adds the Data %s to the cache", llv.Spec.ActualLVNameOnTheNode)) sdsCache.AddLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] tries to get the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) + log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] tries to get the Data %s actual size", llv.Spec.ActualLVNameOnTheNode)) actualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if actualSize.Value() == 0 { - log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] unable to get actual size for LV %s in VG %s (likely LV was not found in the cache), retry...", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] unable to get actual size for Data %s in VG %s (likely Data was not found in the cache), retry...", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) return true, nil } - log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] successfully got the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) - log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s in VG: %s has actual size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String())) + log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] successfully got the Data %s actual size", llv.Spec.ActualLVNameOnTheNode)) + log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the Data %s in VG: %s has actual size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, 
actualSize.String())) updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, actualSize) if err != nil { @@ -316,7 +312,7 @@ func reconcileLLVUpdateFunc( ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - // status might be nil if a user creates the resource with LV name which matches existing LV on the node + // status might be nil if a user creates the resource with Data name which matches existing Data on the node if llv.Status == nil { err := updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhasePending, "") if err != nil { @@ -325,11 +321,11 @@ func reconcileLLVUpdateFunc( } } - // it needs to get current LV size from the node as status might be nil + // it needs to get current Data size from the node as status might be nil log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size before the extension", llv.Name)) actualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if actualSize.Value() == 0 { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s has zero size (likely LV was not updated in the cache) ", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] Data %s of the LVMLogicalVolume %s has zero size (likely Data was not updated in the cache) ", llv.Spec.ActualLVNameOnTheNode, llv.Name)) return true, nil } log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size %s before the extension", llv.Name, actualSize.String())) @@ -343,7 +339,7 @@ func reconcileLLVUpdateFunc( log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String())) if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] 
the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String())) + log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the Data %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String())) updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, actualSize) if err != nil { @@ -363,9 +359,9 @@ func reconcileLLVUpdateFunc( } extendingSize := subtractQuantity(llvRequestSize, actualSize) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has extending size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, extendingSize.String())) + log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the Data %s in VG %s has extending size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, extendingSize.String())) if extendingSize.Value() < 0 { - err = fmt.Errorf("specified LV size %dB is less than actual one on the node %dB", llvRequestSize.Value(), actualSize.Value()) + err = fmt.Errorf("specified Data size %dB is less than actual one on the node %dB", llvRequestSize.Value(), actualSize.Value()) log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to extend the LVMLogicalVolume %s", llv.Name)) return false, err } @@ -381,19 +377,19 @@ func reconcileLLVUpdateFunc( } freeSpace := getFreeLVGSpaceForLLV(lvg, llv) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s, type: %s, extending size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, extendingSize.String(), freeSpace.String())) + log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s, Data: %s, VG: %s, type: %s, extending size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, 
llv.Spec.Type, extendingSize.String(), freeSpace.String())) if !utils.AreSizesEqualWithinDelta(freeSpace, extendingSize, internal.ResizeDelta) { if freeSpace.Value() < extendingSize.Value()+internal.ResizeDelta.Value() { err = errors.New("not enough space") - log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) + log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] the Data %s requested size %s of the LVMLogicalVolume %s is more than actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) // returns true cause a user might manage LVG free space without changing the LLV return true, err } } - log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s will be extended with size: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llvRequestSize.String())) + log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] Data %s of the LVMLogicalVolume %s will be extended with size: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llvRequestSize.String())) cmd, err := utils.ExtendLV(llvRequestSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] runs cmd: %s", cmd)) if err != nil { @@ -401,21 +397,21 @@ func reconcileLLVUpdateFunc( return true, err } - log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully extended LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) + log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully extended Data %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size after the 
extension", llv.Name)) newActualSize := getLVActualSize(sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) // this case might be triggered if sds cache will not update lv state in time if newActualSize.Value() == actualSize.Value() { - log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s was extended but cache is not updated yet. It will be retried", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] Data %s of the LVMLogicalVolume %s was extended but cache is not updated yet. It will be retried", llv.Spec.ActualLVNameOnTheNode, llv.Name)) return true, nil } log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size before the extension", llv.Name)) - log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s actual size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, newActualSize.String())) + log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the Data %s in VG %s actual size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, newActualSize.String())) - // need this here as a user might create the LLV with existing LV + // need this here as a user might create the LLV with existing Data updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, llv, newActualSize) if err != nil { log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) @@ -443,7 +439,7 @@ func reconcileLLVDeleteFunc( ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) - // The controller won't remove the LLV resource and LV volume till the resource has any other finalizer. + // The controller won't remove the LLV resource and Data volume till the resource has any other finalizer. 
if len(llv.Finalizers) != 0 { if len(llv.Finalizers) > 1 || llv.Finalizers[0] != internal.SdsNodeConfiguratorFinalizer { @@ -454,11 +450,11 @@ func reconcileLLVDeleteFunc( err := deleteLVIfNeeded(log, sdsCache, lvg.Spec.ActualVGNameOnTheNode, llv) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the Data %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) return true, err } - log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully deleted the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully deleted the Data %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) err = removeLLVFinalizersIfExist(ctx, cl, metrics, log, llv) if err != nil { diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go index e3ea08bf..eb820845 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go @@ -152,26 +152,26 @@ func updateLLVPhaseToCreatedIfNeeded(ctx context.Context, cl client.Client, llv func deleteLVIfNeeded(log logger.Logger, sdsCache *cache.Cache, vgName string, llv *v1alpha1.LVMLogicalVolume) error { lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) - if lv == nil { - log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) + if lv == nil || !lv.Exist { + log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find Data %s in VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) return nil } - // this case prevents unexpected same-name LV deletions which does not actually belong to our LLV - 
if !checkIfLVBelongsToLLV(llv, lv) { - log.Warning(fmt.Sprintf("[deleteLVIfNeeded] no need to delete LV %s as it doesnt belong to LVMLogicalVolume %s", lv.LVName, llv.Name)) + // this case prevents unexpected same-name Data deletions which does not actually belong to our LLV + if !checkIfLVBelongsToLLV(llv, &lv.Data) { + log.Warning(fmt.Sprintf("[deleteLVIfNeeded] no need to delete Data %s as it doesnt belong to LVMLogicalVolume %s", lv.Data.LVName, llv.Name)) return nil } cmd, err := utils.RemoveLV(vgName, llv.Spec.ActualLVNameOnTheNode) log.Debug(fmt.Sprintf("[deleteLVIfNeeded] runs cmd: %s", cmd)) if err != nil { - log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to remove LV %s from VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) + log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to remove Data %s from VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) return err } - log.Debug(fmt.Sprintf("[deleteLVIfNeeded] removes LV %s from the cache", lv.LVName)) - sdsCache.RemoveLV(lv.VGName, lv.LVName) + log.Debug(fmt.Sprintf("[deleteLVIfNeeded] removes Data %s from the cache", lv.Data.LVName)) + sdsCache.RemoveLV(lv.Data.VGName, lv.Data.LVName) return nil } @@ -182,7 +182,7 @@ func getLVActualSize(sdsCache *cache.Cache, vgName, lvName string) resource.Quan return resource.Quantity{} } - result := resource.NewQuantity(lv.LVSize.Value(), resource.BinarySI) + result := resource.NewQuantity(lv.Data.LVSize.Value(), resource.BinarySI) return *result } @@ -253,7 +253,7 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol reason := strings.Builder{} if len(llv.Spec.ActualLVNameOnTheNode) == 0 { - reason.WriteString("No LV name specified. ") + reason.WriteString("No Data name specified. ") } llvRequestedSize, err := getLLVRequestedSize(llv, lvg) @@ -262,12 +262,12 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol } if llvRequestedSize.Value() == 0 { - reason.WriteString("Zero size for LV. 
") + reason.WriteString("Zero size for Data. ") } if llv.Status != nil { if llvRequestedSize.Value()+internal.ResizeDelta.Value() < llv.Status.ActualSize.Value() { - reason.WriteString("Desired LV size is less than actual one. ") + reason.WriteString("Desired Data size is less than actual one. ") } } @@ -291,17 +291,17 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol } case Thick: if llv.Spec.Thin != nil { - reason.WriteString("Thin pool specified for Thick LV. ") + reason.WriteString("Thin pool specified for Thick Data. ") } } - // if a specified Thick LV name matches the existing Thin one + // if a specified Thick Data name matches the existing Thin one lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if lv != nil { - if len(lv.LVAttr) == 0 { - reason.WriteString(fmt.Sprintf("LV %s was found on the node, but can't be validated due to its attributes is empty string. ", lv.LVName)) - } else if !checkIfLVBelongsToLLV(llv, lv) { - reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it is doesnt match the one on the node.", lv.LVName)) + if len(lv.Data.LVAttr) == 0 { + reason.WriteString(fmt.Sprintf("Data %s was found on the node, but can't be validated due to its attributes is empty string. 
", lv.Data.LVName)) + } else if !checkIfLVBelongsToLLV(llv, &lv.Data) { + reason.WriteString(fmt.Sprintf("Specified Data %s is already created and it is doesnt match the one on the node.", lv.Data.LVName)) } } diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go index 196e7b1e..880c388b 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go @@ -153,7 +153,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{}) if assert.False(t, v) { - assert.Equal(t, "Zero size for LV. Thin pool specified for Thick LV. LV test-lv was found on the node, but can't be validated due to its attributes is empty string. ", r) + assert.Equal(t, "Zero size for Data. Thin pool specified for Thick Data. Data test-lv was found on the node, but can't be validated due to its attributes is empty string. ", r) } }) @@ -212,7 +212,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{}) if assert.False(t, v) { - assert.Equal(t, "No LV name specified. Zero size for LV. No thin pool specified. ", r) + assert.Equal(t, "No Data name specified. Zero size for Data. No thin pool specified. 
", r) } }) }) diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvm_volume_group_discover.go index f4f3d25d..12688643 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_discover.go @@ -145,7 +145,7 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met if !hasLVMVolumeGroupDiff(log, lvg, candidate) { log.Debug(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] no data to update for LvmVolumeGroup, name: "%s"`, lvg.Name)) - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") + err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create Data") if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) shouldRequeue = true @@ -178,7 +178,7 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met continue } - err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") + err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create Data") if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) shouldRequeue = true @@ -447,7 +447,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m lvs, lvErrs := sdsCache.GetLVs() var thinPools []internal.LVData if len(lvs) > 0 { - // Filter LV to get only thin pools as we do not support thick for now. 
+ // Filter LV to get only thin pools as we do not support thick for now. thinPools = getThinPools(lvs) } @@ -458,7 +458,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m lvIssues = sortThinPoolIssuesByVG(log, thinPools) } - // Sort PV,BlockDevices and LV by VG to fill needed information for LVMVolumeGroup resource further. + // Sort PV,BlockDevices and LV by VG to fill needed information for LVMVolumeGroup resource further. sortedPVs := sortPVsByVG(pvs, vgWithTag) sortedBDs := sortBlockDevicesByVG(bds, vgWithTag) log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] BlockDevices: %+v", bds)) @@ -732,7 +732,7 @@ func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]int for _, thinPool := range tps { usedSize, err := getThinPoolUsedSize(thinPool) - log.Trace(fmt.Sprintf("[getStatusThinPools] LV %v for VG name %s", thinPool, vg.VGName)) + log.Trace(fmt.Sprintf("[getStatusThinPools] LV %v for VG name %s", thinPool, vg.VGName)) if err != nil { log.Error(err, "[getStatusThinPools] unable to getThinPoolUsedSize") } @@ -919,7 +919,7 @@ func UpdateLVMVolumeGroupByCandidate( return fmt.Errorf(`[UpdateLVMVolumeGroupByCandidate] unable to update LVMVolumeGroup, name: "%s", err: %w`, lvg.Name, err) } - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") + err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) } diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go index 484fa0eb..eb05b6f8 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go +++ 
b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go @@ -282,7 +282,7 @@ func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Lo if len(usedLVs) > 0 { err := fmt.Errorf("VG %s uses LVs: %v. Delete used LVs first", lvg.Spec.ActualVGNameOnTheNode, usedLVs) log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to reconcile LVG %s", lvg.Name)) - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status False to the LVMVolumeGroup %s due to LV does exist", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status False to the LVMVolumeGroup %s due to LV does exist", internal.TypeVGConfigurationApplied, lvg.Name)) err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) @@ -292,7 +292,7 @@ func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Lo return true, nil } - log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] VG %s of the LVMVolumeGroup %s does not use any LV. Start to delete the VG", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] VG %s of the LVMVolumeGroup %s does not use any LV. 
Start to delete the VG", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) err := DeleteVGIfExist(log, metrics, sdsCache, lvg.Spec.ActualVGNameOnTheNode) if err != nil { log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete VG %s", lvg.Spec.ActualVGNameOnTheNode)) diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go index a28f9ecb..268d78c1 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go @@ -412,12 +412,12 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a for _, tp := range lvg.Spec.ThinPools { lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, tp.Name) if lv != nil { - if !isThinPool(*lv) { - reason.WriteString(fmt.Sprintf("LV %s is already created on the node and it is not a thin-pool", lv.LVName)) + if !isThinPool(lv.Data) { + reason.WriteString(fmt.Sprintf("LV %s is already created on the node and it is not a thin-pool", lv.Data.LVName)) continue } - actualThinPools[lv.LVName] = *lv + actualThinPools[lv.Data.LVName] = lv.Data } } @@ -941,7 +941,7 @@ func ExtendThinPool(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1 metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "lvextend").Inc() if err != nil { metrics.UtilsCommandsErrorsCount(LVMVolumeGroupWatcherCtrlName, "lvextend").Inc() - log.Error(err, fmt.Sprintf("[ExtendThinPool] unable to extend LV, name: %s, cmd: %s", specThinPool.Name, cmd)) + log.Error(err, fmt.Sprintf("[ExtendThinPool] unable to extend LV, name: %s, cmd: %s", specThinPool.Name, cmd)) return err } diff --git a/images/agent/src/pkg/scanner/scanner.go b/images/agent/src/pkg/scanner/scanner.go index dfb78b27..9356ff9f 100644 --- a/images/agent/src/pkg/scanner/scanner.go +++ b/images/agent/src/pkg/scanner/scanner.go @@ -5,10 +5,10 @@ import ( "context" "errors" "fmt" - 
"k8s.io/utils/clock" "time" "github.com/pilebones/go-udev/netlink" + "k8s.io/utils/clock" kubeCtrl "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/reconcile"