From 19aeb2ea688bd330aec67161a61c6e4d5dade5ce Mon Sep 17 00:00:00 2001
From: Viktor Kramarenko <viktor.kramarenko@flant.com>
Date: Tue, 6 Aug 2024 18:49:47 +0300
Subject: [PATCH] final refactoring

Signed-off-by: Viktor Kramarenko <viktor.kramarenko@flant.com>

---
 images/agent/src/cmd/main.go                  |   16 +-
 images/agent/src/config/config.go             |   14 +-
 images/agent/src/config/config_test.go        |   24 +-
 images/agent/src/internal/type.go             |    8 +-
 images/agent/src/pkg/cache/cache.go           |    5 +-
 images/agent/src/pkg/cache/cache_test.go      |    5 +-
 .../agent/src/pkg/controller/block_device.go  |   54 +-
 .../src/pkg/controller/block_device_test.go   |   29 +-
 .../controller/controller_reconcile_test.go   |   15 +-
 .../pkg/controller/controller_suite_test.go   |    8 +-
 .../lvm_logical_volume_bench_test.go          |  246 ---
 .../lvm_logical_volume_extender_watcher.go    |   23 +-
 .../controller/lvm_logical_volume_watcher.go  |   17 +-
 .../lvm_logical_volume_watcher_func.go        |   35 +-
 .../lvm_logical_volume_watcher_test.go        |    9 +-
 .../controller/lvm_volume_group_discover.go   |  111 +-
 .../lvm_volume_group_discover_test.go         |  126 +-
 .../pkg/controller/lvm_volume_group_test.go   |    4 +-
 .../controller/lvm_volume_group_watcher.go    |   20 +-
 .../lvm_volume_group_watcher_func.go          |   72 +-
 .../lvm_volume_group_watcher_test.go          | 1968 ++++++++++-------
 images/agent/src/pkg/kubutils/kubernetes.go   |    1 +
 images/agent/src/pkg/logger/logger.go         |   13 +-
 images/agent/src/pkg/monitoring/monitoring.go |   11 +-
 images/agent/src/pkg/scanner/scanner.go       |   17 +-
 images/agent/src/pkg/utils/commands.go        |   48 +-
 images/agent/src/pkg/utils/commands_test.go   |   10 +-
 .../lvg_conditions_watcher_funcs.go           |    2 -
 .../controller/lvg_conditions_watcher_test.go |   19 +-
 .../src/pkg/controller/sds_infra_watcher.go   |    9 +-
 .../src/pkg/logger/logger.go                  |    7 -
 31 files changed, 1530 insertions(+), 1416 deletions(-)
 delete mode 100644 images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go

diff --git a/images/agent/src/cmd/main.go b/images/agent/src/cmd/main.go
index 7a028a0f..2bf2f2ef 100644
--- a/images/agent/src/cmd/main.go
+++ b/images/agent/src/cmd/main.go
@@ -17,6 +17,11 @@ limitations under the License.
 package main
 
 import (
+	"context"
+	"fmt"
+	"os"
+	goruntime "runtime"
+
 	"agent/config"
 	"agent/pkg/cache"
 	"agent/pkg/controller"
@@ -24,13 +29,7 @@ import (
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/scanner"
-	"context"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"os"
-	goruntime "runtime"
-	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
-
 	v1 "k8s.io/api/core/v1"
 	sv1 "k8s.io/api/storage/v1"
 	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -39,6 +38,7 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 )
 
 var (
@@ -61,7 +61,7 @@ func main() {
 
 	log, err := logger.NewLogger(cfgParams.Loglevel)
 	if err != nil {
-		fmt.Println(fmt.Sprintf("unable to create NewLogger, err: %v", err))
+		fmt.Printf("unable to create NewLogger, err: %v\n", err)
 		os.Exit(1)
 	}
 
@@ -71,7 +71,7 @@ func main() {
 	log.Info("[main] CfgParams has been successfully created")
 	log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevel, cfgParams.Loglevel))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.NodeName, cfgParams.NodeName))
-	log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineId))
+	log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineID))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ScanInterval, cfgParams.BlockDeviceScanIntervalSec.String()))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ThrottleInterval, cfgParams.ThrottleIntervalSec.String()))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.CmdDeadlineDuration, cfgParams.CmdDeadlineDurationSec.String()))
diff --git a/images/agent/src/config/config.go b/images/agent/src/config/config.go
index 416832e7..a657915d 100644
--- a/images/agent/src/config/config.go
+++ b/images/agent/src/config/config.go
@@ -17,16 +17,16 @@ limitations under the License.
 package config
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
 	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
-
 	"strconv"
 	"strings"
 	"time"
+
+	"agent/internal"
+	"agent/pkg/logger"
 )
 
 const (
@@ -42,7 +42,7 @@ const (
 )
 
 type Options struct {
-	MachineId                  string
+	MachineID                  string
 	NodeName                   string
 	Loglevel                   logger.Verbosity
 	MetricsPort                string
@@ -69,11 +69,11 @@ func NewConfig() (*Options, error) {
 		opts.Loglevel = logger.Verbosity(loglevel)
 	}
 
-	machId, err := getMachineId()
+	machID, err := getMachineID()
 	if err != nil {
 		return nil, fmt.Errorf("[NewConfig] unable to get %s, error: %w", MachineID, err)
 	}
-	opts.MachineId = machId
+	opts.MachineID = machID
 
 	opts.MetricsPort = os.Getenv(MetricsPort)
 	if opts.MetricsPort == "" {
@@ -127,7 +127,7 @@ func NewConfig() (*Options, error) {
 	return &opts, nil
 }
 
-func getMachineId() (string, error) {
+func getMachineID() (string, error) {
 	id := os.Getenv(MachineID)
 	if id == "" {
 		args := []string{"-m", "-u", "-i", "-n", "-p", "-t", "1", "cat", "/etc/machine-id"}
diff --git a/images/agent/src/config/config_test.go b/images/agent/src/config/config_test.go
index 19d7dfec..837ea0cb 100644
--- a/images/agent/src/config/config_test.go
+++ b/images/agent/src/config/config_test.go
@@ -18,16 +18,17 @@ package config
 
 import (
 	"fmt"
-	"github.com/stretchr/testify/assert"
 	"os"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestNewConfig(t *testing.T) {
 	t.Run("AllValuesSet_ReturnsNoError", func(t *testing.T) {
 		expNodeName := "test-node"
 		expMetricsPort := ":0000"
-		expMachineId := "test-id"
+		expMachineID := "test-id"
 
 		err := os.Setenv(NodeName, expNodeName)
 		if err != nil {
@@ -37,7 +38,10 @@ func TestNewConfig(t *testing.T) {
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
+		err = os.Setenv(MachineID, expMachineID)
+		if err != nil {
+			t.Error(err)
+		}
 		defer os.Clearenv()
 
 		opts, err := NewConfig()
@@ -45,12 +49,12 @@ func TestNewConfig(t *testing.T) {
 		if assert.NoError(t, err) {
 			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
+			assert.Equal(t, expMachineID, opts.MachineID)
 		}
 	})
 
 	t.Run("NodeNameNotSet_ReturnsError", func(t *testing.T) {
-		machineIdFile := "./host-root/etc/machine-id"
+		machineIDFile := "./host-root/etc/machine-id"
 		expMetricsPort := ":0000"
 		expErrorMsg := fmt.Sprintf("[NewConfig] required %s env variable is not specified", NodeName)
 
@@ -65,7 +69,7 @@ func TestNewConfig(t *testing.T) {
 			t.Error(err)
 		}
 
-		file, err := os.Create(machineIdFile)
+		file, err := os.Create(machineIDFile)
 		if err != nil {
 			t.Error(err)
 		}
@@ -85,7 +89,7 @@ func TestNewConfig(t *testing.T) {
 		assert.EqualError(t, err, expErrorMsg)
 	})
 
-	t.Run("MachineIdNotSet_ReturnsError", func(t *testing.T) {
+	t.Run("MachineIDNotSet_ReturnsError", func(t *testing.T) {
 		expMetricsPort := ":0000"
 		expNodeName := "test-node"
 		expErrorMsg := fmt.Sprintf("[NewConfig] unable to get %s, error: %s",
@@ -108,13 +112,13 @@ func TestNewConfig(t *testing.T) {
 	t.Run("MetricsPortNotSet_ReturnsDefaultPort", func(t *testing.T) {
 		expNodeName := "test-node"
 		expMetricsPort := ":4202"
-		expMachineId := "test-id"
+		expMachineID := "test-id"
 
 		err := os.Setenv(NodeName, expNodeName)
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
+		err = os.Setenv(MachineID, expMachineID)
 		if err != nil {
 			t.Error(err)
 		}
@@ -126,7 +130,7 @@ func TestNewConfig(t *testing.T) {
 		if assert.NoError(t, err) {
 			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
+			assert.Equal(t, expMachineID, opts.MachineID)
 		}
 	})
 }
diff --git a/images/agent/src/internal/type.go b/images/agent/src/internal/type.go
index 24db2ed7..172e1474 100644
--- a/images/agent/src/internal/type.go
+++ b/images/agent/src/internal/type.go
@@ -37,7 +37,7 @@ type BlockDeviceCandidate struct {
 	PkName                string
 	Type                  string
 	FSType                string
-	MachineId             string
+	MachineID             string
 	PartUUID              string
 }
 
@@ -54,7 +54,7 @@ type LVMVolumeGroupCandidate struct {
 	StatusThinPools       []LVMVGStatusThinPool
 	VGSize                resource.Quantity
 	VGFree                resource.Quantity
-	VGUuid                string
+	VGUUID                string
 	Nodes                 map[string][]LVMVGDevice
 }
 
@@ -71,7 +71,7 @@ type LVMVGDevice struct {
 	Path        string
 	PVSize      resource.Quantity
 	DevSize     resource.Quantity
-	PVUuid      string
+	PVUUID      string
 	BlockDevice string
 }
 
@@ -127,7 +127,7 @@ type VGData struct {
 	VGShared string            `json:"vg_shared"`
 	VGSize   resource.Quantity `json:"vg_size"`
 	VGTags   string            `json:"vg_tags"`
-	VGUuid   string            `json:"vg_uuid"`
+	VGUUID   string            `json:"vg_uuid"`
 }
 
 type LVReport struct {
diff --git a/images/agent/src/pkg/cache/cache.go b/images/agent/src/pkg/cache/cache.go
index 78d3ec30..ea4962d4 100644
--- a/images/agent/src/pkg/cache/cache.go
+++ b/images/agent/src/pkg/cache/cache.go
@@ -1,10 +1,11 @@
 package cache
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
 	"bytes"
 	"fmt"
+
+	"agent/internal"
+	"agent/pkg/logger"
 )
 
 type Cache struct {
diff --git a/images/agent/src/pkg/cache/cache_test.go b/images/agent/src/pkg/cache/cache_test.go
index 7d54cdeb..727eac26 100644
--- a/images/agent/src/pkg/cache/cache_test.go
+++ b/images/agent/src/pkg/cache/cache_test.go
@@ -1,10 +1,11 @@
 package cache
 
 import (
-	"agent/internal"
 	"bytes"
-	"github.com/stretchr/testify/assert"
 	"testing"
+
+	"agent/internal"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestCache(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/block_device.go b/images/agent/src/pkg/controller/block_device.go
index 022ead02..44eb552b 100644
--- a/images/agent/src/pkg/controller/block_device.go
+++ b/images/agent/src/pkg/controller/block_device.go
@@ -17,21 +17,21 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"crypto/sha1"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"os"
 	"regexp"
 	"strings"
 	"time"
 
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -54,7 +54,7 @@ func RunBlockDeviceController(
 	cl := mgr.GetClient()
 
 	c, err := controller.New(BlockDeviceCtrlName, mgr, controller.Options{
-		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+		Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
 			log.Info("[RunBlockDeviceController] Reconciler starts BlockDevice resources reconciliation")
 
 			shouldRequeue := BlockDeviceReconcile(ctx, cl, log, metrics, cfg, sdsCache)
@@ -85,7 +85,7 @@ func BlockDeviceReconcile(ctx context.Context, cl kclient.Client, log logger.Log
 	candidates := GetBlockDeviceCandidates(log, cfg, sdsCache)
 	if len(candidates) == 0 {
 		log.Info("[RunBlockDeviceController] no block devices candidates found. Stop reconciliation")
-		return true
+		return false
 	}
 
 	apiBlockDevices, err := GetAPIBlockDevices(ctx, cl, metrics)
@@ -152,7 +152,7 @@ func hasBlockDeviceDiff(res v1alpha1.BlockDeviceStatus, candidate internal.Block
 		candidate.HotPlug != res.HotPlug ||
 		candidate.Type != res.Type ||
 		candidate.FSType != res.FsType ||
-		candidate.MachineId != res.MachineID
+		candidate.MachineID != res.MachineID
 }
 
 func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics) (map[string]v1alpha1.BlockDevice, error) {
@@ -160,10 +160,10 @@ func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitori
 
 	start := time.Now()
 	err := kc.List(ctx, listDevice)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "list").Inc()
 		return nil, fmt.Errorf("unable to kc.List, error: %w", err)
 	}
 
@@ -181,8 +181,8 @@ func RemoveDeprecatedAPIDevices(
 	metrics monitoring.Metrics,
 	candidates []internal.BlockDeviceCandidate,
 	apiBlockDevices map[string]v1alpha1.BlockDevice,
-	nodeName string) {
-
+	nodeName string,
+) {
 	actualCandidates := make(map[string]struct{}, len(candidates))
 	for _, candidate := range candidates {
 		actualCandidates[candidate.Name] = struct{}{}
@@ -260,7 +260,7 @@ func GetBlockDeviceCandidates(log logger.Logger, cfg config.Options, sdsCache *c
 			PkName:     device.PkName,
 			Type:       device.Type,
 			FSType:     device.FSType,
-			MachineId:  cfg.MachineId,
+			MachineID:  cfg.MachineID,
 			PartUUID:   device.PartUUID,
 		}
 
@@ -513,15 +513,15 @@ func UpdateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 		Model:                 candidate.Model,
 		Rota:                  candidate.Rota,
 		HotPlug:               candidate.HotPlug,
-		MachineID:             candidate.MachineId,
+		MachineID:             candidate.MachineID,
 	}
 
 	start := time.Now()
 	err := kc.Update(ctx, &blockDevice)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "update").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "update").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "update").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "update").Inc()
 		return err
 	}
 
@@ -549,16 +549,16 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 			Size:                  *resource.NewQuantity(candidate.Size.Value(), resource.BinarySI),
 			Model:                 candidate.Model,
 			Rota:                  candidate.Rota,
-			MachineID:             candidate.MachineId,
+			MachineID:             candidate.MachineID,
 		},
 	}
 
 	start := time.Now()
 	err := kc.Create(ctx, device)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "create").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "create").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "create").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "create").Inc()
 		return nil, err
 	}
 	return device, nil
@@ -567,10 +567,10 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 func DeleteAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics, device *v1alpha1.BlockDevice) error {
 	start := time.Now()
 	err := kc.Delete(ctx, device)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "delete").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "delete").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "delete").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "delete").Inc()
 		return err
 	}
 	return nil
diff --git a/images/agent/src/pkg/controller/block_device_test.go b/images/agent/src/pkg/controller/block_device_test.go
index 2e85c470..9b6c099a 100644
--- a/images/agent/src/pkg/controller/block_device_test.go
+++ b/images/agent/src/pkg/controller/block_device_test.go
@@ -17,30 +17,31 @@ limitations under the License.
 package controller
 
 import (
+	"bytes"
+	"context"
+	"fmt"
+	"testing"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"bytes"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"github.com/stretchr/testify/assert"
 	errors2 "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"testing"
-
-	"k8s.io/apimachinery/pkg/api/resource"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestBlockDeviceCtrl(t *testing.T) {
+	ctx := context.Background()
 	log, _ := logger.NewLogger("1")
 	cfg := config.Options{
 		NodeName:  "test-node",
-		MachineId: "test-id",
+		MachineID: "test-id",
 	}
 
 	t.Run("shouldDeleteBlockDevice", func(t *testing.T) {
@@ -121,7 +122,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				Model:                 "124124-adf",
 				Name:                  goodName,
 				HotPlug:               false,
-				MachineId:             "1245151241241",
+				MachineID:             "1245151241241",
 			},
 		}
 
@@ -151,7 +152,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 
 		defer func() {
 			for _, bd := range bds {
-				cl.Delete(ctx, &bd)
+				_ = cl.Delete(ctx, &bd)
 			}
 		}()
 
@@ -212,7 +213,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 		assert.Equal(t, 3, len(candidates))
 		for i := range candidates {
 			assert.Equal(t, devices[i].Name, candidates[i].Path)
-			assert.Equal(t, cfg.MachineId, candidates[i].MachineId)
+			assert.Equal(t, cfg.MachineID, candidates[i].MachineID)
 			assert.Equal(t, cfg.NodeName, candidates[i].NodeName)
 		}
 	})
@@ -338,7 +339,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				PkName:                "testPKNAME",
 				Type:                  "testTYPE",
 				FSType:                "testFS",
-				MachineId:             "testMACHINE",
+				MachineID:             "testMACHINE",
 			},
 			// diff state
 			{
@@ -360,7 +361,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				PkName:                "testPKNAME2",
 				Type:                  "testTYPE2",
 				FSType:                "testFS2",
-				MachineId:             "testMACHINE2",
+				MachineID:             "testMACHINE2",
 			},
 		}
 		blockDevice := v1alpha1.BlockDevice{
@@ -450,8 +451,8 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				candidateName := CreateCandidateName(*log, candidate, devices)
 				assert.Equal(t, "dev-98ca88ddaaddec43b1c4894756f4856244985511", candidateName, "device name generated incorrectly")
 			}
-
 		}
+
 		if assert.NoError(t, err) {
 			assert.Equal(t, 7, len(filteredDevices))
 		}
diff --git a/images/agent/src/pkg/controller/controller_reconcile_test.go b/images/agent/src/pkg/controller/controller_reconcile_test.go
index cad019d2..48e6229b 100644
--- a/images/agent/src/pkg/controller/controller_reconcile_test.go
+++ b/images/agent/src/pkg/controller/controller_reconcile_test.go
@@ -17,16 +17,16 @@ limitations under the License.
 package controller_test
 
 import (
+	"context"
+
 	"agent/internal"
 	"agent/pkg/controller"
 	"agent/pkg/monitoring"
-	"context"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 var _ = Describe("Storage Controller", func() {
@@ -54,7 +54,7 @@ var _ = Describe("Storage Controller", func() {
 			PkName:                "/dev/sda14",
 			Type:                  "disk",
 			FSType:                "",
-			MachineId:             "1234",
+			MachineID:             "1234",
 		}
 	)
 
@@ -77,7 +77,7 @@ var _ = Describe("Storage Controller", func() {
 		Expect(blockDevice.Status.Model).To(Equal(candidate.Model))
 		Expect(blockDevice.Status.Type).To(Equal(candidate.Type))
 		Expect(blockDevice.Status.FsType).To(Equal(candidate.FSType))
-		Expect(blockDevice.Status.MachineID).To(Equal(candidate.MachineId))
+		Expect(blockDevice.Status.MachineID).To(Equal(candidate.MachineID))
 	})
 
 	It("GetAPIBlockDevices", func() {
@@ -111,7 +111,7 @@ var _ = Describe("Storage Controller", func() {
 			PkName:                "/dev/sda14",
 			Type:                  "disk",
 			FSType:                "",
-			MachineId:             "1234",
+			MachineID:             "1234",
 		}
 
 		resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics)
@@ -147,6 +147,7 @@ var _ = Describe("Storage Controller", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		devices, err := controller.GetAPIBlockDevices(context.Background(), cl, testMetrics)
+		Expect(err).NotTo(HaveOccurred())
 		for name := range devices {
 			Expect(name).NotTo(Equal(deviceName))
 		}
diff --git a/images/agent/src/pkg/controller/controller_suite_test.go b/images/agent/src/pkg/controller/controller_suite_test.go
index a4f27c0d..9ebb5197 100644
--- a/images/agent/src/pkg/controller/controller_suite_test.go
+++ b/images/agent/src/pkg/controller/controller_suite_test.go
@@ -17,17 +17,15 @@ limitations under the License.
 package controller_test
 
 import (
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"testing"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
 func TestController(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
deleted file mode 100644
index eb53e209..00000000
--- a/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package controller
-
-import (
-	"agent/internal"
-	"agent/pkg/kubutils"
-	"context"
-	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	v1 "k8s.io/api/core/v1"
-	sv1 "k8s.io/api/storage/v1"
-	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	apiruntime "k8s.io/apimachinery/pkg/runtime"
-	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"os"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"testing"
-)
-
-var (
-	lvgName  = "hdd-lvg-on-node-0"
-	poolName = "hhd-thin"
-	lvCount  = 600
-	size     = "1Gi"
-
-	resizeOn = false
-
-	ctx   = context.Background()
-	e2eCL client.Client
-
-	resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{
-		v1alpha1.AddToScheme,
-		clientgoscheme.AddToScheme,
-		extv1.AddToScheme,
-		v1.AddToScheme,
-		sv1.AddToScheme,
-	}
-)
-
-func BenchmarkRunThickLLVCreationSingleThread(b *testing.B) {
-	b.Logf("starts the test")
-	llvNames := make(map[string]bool, lvCount)
-
-	b.StartTimer()
-	for i := 0; i < lvCount; i++ {
-		llv := configureTestThickLLV(fmt.Sprintf("test-llv-%d", i), lvgName)
-		err := e2eCL.Create(ctx, llv)
-		if err != nil {
-			b.Logf("unable to create test LLV %s, err: %s", llv.Name, err.Error())
-		}
-		llvNames[llv.Name] = false
-	}
-	lvCreatedTime := b.Elapsed().Seconds()
-
-	succeeded := 0
-	for succeeded < len(llvNames) {
-		llvs, err := getAllLLV(ctx, e2eCL)
-		if err != nil {
-			b.Error(err)
-			continue
-		}
-
-		for llvName, created := range llvNames {
-			if llv, found := llvs[llvName]; found {
-				if llv.Status != nil {
-					b.Logf("LV %s status %s", llvName, llv.Status.Phase)
-				}
-				if err != nil {
-					b.Logf("can't check LLV %s llv", llvName)
-					continue
-				}
-
-				if llv.Status != nil &&
-					llv.Status.Phase == LLVStatusPhaseCreated &&
-					!created {
-					succeeded++
-					llvNames[llvName] = true
-
-					if resizeOn {
-						add, err := resource.ParseQuantity("1G")
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						lvsize := resource.MustParse(llv.Spec.Size)
-						lvsize.Add(add)
-						llv.Spec.Size = lvsize.String()
-						err = e2eCL.Update(ctx, &llv)
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						b.Logf("resize for LV %s succeeded", llvName)
-					}
-				}
-			}
-
-		}
-	}
-	b.Logf("[TIME] LLV resources were configured for %f", lvCreatedTime)
-	b.Logf("All LLV were created for %f. Ends the test", b.Elapsed().Seconds())
-}
-
-func BenchmarkRunThinLLVCreationSingleThread(b *testing.B) {
-	b.Logf("starts thin test")
-	llvNames := make(map[string]bool, lvCount)
-
-	b.StartTimer()
-	for i := 0; i < lvCount; i++ {
-		llv := configureTestThinLLV(fmt.Sprintf("test-llv-%d", i), lvgName, poolName)
-		err := e2eCL.Create(ctx, llv)
-		if err != nil {
-			b.Logf("unable to create test LLV %s, err: %s", llv.Name, err.Error())
-			continue
-		}
-		llvNames[llv.Name] = false
-	}
-	createdTime := b.Elapsed().Seconds()
-
-	succeeded := 0
-	for succeeded < len(llvNames) {
-		llvs, err := getAllLLV(ctx, e2eCL)
-		if err != nil {
-			b.Error(err)
-			continue
-		}
-
-		for llvName, visited := range llvNames {
-			if llv, found := llvs[llvName]; found {
-				if llv.Status != nil {
-					b.Logf("LV %s status %s", llvName, llv.Status.Phase)
-				}
-				if err != nil {
-					b.Logf("can't check LLV %s llv", llvName)
-					continue
-				}
-
-				if llv.Status != nil &&
-					llv.Status.Phase == LLVStatusPhaseCreated &&
-					!visited {
-					succeeded++
-					llvNames[llvName] = true
-
-					if resizeOn {
-						add, err := resource.ParseQuantity("1G")
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						lvsize := resource.MustParse(llv.Spec.Size)
-						lvsize.Add(add)
-						llv.Spec.Size = lvsize.String()
-						err = e2eCL.Update(ctx, &llv)
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						b.Logf("resize for LV %s succeeded", llvName)
-					}
-				}
-			}
-
-		}
-	}
-	b.Logf("All LLV were configured for %f. Ends the test", createdTime)
-	b.Logf("All LLV were created in %f. Ends the test", b.Elapsed().Seconds())
-}
-
-func getAllLLV(ctx context.Context, cl client.Client) (map[string]v1alpha1.LVMLogicalVolume, error) {
-	list := &v1alpha1.LVMLogicalVolumeList{}
-	err := cl.List(ctx, list)
-	if err != nil {
-		return nil, err
-	}
-
-	res := make(map[string]v1alpha1.LVMLogicalVolume, len(list.Items))
-	for _, lv := range list.Items {
-		res[lv.Name] = lv
-	}
-
-	return res, nil
-}
-
-func configureTestThickLLV(name, lvgName string) *v1alpha1.LVMLogicalVolume {
-	return &v1alpha1.LVMLogicalVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       name,
-			Finalizers: []string{internal.SdsNodeConfiguratorFinalizer},
-		},
-		Spec: v1alpha1.LVMLogicalVolumeSpec{
-			ActualLVNameOnTheNode: name,
-			Type:                  Thick,
-			Size:                  size,
-			LvmVolumeGroupName:    lvgName,
-		},
-	}
-}
-
-func configureTestThinLLV(name, lvgName, poolName string) *v1alpha1.LVMLogicalVolume {
-	return &v1alpha1.LVMLogicalVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       name,
-			Finalizers: []string{internal.SdsNodeConfiguratorFinalizer},
-		},
-		Spec: v1alpha1.LVMLogicalVolumeSpec{
-			ActualLVNameOnTheNode: name,
-			Type:                  Thin,
-			Size:                  size,
-			LvmVolumeGroupName:    lvgName,
-			Thin:                  &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: poolName},
-		},
-	}
-}
-
-func init() {
-	config, err := kubutils.KubernetesDefaultConfigCreate()
-	if err != nil {
-		fmt.Println(err.Error())
-		os.Exit(1)
-	}
-
-	scheme := runtime.NewScheme()
-	for _, f := range resourcesSchemeFuncs {
-		err := f(scheme)
-		if err != nil {
-			fmt.Println(err.Error())
-			os.Exit(1)
-		}
-	}
-
-	options := client.Options{
-		Scheme: scheme,
-	}
-
-	e2eCL, err = client.New(config, options)
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-}
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
index bf5d2ec5..27edcb4b 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
@@ -1,20 +1,22 @@
 package controller
 
 import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -22,7 +24,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
-	"time"
 )
 
 const (
@@ -79,13 +80,13 @@ func RunLVMLogicalVolumeExtenderWatcherController(
 	}
 
 	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got a Create event for the LVMVolumeGroup %s", e.Object.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] added the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got an Update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}
 			q.Add(request)
@@ -124,7 +125,7 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m
 	log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name))
 	llvs, err := getAllLLVsWithPercentSize(ctx, cl, lvg.Name)
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources"))
+		log.Error(err, "[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources")
 		return true
 	}
 	log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got LLV resources for the LVMVolumeGroup %s", lvg.Name))
@@ -181,6 +182,12 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m
 
 		log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should be extended from %s to %s size", llv.Name, llv.Status.ActualSize.String(), llvRequestedSize.String()))
 		err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseResizing, "")
+		if err != nil {
+			log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name))
+			shouldRetry = true
+			continue
+		}
+
 		cmd, err := utils.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode)
 		if err != nil {
 			log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd))
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
index dcb55e1d..bf574c6b 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
@@ -1,21 +1,22 @@
 package controller
 
 import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/google/go-cmp/cmp"
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -139,7 +140,7 @@ func RunLVMLogicalVolumeWatcherController(
 				}
 			}
 			if shouldRequeue {
-				log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues were occured while reconciliation the LVMLogicalVolume %s. Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String()))
+				log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues occurred while reconciling the LVMLogicalVolume %s. Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String()))
 				return reconcile.Result{RequeueAfter: cfg.LLVRequeueIntervalSec}, nil
 			}
 
@@ -155,14 +156,14 @@ func RunLVMLogicalVolumeWatcherController(
 	}
 
 	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMLogicalVolume{}, handler.TypedFuncs[*v1alpha1.LVMLogicalVolume]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.RateLimitingInterface) {
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got a create event for the LVMLogicalVolume: %s", e.Object.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] added the request of the LVMLogicalVolume %s to Reconciler", e.Object.GetName()))
 		},
 
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got an update event for the LVMLogicalVolume: %s", e.ObjectNew.GetName()))
 			// TODO: Figure out how to log it in our logger.
 			if cfg.Loglevel == "4" {
@@ -333,7 +334,7 @@ func reconcileLLVUpdateFunc(
 		log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name))
 		return false, err
 	}
-	log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] sucessfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String()))
+	log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String()))
 
 	if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) {
 		log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String()))
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
index 69ad055f..a494afce 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
@@ -1,18 +1,18 @@
 package controller
 
 import (
-	"agent/pkg/cache"
 	"context"
 	"fmt"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"strings"
 
 	"agent/internal"
+	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/strings/slices"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -37,13 +37,10 @@ func identifyReconcileFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.L
 }
 
 func shouldReconcileByDeleteFunc(llv *v1alpha1.LVMLogicalVolume) bool {
-	if llv.DeletionTimestamp == nil {
-		return false
-	}
-
-	return true
+	return llv.DeletionTimestamp != nil
 }
 
+//nolint:unparam
 func checkIfConditionIsTrue(lvg *v1alpha1.LvmVolumeGroup, conType string) bool {
 	// this check prevents infinite resource updating after a retry
 	for _, c := range lvg.Status.Conditions {
@@ -128,7 +125,7 @@ func checkIfLVBelongsToLLV(llv *v1alpha1.LVMLogicalVolume, lv *internal.LVData)
 func updateLLVPhaseToCreatedIfNeeded(ctx context.Context, cl client.Client, llv *v1alpha1.LVMLogicalVolume, actualSize resource.Quantity) (bool, error) {
 	var contiguous *bool
 	if llv.Spec.Thick != nil {
-		if *llv.Spec.Thick.Contiguous == true {
+		if *llv.Spec.Thick.Contiguous {
 			contiguous = llv.Spec.Thick.Contiguous
 		}
 	}
@@ -208,11 +205,7 @@ func shouldReconcileByCreateFunc(sdsCache *cache.Cache, vgName string, llv *v1al
 	}
 
 	lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode)
-	if lv != nil {
-		return false
-	}
-
-	return true
+	return lv == nil
 }
 
 func getFreeLVGSpaceForLLV(lvg *v1alpha1.LvmVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity {
@@ -303,10 +296,8 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol
 	if lv != nil {
 		if len(lv.LVAttr) == 0 {
 			reason.WriteString(fmt.Sprintf("LV %s was found on the node, but can't be validated due to its attributes is empty string. ", lv.LVName))
-		} else {
-			if !checkIfLVBelongsToLLV(llv, lv) {
-				reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it is doesnt match the one on the node.", lv.LVName))
-			}
+		} else if !checkIfLVBelongsToLLV(llv, lv) {
+			reason.WriteString(fmt.Sprintf("Specified LV %s is already created and does not match the one on the node.", lv.LVName))
 		}
 	}
 
@@ -317,7 +308,7 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol
 	return true, ""
 }
 
-func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error {
+func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, _ monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error {
 	if llv.Status != nil &&
 		llv.Status.Phase == phase &&
 		llv.Status.Reason == reason {
@@ -342,7 +333,7 @@ func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client,
 	return nil
 }
 
-func updateLVMLogicalVolumeSpec(ctx context.Context, metrics monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error {
+func updateLVMLogicalVolumeSpec(ctx context.Context, _ monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error {
 	return cl.Update(ctx, llv)
 }
 
@@ -352,11 +343,7 @@ func shouldReconcileByUpdateFunc(sdsCache *cache.Cache, vgName string, llv *v1al
 	}
 
 	lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode)
-	if lv == nil {
-		return false
-	}
-
-	return true
+	return lv != nil
 }
 
 func isContiguous(llv *v1alpha1.LVMLogicalVolume) bool {
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
index 395a8db2..31b8e034 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
@@ -1,15 +1,16 @@
 package controller
 
 import (
+	"bytes"
+	"context"
+	"testing"
+
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"bytes"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"testing"
-
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/api/resource"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -22,6 +23,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 		log     = logger.Logger{}
 		metrics = monitoring.Metrics{}
 		vgName  = "test-vg"
+		ctx     = context.Background()
 	)
 
 	t.Run("subtractQuantity_returns_correct_value", func(t *testing.T) {
@@ -212,7 +214,6 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				assert.Equal(t, "No LV name specified. Zero size for LV. No thin pool specified. ", r)
 			}
 		})
-
 	})
 
 	t.Run("getThinPoolAvailableSpace", func(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
index 502176b4..d5816965 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
@@ -17,20 +17,20 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"errors"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"strconv"
 	"strings"
 	"time"
 
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -54,12 +54,12 @@ func RunLVMVolumeGroupDiscoverController(
 	cl := mgr.GetClient()
 
 	c, err := controller.New(LVMVolumeGroupDiscoverCtrlName, mgr, controller.Options{
-		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+		Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
 			log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler starts LVMVolumeGroup resources reconciliation")
 
 			shouldRequeue := LVMVolumeGroupDiscoverReconcile(ctx, cl, metrics, log, cfg, sdsCache)
 			if shouldRequeue {
-				log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occured while run the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String()))
+				log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occurred while running the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String()))
 				return reconcile.Result{
 					RequeueAfter: cfg.VolumeGroupScanIntervalSec,
 				}, nil
@@ -161,7 +161,6 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met
 			}
 
 			log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LvmVolumeGroup, name: "%s"`, lvg.Name))
-
 		} else {
 			log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", lvg.Name))
 			lvm, err := CreateLVMVolumeGroupByCandidate(ctx, log, metrics, cl, candidate)
@@ -208,7 +207,6 @@ func filterLVGsByNode(
 	blockDevices map[string]v1alpha1.BlockDevice,
 	currentNode string,
 ) map[string]v1alpha1.LvmVolumeGroup {
-
 	filtered := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgs))
 	blockDevicesNodes := make(map[string]string, len(blockDevices))
 
@@ -278,14 +276,14 @@ func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LvmVolumeGroup, candi
 		log.Trace(fmt.Sprintf("Resource ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String()))
 	}
 	log.Trace(fmt.Sprintf(`VGSize, candidate: %s, lvg: %s`, candidate.VGSize.String(), lvg.Status.VGSize.String()))
-	log.Trace(fmt.Sprintf(`VGUuid, candidate: %s, lvg: %s`, candidate.VGUuid, lvg.Status.VGUuid))
+	log.Trace(fmt.Sprintf(`VGUUID, candidate: %s, lvg: %s`, candidate.VGUUID, lvg.Status.VGUuid))
 	log.Trace(fmt.Sprintf(`Nodes, candidate: %+v, lvg: %+v`, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes))
 
 	return candidate.AllocatedSize.Value() != lvg.Status.AllocatedSize.Value() ||
 		hasStatusPoolDiff(convertedStatusPools, lvg.Status.ThinPools) ||
 		candidate.VGSize.Value() != lvg.Status.VGSize.Value() ||
 		candidate.VGFree.Value() != lvg.Status.VGFree.Value() ||
-		candidate.VGUuid != lvg.Status.VGUuid ||
+		candidate.VGUUID != lvg.Status.VGUuid ||
 		hasStatusNodesDiff(log, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes)
 }
 
@@ -374,15 +372,12 @@ func ReconcileUnhealthyLVMVolumeGroups(
 					if candidateTp, exist := candidateTPs[thinPool.Name]; !exist {
 						log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, thinPool.Name))
 						messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. ", thinPool.Name))
-					} else {
+					} else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) &&
+						candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() {
 						// that means thin-pool is not 100%VG space
 						// use candidate VGSize as lvg.Status.VGSize might not be updated yet
-						if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) {
-							if candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() {
-								log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
-								messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
-							}
-						}
+						log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
+						messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
 					}
 				}
 			}
@@ -450,7 +445,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 
 	lvs, lvErrs := sdsCache.GetLVs()
 	var thinPools []internal.LVData
-	if lvs != nil && len(lvs) > 0 {
+	if len(lvs) > 0 {
 		// Filter LV to get only thin pools as we do not support thick for now.
 		thinPools = getThinPools(lvs)
 	}
@@ -471,9 +466,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 	sortedLVByThinPool := sortLVByThinPool(lvs)
 
 	for _, vg := range vgWithTag {
-		allocateSize := vg.VGSize
-		allocateSize.Sub(vg.VGFree)
-
+		allocateSize := getVGAllocatedSize(vg)
 		health, message := checkVGHealth(sortedBDs, vgIssues, pvIssues, lvIssues, vg)
 
 		candidate := internal.LVMVolumeGroupCandidate{
@@ -489,7 +482,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 			StatusThinPools:       getStatusThinPools(log, sortedThinPools, sortedLVByThinPool, vg, lvIssues),
 			VGSize:                *resource.NewQuantity(vg.VGSize.Value(), resource.BinarySI),
 			VGFree:                *resource.NewQuantity(vg.VGFree.Value(), resource.BinarySI),
-			VGUuid:                vg.VGUuid,
+			VGUUID:                vg.VGUUID,
 			Nodes:                 configureCandidateNodeDevices(sortedPVs, sortedBDs, vg, currentNode),
 		}
 
@@ -499,22 +492,28 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 	return candidates, nil
 }
 
+func getVGAllocatedSize(vg internal.VGData) resource.Quantity {
+	allocatedSize := vg.VGSize
+	allocatedSize.Sub(vg.VGFree)
+	return allocatedSize
+}
+
 func checkVGHealth(blockDevices map[string][]v1alpha1.BlockDevice, vgIssues map[string]string, pvIssues map[string][]string, lvIssues map[string]map[string]string, vg internal.VGData) (health, message string) {
 	issues := make([]string, 0, len(vgIssues)+len(pvIssues)+len(lvIssues)+1)
 
-	if bds, exist := blockDevices[vg.VGName+vg.VGUuid]; !exist || len(bds) == 0 {
-		issues = append(issues, fmt.Sprintf("[ERROR] Unable to get BlockDevice resources for VG, name: %s ; uuid: %s", vg.VGName, vg.VGUuid))
+	if bds, exist := blockDevices[vg.VGName+vg.VGUUID]; !exist || len(bds) == 0 {
+		issues = append(issues, fmt.Sprintf("[ERROR] Unable to get BlockDevice resources for VG, name: %s ; uuid: %s", vg.VGName, vg.VGUUID))
 	}
 
-	if vgIssue, exist := vgIssues[vg.VGName+vg.VGUuid]; exist {
+	if vgIssue, exist := vgIssues[vg.VGName+vg.VGUUID]; exist {
 		issues = append(issues, vgIssue)
 	}
 
-	if pvIssue, exist := pvIssues[vg.VGName+vg.VGUuid]; exist {
+	if pvIssue, exist := pvIssues[vg.VGName+vg.VGUUID]; exist {
 		issues = append(issues, strings.Join(pvIssue, ""))
 	}
 
-	if lvIssue, exist := lvIssues[vg.VGName+vg.VGUuid]; exist {
+	if lvIssue, exist := lvIssues[vg.VGName+vg.VGUUID]; exist {
 		for lvName, issue := range lvIssue {
 			issues = append(issues, fmt.Sprintf("%s: %s", lvName, issue))
 		}
@@ -553,10 +552,8 @@ func sortThinPoolIssuesByVG(log logger.Logger, lvs []internal.LVData) map[string
 
 		if err != nil {
 			log.Error(err, fmt.Sprintf(`[sortThinPoolIssuesByVG] unable to run lvs command for lv, name: "%s"`, lv.LVName))
-			//lvIssuesByVG[lv.VGName+lv.VGUuid] = append(lvIssuesByVG[lv.VGName+lv.VGUuid], err.Error())
 			lvIssuesByVG[lv.VGName+lv.VGUuid] = make(map[string]string, len(lvs))
 			lvIssuesByVG[lv.VGName+lv.VGUuid][lv.LVName] = err.Error()
-
 		}
 
 		if stdErr.Len() != 0 {
@@ -599,12 +596,12 @@ func sortVGIssuesByVG(log logger.Logger, vgs []internal.VGData) map[string]strin
 		log.Debug(fmt.Sprintf("[sortVGIssuesByVG] runs cmd: %s", cmd))
 		if err != nil {
 			log.Error(err, fmt.Sprintf(`[sortVGIssuesByVG] unable to run vgs command for vg, name: "%s"`, vg.VGName))
-			vgIssues[vg.VGName+vg.VGUuid] = err.Error()
+			vgIssues[vg.VGName+vg.VGUUID] = err.Error()
 		}
 
 		if stdErr.Len() != 0 {
 			log.Error(fmt.Errorf(stdErr.String()), fmt.Sprintf(`[sortVGIssuesByVG] vgs command for vg "%s" has stderr: `, vg.VGName))
-			vgIssues[vg.VGName+vg.VGUuid] = stdErr.String()
+			vgIssues[vg.VGName+vg.VGUUID] = stdErr.String()
 			stdErr.Reset()
 		}
 	}
@@ -627,7 +624,7 @@ func sortLVByThinPool(lvs []internal.LVData) map[string][]internal.LVData {
 func sortThinPoolsByVG(lvs []internal.LVData, vgs []internal.VGData) map[string][]internal.LVData {
 	result := make(map[string][]internal.LVData, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]internal.LVData, 0, len(lvs))
+		result[vg.VGName+vg.VGUUID] = make([]internal.LVData, 0, len(lvs))
 	}
 
 	for _, lv := range lvs {
@@ -642,7 +639,7 @@ func sortThinPoolsByVG(lvs []internal.LVData, vgs []internal.VGData) map[string]
 func sortPVsByVG(pvs []internal.PVData, vgs []internal.VGData) map[string][]internal.PVData {
 	result := make(map[string][]internal.PVData, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]internal.PVData, 0, len(pvs))
+		result[vg.VGName+vg.VGUUID] = make([]internal.PVData, 0, len(pvs))
 	}
 
 	for _, pv := range pvs {
@@ -657,7 +654,7 @@ func sortPVsByVG(pvs []internal.PVData, vgs []internal.VGData) map[string][]inte
 func sortBlockDevicesByVG(bds map[string]v1alpha1.BlockDevice, vgs []internal.VGData) map[string][]v1alpha1.BlockDevice {
 	result := make(map[string][]v1alpha1.BlockDevice, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]v1alpha1.BlockDevice, 0, len(bds))
+		result[vg.VGName+vg.VGUUID] = make([]v1alpha1.BlockDevice, 0, len(bds))
 	}
 
 	for _, bd := range bds {
@@ -670,8 +667,8 @@ func sortBlockDevicesByVG(bds map[string]v1alpha1.BlockDevice, vgs []internal.VG
 }
 
 func configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[string][]v1alpha1.BlockDevice, vg internal.VGData, currentNode string) map[string][]internal.LVMVGDevice {
-	filteredPV := pvs[vg.VGName+vg.VGUuid]
-	filteredBds := bds[vg.VGName+vg.VGUuid]
+	filteredPV := pvs[vg.VGName+vg.VGUUID]
+	filteredBds := bds[vg.VGName+vg.VGUUID]
 	bdPathStatus := make(map[string]v1alpha1.BlockDevice, len(bds))
 	result := make(map[string][]internal.LVMVGDevice, len(filteredPV))
 
@@ -683,7 +680,7 @@ func configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[str
 		device := internal.LVMVGDevice{
 			Path:   pv.PVName,
 			PVSize: *resource.NewQuantity(pv.PVSize.Value(), resource.BinarySI),
-			PVUuid: pv.PVUuid,
+			PVUUID: pv.PVUuid,
 		}
 
 		if bd, exist := bdPathStatus[pv.PVName]; exist {
@@ -706,7 +703,7 @@ func getVgType(vg internal.VGData) string {
 }
 
 func getSpecThinPools(thinPools map[string][]internal.LVData, vg internal.VGData) map[string]resource.Quantity {
-	lvs := thinPools[vg.VGName+vg.VGUuid]
+	lvs := thinPools[vg.VGName+vg.VGUUID]
 	tps := make(map[string]resource.Quantity, len(lvs))
 
 	for _, lv := range lvs {
@@ -729,7 +726,7 @@ func getThinPools(lvs []internal.LVData) []internal.LVData {
 }
 
 func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]internal.LVData, vg internal.VGData, lvIssues map[string]map[string]string) []internal.LVMVGStatusThinPool {
-	tps := thinPools[vg.VGName+vg.VGUuid]
+	tps := thinPools[vg.VGName+vg.VGUUID]
 	result := make([]internal.LVMVGStatusThinPool, 0, len(tps))
 
 	for _, thinPool := range tps {
@@ -749,7 +746,7 @@ func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]int
 			Message:       "",
 		}
 
-		if lverrs, exist := lvIssues[vg.VGName+vg.VGUuid][thinPool.LVName]; exist {
+		if lverrs, exist := lvIssues[vg.VGName+vg.VGUUID][thinPool.LVName]; exist {
 			tp.Ready = false
 			tp.Message = lverrs
 		}
@@ -795,7 +792,7 @@ func isThinPool(lv internal.LVData) bool {
 }
 
 func getBlockDevicesNames(bds map[string][]v1alpha1.BlockDevice, vg internal.VGData) []string {
-	sorted := bds[vg.VGName+vg.VGUuid]
+	sorted := bds[vg.VGName+vg.VGUUID]
 	names := make([]string, 0, len(sorted))
 
 	for _, bd := range sorted {
@@ -834,7 +831,7 @@ func CreateLVMVolumeGroupByCandidate(
 			Nodes:         convertLVMVGNodes(candidate.Nodes),
 			ThinPools:     thinPools,
 			VGSize:        candidate.VGSize,
-			VGUuid:        candidate.VGUuid,
+			VGUuid:        candidate.VGUUID,
 			VGFree:        candidate.VGFree,
 		},
 	}
@@ -851,10 +848,10 @@ func CreateLVMVolumeGroupByCandidate(
 
 	start := time.Now()
 	err = kc.Create(ctx, lvmVolumeGroup)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
 		return nil, fmt.Errorf("unable to create LVMVolumeGroup, err: %w", err)
 	}
 
@@ -910,14 +907,14 @@ func UpdateLVMVolumeGroupByCandidate(
 	lvg.Status.ThinPools = thinPools
 	lvg.Status.VGSize = candidate.VGSize
 	lvg.Status.VGFree = candidate.VGFree
-	lvg.Status.VGUuid = candidate.VGUuid
+	lvg.Status.VGUuid = candidate.VGUUID
 
 	start := time.Now()
 	err = cl.Status().Update(ctx, lvg)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
 		return fmt.Errorf(`[UpdateLVMVolumeGroupByCandidate] unable to update LVMVolumeGroup, name: "%s", err: %w`, lvg.Name, err)
 	}
 
@@ -950,7 +947,7 @@ func convertLVMVGDevices(devices []internal.LVMVGDevice) []v1alpha1.LvmVolumeGro
 			BlockDevice: dev.BlockDevice,
 			DevSize:     dev.DevSize,
 			PVSize:      dev.PVSize,
-			PVUuid:      dev.PVUuid,
+			PVUuid:      dev.PVUUID,
 			Path:        dev.Path,
 		})
 	}
@@ -1020,7 +1017,7 @@ func getThinPoolSpaceWithAllocationLimit(actualSize resource.Quantity, allocatio
 	}
 
 	factor := float64(percent)
-	factor = factor / 100
+	factor /= 100
 
 	return *resource.NewQuantity(int64(float64(actualSize.Value())*factor), resource.BinarySI), nil
 }
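The `factor /= 100` form keeps the original arithmetic: an AllocationLimit such as "150%" scales the thin-pool's actual size by 1.5. A minimal sketch of that calculation, assuming the percent has already been parsed out of the spec string; the helper name spaceWithAllocationLimit is hypothetical and not part of the codebase:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/api/resource"
    )

    // spaceWithAllocationLimit is a hypothetical helper mirroring the calculation
    // above: an allocation limit given in percent scales the thin-pool's actual size.
    func spaceWithAllocationLimit(actualSize resource.Quantity, percent int) resource.Quantity {
    	factor := float64(percent)
    	factor /= 100 // e.g. 150 -> 1.5
    	return *resource.NewQuantity(int64(float64(actualSize.Value())*factor), resource.BinarySI)
    }

    func main() {
    	size := resource.MustParse("1G")
    	limited := spaceWithAllocationLimit(size, 150)
    	fmt.Println(limited.String()) // prints the value in bytes: 1500000000, i.e. 1.5G
    }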
@@ -1034,10 +1031,10 @@ func GetAPILVMVolumeGroups(ctx context.Context, kc kclient.Client, metrics monit
 
 	start := time.Now()
 	err := kc.List(ctx, lvgList)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
 		return nil, fmt.Errorf("[GetAPILVMVolumeGroups] unable to list LvmVolumeGroups, err: %w", err)
 	}
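The Api* → API* renames are purely a naming fix (Go initialism convention); the instrumentation pattern around every Kubernetes API call stays the same. A hedged sketch of that pattern, where observeAPICall is a hypothetical wrapper that does not exist in the codebase:

    package controller

    import (
    	"time"

    	"agent/pkg/monitoring"
    )

    // observeAPICall is a hypothetical wrapper illustrating the pattern used above:
    // record duration and execution count for every call, and bump the error
    // counter only when the call fails.
    func observeAPICall(metrics monitoring.Metrics, controllerName, method string, call func() error) error {
    	start := time.Now()
    	err := call()
    	metrics.APIMethodsDuration(controllerName, method).Observe(metrics.GetEstimatedTimeInSeconds(start))
    	metrics.APIMethodsExecutionCount(controllerName, method).Inc()
    	if err != nil {
    		metrics.APIMethodsErrors(controllerName, method).Inc()
    	}
    	return err
    }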
 
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
index 2b0f3ead..166a8ee9 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
@@ -17,15 +17,15 @@ limitations under the License.
 package controller
 
 import (
+	"context"
+	"testing"
+
 	"agent/internal"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
-	"context"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	"testing"
-
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -73,15 +73,15 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("checkVGHealth_returns_Operational", func(t *testing.T) {
 		const (
 			vgName = "testVg"
-			vgUuid = "testUuid"
+			vgUUID = "testUuid"
 		)
 		bds := map[string][]v1alpha1.BlockDevice{
-			vgName + vgUuid: {{}},
+			vgName + vgUUID: {{}},
 		}
 		vgIssues := map[string]string{}
 		pvIssues := map[string][]string{}
 		lvIssues := map[string]map[string]string{}
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		health, _ := checkVGHealth(bds, vgIssues, pvIssues, lvIssues, vg)
 		assert.Equal(t, health, internal.LVMVGHealthOperational)
@@ -90,15 +90,15 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("checkVGHealth_returns_NonOperational", func(t *testing.T) {
 		const (
 			vgName = "testVg"
-			vgUuid = "testUuid"
+			vgUUID = "testUuid"
 		)
 		bds := map[string][]v1alpha1.BlockDevice{
-			vgName + vgUuid: {},
+			vgName + vgUUID: {},
 		}
 		vgIssues := map[string]string{}
 		pvIssues := map[string][]string{}
 		lvIssues := map[string]map[string]string{}
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		health, _ := checkVGHealth(bds, vgIssues, pvIssues, lvIssues, vg)
 		assert.Equal(t, health, internal.LVMVGHealthNonOperational)
@@ -125,37 +125,37 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortPVsByVG_returns_sorted_pvs", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		pvs := []internal.PVData{
 			{
 				PVName: "first",
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUuid: firstVgUUID,
 			},
 			{
 				PVName: "second",
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUuid: secondVgUUID,
 			},
 		}
 
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 
 		expected := map[string][]internal.PVData{
-			firstVgName + firstVgUuid:   {pvs[0]},
-			secondVgName + secondVgUuid: {pvs[1]},
+			firstVgName + firstVgUUID:   {pvs[0]},
+			secondVgName + secondVgUUID: {pvs[1]},
 		}
 
 		actual := sortPVsByVG(pvs, vgs)
@@ -165,18 +165,18 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortBlockDevicesByVG_returns_sorted_bds", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 
@@ -185,21 +185,21 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{Name: "first"},
 				Status: v1alpha1.BlockDeviceStatus{
 					ActualVGNameOnTheNode: firstVgName,
-					VGUuid:                firstVgUuid,
+					VGUuid:                firstVgUUID,
 				},
 			},
 			"second": {
 				ObjectMeta: metav1.ObjectMeta{Name: "second"},
 				Status: v1alpha1.BlockDeviceStatus{
 					ActualVGNameOnTheNode: secondVgName,
-					VGUuid:                secondVgUuid,
+					VGUuid:                secondVgUUID,
 				},
 			},
 		}
 
 		expected := map[string][]v1alpha1.BlockDevice{
-			firstVgName + firstVgUuid:   {bds["first"]},
-			secondVgName + secondVgUuid: {bds["second"]},
+			firstVgName + firstVgUUID:   {bds["first"]},
+			secondVgName + secondVgUUID: {bds["second"]},
 		}
 
 		actual := sortBlockDevicesByVG(bds, vgs)
@@ -209,35 +209,35 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortLVsByVG_returns_sorted_LVs", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 		lvs := []internal.LVData{
 			{
 				LVName: "first",
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUuid: firstVgUUID,
 			},
 			{
 				LVName: "second",
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUuid: secondVgUUID,
 			},
 		}
 		expected := map[string][]internal.LVData{
-			firstVgName + firstVgUuid:   {lvs[0]},
-			secondVgName + secondVgUuid: {lvs[1]},
+			firstVgName + firstVgUUID:   {lvs[0]},
+			secondVgName + secondVgUUID: {lvs[1]},
 		}
 
 		actual := sortThinPoolsByVG(lvs, vgs)
@@ -247,16 +247,19 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("configureCandidateNodesDevices_returns_candidates_nodes", func(t *testing.T) {
 		const (
 			vgName   = "test_vg"
-			vgUuid   = "vg_uuid"
+			vgUUID   = "vg_uuid"
 			nodeName = "test_node"
 		)
 
 		vg := internal.VGData{
 			VGName: vgName,
-			VGUuid: vgUuid,
+			VGUUID: vgUUID,
 		}
 
 		size10G, err := resource.ParseQuantity("10G")
+		if err != nil {
+			t.Error(err)
+		}
 		size1G, err := resource.ParseQuantity("1G")
 		if err != nil {
 			t.Error(err)
@@ -268,13 +271,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				PVSize: size10G,
 				PVUuid: "pv_uuid1",
 				VGName: vgName,
-				VGUuid: vgUuid,
+				VGUuid: vgUUID,
 			},
 			{
 				PVName: "test_pv2",
 				PVSize: size1G,
 				PVUuid: "pv_uuid2",
-				VGUuid: vgUuid,
+				VGUuid: vgUUID,
 				VGName: vgName,
 			},
 		}
@@ -285,7 +288,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Status: v1alpha1.BlockDeviceStatus{
 					Path:                  "test_pv1",
 					Size:                  resource.MustParse("10G"),
-					VGUuid:                vgUuid,
+					VGUuid:                vgUUID,
 					ActualVGNameOnTheNode: vgName,
 				},
 			},
@@ -294,7 +297,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Status: v1alpha1.BlockDeviceStatus{
 					Path:                  "test_pv2",
 					Size:                  resource.MustParse("1G"),
-					VGUuid:                vgUuid,
+					VGUuid:                vgUUID,
 					ActualVGNameOnTheNode: vgName,
 				},
 			},
@@ -306,20 +309,20 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 					Path:        "test_pv1",
 					PVSize:      *resource.NewQuantity(size10G.Value(), resource.BinarySI),
 					DevSize:     *resource.NewQuantity(size10G.Value(), resource.BinarySI),
-					PVUuid:      "pv_uuid1",
+					PVUUID:      "pv_uuid1",
 					BlockDevice: "block_device1",
 				},
 				{
 					Path:        "test_pv2",
 					PVSize:      *resource.NewQuantity(size1G.Value(), resource.BinarySI),
 					DevSize:     *resource.NewQuantity(size1G.Value(), resource.BinarySI),
-					PVUuid:      "pv_uuid2",
+					PVUUID:      "pv_uuid2",
 					BlockDevice: "block_device2",
 				},
 			},
 		}
-		mp := map[string][]v1alpha1.BlockDevice{vgName + vgUuid: bds}
-		ar := map[string][]internal.PVData{vgName + vgUuid: pvs}
+		mp := map[string][]v1alpha1.BlockDevice{vgName + vgUUID: bds}
+		ar := map[string][]internal.PVData{vgName + vgUUID: pvs}
 
 		actual := configureCandidateNodeDevices(ar, mp, vg, nodeName)
 
@@ -347,7 +350,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 		vgs := []internal.VGData{
 			{
 				VGName: "firstVG",
-				VGUuid: "firstUUID",
+				VGUUID: "firstUUID",
 			},
 		}
 		actual := sortBlockDevicesByVG(bds, vgs)
@@ -378,19 +381,22 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("getSpecThinPools_returns_LVName_LVSize_map", func(t *testing.T) {
 		const (
 			vgName = "test_vg"
-			vgUuid = "test_uuid"
+			vgUUID = "test_uuid"
 		)
 
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		firstSize, err := resource.ParseQuantity("1G")
+		if err != nil {
+			t.Error(err)
+		}
 		secondSize, err := resource.ParseQuantity("2G")
 		if err != nil {
 			t.Error(err)
 		}
 
 		thinPools := map[string][]internal.LVData{
-			vgName + vgUuid: {
+			vgName + vgUUID: {
 				{
 					LVName: "first",
 					LVSize: firstSize,
@@ -419,10 +425,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Type                  = "local"
 			Health                = internal.LVMVGHealthOperational
 			Message               = "No problems detected"
-			VGUuid                = "test_uuid"
+			VGUUID                = "test_uuid"
 		)
 
 		size10G, err := resource.ParseQuantity("10G")
+		if err != nil {
+			t.Error(err)
+		}
 		size1G, err := resource.ParseQuantity("1G")
 		if err != nil {
 			t.Error(err)
@@ -448,7 +457,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 						Path:        "test/path",
 						PVSize:      size1G,
 						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid",
+						PVUUID:      "test-pv-uuid",
 						BlockDevice: "test-device",
 					},
 				},
@@ -466,7 +475,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Message:               Message,
 			StatusThinPools:       statusThinPools,
 			VGSize:                size10G,
-			VGUuid:                VGUuid,
+			VGUUID:                VGUUID,
 			Nodes:                 nodes,
 		}
 
@@ -491,7 +500,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Nodes:         convertLVMVGNodes(nodes),
 				ThinPools:     thinPools,
 				VGSize:        size10G,
-				VGUuid:        VGUuid,
+				VGUuid:        VGUUID,
 			},
 		}
 
@@ -556,6 +565,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 		err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, "test-node")
 		if assert.NoError(t, err) {
 			actual, err = GetAPILVMVolumeGroups(ctx, cl, metrics)
+			assert.NoError(t, err)
 			assert.Equal(t, 0, len(actual))
 		}
 	})
@@ -718,7 +728,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("hasLVMVolumeGroupDiff", func(t *testing.T) {
 		t.Run("should_return_false", func(t *testing.T) {
 			size10G, err := resource.ParseQuantity("10G")
+			if err != nil {
+				t.Error(err)
+			}
 			size1G, err := resource.ParseQuantity("1G")
+			if err != nil {
+				t.Error(err)
+			}
 			size13G, err := resource.ParseQuantity("13G")
 			if err != nil {
 				t.Error(err)
@@ -755,7 +771,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 							Path:        "/test/ds",
 							PVSize:      size1G,
 							DevSize:     size13G,
-							PVUuid:      "testUUID",
+							PVUUID:      "testUUID",
 							BlockDevice: "something",
 						},
 					},
@@ -821,14 +837,14 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 							Path:        "/test/ds",
 							PVSize:      size1G,
 							DevSize:     size13G,
-							PVUuid:      "testUUID",
+							PVUUID:      "testUUID",
 							BlockDevice: "something",
 						},
 						{
 							Path:        "/test/ds2",
 							PVSize:      size1G,
 							DevSize:     size13G,
-							PVUuid:      "testUUID2",
+							PVUUID:      "testUUID2",
 							BlockDevice: "something2",
 						},
 					},
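The added `if err != nil { t.Error(err) }` blocks close the previously ignored errors from resource.ParseQuantity. For constant test inputs, resource.MustParse (already used elsewhere in these tests) is an equally valid way to avoid the repeated checks; a small hypothetical test as a sketch of that alternative:

    package controller

    import (
    	"testing"

    	"k8s.io/apimachinery/pkg/api/resource"
    )

    // TestQuantitySketch is a hypothetical test: MustParse panics on a malformed
    // constant, so no per-call error handling is needed for literal sizes.
    func TestQuantitySketch(t *testing.T) {
    	size10G := resource.MustParse("10G")
    	size1G := resource.MustParse("1G")
    	if size10G.Value() <= size1G.Value() {
    		t.Fatalf("expected 10G (%d) to be larger than 1G (%d)", size10G.Value(), size1G.Value())
    	}
    }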
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_test.go b/images/agent/src/pkg/controller/lvm_volume_group_test.go
index cd8c4d31..f055f60f 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_test.go
@@ -18,11 +18,11 @@ package controller
 
 import (
 	"encoding/json"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"testing"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
index e80ea32b..27264150 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
@@ -17,19 +17,18 @@ limitations under the License.
 package controller
 
 import (
+	"context"
+	"fmt"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	errors2 "k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -143,9 +142,8 @@ func RunLVMVolumeGroupWatcherController(
 				if err != nil {
 					log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name))
 					return reconcile.Result{}, err
-				} else {
-					log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name))
 				}
+				log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name))
 
 				return reconcile.Result{}, nil
 			}
@@ -193,7 +191,7 @@ func RunLVMVolumeGroupWatcherController(
 	}
 
 	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc got a create event for the LVMVolumeGroup, name: %s", e.Object.GetName()))
 
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
@@ -201,7 +199,7 @@ func RunLVMVolumeGroupWatcherController(
 
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] UpdateFunc got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			if !shouldLVGWatcherReconcileUpdateEvent(log, e.ObjectOld, e.ObjectNew) {
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update event for the LVMVolumeGroup %s should not be reconciled as no target changes were made", e.ObjectNew.Name))
@@ -279,7 +277,7 @@ func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Lo
 	}
 
 	log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] check if VG %s of the LVMVolumeGroup %s uses LVs", lvg.Spec.ActualVGNameOnTheNode, lvg.Name))
-	usedLVs := checkIfVGHasLV(sdsCache, lvg.Spec.ActualVGNameOnTheNode)
+	usedLVs := getLVForVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode)
 	if len(usedLVs) > 0 {
 		err := fmt.Errorf("VG %s uses LVs: %v. Delete used LVs first", lvg.Spec.ActualVGNameOnTheNode, usedLVs)
 		log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to reconcile LVG %s", lvg.Name))
@@ -337,7 +335,7 @@ func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Lo
 
 	log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name))
 	pvs, _ := sdsCache.GetPVs()
-	valid, reason := validateLVGForUpdateFunc(log, sdsCache, lvg, blockDevices, pvs)
+	valid, reason := validateLVGForUpdateFunc(log, sdsCache, lvg, blockDevices)
 	if !valid {
 		log.Warning(fmt.Sprintf("[reconcileLVGUpdateFunc] the LVMVolumeGroup %s is not valid", lvg.Name))
 		err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason)
@@ -352,7 +350,7 @@ func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Lo
 	log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to get VG %s for the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name))
 	found, vg := tryGetVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode)
 	if !found {
-		err := errors.New(fmt.Sprintf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode))
+		err := fmt.Errorf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode)
 		log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile the LVMVolumeGroup %s", lvg.Name))
 		err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error())
 		if err != nil {
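Replacing errors.New(fmt.Sprintf(...)) with fmt.Errorf is the idiomatic one-step form and also allows wrapping an underlying cause with %w where one exists. A minimal, self-contained sketch (the values are hypothetical, for illustration only):

    package main

    import (
    	"errors"
    	"fmt"
    )

    func main() {
    	vgName := "data-vg" // hypothetical value
    	notFound := fmt.Errorf("VG %s not found", vgName)
    	wrapped := fmt.Errorf("unable to reconcile the LVMVolumeGroup: %w", notFound)
    	fmt.Println(errors.Is(wrapped, notFound)) // true: %w keeps the cause inspectable
    }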
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
index a87e907c..5a4b37ba 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
@@ -17,23 +17,23 @@ limitations under the License.
 package controller
 
 import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
 	"agent/internal"
 	"agent/pkg/cache"
 	"agent/pkg/logger"
 	"agent/pkg/monitoring"
 	"agent/pkg/utils"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/strings/slices"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -51,10 +51,10 @@ func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logg
 	if len(lvg.Status.Nodes) == 0 {
 		start := time.Now()
 		err := cl.Delete(ctx, lvg)
-		metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
-		metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
+		metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
+		metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
 		if err != nil {
-			metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
+			metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
 			return err
 		}
 		log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name))
@@ -106,11 +106,7 @@ func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1a
 }
 
 func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LvmVolumeGroup) bool {
-	if lvg.DeletionTimestamp != nil {
-		return true
-	}
-
-	return false
+	return lvg.DeletionTimestamp != nil
 }
 
 func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, status v1.ConditionStatus, conType, reason, message string) error {
@@ -260,11 +256,7 @@ func validateSpecBlockDevices(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[str
 
 func checkIfLVGBelongsToNode(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, nodeName string) bool {
 	bd := blockDevices[lvg.Spec.BlockDeviceNames[0]]
-	if bd.Status.NodeName != nodeName {
-		return false
-	}
-
-	return true
+	return bd.Status.NodeName == nodeName
 }
 
 func extractPathsFromBlockDevices(blockDevicesNames []string, blockDevices map[string]v1alpha1.BlockDevice) []string {
@@ -366,9 +358,9 @@ func validateLVGForCreateFunc(log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, b
 	return true, ""
 }
 
-func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, pvs []internal.PVData) (bool, string) {
+func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) {
 	reason := strings.Builder{}
-
+	pvs, _ := sdsCache.GetPVs()
 	log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] check if every new BlockDevice of the LVMVolumeGroup %s is consumable", lvg.Name))
 	actualPVPaths := make(map[string]struct{}, len(pvs))
 	for _, pv := range pvs {
@@ -440,9 +432,10 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a
 			return false, reason.String()
 		}
 
+		newTotalVGSize := resource.NewQuantity(vg.VGSize.Value()+additionBlockDeviceSpace, resource.BinarySI)
 		for _, specTp := range lvg.Spec.ThinPools {
 			// there might be a case when the thin-pool is already created but not yet shown in the status
-			tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, vg.VGSize)
+			tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, *newTotalVGSize)
 			if err != nil {
 				reason.WriteString(err.Error())
 				continue
@@ -454,7 +447,7 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a
 			}
 
 			log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s thin-pool %s requested size %s, Status VG size %s", lvg.Name, specTp.Name, tpRequestedSize.String(), lvg.Status.VGSize.String()))
-			switch utils.AreSizesEqualWithinDelta(tpRequestedSize, lvg.Status.VGSize, internal.ResizeDelta) {
+			switch utils.AreSizesEqualWithinDelta(tpRequestedSize, *newTotalVGSize, internal.ResizeDelta) {
 			// means a user wants 100% of VG space
 			case true:
 				hasFullThinPool = true
@@ -484,7 +477,8 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a
 		}
 
 		if !hasFullThinPool {
-			totalFreeSpace := lvg.Status.VGSize.Value() - lvg.Status.AllocatedSize.Value() + additionBlockDeviceSpace
+			allocatedSize := getVGAllocatedSize(*vg)
+			totalFreeSpace := newTotalVGSize.Value() - allocatedSize.Value()
 			log.Trace(fmt.Sprintf("[validateLVGForUpdateFunc] new LVMVolumeGroup %s thin-pools requested %d size, additional BlockDevices space %d, total: %d", lvg.Name, addingThinPoolSize, additionBlockDeviceSpace, totalFreeSpace))
 			if addingThinPoolSize != 0 && addingThinPoolSize+internal.ResizeDelta.Value() > totalFreeSpace {
 				reason.WriteString("Added thin-pools requested sizes are more than allowed free space in VG.")
@@ -520,11 +514,8 @@ func shouldReconcileLVGByCreateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cach
 		return false
 	}
 
-	if vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode); vg != nil {
-		return false
-	}
-
-	return true
+	vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode)
+	return vg == nil
 }
 
 func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cache) bool {
@@ -532,11 +523,8 @@ func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cach
 		return false
 	}
 
-	if vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode); vg != nil {
-		return true
-	}
-
-	return false
+	vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode)
+	return vg != nil
 }
 
 func ReconcileThinPoolsIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, vg internal.VGData, lvs []internal.LVData) error {
@@ -737,7 +725,7 @@ func removeLVGFinalizerIfExist(ctx context.Context, cl client.Client, lvg *v1alp
 	return true, nil
 }
 
-func checkIfVGHasLV(ch *cache.Cache, vgName string) []string {
+func getLVForVG(ch *cache.Cache, vgName string) []string {
 	lvs, _ := ch.GetLVs()
 	usedLVs := make([]string, 0, len(lvs))
 	for _, lv := range lvs {
@@ -755,10 +743,10 @@ func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring
 	err := cl.Get(ctx, client.ObjectKey{
 		Name: name,
 	}, obj)
-	metrics.ApiMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc()
 		return nil, err
 	}
 	return obj, nil
@@ -811,7 +799,7 @@ func DeleteVGIfExist(log logger.Logger, metrics monitoring.Metrics, sdsCache *ca
 	return nil
 }
 
-func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, VGName string, log logger.Logger) error {
+func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, vgName string, log logger.Logger) error {
 	for _, pvPath := range extendPVs {
 		start := time.Now()
 		command, err := utils.CreatePV(pvPath)
@@ -826,7 +814,7 @@ func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, VGName stri
 	}
 
 	start := time.Now()
-	command, err := utils.ExtendVG(VGName, extendPVs)
+	command, err := utils.ExtendVG(vgName, extendPVs)
 	metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgextend").Observe(metrics.GetEstimatedTimeInSeconds(start))
 	metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgextend").Inc()
 	log.Debug(command)
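validateLVGForUpdateFunc now reads the PVs from the cache itself and validates thin-pool requests against the VG size the group will have once the newly added block devices are counted in, rather than against the current Status.VGSize. A sketch of just that space check, with hypothetical parameter names:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/api/resource"
    )

    // thinPoolsFit is a hypothetical helper mirroring the check above: requested
    // thin-pool additions must fit into the VG size after extension, minus what is
    // already allocated, within the resize delta.
    func thinPoolsFit(vgSize, allocated resource.Quantity, additionBlockDeviceSpace, addingThinPoolSize int64, resizeDelta resource.Quantity) bool {
    	newTotalVGSize := resource.NewQuantity(vgSize.Value()+additionBlockDeviceSpace, resource.BinarySI)
    	totalFreeSpace := newTotalVGSize.Value() - allocated.Value()
    	return addingThinPoolSize == 0 || addingThinPoolSize+resizeDelta.Value() <= totalFreeSpace
    }

    func main() {
    	vgSize := resource.MustParse("1G")   // current VG size
    	allocated := resource.MustParse("0") // nothing allocated yet
    	delta := resource.MustParse("32Mi")  // stand-in for internal.ResizeDelta
    	addedBD := int64(2 << 30)            // a new 2Gi block device is being added
    	newThin := int64(2<<30 + 1<<29)      // a 2.5Gi thin-pool is requested
    	fmt.Println(thinPoolsFit(vgSize, allocated, addedBD, newThin, delta)) // true: it fits after extension
    }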
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go
index f2f7ce33..95461801 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go
@@ -1,809 +1,1163 @@
 package controller
 
-//
-//import (
-//	"context"
-//	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-//	"agent/pkg/monitoring"
-//	"testing"
-//
-//	"github.com/stretchr/testify/assert"
-//	v1 "k8s.io/api/core/v1"
-//	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-//)
-//
-//func TestLVMVolumeGroupWatcherCtrl(t *testing.T) {
-//	e2eCL := NewFakeClient()
-//	ctx := context.Background()
-//	metrics := monitoring.GetMetrics("")
-//	namespace := "test"
-//
-//	t.Run("getLVMVolumeGroup_lvg_exists_returns_correct", func(t *testing.T) {
-//		const name = "test_name"
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		actual, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//
-//		if assert.NoError(t, err) {
-//			assert.NotNil(t, actual)
-//			assert.Equal(t, name, actual.Name)
-//			assert.Equal(t, namespace, actual.Namespace)
-//		}
-//	})
-//
-//	t.Run("getLVMVolumeGroup_lvg_doesnt_exist_returns_nil", func(t *testing.T) {
-//		const name = "test_name"
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		actual, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, "another-name")
-//
-//		if assert.EqualError(t, err, "lvmvolumegroups.storage.deckhouse.io \"another-name\" not found") {
-//			assert.Nil(t, actual)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_new_old_health_is_operational_doesnt_update_returns_nil", func(t *testing.T) {
-//		const (
-//			name    = "test_name"
-//			message = "All good"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  Operational,
-//				Message: message,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, Operational, oldLvg.Status.Health)
-//			assert.Equal(t, message, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, "new message", Operational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, Operational, updatedLvg.Status.Health)
-//			assert.Equal(t, message, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_new_old_health_and_messages_are_the_same_doesnt_updates_returns_nil", func(t *testing.T) {
-//		const (
-//			name    = "test_name"
-//			message = "All bad"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  NonOperational,
-//				Message: message,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, NonOperational, oldLvg.Status.Health)
-//			assert.Equal(t, message, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, message, NonOperational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, NonOperational, updatedLvg.Status.Health)
-//			assert.Equal(t, message, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_new_old_health_are_nonoperational_different_messages_are_updates_message_returns_nil", func(t *testing.T) {
-//		const (
-//			name       = "test_name"
-//			oldMessage = "All bad1"
-//			newMessage = "All bad2"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  NonOperational,
-//				Message: oldMessage,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, NonOperational, oldLvg.Status.Health)
-//			assert.Equal(t, oldMessage, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, newMessage, NonOperational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, NonOperational, updatedLvg.Status.Health)
-//			assert.Equal(t, newMessage, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_old_health_is_nonoperational_new_health_is_operational_updates_health_and_message_returns_nil", func(t *testing.T) {
-//		const (
-//			name       = "test_name"
-//			oldMessage = "All bad"
-//			newMessage = "All good"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  NonOperational,
-//				Message: oldMessage,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, NonOperational, oldLvg.Status.Health)
-//			assert.Equal(t, oldMessage, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, newMessage, Operational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, Operational, updatedLvg.Status.Health)
-//			assert.Equal(t, newMessage, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("getBlockDevice_bd_exists_returns_correct_one", func(t *testing.T) {
-//		const name = "test_name"
-//
-//		testObj := &v1alpha1.BlockDevice{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		bd, err := getBlockDevice(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, bd.Name)
-//			assert.Equal(t, namespace, bd.Namespace)
-//		}
-//	})
-//
-//	t.Run("getBlockDevice_bd_doesnt_exists_returns_nil", func(t *testing.T) {
-//		const name = "test_name"
-//
-//		testObj := &v1alpha1.BlockDevice{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		bd, err := getBlockDevice(ctx, e2eCL, metrics, namespace, "another-name")
-//		if assert.EqualError(t, err, "blockdevices.storage.deckhouse.io \"another-name\" not found") {
-//			assert.Nil(t, bd)
-//		}
-//	})
-//
-//	t.Run("ValidateLVMGroup_lvg_is_nil_returns_error", func(t *testing.T) {
-//		valid, obj, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, nil, "test_ns", "test_node")
-//		assert.False(t, valid)
-//		assert.Nil(t, obj)
-//		assert.EqualError(t, err, "lvmVolumeGroup is nil")
-//	})
-//
-//	t.Run("ValidateLVMGroup_type_local_selected_absent_bds_validation_fails", func(t *testing.T) {
-//		const lvgName = "test_name"
-//
-//		lvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      lvgName,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{"test_bd"},
-//				Type:             Local,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, lvg)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, lvg)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		valid, status, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, lvg, namespace, "test_node")
-//		assert.False(t, valid)
-//		if assert.NotNil(t, status) {
-//			assert.Equal(t, NonOperational, status.Health)
-//			assert.EqualError(t, err, "error getBlockDevice: blockdevices.storage.deckhouse.io \"test_bd\" not found")
-//		}
-//	})
-//
-//	t.Run("ValidateLVMGroup_type_local_selected_bds_from_different_nodes_validation_fails", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: testNode,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: "another_node",
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Local,
-//			},
-//		}
-//
-//		err = e2eCL.Create(ctx, testLvg)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testLvg)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		valid, status, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, testLvg, namespace, testNode)
-//		assert.False(t, valid)
-//		if assert.NotNil(t, status) {
-//			assert.Equal(t, NonOperational, status.Health)
-//			assert.Equal(t, "selected block devices are from different nodes for local LVMVolumeGroup", status.Message)
-//		}
-//		assert.Nil(t, err)
-//	})
-//
-//	t.Run("ValidateLVMGroup_type_local_validation_passes", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: testNode,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: testNode,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames:      []string{firstBd, secondBd},
-//				Type:                  Local,
-//				ActualVGNameOnTheNode: "some-vg",
-//			},
-//		}
-//
-//		err = e2eCL.Create(ctx, testLvg)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testLvg)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		valid, status, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, testLvg, namespace, testNode)
-//		assert.True(t, valid)
-//		if assert.NotNil(t, status) {
-//			assert.Equal(t, "", status.Health)
-//			assert.Equal(t, "", status.Message)
-//		}
-//		assert.Nil(t, err)
-//	})
-//
-//	t.Run("CreateEventLVMVolumeGroup_creates_event", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			nodeName = "test_node"
-//		)
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			TypeMeta: metav1.TypeMeta{
-//				Kind: "test_kind",
-//			},
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//				UID:       "test_UUID",
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{"absent_bd"},
-//				Type:             Local,
-//			},
-//		}
-//
-//		err := CreateEventLVMVolumeGroup(ctx, e2eCL, metrics, EventReasonDeleting, EventActionDeleting, nodeName, testLvg)
-//		if assert.NoError(t, err) {
-//			events := &v1.EventList{}
-//			err = e2eCL.List(ctx, events)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//
-//			if assert.Equal(t, 1, len(events.Items)) {
-//				event := events.Items[0]
-//
-//				assert.Equal(t, testLvg.Name+"-", event.GenerateName)
-//				assert.Equal(t, nameSpaceEvent, event.Namespace)
-//				assert.Equal(t, EventReasonDeleting, event.Reason)
-//				assert.Equal(t, testLvg.Name, event.InvolvedObject.Name)
-//				assert.Equal(t, testLvg.Kind, event.InvolvedObject.Kind)
-//				assert.Equal(t, testLvg.UID, event.InvolvedObject.UID)
-//				assert.Equal(t, "apiextensions.k8s.io/v1", event.InvolvedObject.APIVersion)
-//				assert.Equal(t, v1.EventTypeNormal, event.Type)
-//				assert.Equal(t, EventActionDeleting, event.Action)
-//				assert.Equal(t, nodeName, event.ReportingInstance)
-//				assert.Equal(t, LVMVolumeGroupWatcherCtrlName, event.ReportingController)
-//				assert.Equal(t, "Event Message", event.Message)
-//
-//				err = e2eCL.Delete(ctx, &event)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}
-//		}
-//	})
-//
-//	t.Run("ValidateConsumableDevices_validation_passes", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Shared,
-//			},
-//		}
-//
-//		passed, err := ValidateConsumableDevices(ctx, e2eCL, metrics, testLvg)
-//		if assert.NoError(t, err) {
-//			assert.True(t, passed)
-//		}
-//	})
-//
-//	t.Run("ValidateConsumableDevices_validation_fails", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: false,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Shared,
-//			},
-//		}
-//
-//		passed, err := ValidateConsumableDevices(ctx, e2eCL, metrics, testLvg)
-//		if assert.NoError(t, err) {
-//			assert.False(t, passed)
-//		}
-//	})
-//
-//	t.Run("ValidateConsumableDevices_lvg_is_nil_validation_fails", func(t *testing.T) {
-//		passed, err := ValidateConsumableDevices(ctx, e2eCL, metrics, nil)
-//		if assert.EqualError(t, err, "lvmVolumeGroup is nil") {
-//			assert.False(t, passed)
-//		}
-//	})
-//
-//	t.Run("GetPathsConsumableDevicesFromLVMVG_lvg_is_nil_returns_error", func(t *testing.T) {
-//		paths, err := GetPathsConsumableDevicesFromLVMVG(ctx, e2eCL, metrics, nil)
-//
-//		if assert.EqualError(t, err, "lvmVolumeGroup is nil") {
-//			assert.Nil(t, paths)
-//		}
-//	})
-//
-//	t.Run("GetPathsConsumableDevicesFromLVMVG_lvg_is_nil_returns_error", func(t *testing.T) {
-//		const (
-//			name       = "test_name"
-//			firstBd    = "first"
-//			secondBd   = "second"
-//			testNode   = "test_node"
-//			firstPath  = "first_path"
-//			secondPath = "second_path"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: false,
-//						Path:       firstPath,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//						Path:       secondPath,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Shared,
-//			},
-//		}
-//
-//		expected := []string{firstPath, secondPath}
-//
-//		actual, err := GetPathsConsumableDevicesFromLVMVG(ctx, e2eCL, metrics, testLvg)
-//		if assert.NoError(t, err) {
-//			assert.ElementsMatch(t, expected, actual)
-//		}
-//
-//	})
-//}
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	errors2 "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/strings/slices"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestLVMVolumeGroupWatcherCtrl(t *testing.T) {
+	cl := NewFakeClient()
+	ctx := context.Background()
+	log := logger.Logger{}
+	metrics := monitoring.GetMetrics("")
+
+	t.Run("validateLVGForUpdateFunc", func(t *testing.T) {
+		t.Run("without_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+				Status: v1alpha1.LvmVolumeGroupStatus{
+					Phase:                "",
+					Conditions:           nil,
+					ThinPoolReady:        "",
+					ConfigurationApplied: "",
+					VGFree:               resource.Quantity{},
+				},
+			}
+
+			// so the second block device is the new one
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+
+			valid, reason := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("without_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: false,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+				Status: v1alpha1.LvmVolumeGroupStatus{
+					Phase:                "",
+					Conditions:           nil,
+					ThinPoolReady:        "",
+					ConfigurationApplied: "",
+					VGFree:               resource.Quantity{},
+				},
+			}
+
+			// so the second block device is the new one
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+
+			// the new block device is not consumable
+			valid, _ := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			assert.False(t, valid)
+		})
+
+		t.Run("with_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+
+				vgName = "test-vg"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("2G"),
+						Consumable: true,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Name:            "new-thin",
+							Size:            "2.5G",
+							AllocationLimit: "150%",
+						},
+					},
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+
+			// so the second block device is the new one
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+					VGSize: resource.MustParse("1G"),
+					VGFree: resource.MustParse("1G"),
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			valid, reason := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("with_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+
+				vgName = "test-vg"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("2G"),
+						Consumable: true,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Name:            "new-thin",
+							Size:            "4G",
+							AllocationLimit: "150%",
+						},
+					},
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+
+			// only the first path is stored in the cache as a PV, so the second block device counts as a new one
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+					VGSize: resource.MustParse("1G"),
+					VGFree: resource.MustParse("1G"),
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
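+			// the thin pool requests 4G, which exceeds the roughly 3G the VG would have, so validation is expected to fail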
+			valid, _ := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			assert.False(t, valid)
+		})
+	})
+
+	t.Run("validateLVGForCreateFunc", func(t *testing.T) {
+		t.Run("without_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+			}
+
+			valid, reason := validateLVGForCreateFunc(log, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("without_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+			}
+
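+			// the spec references two block devices but only one is present in the map, so validation is expected to fail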
+			valid, _ := validateLVGForCreateFunc(log, lvg, bds)
+			assert.False(t, valid)
+		})
+
+		t.Run("with_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Size: "1G",
+						},
+					},
+				},
+			}
+
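+			// the 1G thin pool fits into the 2G provided by the block devices, so validation is expected to pass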
+			valid, reason := validateLVGForCreateFunc(log, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("with_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Size: "3G",
+						},
+					},
+				},
+			}
+
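+			// the 3G thin pool exceeds the 2G provided by the block devices, so validation is expected to fail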
+			valid, _ := validateLVGForCreateFunc(log, lvg, bds)
+			assert.False(t, valid)
+		})
+	})
+
+	t.Run("identifyLVGReconcileFunc", func(t *testing.T) {
+		t.Run("returns_create", func(t *testing.T) {
+			const vgName = "test-vg"
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+
+			ch := cache.New()
+
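+			// the cache contains no VG with the target name, so a Create reconcile is expected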
+			actual := identifyLVGReconcileFunc(lvg, ch)
+			assert.Equal(t, CreateReconcile, actual)
+		})
+
+		t.Run("returns_update", func(t *testing.T) {
+			const vgName = "test-vg"
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+				},
+			}
+
+			ch := cache.New()
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			actual := identifyLVGReconcileFunc(lvg, ch)
+			assert.Equal(t, UpdateReconcile, actual)
+		})
+
+		t.Run("returns_delete", func(t *testing.T) {
+			const vgName = "test-vg"
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+			lvg.DeletionTimestamp = &v1.Time{}
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+				},
+			}
+
+			ch := cache.New()
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			actual := identifyLVGReconcileFunc(lvg, ch)
+			assert.Equal(t, DeleteReconcile, actual)
+		})
+	})
+
+	t.Run("removeLVGFinalizerIfExist", func(t *testing.T) {
+		t.Run("not_exist_no_remove", func(t *testing.T) {
+			lvg := &v1alpha1.LvmVolumeGroup{}
+
+			removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			assert.False(t, removed)
+		})
+
+		t.Run("does_exist_remove", func(t *testing.T) {
+			const lvgName = "test-lvg"
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Finalizers = append(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer)
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			defer func() {
+				err = cl.Delete(ctx, lvg)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+
+			removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			if assert.True(t, removed) {
+				updatedLVG := &v1alpha1.LvmVolumeGroup{}
+				err = cl.Get(ctx, client.ObjectKey{
+					Name: lvgName,
+				}, updatedLVG)
+				if err != nil {
+					t.Error(err)
+				}
+
+				assert.False(t, slices.Contains(updatedLVG.Finalizers, internal.SdsNodeConfiguratorFinalizer))
+			}
+		})
+	})
+
+	t.Run("getLVForVG", func(t *testing.T) {
+		const (
+			firstLV  = "first"
+			secondLV = "second"
+			vgName   = "test-vg"
+		)
+		lvs := []internal.LVData{
+			{
+				LVName: firstLV,
+				VGName: vgName,
+			},
+			{
+				LVName: secondLV,
+				VGName: "other",
+			},
+		}
+
+		ch := cache.New()
+		ch.StoreLVs(lvs, bytes.Buffer{})
+		expected := []string{firstLV}
+
+		actual := getLVForVG(ch, vgName)
+
+		assert.ElementsMatch(t, expected, actual)
+	})
+
+	t.Run("countVGSizeByBlockDevices", func(t *testing.T) {
+		const (
+			firstBd  = "first"
+			secondBd = "second"
+		)
+		bds := map[string]v1alpha1.BlockDevice{
+			firstBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: firstBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Size: resource.MustParse("1G"),
+				},
+			},
+			secondBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: secondBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Size: resource.MustParse("1G"),
+				},
+			},
+		}
+		lvg := &v1alpha1.LvmVolumeGroup{
+			Spec: v1alpha1.LvmVolumeGroupSpec{
+				BlockDeviceNames: []string{firstBd, secondBd},
+			},
+		}
+
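+		// the expected VG size is the sum of the two 1G block devices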
+		expected := resource.MustParse("2G")
+
+		actual := countVGSizeByBlockDevices(lvg, bds)
+		assert.Equal(t, expected.Value(), actual.Value())
+	})
+
+	t.Run("getRequestedSizeFromString", func(t *testing.T) {
+		t.Run("for_percent_size", func(t *testing.T) {
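+			// 50% of the 10G target size should resolve to 5G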
+			actual, err := getRequestedSizeFromString("50%", resource.MustParse("10G"))
+			if err != nil {
+				t.Error(err)
+			}
+
+			expected := resource.MustParse("5G")
+			assert.Equal(t, expected.Value(), actual.Value())
+		})
+
+		t.Run("for_number_size", func(t *testing.T) {
+			actual, err := getRequestedSizeFromString("5G", resource.MustParse("10G"))
+			if err != nil {
+				t.Error(err)
+			}
+
+			expected := resource.MustParse("5G")
+			assert.Equal(t, expected.Value(), actual.Value())
+		})
+	})
+
+	t.Run("extractPathsFromBlockDevices", func(t *testing.T) {
+		const (
+			firstBd  = "first"
+			secondBd = "second"
+
+			firstPath  = "first-path"
+			secondPath = "second-path"
+		)
+		bdNames := []string{firstBd, secondBd}
+		bds := map[string]v1alpha1.BlockDevice{
+			firstBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: firstBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Path: firstPath,
+				},
+			},
+			secondBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: secondBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Path: secondPath,
+				},
+			},
+		}
+
+		expected := []string{firstPath, secondPath}
+		actual := extractPathsFromBlockDevices(bdNames, bds)
+		assert.ElementsMatch(t, expected, actual)
+	})
+
+	t.Run("validateSpecBlockDevices", func(t *testing.T) {
+		t.Run("validation_passes", func(t *testing.T) {
+			const (
+				nodeName = "nodeName"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{
+						"first", "second",
+					},
+				},
+			}
+
+			bds := map[string]v1alpha1.BlockDevice{
+				"first": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "first",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+
+				"second": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "second",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+			}
+
+			valid, reason := validateSpecBlockDevices(lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("validation_fails_due_to_bd_does_not_exist", func(t *testing.T) {
+			const (
+				nodeName = "nodeName"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{
+						"first", "second",
+					},
+				},
+			}
+
+			bds := map[string]v1alpha1.BlockDevice{
+				"first": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "first",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+			}
+
+			valid, _ := validateSpecBlockDevices(lvg, bds)
+			assert.False(t, valid)
+		})
+
+		t.Run("validation_fails_due_to_bd_has_dif_node", func(t *testing.T) {
+			const (
+				nodeName = "nodeName"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{
+						"first", "second",
+					},
+				},
+			}
+
+			bds := map[string]v1alpha1.BlockDevice{
+				"first": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "first",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+				"second": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "second",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: "another-node",
+					},
+				},
+			}
+
+			valid, _ := validateSpecBlockDevices(lvg, bds)
+			assert.False(t, valid)
+		})
+	})
+
+	t.Run("syncThinPoolsAllocationLimit", func(t *testing.T) {
+		const lvgName = "test"
+		lvg := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: lvgName,
+			},
+			Spec: v1alpha1.LvmVolumeGroupSpec{
+				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+					{
+						Name:            "first",
+						Size:            "1G",
+						AllocationLimit: "200%",
+					},
+				},
+			},
+			Status: v1alpha1.LvmVolumeGroupStatus{
+				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{
+					{
+						Name:            "first",
+						AllocationLimit: "150%",
+					},
+				},
+			},
+		}
+
+		err := cl.Create(ctx, lvg)
+		if err != nil {
+			t.Error(err)
+		}
+
+		defer func() {
+			err = cl.Delete(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		err = syncThinPoolsAllocationLimit(ctx, cl, log, lvg)
+		if err != nil {
+			t.Error(err)
+		}
+
+		updatedLVG := &v1alpha1.LvmVolumeGroup{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: lvgName,
+		}, updatedLVG)
+		if err != nil {
+			t.Error(err)
+		}
+
+		assert.Equal(t, lvg.Spec.ThinPools[0].AllocationLimit, updatedLVG.Status.ThinPools[0].AllocationLimit)
+	})
+
+	t.Run("addLVGFinalizerIfNotExist", func(t *testing.T) {
+		t.Run("not_exist_adds", func(t *testing.T) {
+			const (
+				lvgName = "test"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Finalizers = []string{}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			defer func() {
+				err = cl.Delete(ctx, lvg)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+
+			added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			if assert.True(t, added) {
+				updatedLVG := &v1alpha1.LvmVolumeGroup{}
+				err = cl.Get(ctx, client.ObjectKey{
+					Name: lvgName,
+				}, updatedLVG)
+
+				assert.True(t, slices.Contains(updatedLVG.Finalizers, internal.SdsNodeConfiguratorFinalizer))
+			}
+		})
+
+		t.Run("does_exist_no_adds", func(t *testing.T) {
+			const (
+				lvgName = "test-1"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Finalizers = []string{
+				internal.SdsNodeConfiguratorFinalizer,
+			}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			defer func() {
+				err = cl.Delete(ctx, lvg)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+
+			added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			if assert.False(t, added) {
+				updatedLVG := &v1alpha1.LvmVolumeGroup{}
+				err = cl.Get(ctx, client.ObjectKey{
+					Name: lvgName,
+				}, updatedLVG)
+
+				assert.True(t, slices.Contains(updatedLVG.Finalizers, internal.SdsNodeConfiguratorFinalizer))
+			}
+		})
+	})
+
+	t.Run("updateLVGConditionIfNeeded", func(t *testing.T) {
+		t.Run("diff_states_updates", func(t *testing.T) {
+			const (
+				lvgName   = "test-name"
+				badReason = "bad"
+			)
+			curTime := v1.NewTime(time.Now())
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Generation = 1
+			lvg.Status.Conditions = []v1.Condition{
+				{
+					Type:               internal.TypeVGConfigurationApplied,
+					Status:             v1.ConditionTrue,
+					ObservedGeneration: 1,
+					LastTransitionTime: curTime,
+					Reason:             "",
+					Message:            "",
+				},
+			}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, badReason, "")
+			if err != nil {
+				t.Error(err)
+			}
+
+			updatedLVG := &v1alpha1.LvmVolumeGroup{}
+			err = cl.Get(ctx, client.ObjectKey{
+				Name: lvgName,
+			}, updatedLVG)
+			if err != nil {
+				t.Error(err)
+			}
+
+			assert.Equal(t, updatedLVG.Status.Conditions[0].Status, v1.ConditionFalse)
+			assert.Equal(t, updatedLVG.Status.Conditions[0].Reason, badReason)
+
+			assert.NotEqual(t, curTime, lvg.Status.Conditions[0].LastTransitionTime)
+		})
+
+		t.Run("same_states_does_not_update", func(t *testing.T) {
+			const (
+				lvgName = "test-name-2"
+			)
+			curTime := v1.NewTime(time.Now())
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Generation = 1
+			lvg.Status.Conditions = []v1.Condition{
+				{
+					Type:               internal.TypeVGConfigurationApplied,
+					Status:             v1.ConditionTrue,
+					ObservedGeneration: 1,
+					LastTransitionTime: curTime,
+					Reason:             "",
+					Message:            "",
+				},
+			}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "", "")
+			if err != nil {
+				t.Error(err)
+			}
+
+			assert.Equal(t, curTime, lvg.Status.Conditions[0].LastTransitionTime)
+		})
+	})
+
+	t.Run("shouldReconcileLVGByDeleteFunc", func(t *testing.T) {
+		t.Run("returns_true", func(t *testing.T) {
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.DeletionTimestamp = &v1.Time{}
+
+			assert.True(t, shouldReconcileLVGByDeleteFunc(lvg))
+		})
+
+		t.Run("returns_false", func(t *testing.T) {
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.DeletionTimestamp = nil
+
+			assert.False(t, shouldReconcileLVGByDeleteFunc(lvg))
+		})
+	})
+
+	t.Run("shouldLVGWatcherReconcileUpdateEvent", func(t *testing.T) {
+		t.Run("deletion_timestamp_not_nil_returns_true", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.DeletionTimestamp = &v1.Time{}
+			assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("spec_is_diff_returns_true", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			oldLVG.Spec.BlockDeviceNames = []string{"first"}
+			newLVG.Spec.BlockDeviceNames = []string{"first", "second"}
+			assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("condition_vg_configuration_applied_is_updating_returns_false", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.Status.Conditions = []v1.Condition{
+				{
+					Type:   internal.TypeVGConfigurationApplied,
+					Reason: internal.ReasonUpdating,
+				},
+			}
+			assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("condition_vg_configuration_applied_is_creating_returns_false", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.Status.Conditions = []v1.Condition{
+				{
+					Type:   internal.TypeVGConfigurationApplied,
+					Reason: internal.ReasonCreating,
+				},
+			}
+			assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("dev_size_and_pv_size_are_diff_returns_true", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
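+			// DevSize (1G) differs from PVSize (2G) for the device, which should trigger a reconcile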
+			newLVG.Status.Nodes = []v1alpha1.LvmVolumeGroupNode{
+				{
+					Devices: []v1alpha1.LvmVolumeGroupDevice{
+						{
+							BlockDevice: "test",
+							DevSize:     resource.MustParse("1G"),
+							PVSize:      resource.MustParse("2G"),
+						},
+					},
+					Name: "some-node",
+				},
+			}
+			assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+	})
+
+	t.Run("checkIfVGExist", func(t *testing.T) {
+		const targetName = "test"
+		vgs := []internal.VGData{
+			{
+				VGName: targetName,
+			},
+			{
+				VGName: "another-name",
+			},
+		}
+
+		t.Run("returns_true", func(t *testing.T) {
+			assert.True(t, checkIfVGExist(targetName, vgs))
+		})
+
+		t.Run("returns_false", func(t *testing.T) {
+			assert.False(t, checkIfVGExist("not-existed", vgs))
+		})
+	})
+
+	t.Run("DeleteLVMVolumeGroup", func(t *testing.T) {
+		const (
+			lvgName  = "test-lvg"
+			nodeName = "test-node"
+		)
+
+		lvgToDelete := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: lvgName,
+			},
+			Status: v1alpha1.LvmVolumeGroupStatus{
+				Nodes: []v1alpha1.LvmVolumeGroupNode{
+					{
+						Name: nodeName,
+					},
+				},
+			},
+		}
+
+		err := cl.Create(ctx, lvgToDelete)
+		if err != nil {
+			t.Error(err)
+		}
+
+		defer func() {
+			_ = cl.Delete(ctx, lvgToDelete)
+		}()
+
+		lvgCheck := &v1alpha1.LvmVolumeGroup{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: lvgName,
+		}, lvgCheck)
+		if err != nil {
+			t.Error(err)
+		}
+		assert.Equal(t, lvgName, lvgCheck.Name)
+
+		err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvgToDelete, nodeName)
+		if err != nil {
+			t.Error(err)
+		}
+
+		lvgNewCheck := &v1alpha1.LvmVolumeGroup{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: lvgName,
+		}, lvgNewCheck)
+		if assert.True(t, errors2.IsNotFound(err)) {
+			assert.Equal(t, "", lvgNewCheck.Name)
+		}
+	})
+
+	t.Run("getLVMVolumeGroup_lvg_exists_returns_correct", func(t *testing.T) {
+		const name = "test_name"
+		lvgToCreate := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: name,
+			},
+		}
+
+		err := cl.Create(ctx, lvgToCreate)
+		if err != nil {
+			t.Error(err)
+		} else {
+			defer func() {
+				err = cl.Delete(ctx, lvgToCreate)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+		}
+
+		actual, err := getLVMVolumeGroup(ctx, cl, metrics, name)
+		if assert.NoError(t, err) {
+			assert.NotNil(t, actual)
+			assert.Equal(t, name, actual.Name)
+		}
+	})
+
+	t.Run("getLVMVolumeGroup_lvg_doesnt_exist_returns_nil", func(t *testing.T) {
+		const name = "test_name"
+		testObj := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: name,
+			},
+		}
+
+		err := cl.Create(ctx, testObj)
+		if err != nil {
+			t.Error(err)
+		} else {
+			defer func() {
+				err = cl.Delete(ctx, testObj)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+		}
+
+		actual, err := getLVMVolumeGroup(ctx, cl, metrics, "another-name")
+
+		if assert.EqualError(t, err, "lvmvolumegroups.storage.deckhouse.io \"another-name\" not found") {
+			assert.Nil(t, actual)
+		}
+	})
+}
diff --git a/images/agent/src/pkg/kubutils/kubernetes.go b/images/agent/src/pkg/kubutils/kubernetes.go
index ed232de9..bcd95a6c 100644
--- a/images/agent/src/pkg/kubutils/kubernetes.go
+++ b/images/agent/src/pkg/kubutils/kubernetes.go
@@ -18,6 +18,7 @@ package kubutils
 
 import (
 	"fmt"
+
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 )
diff --git a/images/agent/src/pkg/logger/logger.go b/images/agent/src/pkg/logger/logger.go
index 34b94245..164a2059 100644
--- a/images/agent/src/pkg/logger/logger.go
+++ b/images/agent/src/pkg/logger/logger.go
@@ -17,11 +17,11 @@ limitations under the License.
 package logger
 
 import (
-	"flag"
 	"fmt"
+	"strconv"
+
 	"github.com/go-logr/logr"
-	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/klogr"
+	"k8s.io/klog/v2/textlogger"
 )
 
 const (
@@ -50,13 +50,12 @@ type Logger struct {
 }
 
 func NewLogger(level Verbosity) (*Logger, error) {
-	klog.InitFlags(nil)
-	if err := flag.Set("v", string(level)); err != nil {
+	v, err := strconv.Atoi(string(level))
+	if err != nil {
 		return nil, err
 	}
-	flag.Parse()
 
-	log := klogr.New().WithCallDepth(1)
+	log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)
 
 	return &Logger{log: log}, nil
 }
diff --git a/images/agent/src/pkg/monitoring/monitoring.go b/images/agent/src/pkg/monitoring/monitoring.go
index b0158c35..fa638a46 100644
--- a/images/agent/src/pkg/monitoring/monitoring.go
+++ b/images/agent/src/pkg/monitoring/monitoring.go
@@ -17,11 +17,12 @@ limitations under the License.
 package monitoring
 
 import (
+	"strings"
+	"time"
+
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/utils/clock"
 	"sigs.k8s.io/controller-runtime/pkg/metrics"
-	"strings"
-	"time"
 )
 
 const (
@@ -133,15 +134,15 @@ func (m Metrics) UtilsCommandsErrorsCount(controllerName, command string) promet
 	return utilsCommandsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(command))
 }
 
-func (m Metrics) ApiMethodsDuration(controllerName, method string) prometheus.Observer {
+func (m Metrics) APIMethodsDuration(controllerName, method string) prometheus.Observer {
 	return apiMethodsDuration.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
-func (m Metrics) ApiMethodsExecutionCount(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsExecutionCount(controllerName, method string) prometheus.Counter {
 	return apiMethodsExecutionCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
-func (m Metrics) ApiMethodsErrors(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsErrors(controllerName, method string) prometheus.Counter {
 	return apiMethodsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
diff --git a/images/agent/src/pkg/scanner/scanner.go b/images/agent/src/pkg/scanner/scanner.go
index f5b2f60f..a7764cd8 100644
--- a/images/agent/src/pkg/scanner/scanner.go
+++ b/images/agent/src/pkg/scanner/scanner.go
@@ -1,6 +1,12 @@
 package scanner
 
 import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
 	"agent/config"
 	"agent/internal"
 	"agent/pkg/cache"
@@ -8,14 +14,9 @@ import (
 	"agent/pkg/logger"
 	"agent/pkg/throttler"
 	"agent/pkg/utils"
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
 	"github.com/pilebones/go-udev/netlink"
 	kubeCtrl "sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	"time"
 )
 
 func RunScanner(ctx context.Context, log logger.Logger, cfg config.Options, sdsCache *cache.Cache, bdCtrl, lvgDiscoverCtrl kubeCtrl.Controller) error {
@@ -115,7 +116,7 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", controller.BlockDeviceCtrlName))
 	bdRes, err := bdCtrl.Reconcile(ctx, reconcile.Request{})
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occured while %s reconcile", controller.BlockDeviceCtrlName))
+		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while reconciling %s", controller.BlockDeviceCtrlName))
 		return err
 	}
 
@@ -128,7 +129,6 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 			}
 
 			log.Info("[runControllersReconcile] successfully reconciled BlockDevices after a retry")
-			return
 		}()
 	}
 
@@ -137,7 +137,7 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
 	lvgRes, err := lvgDiscoverCtrl.Reconcile(ctx, reconcile.Request{})
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occured while %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
+		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while reconciling %s", controller.LVMVolumeGroupDiscoverCtrlName))
 		return err
 	}
 	if lvgRes.RequeueAfter > 0 {
@@ -149,7 +149,6 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 			}
 
 			log.Info("[runControllersReconcile] successfully reconciled LVMVolumeGroups after a retry")
-			return
 		}()
 	}
 	log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", controller.LVMVolumeGroupDiscoverCtrlName))
diff --git a/images/agent/src/pkg/utils/commands.go b/images/agent/src/pkg/utils/commands.go
index e39806f4..f00b84a1 100644
--- a/images/agent/src/pkg/utils/commands.go
+++ b/images/agent/src/pkg/utils/commands.go
@@ -17,16 +17,16 @@ limitations under the License.
 package utils
 
 import (
-	"agent/internal"
 	"bufio"
 	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
+	golog "log"
 	"os/exec"
 	"regexp"
 
-	golog "log"
+	"agent/internal"
 )
 
 func GetBlockDevices(ctx context.Context) ([]internal.Device, string, bytes.Buffer, error) {
@@ -243,8 +243,8 @@ func CreateVGShared(vgName, lvmVolumeGroupName string, pvNames []string) (string
 	return cmd.String(), nil
 }
 
-func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) {
-	args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)}
+func CreateThinPool(thinPoolName, vgName string, size int64) (string, error) {
+	args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)}
 	extendedArgs := lvmStaticExtendedArgs(args)
 	cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
 
@@ -257,8 +257,8 @@ func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) {
 	return cmd.String(), nil
 }
 
-func CreateThinPoolFullVGSpace(thinPoolName, VGName string) (string, error) {
-	args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)}
+func CreateThinPoolFullVGSpace(thinPoolName, vgName string) (string, error) {
+	args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)}
 	extendedArgs := lvmStaticExtendedArgs(args)
 	cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
 
@@ -476,12 +476,9 @@ func unmarshalPVs(out []byte) ([]internal.PVData, error) {
 		return nil, err
 	}
 
-	var pvs []internal.PVData
-
+	pvs := make([]internal.PVData, 0, len(pvR.Report))
 	for _, rep := range pvR.Report {
-		for _, pv := range rep.PV {
-			pvs = append(pvs, pv)
-		}
+		pvs = append(pvs, rep.PV...)
 	}
 
 	return pvs, nil
@@ -494,12 +491,9 @@ func unmarshalVGs(out []byte) ([]internal.VGData, error) {
 		return nil, err
 	}
 
-	var vgs []internal.VGData
-
+	vgs := make([]internal.VGData, 0, len(vgR.Report))
 	for _, rep := range vgR.Report {
-		for _, vg := range rep.VG {
-			vgs = append(vgs, vg)
-		}
+		vgs = append(vgs, rep.VG...)
 	}
 
 	return vgs, nil
@@ -512,27 +506,19 @@ func unmarshalLVs(out []byte) ([]internal.LVData, error) {
 		return nil, err
 	}
 
-	var lvs []internal.LVData
-
+	lvs := make([]internal.LVData, 0, len(lvR.Report))
 	for _, rep := range lvR.Report {
-		for _, lv := range rep.LV {
-			lvs = append(lvs, lv)
-		}
+		lvs = append(lvs, rep.LV...)
 	}
 
 	return lvs, nil
 }
 
-func extendArgs(args []string) []string {
-	nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"}
-	return append(nsenterArgs, args...)
-}
-
 func lvmStaticExtendedArgs(args []string) []string {
 	nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"}
 	lvmStaticBin := []string{"--", internal.LVMCmd}
-	result := append(nsenterArgs, lvmStaticBin...)
-	return append(result, args...)
+	nsenterArgs = append(nsenterArgs, lvmStaticBin...)
+	return append(nsenterArgs, args...)
 }
 
 // filterStdErr processes a bytes.Buffer containing stderr output and filters out specific
@@ -560,7 +546,13 @@ func filterStdErr(command string, stdErr bytes.Buffer) bytes.Buffer {
 	// will try to resize the Thin-pool with 100%VG space and will get the error.
 	regexpNoSizeChangeError := ` No size change.+`
 	regex1, err := regexp.Compile(regexpPattern)
+	if err != nil {
+		return stdErr
+	}
 	regex2, err := regexp.Compile(regexpSocketError)
+	if err != nil {
+		return stdErr
+	}
 	regex3, err := regexp.Compile(regexpNoSizeChangeError)
 	if err != nil {
 		return stdErr
diff --git a/images/agent/src/pkg/utils/commands_test.go b/images/agent/src/pkg/utils/commands_test.go
index 5bae6ecf..5d60afd1 100644
--- a/images/agent/src/pkg/utils/commands_test.go
+++ b/images/agent/src/pkg/utils/commands_test.go
@@ -17,12 +17,11 @@ limitations under the License.
 package utils
 
 import (
-	"agent/internal"
 	"testing"
 
-	"k8s.io/apimachinery/pkg/api/resource"
-
+	"agent/internal"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 )
 
 func TestCommands(t *testing.T) {
@@ -63,6 +62,9 @@ func TestCommands(t *testing.T) {
 }`
 
 			size30G, err := resource.ParseQuantity("30G")
+			if err != nil {
+				t.Error(err)
+			}
 			size1M, err := resource.ParseQuantity("1M")
 			if err != nil {
 				t.Error(err)
@@ -216,7 +218,7 @@ func TestCommands(t *testing.T) {
 			expectedVGs := internal.VG{VG: []internal.VGData{
 				{
 					VGName:   "test-vg",
-					VGUuid:   "P14t8J-nfUE-hryT-LiTv-JdFD-Wqxg-R8taCa",
+					VGUUID:   "P14t8J-nfUE-hryT-LiTv-JdFD-Wqxg-R8taCa",
 					VGTags:   "test-tag",
 					VGSize:   size2G,
 					VGShared: "test-shared",
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go
index d5c5ca2f..05be62d1 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go
@@ -2,7 +2,6 @@ package controller
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"gopkg.in/yaml.v3"
@@ -36,7 +35,6 @@ func getTargetConditionsCount(lvgCrd *v1.CustomResourceDefinition) (int, error)
 		} `json:"properties"`
 	}
 	i := item{}
-	fmt.Printf("%+v\n", lvgCrd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["status"].Properties["conditions"].Items)
 	json, err := lvgCrd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["status"].Properties["conditions"].Items.MarshalJSON()
 	if err != nil {
 		return 0, err
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go
index a13fdc00..2ec4b0c9 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go
@@ -2,6 +2,7 @@ package controller
 
 import (
 	"context"
+	"encoding/json"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -44,6 +45,18 @@ func TestLVGConditionsWatcher(t *testing.T) {
 	})
 
 	t.Run("getTargetConditionsCount", func(t *testing.T) {
+		first, err := json.Marshal("first")
+		if err != nil {
+			t.Error(err)
+		}
+		second, err := json.Marshal("second")
+		if err != nil {
+			t.Error(err)
+		}
+		third, err := json.Marshal("third")
+		if err != nil {
+			t.Error(err)
+		}
 		crd := &v1.CustomResourceDefinition{
 			Spec: v1.CustomResourceDefinitionSpec{
 				Versions: []v1.CustomResourceDefinitionVersion{
@@ -60,13 +73,13 @@ func TestLVGConditionsWatcher(t *testing.T) {
 															"type": {
 																Enum: []v1.JSON{
 																	{
-																		Raw: []byte("first"),
+																		Raw: first,
 																	},
 																	{
-																		Raw: []byte("second"),
+																		Raw: second,
 																	},
 																	{
-																		Raw: []byte("third"),
+																		Raw: third,
 																	},
 																},
 															},
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go
index 9c6bd21f..f7839af5 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go
@@ -19,6 +19,7 @@ package controller
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -182,11 +183,17 @@ func RunSdsInfraWatcher(
 			log.Debug("[RunSdsInfraWatcher] check if every agent's pod is in a Ready state")
 			notReadyPods := getNotReadyPods(sdsPods)
 			if len(notReadyPods) > 0 {
-				log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] there is some sds-node-configurator agent's pods that is not Ready, pods: %v. Turn the LVMVolumeGroups condition AgentReady to False", notReadyPods))
+				podsNames := make([]string, 0, len(notReadyPods))
+				for name := range notReadyPods {
+					podsNames = append(podsNames, name)
+				}
+
+				log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] some sds-node-configurator agent pods are not Ready, pods: %s. Turning the LVMVolumeGroups condition AgentReady to False", strings.Join(podsNames, ",")))
 				nodeNames := getNodeNamesFromPods(notReadyPods)
 				log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] node names with not Ready sds-node-configurator agent's pods: %v", nodeNames))
 				lvgsNotReady := findLVMVolumeGroupsByNodeNames(lvgs, nodeNames)
 				for _, lvg := range lvgsNotReady {
+					log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] the LVMVolumeGroup %s is managed by a pod that is not Ready, turning the condition %s to False", lvg.Name, agentReadyType))
 					err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, agentReadyType, "PodNotReady", "the pod is not Ready")
 					if err != nil {
 						log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name))
diff --git a/images/sds-health-watcher-controller/src/pkg/logger/logger.go b/images/sds-health-watcher-controller/src/pkg/logger/logger.go
index 0b9eba48..37444699 100644
--- a/images/sds-health-watcher-controller/src/pkg/logger/logger.go
+++ b/images/sds-health-watcher-controller/src/pkg/logger/logger.go
@@ -50,19 +50,12 @@ type Logger struct {
 }
 
 func NewLogger(level Verbosity) (*Logger, error) {
-	// klog.InitFlags(nil)
-	// if err := flag.Set("v", string(level)); err != nil {
-	// return nil, err
-	// }
-	// flag.Parse()
-
 	v, err := strconv.Atoi(string(level))
 	if err != nil {
 		return nil, err
 	}
 
 	log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)
-	// log := klogr.New().WithCallDepth(1)
 
 	return &Logger{log: log}, nil
 }