diff --git a/.golangci.yaml b/.golangci.yaml
index 4655fb04..f0eb7ce0 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -7,12 +7,16 @@ issues:
     - "should not use dot imports"
     - "don't use an underscore in package name"
     - "exported: .*"
+    - "could not import"
 
 linters-settings:
   gci:
     sections:
       - standard
       - default
+      - prefix(agent)
+      - prefix(sds-health-watcher-controller)
+      - prefix(sds-utils-installer)
   errcheck:
     ignore: fmt:.*,[rR]ead|[wW]rite|[cC]lose,io:Copy
 
@@ -24,7 +28,7 @@ linters:
   - gci
   - gocritic
   - gofmt
-  - goimports
+#  - goimports
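+#    (import ordering is covered by gci with the custom prefixes configured above)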
   - gosimple
   - govet
   - ineffassign
diff --git a/images/agent/src/cmd/main.go b/images/agent/src/cmd/main.go
index 7a028a0f..c4b8c793 100644
--- a/images/agent/src/cmd/main.go
+++ b/images/agent/src/cmd/main.go
@@ -17,20 +17,12 @@ limitations under the License.
 package main
 
 import (
-	"agent/config"
-	"agent/pkg/cache"
-	"agent/pkg/controller"
-	"agent/pkg/kubutils"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/scanner"
 	"context"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"os"
 	goruntime "runtime"
-	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	v1 "k8s.io/api/core/v1"
 	sv1 "k8s.io/api/storage/v1"
 	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -39,6 +31,15 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
+	"agent/config"
+	"agent/pkg/cache"
+	"agent/pkg/controller"
+	"agent/pkg/kubutils"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/scanner"
 )
 
 var (
@@ -61,7 +62,7 @@ func main() {
 
 	log, err := logger.NewLogger(cfgParams.Loglevel)
 	if err != nil {
-		fmt.Println(fmt.Sprintf("unable to create NewLogger, err: %v", err))
+		fmt.Printf("unable to create NewLogger, err: %v\n", err)
 		os.Exit(1)
 	}
 
@@ -71,7 +72,7 @@ func main() {
 	log.Info("[main] CfgParams has been successfully created")
 	log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevel, cfgParams.Loglevel))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.NodeName, cfgParams.NodeName))
-	log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineId))
+	log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineID))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ScanInterval, cfgParams.BlockDeviceScanIntervalSec.String()))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ThrottleInterval, cfgParams.ThrottleIntervalSec.String()))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.CmdDeadlineDuration, cfgParams.CmdDeadlineDurationSec.String()))
diff --git a/images/agent/src/config/config.go b/images/agent/src/config/config.go
index 416832e7..a657915d 100644
--- a/images/agent/src/config/config.go
+++ b/images/agent/src/config/config.go
@@ -17,16 +17,16 @@ limitations under the License.
 package config
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
 	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
-
 	"strconv"
 	"strings"
 	"time"
+
+	"agent/internal"
+	"agent/pkg/logger"
 )
 
 const (
@@ -42,7 +42,7 @@ const (
 )
 
 type Options struct {
-	MachineId                  string
+	MachineID                  string
 	NodeName                   string
 	Loglevel                   logger.Verbosity
 	MetricsPort                string
@@ -69,11 +69,11 @@ func NewConfig() (*Options, error) {
 		opts.Loglevel = logger.Verbosity(loglevel)
 	}
 
-	machId, err := getMachineId()
+	machID, err := getMachineID()
 	if err != nil {
 		return nil, fmt.Errorf("[NewConfig] unable to get %s, error: %w", MachineID, err)
 	}
-	opts.MachineId = machId
+	opts.MachineID = machID
 
 	opts.MetricsPort = os.Getenv(MetricsPort)
 	if opts.MetricsPort == "" {
@@ -127,7 +127,7 @@ func NewConfig() (*Options, error) {
 	return &opts, nil
 }
 
-func getMachineId() (string, error) {
+func getMachineID() (string, error) {
 	id := os.Getenv(MachineID)
 	if id == "" {
 		args := []string{"-m", "-u", "-i", "-n", "-p", "-t", "1", "cat", "/etc/machine-id"}
diff --git a/images/agent/src/config/config_test.go b/images/agent/src/config/config_test.go
index 32ddbfda..837ea0cb 100644
--- a/images/agent/src/config/config_test.go
+++ b/images/agent/src/config/config_test.go
@@ -18,16 +18,17 @@ package config
 
 import (
 	"fmt"
-	"github.com/stretchr/testify/assert"
 	"os"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestNewConfig(t *testing.T) {
 	t.Run("AllValuesSet_ReturnsNoError", func(t *testing.T) {
 		expNodeName := "test-node"
 		expMetricsPort := ":0000"
-		expMachineId := "test-id"
+		expMachineID := "test-id"
 
 		err := os.Setenv(NodeName, expNodeName)
 		if err != nil {
@@ -37,7 +38,10 @@ func TestNewConfig(t *testing.T) {
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
+		err = os.Setenv(MachineID, expMachineID)
+		if err != nil {
+			t.Error(err)
+		}
 		defer os.Clearenv()
 
 		opts, err := NewConfig()
@@ -45,12 +49,12 @@ func TestNewConfig(t *testing.T) {
 		if assert.NoError(t, err) {
 			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
+			assert.Equal(t, expMachineID, opts.MachineID)
 		}
 	})
 
 	t.Run("NodeNameNotSet_ReturnsError", func(t *testing.T) {
-		machineIdFile := "./host-root/etc/machine-id"
+		machineIDFile := "./host-root/etc/machine-id"
 		expMetricsPort := ":0000"
 		expErrorMsg := fmt.Sprintf("[NewConfig] required %s env variable is not specified", NodeName)
 
@@ -65,7 +69,7 @@ func TestNewConfig(t *testing.T) {
 			t.Error(err)
 		}
 
-		file, err := os.Create(machineIdFile)
+		file, err := os.Create(machineIDFile)
 		if err != nil {
 			t.Error(err)
 		}
@@ -85,11 +89,11 @@ func TestNewConfig(t *testing.T) {
 		assert.EqualError(t, err, expErrorMsg)
 	})
 
-	t.Run("MachineIdNotSet_ReturnsError", func(t *testing.T) {
+	t.Run("MachineIDNotSet_ReturnsError", func(t *testing.T) {
 		expMetricsPort := ":0000"
 		expNodeName := "test-node"
 		expErrorMsg := fmt.Sprintf("[NewConfig] unable to get %s, error: %s",
-			MachineID, "open /host-root/etc/machine-id: no such file or directory")
+			MachineID, "fork/exec /opt/deckhouse/sds/bin/nsenter.static: no such file or directory")
 
 		err := os.Setenv(MetricsPort, expMetricsPort)
 		if err != nil {
@@ -108,13 +112,13 @@ func TestNewConfig(t *testing.T) {
 	t.Run("MetricsPortNotSet_ReturnsDefaultPort", func(t *testing.T) {
 		expNodeName := "test-node"
 		expMetricsPort := ":4202"
-		expMachineId := "test-id"
+		expMachineID := "test-id"
 
 		err := os.Setenv(NodeName, expNodeName)
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
+		err = os.Setenv(MachineID, expMachineID)
 		if err != nil {
 			t.Error(err)
 		}
@@ -126,8 +130,7 @@ func TestNewConfig(t *testing.T) {
 		if assert.NoError(t, err) {
 			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
+			assert.Equal(t, expMachineID, opts.MachineID)
 		}
 	})
-
 }
diff --git a/images/agent/src/go.mod b/images/agent/src/go.mod
index d0a1ece8..29e4a40e 100644
--- a/images/agent/src/go.mod
+++ b/images/agent/src/go.mod
@@ -4,66 +4,67 @@ go 1.22.2
 
 require (
 	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b
-	github.com/go-logr/logr v1.4.1
+	github.com/go-logr/logr v1.4.2
 	github.com/google/go-cmp v0.6.0
-	github.com/onsi/ginkgo/v2 v2.17.1
-	github.com/onsi/gomega v1.32.0
+	github.com/onsi/ginkgo/v2 v2.19.0
+	github.com/onsi/gomega v1.33.1
 	github.com/pilebones/go-udev v0.9.0
-	github.com/prometheus/client_golang v1.18.0
-	github.com/stretchr/testify v1.8.4
-	k8s.io/api v0.30.2
-	k8s.io/apiextensions-apiserver v0.30.1
-	k8s.io/apimachinery v0.30.2
-	k8s.io/client-go v0.30.1
-	k8s.io/klog/v2 v2.120.1
-	k8s.io/utils v0.0.0-20231127182322-b307cd553661
-	sigs.k8s.io/controller-runtime v0.18.4
+	github.com/prometheus/client_golang v1.19.1
+	github.com/stretchr/testify v1.9.0
+	k8s.io/api v0.31.0
+	k8s.io/apiextensions-apiserver v0.31.0
+	k8s.io/apimachinery v0.31.0
+	k8s.io/client-go v0.31.0
+	k8s.io/klog/v2 v2.130.1
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+	sigs.k8s.io/controller-runtime v0.19.0
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-openapi/jsonpointer v0.20.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.4 // indirect
-	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
-	github.com/google/uuid v1.4.0 // indirect
+	github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.45.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/oauth2 v0.15.0 // indirect
-	golang.org/x/sys v0.18.0 // indirect
-	golang.org/x/term v0.18.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.18.0 // indirect
+	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/images/agent/src/go.sum b/images/agent/src/go.sum
index 623d8d8a..ea685a09 100644
--- a/images/agent/src/go.sum
+++ b/images/agent/src/go.sum
@@ -1,14 +1,12 @@
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
@@ -19,8 +17,10 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -31,30 +31,26 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
 github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -72,8 +68,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -81,41 +75,42 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
-github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/pilebones/go-udev v0.9.0 h1:N1uEO/SxUwtIctc0WLU0t69JeBxIYEYnj8lT/Nabl9Q=
 github.com/pilebones/go-udev v0.9.0/go.mod h1:T2eI2tUSK0hA2WS5QLjXJUfQkluZQu+18Cqvem3CaXI=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -125,70 +120,53 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No=
 golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
-golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
-golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
-golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
 gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -197,22 +175,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws=
-k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q=
-k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
+k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
+k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
+k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
+k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
+k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
+k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
-k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
-sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
+sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
diff --git a/images/agent/src/internal/type.go b/images/agent/src/internal/type.go
index 24db2ed7..172e1474 100644
--- a/images/agent/src/internal/type.go
+++ b/images/agent/src/internal/type.go
@@ -37,7 +37,7 @@ type BlockDeviceCandidate struct {
 	PkName                string
 	Type                  string
 	FSType                string
-	MachineId             string
+	MachineID             string
 	PartUUID              string
 }
 
@@ -54,7 +54,7 @@ type LVMVolumeGroupCandidate struct {
 	StatusThinPools       []LVMVGStatusThinPool
 	VGSize                resource.Quantity
 	VGFree                resource.Quantity
-	VGUuid                string
+	VGUUID                string
 	Nodes                 map[string][]LVMVGDevice
 }
 
@@ -71,7 +71,7 @@ type LVMVGDevice struct {
 	Path        string
 	PVSize      resource.Quantity
 	DevSize     resource.Quantity
-	PVUuid      string
+	PVUUID      string
 	BlockDevice string
 }
 
@@ -127,7 +127,7 @@ type VGData struct {
 	VGShared string            `json:"vg_shared"`
 	VGSize   resource.Quantity `json:"vg_size"`
 	VGTags   string            `json:"vg_tags"`
-	VGUuid   string            `json:"vg_uuid"`
+	VGUUID   string            `json:"vg_uuid"`
 }
 
 type LVReport struct {
diff --git a/images/agent/src/pkg/cache/cache.go b/images/agent/src/pkg/cache/cache.go
index 78d3ec30..ea4962d4 100644
--- a/images/agent/src/pkg/cache/cache.go
+++ b/images/agent/src/pkg/cache/cache.go
@@ -1,10 +1,11 @@
 package cache
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
 	"bytes"
 	"fmt"
+
+	"agent/internal"
+	"agent/pkg/logger"
 )
 
 type Cache struct {
diff --git a/images/agent/src/pkg/cache/cache_test.go b/images/agent/src/pkg/cache/cache_test.go
index 7d54cdeb..d81988b0 100644
--- a/images/agent/src/pkg/cache/cache_test.go
+++ b/images/agent/src/pkg/cache/cache_test.go
@@ -1,10 +1,12 @@
 package cache
 
 import (
-	"agent/internal"
 	"bytes"
-	"github.com/stretchr/testify/assert"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"agent/internal"
 )
 
 func TestCache(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/block_device.go b/images/agent/src/pkg/controller/block_device.go
index 022ead02..8f616d15 100644
--- a/images/agent/src/pkg/controller/block_device.go
+++ b/images/agent/src/pkg/controller/block_device.go
@@ -17,27 +17,28 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"crypto/sha1"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"os"
 	"regexp"
 	"strings"
 	"time"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kclient "sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 const (
@@ -54,7 +55,7 @@ func RunBlockDeviceController(
 	cl := mgr.GetClient()
 
 	c, err := controller.New(BlockDeviceCtrlName, mgr, controller.Options{
-		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+		Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
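+			// The request argument is deliberately ignored: every pass reconciles all BlockDevice resources on the node at once.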
 			log.Info("[RunBlockDeviceController] Reconciler starts BlockDevice resources reconciliation")
 
 			shouldRequeue := BlockDeviceReconcile(ctx, cl, log, metrics, cfg, sdsCache)
@@ -85,7 +86,7 @@ func BlockDeviceReconcile(ctx context.Context, cl kclient.Client, log logger.Log
 	candidates := GetBlockDeviceCandidates(log, cfg, sdsCache)
 	if len(candidates) == 0 {
 		log.Info("[RunBlockDeviceController] no block devices candidates found. Stop reconciliation")
-		return true
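+		// Finding no candidates is a valid state rather than an error, so no requeue is needed.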
+		return false
 	}
 
 	apiBlockDevices, err := GetAPIBlockDevices(ctx, cl, metrics)
@@ -152,7 +153,7 @@ func hasBlockDeviceDiff(res v1alpha1.BlockDeviceStatus, candidate internal.Block
 		candidate.HotPlug != res.HotPlug ||
 		candidate.Type != res.Type ||
 		candidate.FSType != res.FsType ||
-		candidate.MachineId != res.MachineID
+		candidate.MachineID != res.MachineID
 }
 
 func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics) (map[string]v1alpha1.BlockDevice, error) {
@@ -160,10 +161,10 @@ func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitori
 
 	start := time.Now()
 	err := kc.List(ctx, listDevice)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "list").Inc()
 		return nil, fmt.Errorf("unable to kc.List, error: %w", err)
 	}
 
@@ -181,8 +182,8 @@ func RemoveDeprecatedAPIDevices(
 	metrics monitoring.Metrics,
 	candidates []internal.BlockDeviceCandidate,
 	apiBlockDevices map[string]v1alpha1.BlockDevice,
-	nodeName string) {
-
+	nodeName string,
+) {
 	actualCandidates := make(map[string]struct{}, len(candidates))
 	for _, candidate := range candidates {
 		actualCandidates[candidate.Name] = struct{}{}
@@ -260,7 +261,7 @@ func GetBlockDeviceCandidates(log logger.Logger, cfg config.Options, sdsCache *c
 			PkName:     device.PkName,
 			Type:       device.Type,
 			FSType:     device.FSType,
-			MachineId:  cfg.MachineId,
+			MachineID:  cfg.MachineID,
 			PartUUID:   device.PartUUID,
 		}
 
@@ -513,15 +514,15 @@ func UpdateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 		Model:                 candidate.Model,
 		Rota:                  candidate.Rota,
 		HotPlug:               candidate.HotPlug,
-		MachineID:             candidate.MachineId,
+		MachineID:             candidate.MachineID,
 	}
 
 	start := time.Now()
 	err := kc.Update(ctx, &blockDevice)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "update").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "update").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "update").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "update").Inc()
 		return err
 	}
 
@@ -549,16 +550,16 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 			Size:                  *resource.NewQuantity(candidate.Size.Value(), resource.BinarySI),
 			Model:                 candidate.Model,
 			Rota:                  candidate.Rota,
-			MachineID:             candidate.MachineId,
+			MachineID:             candidate.MachineID,
 		},
 	}
 
 	start := time.Now()
 	err := kc.Create(ctx, device)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "create").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "create").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "create").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "create").Inc()
 		return nil, err
 	}
 	return device, nil
@@ -567,10 +568,10 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito
 func DeleteAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics, device *v1alpha1.BlockDevice) error {
 	start := time.Now()
 	err := kc.Delete(ctx, device)
-	metrics.ApiMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(BlockDeviceCtrlName, "delete").Inc()
+	metrics.APIMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "delete").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(BlockDeviceCtrlName, "delete").Inc()
+		metrics.APIMethodsErrors(BlockDeviceCtrlName, "delete").Inc()
 		return err
 	}
 	return nil
diff --git a/images/agent/src/pkg/controller/block_device_test.go b/images/agent/src/pkg/controller/block_device_test.go
index 17d5c377..c70a802e 100644
--- a/images/agent/src/pkg/controller/block_device_test.go
+++ b/images/agent/src/pkg/controller/block_device_test.go
@@ -17,19 +17,208 @@ limitations under the License.
 package controller
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
-	"agent/pkg/utils"
+	"bytes"
+	"context"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"testing"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	errors2 "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	"github.com/stretchr/testify/assert"
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 func TestBlockDeviceCtrl(t *testing.T) {
+	ctx := context.Background()
+	log, _ := logger.NewLogger("1")
+	cfg := config.Options{
+		NodeName:  "test-node",
+		MachineID: "test-id",
+	}
+
+	t.Run("shouldDeleteBlockDevice", func(t *testing.T) {
+		t.Run("returns_true", func(t *testing.T) {
+			bd := v1alpha1.BlockDevice{
+				Status: v1alpha1.BlockDeviceStatus{
+					NodeName:   cfg.NodeName,
+					Consumable: true,
+				},
+			}
+			actual := map[string]struct{}{}
+
+			assert.True(t, shouldDeleteBlockDevice(bd, actual, cfg.NodeName))
+		})
+
+		t.Run("returns_false_cause_of_dif_node", func(t *testing.T) {
+			bd := v1alpha1.BlockDevice{
+				Status: v1alpha1.BlockDeviceStatus{
+					NodeName:   cfg.NodeName,
+					Consumable: true,
+				},
+			}
+			actual := map[string]struct{}{}
+
+			assert.False(t, shouldDeleteBlockDevice(bd, actual, "dif-node"))
+		})
+
+		t.Run("returns_false_cause_of_not_consumable", func(t *testing.T) {
+			bd := v1alpha1.BlockDevice{
+				Status: v1alpha1.BlockDeviceStatus{
+					NodeName:   cfg.NodeName,
+					Consumable: false,
+				},
+			}
+			actual := map[string]struct{}{}
+
+			assert.False(t, shouldDeleteBlockDevice(bd, actual, cfg.NodeName))
+		})
+
+		t.Run("returns_false_cause_of_not_deprecated", func(t *testing.T) {
+			const name = "test"
+			bd := v1alpha1.BlockDevice{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: name,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					NodeName:   cfg.NodeName,
+					Consumable: true,
+				},
+			}
+			actual := map[string]struct{}{
+				name: {},
+			}
+
+			assert.False(t, shouldDeleteBlockDevice(bd, actual, cfg.NodeName))
+		})
+	})
+
+	t.Run("RemoveDeprecatedAPIDevices", func(t *testing.T) {
+		const (
+			goodName = "test-candidate1"
+			badName  = "test-candidate2"
+		)
+		cl := NewFakeClient()
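+		// Only goodName has a matching candidate below, so badName must be treated as deprecated and deleted.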
+		candidates := []internal.BlockDeviceCandidate{
+			{
+				NodeName:              cfg.NodeName,
+				Consumable:            false,
+				PVUuid:                "142412421",
+				VGUuid:                "123123123",
+				LvmVolumeGroupName:    "test-lvg",
+				ActualVGNameOnTheNode: "test-vg",
+				Wwn:                   "12414212",
+				Serial:                "1412412412412",
+				Path:                  "/dev/vdb",
+				Size:                  resource.MustParse("1G"),
+				Rota:                  false,
+				Model:                 "124124-adf",
+				Name:                  goodName,
+				HotPlug:               false,
+				MachineID:             "1245151241241",
+			},
+		}
+
+		bds := map[string]v1alpha1.BlockDevice{
+			goodName: {
+				ObjectMeta: metav1.ObjectMeta{
+					Name: goodName,
+				},
+			},
+			badName: {
+				ObjectMeta: metav1.ObjectMeta{
+					Name: badName,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Consumable: true,
+					NodeName:   cfg.NodeName,
+				},
+			},
+		}
+
+		for _, bd := range bds {
+			err := cl.Create(ctx, &bd)
+			if err != nil {
+				t.Error(err)
+			}
+		}
+
+		defer func() {
+			for _, bd := range bds {
+				_ = cl.Delete(ctx, &bd)
+			}
+		}()
+
+		for _, bd := range bds {
+			createdBd := &v1alpha1.BlockDevice{}
+			err := cl.Get(ctx, client.ObjectKey{
+				Name: bd.Name,
+			}, createdBd)
+			if err != nil {
+				t.Error(err)
+			}
+			assert.Equal(t, bd.Name, createdBd.Name)
+		}
+
+		RemoveDeprecatedAPIDevices(ctx, cl, *log, monitoring.GetMetrics(cfg.NodeName), candidates, bds, cfg.NodeName)
+
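+		// RemoveDeprecatedAPIDevices is also expected to prune the deleted device from the passed map.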
+		_, ok := bds[badName]
+		assert.False(t, ok)
+
+		deleted := &v1alpha1.BlockDevice{}
+		err := cl.Get(ctx, client.ObjectKey{
+			Name: badName,
+		}, deleted)
+		if assert.True(t, kerrors.IsNotFound(err)) {
+			assert.Equal(t, "", deleted.Name)
+		}
+	})
+
+	t.Run("GetBlockDeviceCandidates", func(t *testing.T) {
+		devices := []internal.Device{
+			{
+				Name:   "valid1",
+				Size:   resource.MustParse("1G"),
+				Serial: "131412",
+			},
+			{
+				Name:   "valid2",
+				Size:   resource.MustParse("1G"),
+				Serial: "12412412",
+			},
+			{
+				Name:   "valid3",
+				Size:   resource.MustParse("1G"),
+				Serial: "4214215",
+			},
+			{
+				Name:   "invalid",
+				FSType: "ext4",
+				Size:   resource.MustParse("1G"),
+			},
+		}
+
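+		// The "invalid" device carries a filesystem, so it is expected to be filtered out, leaving three candidates.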
+		sdsCache := cache.New()
+		sdsCache.StoreDevices(devices, bytes.Buffer{})
+
+		candidates := GetBlockDeviceCandidates(*log, cfg, sdsCache)
+
+		assert.Equal(t, 3, len(candidates))
+		for i := range candidates {
+			assert.Equal(t, devices[i].Name, candidates[i].Path)
+			assert.Equal(t, cfg.MachineID, candidates[i].MachineID)
+			assert.Equal(t, cfg.NodeName, candidates[i].NodeName)
+		}
+	})
+
 	t.Run("CheckConsumable", func(t *testing.T) {
 		t.Run("Good device returns true", func(t *testing.T) {
 			goodDevice := internal.Device{
@@ -151,7 +340,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				PkName:                "testPKNAME",
 				Type:                  "testTYPE",
 				FSType:                "testFS",
-				MachineId:             "testMACHINE",
+				MachineID:             "testMACHINE",
 			},
 			// diff state
 			{
@@ -173,7 +362,7 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				PkName:                "testPKNAME2",
 				Type:                  "testTYPE2",
 				FSType:                "testFS2",
-				MachineId:             "testMACHINE2",
+				MachineID:             "testMACHINE2",
 			},
 		}
 		blockDevice := v1alpha1.BlockDevice{
@@ -205,10 +394,6 @@ func TestBlockDeviceCtrl(t *testing.T) {
 	})
 
 	t.Run("validateTestLSBLKOutput", func(t *testing.T) {
-		log, err := logger.NewLogger("1")
-		if err != nil {
-			t.Fatal(err)
-		}
 		testLsblkOutputBytes := []byte(testLsblkOutput)
 		devices, err := utils.UnmarshalDevices(testLsblkOutputBytes)
 		if assert.NoError(t, err) {
@@ -267,8 +452,8 @@ func TestBlockDeviceCtrl(t *testing.T) {
 				candidateName := CreateCandidateName(*log, candidate, devices)
 				assert.Equal(t, "dev-98ca88ddaaddec43b1c4894756f4856244985511", candidateName, "device name generated incorrectly")
 			}
-
 		}
+
 		if assert.NoError(t, err) {
 			assert.Equal(t, 7, len(filteredDevices))
 		}
diff --git a/images/agent/src/pkg/controller/controller_reconcile_test.go b/images/agent/src/pkg/controller/controller_reconcile_test.go
index 91929698..02f97de0 100644
--- a/images/agent/src/pkg/controller/controller_reconcile_test.go
+++ b/images/agent/src/pkg/controller/controller_reconcile_test.go
@@ -17,15 +17,17 @@ limitations under the License.
 package controller_test
 
 import (
-	"agent/internal"
-	"agent/pkg/controller"
-	"agent/pkg/monitoring"
 	"context"
 
-	"k8s.io/apimachinery/pkg/api/resource"
-
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"agent/internal"
+	"agent/pkg/controller"
+	"agent/pkg/monitoring"
 )
 
 var _ = Describe("Storage Controller", func() {
@@ -53,7 +55,7 @@ var _ = Describe("Storage Controller", func() {
 			PkName:                "/dev/sda14",
 			Type:                  "disk",
 			FSType:                "",
-			MachineId:             "1234",
+			MachineID:             "1234",
 		}
 	)
 
@@ -76,7 +78,7 @@ var _ = Describe("Storage Controller", func() {
 		Expect(blockDevice.Status.Model).To(Equal(candidate.Model))
 		Expect(blockDevice.Status.Type).To(Equal(candidate.Type))
 		Expect(blockDevice.Status.FsType).To(Equal(candidate.FSType))
-		Expect(blockDevice.Status.MachineID).To(Equal(candidate.MachineId))
+		Expect(blockDevice.Status.MachineID).To(Equal(candidate.MachineID))
 	})
 
 	It("GetAPIBlockDevices", func() {
@@ -110,7 +112,7 @@ var _ = Describe("Storage Controller", func() {
 			PkName:                "/dev/sda14",
 			Type:                  "disk",
 			FSType:                "",
-			MachineId:             "1234",
+			MachineID:             "1234",
 		}
 
 		resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics)
@@ -138,10 +140,15 @@ var _ = Describe("Storage Controller", func() {
 	})
 
 	It("DeleteAPIBlockDevice", func() {
-		err := controller.DeleteAPIBlockDevice(ctx, cl, testMetrics, deviceName)
+		err := controller.DeleteAPIBlockDevice(ctx, cl, testMetrics, &v1alpha1.BlockDevice{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: deviceName,
+			},
+		})
 		Expect(err).NotTo(HaveOccurred())
 
 		devices, err := controller.GetAPIBlockDevices(context.Background(), cl, testMetrics)
+		Expect(err).NotTo(HaveOccurred())
 		for name := range devices {
 			Expect(name).NotTo(Equal(deviceName))
 		}
diff --git a/images/agent/src/pkg/controller/controller_suite_test.go b/images/agent/src/pkg/controller/controller_suite_test.go
index a4f27c0d..9ebb5197 100644
--- a/images/agent/src/pkg/controller/controller_suite_test.go
+++ b/images/agent/src/pkg/controller/controller_suite_test.go
@@ -17,17 +17,15 @@ limitations under the License.
 package controller_test
 
 import (
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"testing"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
 func TestController(t *testing.T) {
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
deleted file mode 100644
index 2aff0247..00000000
--- a/images/agent/src/pkg/controller/lvm_logical_volume_bench_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package controller
-
-import (
-	"agent/internal"
-	"agent/pkg/kubutils"
-	"context"
-	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	v1 "k8s.io/api/core/v1"
-	sv1 "k8s.io/api/storage/v1"
-	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	apiruntime "k8s.io/apimachinery/pkg/runtime"
-	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"os"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"testing"
-)
-
-var (
-	lvgName   = "hdd-lvg-on-node-0"
-	poolName  = "hhd-thin"
-	lvCount   = 600
-	size, err = resource.ParseQuantity("1Gi")
-
-	resizeOn = false
-
-	ctx   = context.Background()
-	e2eCL client.Client
-
-	resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{
-		v1alpha1.AddToScheme,
-		clientgoscheme.AddToScheme,
-		extv1.AddToScheme,
-		v1.AddToScheme,
-		sv1.AddToScheme,
-	}
-)
-
-func BenchmarkRunThickLLVCreationSingleThread(b *testing.B) {
-	b.Logf("starts the test")
-	llvNames := make(map[string]bool, lvCount)
-
-	b.StartTimer()
-	for i := 0; i < lvCount; i++ {
-		llv := configureTestThickLLV(fmt.Sprintf("test-llv-%d", i), lvgName)
-		err := e2eCL.Create(ctx, llv)
-		if err != nil {
-			b.Logf("unable to create test LLV %s, err: %s", llv.Name, err.Error())
-		}
-		llvNames[llv.Name] = false
-	}
-	lvCreatedTime := b.Elapsed().Seconds()
-
-	succeeded := 0
-	for succeeded < len(llvNames) {
-		llvs, err := getAllLLV(ctx, e2eCL)
-		if err != nil {
-			b.Error(err)
-			continue
-		}
-
-		for llvName, created := range llvNames {
-			if llv, found := llvs[llvName]; found {
-				if llv.Status != nil {
-					b.Logf("LV %s status %s", llvName, llv.Status.Phase)
-				}
-				if err != nil {
-					b.Logf("can't check LLV %s llv", llvName)
-					continue
-				}
-
-				if llv.Status != nil &&
-					llv.Status.Phase == LLVStatusPhaseCreated &&
-					!created {
-					succeeded++
-					llvNames[llvName] = true
-
-					if resizeOn {
-						add, err := resource.ParseQuantity("1G")
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						llv.Spec.Size.Add(add)
-						err = e2eCL.Update(ctx, &llv)
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						b.Logf("resize for LV %s succeeded", llvName)
-					}
-				}
-			}
-
-		}
-	}
-	b.Logf("[TIME] LLV resources were configured for %f", lvCreatedTime)
-	b.Logf("All LLV were created for %f. Ends the test", b.Elapsed().Seconds())
-}
-
-func BenchmarkRunThinLLVCreationSingleThread(b *testing.B) {
-	b.Logf("starts thin test")
-	llvNames := make(map[string]bool, lvCount)
-
-	b.StartTimer()
-	for i := 0; i < lvCount; i++ {
-		llv := configureTestThinLLV(fmt.Sprintf("test-llv-%d", i), lvgName, poolName)
-		err := e2eCL.Create(ctx, llv)
-		if err != nil {
-			b.Logf("unable to create test LLV %s, err: %s", llv.Name, err.Error())
-			continue
-		}
-		llvNames[llv.Name] = false
-	}
-	createdTime := b.Elapsed().Seconds()
-
-	succeeded := 0
-	for succeeded < len(llvNames) {
-		llvs, err := getAllLLV(ctx, e2eCL)
-		if err != nil {
-			b.Error(err)
-			continue
-		}
-
-		for llvName, visited := range llvNames {
-			if llv, found := llvs[llvName]; found {
-				if llv.Status != nil {
-					b.Logf("LV %s status %s", llvName, llv.Status.Phase)
-				}
-				if err != nil {
-					b.Logf("can't check LLV %s llv", llvName)
-					continue
-				}
-
-				if llv.Status != nil &&
-					llv.Status.Phase == LLVStatusPhaseCreated &&
-					!visited {
-					succeeded++
-					llvNames[llvName] = true
-
-					if resizeOn {
-						add, err := resource.ParseQuantity("1G")
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						llv.Spec.Size.Add(add)
-						err = e2eCL.Update(ctx, &llv)
-						if err != nil {
-							b.Logf(err.Error())
-							continue
-						}
-
-						b.Logf("resize for LV %s succeeded", llvName)
-					}
-				}
-			}
-
-		}
-	}
-	b.Logf("All LLV were configured for %f. Ends the test", createdTime)
-	b.Logf("All LLV were created in %f. Ends the test", b.Elapsed().Seconds())
-}
-
-func getAllLLV(ctx context.Context, cl client.Client) (map[string]v1alpha1.LVMLogicalVolume, error) {
-	list := &v1alpha1.LVMLogicalVolumeList{}
-	err := cl.List(ctx, list)
-	if err != nil {
-		return nil, err
-	}
-
-	res := make(map[string]v1alpha1.LVMLogicalVolume, len(list.Items))
-	for _, lv := range list.Items {
-		res[lv.Name] = lv
-	}
-
-	return res, nil
-}
-
-func configureTestThickLLV(name, lvgName string) *v1alpha1.LVMLogicalVolume {
-	return &v1alpha1.LVMLogicalVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       name,
-			Finalizers: []string{internal.SdsNodeConfiguratorFinalizer},
-		},
-		Spec: v1alpha1.LVMLogicalVolumeSpec{
-			ActualLVNameOnTheNode: name,
-			Type:                  Thick,
-			Size:                  size,
-			LvmVolumeGroupName:    lvgName,
-		},
-	}
-}
-
-func configureTestThinLLV(name, lvgName, poolName string) *v1alpha1.LVMLogicalVolume {
-	return &v1alpha1.LVMLogicalVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       name,
-			Finalizers: []string{internal.SdsNodeConfiguratorFinalizer},
-		},
-		Spec: v1alpha1.LVMLogicalVolumeSpec{
-			ActualLVNameOnTheNode: name,
-			Type:                  Thin,
-			Size:                  size,
-			LvmVolumeGroupName:    lvgName,
-			Thin:                  &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: poolName},
-		},
-	}
-}
-
-func init() {
-	config, err := kubutils.KubernetesDefaultConfigCreate()
-	if err != nil {
-		fmt.Println(err.Error())
-		os.Exit(1)
-	}
-
-	scheme := runtime.NewScheme()
-	for _, f := range resourcesSchemeFuncs {
-		err := f(scheme)
-		if err != nil {
-			fmt.Println(err.Error())
-			os.Exit(1)
-		}
-	}
-
-	options := client.Options{
-		Scheme: scheme,
-	}
-
-	e2eCL, err = client.New(config, options)
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
-	}
-}
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
index bf5d2ec5..078ac6fe 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go
@@ -1,20 +1,16 @@
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
+	"time"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -22,7 +18,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
-	"time"
+
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 const (
@@ -78,14 +80,14 @@ func RunLVMLogicalVolumeExtenderWatcherController(
 		return err
 	}
 
-	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
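+	// controller-runtime's typed handler API: TypedFuncs is generic over both the watched object and the request type, and the workqueue entries are typed as reconcile.Request.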
+	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got a Create event for the LVMVolumeGroup %s", e.Object.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] added the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got an Update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}
 			q.Add(request)
@@ -124,7 +126,7 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m
 	log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name))
 	llvs, err := getAllLLVsWithPercentSize(ctx, cl, lvg.Name)
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources"))
+		log.Error(err, "[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources")
 		return true
 	}
 	log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got LLV resources for the LVMVolumeGroup %s", lvg.Name))
@@ -181,6 +183,12 @@ func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, m
 
 		log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should be extended from %s to %s size", llv.Name, llv.Status.ActualSize.String(), llvRequestedSize.String()))
 		err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, &llv, LLVStatusPhaseResizing, "")
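+		// do not try to extend the LV if its phase could not be moved to Resizing; mark the reconcile for retry instead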
+		if err != nil {
+			log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name))
+			shouldRetry = true
+			continue
+		}
+
 		cmd, err := utils.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode)
 		if err != nil {
 			log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd))
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
index dcb55e1d..5323a005 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go
@@ -1,21 +1,16 @@
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/google/go-cmp/cmp"
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -23,6 +18,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 const (
@@ -139,7 +141,7 @@ func RunLVMLogicalVolumeWatcherController(
 				}
 			}
 			if shouldRequeue {
-				log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues were occured while reconciliation the LVMLogicalVolume %s. Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String()))
+				log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] some issues occurred while reconciling the LVMLogicalVolume %s. Requeue the request in %s", request.Name, cfg.LLVRequeueIntervalSec.String()))
 				return reconcile.Result{RequeueAfter: cfg.LLVRequeueIntervalSec}, nil
 			}
 
@@ -154,15 +156,15 @@ func RunLVMLogicalVolumeWatcherController(
 		return nil, err
 	}
 
-	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMLogicalVolume{}, handler.TypedFuncs[*v1alpha1.LVMLogicalVolume]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.RateLimitingInterface) {
+	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMLogicalVolume{}, handler.TypedFuncs[*v1alpha1.LVMLogicalVolume, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got a create event for the LVMLogicalVolume: %s", e.Object.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] added the request of the LVMLogicalVolume %s to Reconciler", e.Object.GetName()))
 		},
 
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMLogicalVolume], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got an update event for the LVMLogicalVolume: %s", e.ObjectNew.GetName()))
 			// TODO: Figure out how to log it in our logger.
 			if cfg.Loglevel == "4" {
@@ -333,7 +335,7 @@ func reconcileLLVUpdateFunc(
-		log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name))
+		log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name))
 		return false, err
 	}
-	log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] sucessfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String()))
+	log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String()))
 
 	if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) {
 		log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String()))
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
index 78a474e9..91ad1c81 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go
@@ -1,20 +1,21 @@
 package controller
 
 import (
-	"agent/pkg/cache"
 	"context"
 	"fmt"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"strings"
 
-	"agent/internal"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/strings/slices"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 func identifyReconcileFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.LVMLogicalVolume) reconcileType {
@@ -37,13 +38,10 @@ func identifyReconcileFunc(sdsCache *cache.Cache, vgName string, llv *v1alpha1.L
 }
 
 func shouldReconcileByDeleteFunc(llv *v1alpha1.LVMLogicalVolume) bool {
-	if llv.DeletionTimestamp == nil {
-		return false
-	}
-
-	return true
+	return llv.DeletionTimestamp != nil
 }
 
+//nolint:unparam
 func checkIfConditionIsTrue(lvg *v1alpha1.LvmVolumeGroup, conType string) bool {
 	// this check prevents infinite resource updating after a retry
 	for _, c := range lvg.Status.Conditions {
@@ -98,9 +96,9 @@ func removeLLVFinalizersIfExist(
 
 	if removed {
 		log.Trace(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name))
-		err := updateLVMLogicalVolume(ctx, metrics, cl, llv)
+		err := updateLVMLogicalVolumeSpec(ctx, metrics, cl, llv)
 		if err != nil {
-			log.Error(err, fmt.Sprintf("[updateLVMLogicalVolume] unable to update the LVMVolumeGroup %s", llv.Name))
+			log.Error(err, fmt.Sprintf("[updateLVMLogicalVolumeSpec] unable to update the LVMLogicalVolume %s", llv.Name))
 			return err
 		}
 	}
@@ -128,7 +126,7 @@ func checkIfLVBelongsToLLV(llv *v1alpha1.LVMLogicalVolume, lv *internal.LVData)
 func updateLLVPhaseToCreatedIfNeeded(ctx context.Context, cl client.Client, llv *v1alpha1.LVMLogicalVolume, actualSize resource.Quantity) (bool, error) {
 	var contiguous *bool
 	if llv.Spec.Thick != nil {
-		if *llv.Spec.Thick.Contiguous == true {
+		if *llv.Spec.Thick.Contiguous {
 			contiguous = llv.Spec.Thick.Contiguous
 		}
 	}
@@ -194,7 +192,7 @@ func addLLVFinalizerIfNotExist(ctx context.Context, cl client.Client, log logger
 	llv.Finalizers = append(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer)
 
 	log.Trace(fmt.Sprintf("[addLLVFinalizerIfNotExist] added finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name))
-	err := updateLVMLogicalVolume(ctx, metrics, cl, llv)
+	err := updateLVMLogicalVolumeSpec(ctx, metrics, cl, llv)
 	if err != nil {
 		return false, err
 	}
@@ -208,11 +206,7 @@ func shouldReconcileByCreateFunc(sdsCache *cache.Cache, vgName string, llv *v1al
 	}
 
 	lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode)
-	if lv != nil {
-		return false
-	}
-
-	return true
+	return lv == nil
 }
 
 func getFreeLVGSpaceForLLV(lvg *v1alpha1.LvmVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity {
@@ -300,12 +294,10 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol
 
 	// if a specified Thick LV name matches the existing Thin one
 	lv := sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode)
-	if lv != nil && len(lv.LVAttr) == 0 {
-		reason.WriteString(fmt.Sprintf("LV %s was found on the node, but can't be validated due to its attributes is empty string. ", lv.LVName))
-	}
-
 	if lv != nil {
-		if !checkIfLVBelongsToLLV(llv, lv) {
+		if len(lv.LVAttr) == 0 {
+			reason.WriteString(fmt.Sprintf("LV %s was found on the node, but can't be validated due to its attributes is empty string. ", lv.LVName))
+		} else if !checkIfLVBelongsToLLV(llv, lv) {
 			reason.WriteString(fmt.Sprintf("Specified LV %s is already created and it is doesnt match the one on the node.", lv.LVName))
 		}
 	}
@@ -317,7 +309,7 @@ func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVol
 	return true, ""
 }
 
-func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error {
+func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, _ monitoring.Metrics, llv *v1alpha1.LVMLogicalVolume, phase, reason string) error {
 	if llv.Status != nil &&
 		llv.Status.Phase == phase &&
 		llv.Status.Reason == reason {
@@ -342,7 +334,7 @@ func updateLVMLogicalVolumePhaseIfNeeded(ctx context.Context, cl client.Client,
 	return nil
 }
 
-func updateLVMLogicalVolume(ctx context.Context, metrics monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error {
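+// updateLVMLogicalVolumeSpec persists metadata and spec changes of the LVMLogicalVolume; status updates are handled by updateLVMLogicalVolumePhaseIfNeeded.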
+func updateLVMLogicalVolumeSpec(ctx context.Context, _ monitoring.Metrics, cl client.Client, llv *v1alpha1.LVMLogicalVolume) error {
 	return cl.Update(ctx, llv)
 }
 
@@ -352,11 +344,7 @@ func shouldReconcileByUpdateFunc(sdsCache *cache.Cache, vgName string, llv *v1al
 	}
 
 	lv := sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode)
-	if lv == nil {
-		return false
-	}
-
-	return true
+	return lv != nil
 }
 
 func isContiguous(llv *v1alpha1.LVMLogicalVolume) bool {
diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
index ba81dd61..196e7b1e 100644
--- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
+++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go
@@ -1,20 +1,21 @@
 package controller
 
 import (
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"bytes"
-	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"context"
 	"testing"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/api/resource"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 func TestLVMLogicaVolumeWatcher(t *testing.T) {
@@ -23,7 +24,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 		log     = logger.Logger{}
 		metrics = monitoring.Metrics{}
 		vgName  = "test-vg"
-		delta   = resource.MustParse(internal.ResizeDelta)
+		ctx     = context.Background()
 	)
 
 	t.Run("subtractQuantity_returns_correct_value", func(t *testing.T) {
@@ -119,12 +120,12 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
 					ActualLVNameOnTheNode: "test-lv",
 					Type:                  Thick,
-					Size:                  resource.MustParse("10M"),
+					Size:                  "10M",
 					LvmVolumeGroupName:    lvgName,
 				},
 			}
 
-			v, r := validateLVMLogicalVolume(&cache.Cache{}, llv, lvg, delta)
+			v, r := validateLVMLogicalVolume(&cache.Cache{}, llv, lvg)
 			if assert.True(t, v) {
 				assert.Equal(t, 0, len(r))
 			}
@@ -137,7 +138,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
 					ActualLVNameOnTheNode: lvName,
 					Type:                  Thick,
-					Size:                  resource.MustParse("0M"),
+					Size:                  "0M",
 					LvmVolumeGroupName:    "some-lvg",
 					Thin:                  &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: "some-lvg"},
 				},
@@ -150,9 +151,9 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				},
 			}, bytes.Buffer{})
 
-			v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{}, delta)
+			v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{})
 			if assert.False(t, v) {
-				assert.Equal(t, "zero size for LV; no LV name specified; thin pool specified for Thick LV; ", r)
+				assert.Equal(t, "Zero size for LV. Thin pool specified for Thick LV. LV test-lv was found on the node, but can't be validated due to its attributes is empty string. ", r)
 			}
 		})
 
@@ -169,7 +170,8 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				Status: v1alpha1.LvmVolumeGroupStatus{
 					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{
 						{
-							Name: tpName,
+							Name:            tpName,
+							AllocationLimit: internal.AllocationLimitDefaultValue,
 						},
 					},
 				},
@@ -179,25 +181,24 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
 					ActualLVNameOnTheNode: "test-lv",
 					Type:                  Thin,
-					Size:                  resource.MustParse("10M"),
+					Size:                  "10M",
 					LvmVolumeGroupName:    lvgName,
 					Thin:                  &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: tpName},
 				},
 			}
 
-			v, r := validateLVMLogicalVolume(cache.New(), llv, lvg, delta)
+			v, r := validateLVMLogicalVolume(cache.New(), llv, lvg)
 			if assert.True(t, v) {
 				assert.Equal(t, 0, len(r))
 			}
 		})
 
 		t.Run("thin_all_bad_returns_false", func(t *testing.T) {
-
 			llv := &v1alpha1.LVMLogicalVolume{
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
 					ActualLVNameOnTheNode: "",
 					Type:                  Thin,
-					Size:                  resource.MustParse("0M"),
+					Size:                  "0M",
 					LvmVolumeGroupName:    "some-lvg",
 				},
 			}
@@ -209,12 +210,11 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				},
 			}, bytes.Buffer{})
 
-			v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{}, delta)
+			v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{})
 			if assert.False(t, v) {
-				assert.Equal(t, "zero size for LV; no LV name specified; no thin pool specified; ", r)
+				assert.Equal(t, "No LV name specified. Zero size for LV. No thin pool specified. ", r)
 			}
 		})
-
 	})
 
 	t.Run("getThinPoolAvailableSpace", func(t *testing.T) {
@@ -224,10 +224,10 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 			ActualSize:      resource.MustParse("10Gi"),
 			UsedSize:        resource.MustParse("1Gi"),
 			AllocatedSize:   resource.MustParse("5Gi"),
-			AllocationLimit: "150%",
+			AllocationLimit: internal.AllocationLimitDefaultValue,
 		}
 
-		free, err := getThinPoolAvailableSpace(tp.ActualSize, tp.AllocatedSize, tpName)
+		free, err := getThinPoolAvailableSpace(tp.ActualSize, tp.AllocatedSize, tp.AllocationLimit)
 		if err != nil {
 			t.Error(err)
 		}
@@ -271,23 +271,20 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 		})
 
 		t.Run("returns_update", func(t *testing.T) {
-			specSize := resource.NewQuantity(40000000000, resource.BinarySI)
-			statusSize := resource.NewQuantity(10000000000, resource.BinarySI)
 			lvName := "test-lv"
 			llv := &v1alpha1.LVMLogicalVolume{
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
 					ActualLVNameOnTheNode: lvName,
-					Size:                  *specSize,
 				},
 				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase:      LLVStatusPhaseCreated,
-					ActualSize: *statusSize,
+					Phase: LLVStatusPhaseCreated,
 				},
 			}
 			sdsCache := cache.New()
 			sdsCache.StoreLVs([]internal.LVData{
 				{
 					LVName: lvName,
+					VGName: vgName,
 				},
 			}, bytes.Buffer{})
 
@@ -308,63 +305,66 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 
 			assert.Equal(t, DeleteReconcile, actual)
 		})
+	})
 
-		t.Run("returns_empty", func(t *testing.T) {
-			specSize := resource.NewQuantity(40000000000, resource.BinarySI)
-			statusSize := resource.NewQuantity(40000000000, resource.BinarySI)
+	t.Run("shouldReconcileByCreateFunc", func(t *testing.T) {
+		t.Run("if_lv_is_not_created_returns_true", func(t *testing.T) {
+			lvName := "test-lv"
 			llv := &v1alpha1.LVMLogicalVolume{
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
-					Size: *specSize,
+					ActualLVNameOnTheNode: lvName,
 				},
 				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase:      LLVStatusPhaseCreated,
-					ActualSize: *statusSize,
+					Phase: LLVStatusPhaseCreated,
 				},
 			}
 
-			actual := identifyReconcileFunc(cache.New(), vgName, llv)
-
-			assert.Equal(t, reconcileType(""), actual)
-		})
-	})
-
-	t.Run("shouldReconcileByCreateFunc", func(t *testing.T) {
-		t.Run("if_status_nill_returns_true", func(t *testing.T) {
-			llv := &v1alpha1.LVMLogicalVolume{}
-
 			should := shouldReconcileByCreateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.True(t, should)
-			}
+			assert.True(t, should)
 		})
 
-		t.Run("if_phase_created_returns_false", func(t *testing.T) {
+		t.Run("if_lv_is_created_returns_false", func(t *testing.T) {
+			lvName := "test-lv"
 			llv := &v1alpha1.LVMLogicalVolume{
+				Spec: v1alpha1.LVMLogicalVolumeSpec{
+					ActualLVNameOnTheNode: lvName,
+				},
 				Status: &v1alpha1.LVMLogicalVolumeStatus{
 					Phase: LLVStatusPhaseCreated,
 				},
 			}
-
-			should := shouldReconcileByCreateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
+			sdsCache := cache.New()
+			sdsCache.StoreLVs([]internal.LVData{
+				{
+					LVName: lvName,
+					VGName: vgName,
+				},
+			}, bytes.Buffer{})
+			should := shouldReconcileByCreateFunc(sdsCache, vgName, llv)
+			assert.False(t, should)
 		})
 
-		t.Run("if_phase_resizing_returns_false", func(t *testing.T) {
+		t.Run("if_deletion_timestamp_is_not_nil_returns_false", func(t *testing.T) {
+			lvName := "test-lv"
 			llv := &v1alpha1.LVMLogicalVolume{
+				ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &v1.Time{}},
+				Spec: v1alpha1.LVMLogicalVolumeSpec{
+					ActualLVNameOnTheNode: lvName,
+				},
 				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase: LLVStatusPhaseResizing,
+					Phase: LLVStatusPhaseCreated,
 				},
 			}
+			sdsCache := cache.New()
+			sdsCache.StoreLVs([]internal.LVData{
+				{
+					LVName: lvName,
+					VGName: vgName,
+				},
+			}, bytes.Buffer{})
 
-			should := shouldReconcileByCreateFunc(cache.New(), vgName, llv)
+			should := shouldReconcileByCreateFunc(sdsCache, vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
+			assert.False(t, should)
 		})
 	})
 
@@ -377,108 +377,42 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 			}
 
 			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
-		})
-
-		t.Run("if_status_nil_returns_false", func(t *testing.T) {
-			llv := &v1alpha1.LVMLogicalVolume{}
-
-			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
-		})
-
-		t.Run("if_phase_pending_returns_false", func(t *testing.T) {
-			llv := &v1alpha1.LVMLogicalVolume{
-				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase: LLVStatusPhasePending,
-				},
-			}
-
-			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
-		})
-
-		t.Run("if_phase_resizing_returns_false", func(t *testing.T) {
-			llv := &v1alpha1.LVMLogicalVolume{
-				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase: LLVStatusPhaseResizing,
-				},
-			}
-
-			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
+			assert.False(t, should)
 		})
 
-		t.Run("if_spec_size_less_than_status_one_returns_false_and_error", func(t *testing.T) {
-			specSize := resource.NewQuantity(100000000, resource.BinarySI)
-			statusSize := resource.NewQuantity(200000000, resource.BinarySI)
+		t.Run("if_lv_exists_returns_true", func(t *testing.T) {
+			lvName := "test-lv"
 			llv := &v1alpha1.LVMLogicalVolume{
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
-					Size: *specSize,
+					ActualLVNameOnTheNode: lvName,
 				},
 				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase:      LLVStatusPhaseCreated,
-					ActualSize: *statusSize,
+					Phase: LLVStatusPhaseCreated,
 				},
 			}
-
-			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.ErrorContains(t, err, fmt.Sprintf("requested size %d is less than actual %d", llv.Spec.Size.Value(), llv.Status.ActualSize.Value())) {
-				assert.False(t, should)
-			}
-		})
-
-		t.Run("if_spec_size_more_than_status_one_but_less_than_delta_returns_false", func(t *testing.T) {
-			specSize := resource.NewQuantity(30000, resource.BinarySI)
-			statusSize := resource.NewQuantity(20000, resource.BinarySI)
-			llv := &v1alpha1.LVMLogicalVolume{
-				Spec: v1alpha1.LVMLogicalVolumeSpec{
-					Size: *specSize,
-				},
-				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase:      LLVStatusPhaseCreated,
-					ActualSize: *statusSize,
+			sdsCache := cache.New()
+			sdsCache.StoreLVs([]internal.LVData{
+				{
+					LVName: lvName,
+					VGName: vgName,
 				},
-			}
-
-			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.False(t, should)
-			}
+			}, bytes.Buffer{})
+			should := shouldReconcileByUpdateFunc(sdsCache, vgName, llv)
+			assert.True(t, should)
 		})
 
-		t.Run("if_spec_size_more_than_status_returns_true", func(t *testing.T) {
-			specSize := resource.NewQuantity(40000000000, resource.BinarySI)
-			statusSize := resource.NewQuantity(10000000000, resource.BinarySI)
+		t.Run("if_lv_does_not_exist_returns_false", func(t *testing.T) {
+			lvName := "test-lv"
 			llv := &v1alpha1.LVMLogicalVolume{
 				Spec: v1alpha1.LVMLogicalVolumeSpec{
-					Size: *specSize,
+					ActualLVNameOnTheNode: lvName,
 				},
 				Status: &v1alpha1.LVMLogicalVolumeStatus{
-					Phase:      LLVStatusPhaseCreated,
-					ActualSize: *statusSize,
+					Phase: LLVStatusPhaseCreated,
 				},
 			}
-
 			should := shouldReconcileByUpdateFunc(cache.New(), vgName, llv)
-
-			if assert.NoError(t, err) {
-				assert.True(t, should)
-			}
+			assert.False(t, should)
 		})
 	})
 
@@ -618,20 +552,27 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 		})
 	})
 
-	t.Run("updateLVMLogicalVolume", func(t *testing.T) {
+	t.Run("updateLVMLogicalVolumeSpec", func(t *testing.T) {
 		const (
 			lvgName = "test-lvg"
-			oldSize = int64(100000000)
-			newSize = int64(200000000)
+		)
+		var (
+			oldSize = resource.NewQuantity(100000000, resource.BinarySI)
+			newSize = resource.NewQuantity(200000000, resource.BinarySI)
 		)
 		llv := &v1alpha1.LVMLogicalVolume{
 			ObjectMeta: v1.ObjectMeta{
 				Name: lvgName,
 			},
+			Spec: v1alpha1.LVMLogicalVolumeSpec{
+				ActualLVNameOnTheNode: "",
+				Type:                  "",
+				Size:                  oldSize.String(),
+			},
 			Status: &v1alpha1.LVMLogicalVolumeStatus{
 				Phase:      LLVStatusPhasePending,
 				Reason:     "",
-				ActualSize: *resource.NewQuantity(oldSize, resource.BinarySI),
+				ActualSize: *oldSize,
 			},
 		}
 
@@ -659,14 +600,85 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 
 		if assert.NotNil(t, oldLLV) {
 			assert.Equal(t, LLVStatusPhasePending, oldLLV.Status.Phase)
-			assert.Equal(t, oldSize, oldLLV.Status.ActualSize.Value())
+			assert.Equal(t, oldSize.Value(), oldLLV.Status.ActualSize.Value())
 		}
 
+		oldLLV.Spec.Size = newSize.String()
 		oldLLV.Status.Phase = LLVStatusPhaseCreated
-		oldLLV.Status.ActualSize = *resource.NewQuantity(newSize, resource.BinarySI)
-		err = updateLVMLogicalVolume(ctx, metrics, cl, oldLLV)
+		oldLLV.Status.ActualSize = *newSize
+
+		err = updateLVMLogicalVolumeSpec(ctx, metrics, cl, oldLLV)
+		if assert.NoError(t, err) {
+			newLLV := &v1alpha1.LVMLogicalVolume{}
+			err = cl.Get(ctx, client.ObjectKey{
+				Name: llv.Name,
+			}, newLLV)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+
+			assert.Equal(t, LLVStatusPhasePending, newLLV.Status.Phase)
+			assert.Equal(t, oldSize.Value(), newLLV.Status.ActualSize.Value())
+		}
+	})
 
+	t.Run("updateLLVPhaseToCreatedIfNeeded", func(t *testing.T) {
+		const (
+			lvgName = "test-lvg"
+		)
+		var (
+			oldSize = resource.NewQuantity(100000000, resource.BinarySI)
+			newSize = resource.NewQuantity(200000000, resource.BinarySI)
+		)
+		llv := &v1alpha1.LVMLogicalVolume{
+			ObjectMeta: v1.ObjectMeta{
+				Name: lvgName,
+			},
+			Spec: v1alpha1.LVMLogicalVolumeSpec{
+				ActualLVNameOnTheNode: "",
+				Type:                  "",
+				Size:                  oldSize.String(),
+			},
+			Status: &v1alpha1.LVMLogicalVolumeStatus{
+				Phase:      LLVStatusPhasePending,
+				Reason:     "",
+				ActualSize: *oldSize,
+			},
+		}
+
+		err := cl.Create(ctx, llv)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+
+		defer func() {
+			err = cl.Delete(ctx, llv)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		oldLLV := &v1alpha1.LVMLogicalVolume{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: llv.Name,
+		}, oldLLV)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+
+		if assert.NotNil(t, oldLLV) {
+			assert.Equal(t, LLVStatusPhasePending, oldLLV.Status.Phase)
+			assert.Equal(t, oldSize.Value(), oldLLV.Status.ActualSize.Value())
+		}
+
+		oldLLV.Spec.Size = newSize.String()
+
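+		// only the status subresource is expected to be updated here; the spec size change above must not be persisted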
+		updated, err := updateLLVPhaseToCreatedIfNeeded(ctx, cl, oldLLV, *newSize)
 		if assert.NoError(t, err) {
+			assert.True(t, updated)
 			newLLV := &v1alpha1.LVMLogicalVolume{}
 			err = cl.Get(ctx, client.ObjectKey{
 				Name: llv.Name,
@@ -676,8 +688,9 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 				return
 			}
 
+			assert.Equal(t, oldSize.String(), newLLV.Spec.Size)
 			assert.Equal(t, LLVStatusPhaseCreated, newLLV.Status.Phase)
-			assert.Equal(t, newSize, newLLV.Status.ActualSize.Value())
+			assert.Equal(t, newSize.Value(), newLLV.Status.ActualSize.Value())
 		}
 	})
 
@@ -730,33 +743,24 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) {
 	t.Run("AreSizesEqualWithinDelta", func(t *testing.T) {
 		t.Run("returns_true", func(t *testing.T) {
 			size := 10000000000
-			delta, err := resource.ParseQuantity(internal.ResizeDelta)
-			if err != nil {
-				t.Error(err)
-			}
 
 			left := resource.NewQuantity(int64(size), resource.BinarySI)
-			right := resource.NewQuantity(int64(size)+delta.Value()-1, resource.BinarySI)
+			right := resource.NewQuantity(int64(size)+internal.ResizeDelta.Value()-1, resource.BinarySI)
 
-			equal := utils.AreSizesEqualWithinDelta(*left, *right, delta)
+			equal := utils.AreSizesEqualWithinDelta(*left, *right, internal.ResizeDelta)
 
 			assert.True(t, equal)
 		})
 
 		t.Run("returns_false", func(t *testing.T) {
 			size := 10000000000
-			delta, err := resource.ParseQuantity(internal.ResizeDelta)
-			if err != nil {
-				t.Error(err)
-			}
 
 			left := resource.NewQuantity(int64(size), resource.BinarySI)
-			right := resource.NewQuantity(int64(size)+delta.Value(), resource.BinarySI)
+			right := resource.NewQuantity(int64(size)+internal.ResizeDelta.Value(), resource.BinarySI)
 
-			equal := utils.AreSizesEqualWithinDelta(*left, *right, delta)
+			equal := utils.AreSizesEqualWithinDelta(*left, *right, internal.ResizeDelta)
 
 			assert.False(t, equal)
 		})
-
 	})
 }
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
index 502176b4..f4f3d25d 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover.go
@@ -17,20 +17,14 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"errors"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -38,6 +32,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 const (
@@ -54,12 +55,12 @@ func RunLVMVolumeGroupDiscoverController(
 	cl := mgr.GetClient()
 
 	c, err := controller.New(LVMVolumeGroupDiscoverCtrlName, mgr, controller.Options{
-		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+		Reconciler: reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
 			log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler starts LVMVolumeGroup resources reconciliation")
 
 			shouldRequeue := LVMVolumeGroupDiscoverReconcile(ctx, cl, metrics, log, cfg, sdsCache)
 			if shouldRequeue {
-				log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occured while run the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String()))
+				log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occurred while running the Reconciler func, retry in %s", cfg.VolumeGroupScanIntervalSec.String()))
 				return reconcile.Result{
 					RequeueAfter: cfg.VolumeGroupScanIntervalSec,
 				}, nil
@@ -161,7 +162,6 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met
 			}
 
 			log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LvmVolumeGroup, name: "%s"`, lvg.Name))
-
 		} else {
 			log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", lvg.Name))
 			lvm, err := CreateLVMVolumeGroupByCandidate(ctx, log, metrics, cl, candidate)
@@ -208,7 +208,6 @@ func filterLVGsByNode(
 	blockDevices map[string]v1alpha1.BlockDevice,
 	currentNode string,
 ) map[string]v1alpha1.LvmVolumeGroup {
-
 	filtered := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgs))
 	blockDevicesNodes := make(map[string]string, len(blockDevices))
 
@@ -278,14 +277,14 @@ func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LvmVolumeGroup, candi
 		log.Trace(fmt.Sprintf("Resource ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String()))
 	}
 	log.Trace(fmt.Sprintf(`VGSize, candidate: %s, lvg: %s`, candidate.VGSize.String(), lvg.Status.VGSize.String()))
-	log.Trace(fmt.Sprintf(`VGUuid, candidate: %s, lvg: %s`, candidate.VGUuid, lvg.Status.VGUuid))
+	log.Trace(fmt.Sprintf(`VGUUID, candidate: %s, lvg: %s`, candidate.VGUUID, lvg.Status.VGUuid))
 	log.Trace(fmt.Sprintf(`Nodes, candidate: %+v, lvg: %+v`, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes))
 
 	return candidate.AllocatedSize.Value() != lvg.Status.AllocatedSize.Value() ||
 		hasStatusPoolDiff(convertedStatusPools, lvg.Status.ThinPools) ||
 		candidate.VGSize.Value() != lvg.Status.VGSize.Value() ||
 		candidate.VGFree.Value() != lvg.Status.VGFree.Value() ||
-		candidate.VGUuid != lvg.Status.VGUuid ||
+		candidate.VGUUID != lvg.Status.VGUuid ||
 		hasStatusNodesDiff(log, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes)
 }
 
@@ -374,15 +373,12 @@ func ReconcileUnhealthyLVMVolumeGroups(
 					if candidateTp, exist := candidateTPs[thinPool.Name]; !exist {
 						log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, thinPool.Name))
 						messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. ", thinPool.Name))
-					} else {
+					} else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) &&
+						candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() {
 						// that means thin-pool is not 100%VG space
 						// use candidate VGSize as lvg.Status.VGSize might not be updated yet
-						if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) {
-							if candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() {
-								log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
-								messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
-							}
-						}
+						log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
+						messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String()))
 					}
 				}
 			}
@@ -450,7 +446,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 
 	lvs, lvErrs := sdsCache.GetLVs()
 	var thinPools []internal.LVData
-	if lvs != nil && len(lvs) > 0 {
+	if len(lvs) > 0 {
 		// Filter LV to get only thin pools as we do not support thick for now.
 		thinPools = getThinPools(lvs)
 	}
@@ -471,9 +467,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 	sortedLVByThinPool := sortLVByThinPool(lvs)
 
 	for _, vg := range vgWithTag {
-		allocateSize := vg.VGSize
-		allocateSize.Sub(vg.VGFree)
-
+		allocateSize := getVGAllocatedSize(vg)
 		health, message := checkVGHealth(sortedBDs, vgIssues, pvIssues, lvIssues, vg)
 
 		candidate := internal.LVMVolumeGroupCandidate{
@@ -489,7 +483,7 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 			StatusThinPools:       getStatusThinPools(log, sortedThinPools, sortedLVByThinPool, vg, lvIssues),
 			VGSize:                *resource.NewQuantity(vg.VGSize.Value(), resource.BinarySI),
 			VGFree:                *resource.NewQuantity(vg.VGFree.Value(), resource.BinarySI),
-			VGUuid:                vg.VGUuid,
+			VGUUID:                vg.VGUUID,
 			Nodes:                 configureCandidateNodeDevices(sortedPVs, sortedBDs, vg, currentNode),
 		}
 
@@ -499,22 +493,28 @@ func GetLVMVolumeGroupCandidates(log logger.Logger, sdsCache *cache.Cache, bds m
 	return candidates, nil
 }
 
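+// getVGAllocatedSize computes a VG's allocated size as VGSize minus VGFree.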
+func getVGAllocatedSize(vg internal.VGData) resource.Quantity {
+	allocatedSize := vg.VGSize
+	allocatedSize.Sub(vg.VGFree)
+	return allocatedSize
+}
+
 func checkVGHealth(blockDevices map[string][]v1alpha1.BlockDevice, vgIssues map[string]string, pvIssues map[string][]string, lvIssues map[string]map[string]string, vg internal.VGData) (health, message string) {
 	issues := make([]string, 0, len(vgIssues)+len(pvIssues)+len(lvIssues)+1)
 
-	if bds, exist := blockDevices[vg.VGName+vg.VGUuid]; !exist || len(bds) == 0 {
-		issues = append(issues, fmt.Sprintf("[ERROR] Unable to get BlockDevice resources for VG, name: %s ; uuid: %s", vg.VGName, vg.VGUuid))
+	if bds, exist := blockDevices[vg.VGName+vg.VGUUID]; !exist || len(bds) == 0 {
+		issues = append(issues, fmt.Sprintf("[ERROR] Unable to get BlockDevice resources for VG, name: %s ; uuid: %s", vg.VGName, vg.VGUUID))
 	}
 
-	if vgIssue, exist := vgIssues[vg.VGName+vg.VGUuid]; exist {
+	if vgIssue, exist := vgIssues[vg.VGName+vg.VGUUID]; exist {
 		issues = append(issues, vgIssue)
 	}
 
-	if pvIssue, exist := pvIssues[vg.VGName+vg.VGUuid]; exist {
+	if pvIssue, exist := pvIssues[vg.VGName+vg.VGUUID]; exist {
 		issues = append(issues, strings.Join(pvIssue, ""))
 	}
 
-	if lvIssue, exist := lvIssues[vg.VGName+vg.VGUuid]; exist {
+	if lvIssue, exist := lvIssues[vg.VGName+vg.VGUUID]; exist {
 		for lvName, issue := range lvIssue {
 			issues = append(issues, fmt.Sprintf("%s: %s", lvName, issue))
 		}
@@ -553,10 +553,8 @@ func sortThinPoolIssuesByVG(log logger.Logger, lvs []internal.LVData) map[string
 
 		if err != nil {
 			log.Error(err, fmt.Sprintf(`[sortThinPoolIssuesByVG] unable to run lvs command for lv, name: "%s"`, lv.LVName))
-			//lvIssuesByVG[lv.VGName+lv.VGUuid] = append(lvIssuesByVG[lv.VGName+lv.VGUuid], err.Error())
 			lvIssuesByVG[lv.VGName+lv.VGUuid] = make(map[string]string, len(lvs))
 			lvIssuesByVG[lv.VGName+lv.VGUuid][lv.LVName] = err.Error()
-
 		}
 
 		if stdErr.Len() != 0 {
@@ -599,12 +597,12 @@ func sortVGIssuesByVG(log logger.Logger, vgs []internal.VGData) map[string]strin
 		log.Debug(fmt.Sprintf("[sortVGIssuesByVG] runs cmd: %s", cmd))
 		if err != nil {
 			log.Error(err, fmt.Sprintf(`[sortVGIssuesByVG] unable to run vgs command for vg, name: "%s"`, vg.VGName))
-			vgIssues[vg.VGName+vg.VGUuid] = err.Error()
+			vgIssues[vg.VGName+vg.VGUUID] = err.Error()
 		}
 
 		if stdErr.Len() != 0 {
 			log.Error(fmt.Errorf(stdErr.String()), fmt.Sprintf(`[sortVGIssuesByVG] vgs command for vg "%s" has stderr: `, vg.VGName))
-			vgIssues[vg.VGName+vg.VGUuid] = stdErr.String()
+			vgIssues[vg.VGName+vg.VGUUID] = stdErr.String()
 			stdErr.Reset()
 		}
 	}
@@ -627,7 +625,7 @@ func sortLVByThinPool(lvs []internal.LVData) map[string][]internal.LVData {
 func sortThinPoolsByVG(lvs []internal.LVData, vgs []internal.VGData) map[string][]internal.LVData {
 	result := make(map[string][]internal.LVData, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]internal.LVData, 0, len(lvs))
+		result[vg.VGName+vg.VGUUID] = make([]internal.LVData, 0, len(lvs))
 	}
 
 	for _, lv := range lvs {
@@ -642,7 +640,7 @@ func sortThinPoolsByVG(lvs []internal.LVData, vgs []internal.VGData) map[string]
 func sortPVsByVG(pvs []internal.PVData, vgs []internal.VGData) map[string][]internal.PVData {
 	result := make(map[string][]internal.PVData, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]internal.PVData, 0, len(pvs))
+		result[vg.VGName+vg.VGUUID] = make([]internal.PVData, 0, len(pvs))
 	}
 
 	for _, pv := range pvs {
@@ -657,7 +655,7 @@ func sortPVsByVG(pvs []internal.PVData, vgs []internal.VGData) map[string][]inte
 func sortBlockDevicesByVG(bds map[string]v1alpha1.BlockDevice, vgs []internal.VGData) map[string][]v1alpha1.BlockDevice {
 	result := make(map[string][]v1alpha1.BlockDevice, len(vgs))
 	for _, vg := range vgs {
-		result[vg.VGName+vg.VGUuid] = make([]v1alpha1.BlockDevice, 0, len(bds))
+		result[vg.VGName+vg.VGUUID] = make([]v1alpha1.BlockDevice, 0, len(bds))
 	}
 
 	for _, bd := range bds {
@@ -670,8 +668,8 @@ func sortBlockDevicesByVG(bds map[string]v1alpha1.BlockDevice, vgs []internal.VG
 }
 
 func configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[string][]v1alpha1.BlockDevice, vg internal.VGData, currentNode string) map[string][]internal.LVMVGDevice {
-	filteredPV := pvs[vg.VGName+vg.VGUuid]
-	filteredBds := bds[vg.VGName+vg.VGUuid]
+	filteredPV := pvs[vg.VGName+vg.VGUUID]
+	filteredBds := bds[vg.VGName+vg.VGUUID]
 	bdPathStatus := make(map[string]v1alpha1.BlockDevice, len(bds))
 	result := make(map[string][]internal.LVMVGDevice, len(filteredPV))
 
@@ -683,7 +681,7 @@ func configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[str
 		device := internal.LVMVGDevice{
 			Path:   pv.PVName,
 			PVSize: *resource.NewQuantity(pv.PVSize.Value(), resource.BinarySI),
-			PVUuid: pv.PVUuid,
+			PVUUID: pv.PVUuid,
 		}
 
 		if bd, exist := bdPathStatus[pv.PVName]; exist {
@@ -706,7 +704,7 @@ func getVgType(vg internal.VGData) string {
 }
 
 func getSpecThinPools(thinPools map[string][]internal.LVData, vg internal.VGData) map[string]resource.Quantity {
-	lvs := thinPools[vg.VGName+vg.VGUuid]
+	lvs := thinPools[vg.VGName+vg.VGUUID]
 	tps := make(map[string]resource.Quantity, len(lvs))
 
 	for _, lv := range lvs {
@@ -729,7 +727,7 @@ func getThinPools(lvs []internal.LVData) []internal.LVData {
 }
 
 func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]internal.LVData, vg internal.VGData, lvIssues map[string]map[string]string) []internal.LVMVGStatusThinPool {
-	tps := thinPools[vg.VGName+vg.VGUuid]
+	tps := thinPools[vg.VGName+vg.VGUUID]
 	result := make([]internal.LVMVGStatusThinPool, 0, len(tps))
 
 	for _, thinPool := range tps {
@@ -749,7 +747,7 @@ func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]int
 			Message:       "",
 		}
 
-		if lverrs, exist := lvIssues[vg.VGName+vg.VGUuid][thinPool.LVName]; exist {
+		if lverrs, exist := lvIssues[vg.VGName+vg.VGUUID][thinPool.LVName]; exist {
 			tp.Ready = false
 			tp.Message = lverrs
 		}
@@ -795,7 +793,7 @@ func isThinPool(lv internal.LVData) bool {
 }
 
 func getBlockDevicesNames(bds map[string][]v1alpha1.BlockDevice, vg internal.VGData) []string {
-	sorted := bds[vg.VGName+vg.VGUuid]
+	sorted := bds[vg.VGName+vg.VGUUID]
 	names := make([]string, 0, len(sorted))
 
 	for _, bd := range sorted {
@@ -834,7 +832,7 @@ func CreateLVMVolumeGroupByCandidate(
 			Nodes:         convertLVMVGNodes(candidate.Nodes),
 			ThinPools:     thinPools,
 			VGSize:        candidate.VGSize,
-			VGUuid:        candidate.VGUuid,
+			VGUuid:        candidate.VGUUID,
 			VGFree:        candidate.VGFree,
 		},
 	}
@@ -851,10 +849,10 @@ func CreateLVMVolumeGroupByCandidate(
 
 	start := time.Now()
 	err = kc.Create(ctx, lvmVolumeGroup)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "create").Inc()
-		return nil, fmt.Errorf("unable to сreate LVMVolumeGroup, err: %w", err)
+		return nil, fmt.Errorf("unable to create LVMVolumeGroup, err: %w", err)
 	}
 
@@ -910,14 +908,14 @@ func UpdateLVMVolumeGroupByCandidate(
 	lvg.Status.ThinPools = thinPools
 	lvg.Status.VGSize = candidate.VGSize
 	lvg.Status.VGFree = candidate.VGFree
-	lvg.Status.VGUuid = candidate.VGUuid
+	lvg.Status.VGUuid = candidate.VGUUID
 
 	start := time.Now()
 	err = cl.Status().Update(ctx, lvg)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "update").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "update").Inc()
 		return fmt.Errorf(`[UpdateLVMVolumeGroupByCandidate] unable to update LVMVolumeGroup, name: "%s", err: %w`, lvg.Name, err)
 	}
 
@@ -950,7 +948,7 @@ func convertLVMVGDevices(devices []internal.LVMVGDevice) []v1alpha1.LvmVolumeGro
 			BlockDevice: dev.BlockDevice,
 			DevSize:     dev.DevSize,
 			PVSize:      dev.PVSize,
-			PVUuid:      dev.PVUuid,
+			PVUuid:      dev.PVUUID,
 			Path:        dev.Path,
 		})
 	}
@@ -1020,7 +1018,7 @@ func getThinPoolSpaceWithAllocationLimit(actualSize resource.Quantity, allocatio
 	}
 
 	factor := float64(percent)
-	factor = factor / 100
+	factor /= 100
 
 	return *resource.NewQuantity(int64(float64(actualSize.Value())*factor), resource.BinarySI), nil
 }
@@ -1034,10 +1032,10 @@ func GetAPILVMVolumeGroups(ctx context.Context, kc kclient.Client, metrics monit
 
 	start := time.Now()
 	err := kc.List(ctx, lvgList)
-	metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc()
 		return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LvmVolumeGroups, err: %w", err)
 	}
 
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
index 527b93d9..32d87d4e 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go
@@ -17,19 +17,20 @@ limitations under the License.
 package controller
 
 import (
-	"agent/internal"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
 	"context"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"testing"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	"agent/internal"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
 )
 
 func TestLVMVolumeGroupDiscover(t *testing.T) {
@@ -73,15 +74,15 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("checkVGHealth_returns_Operational", func(t *testing.T) {
 		const (
 			vgName = "testVg"
-			vgUuid = "testUuid"
+			vgUUID = "testUuid"
 		)
 		bds := map[string][]v1alpha1.BlockDevice{
-			vgName + vgUuid: {{}},
+			vgName + vgUUID: {{}},
 		}
 		vgIssues := map[string]string{}
 		pvIssues := map[string][]string{}
 		lvIssues := map[string]map[string]string{}
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		health, _ := checkVGHealth(bds, vgIssues, pvIssues, lvIssues, vg)
 		assert.Equal(t, health, internal.LVMVGHealthOperational)
@@ -90,15 +91,15 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("checkVGHealth_returns_NonOperational", func(t *testing.T) {
 		const (
 			vgName = "testVg"
-			vgUuid = "testUuid"
+			vgUUID = "testUuid"
 		)
 		bds := map[string][]v1alpha1.BlockDevice{
-			vgName + vgUuid: {},
+			vgName + vgUUID: {},
 		}
 		vgIssues := map[string]string{}
 		pvIssues := map[string][]string{}
 		lvIssues := map[string]map[string]string{}
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		health, _ := checkVGHealth(bds, vgIssues, pvIssues, lvIssues, vg)
 		assert.Equal(t, health, internal.LVMVGHealthNonOperational)
@@ -125,37 +126,37 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortPVsByVG_returns_sorted_pvs", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		pvs := []internal.PVData{
 			{
 				PVName: "first",
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUuid: firstVgUUID,
 			},
 			{
 				PVName: "second",
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUuid: secondVgUUID,
 			},
 		}
 
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 
 		expected := map[string][]internal.PVData{
-			firstVgName + firstVgUuid:   {pvs[0]},
-			secondVgName + secondVgUuid: {pvs[1]},
+			firstVgName + firstVgUUID:   {pvs[0]},
+			secondVgName + secondVgUUID: {pvs[1]},
 		}
 
 		actual := sortPVsByVG(pvs, vgs)
@@ -165,18 +166,18 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortBlockDevicesByVG_returns_sorted_bds", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 
@@ -185,21 +186,21 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{Name: "first"},
 				Status: v1alpha1.BlockDeviceStatus{
 					ActualVGNameOnTheNode: firstVgName,
-					VGUuid:                firstVgUuid,
+					VGUuid:                firstVgUUID,
 				},
 			},
 			"second": {
 				ObjectMeta: metav1.ObjectMeta{Name: "second"},
 				Status: v1alpha1.BlockDeviceStatus{
 					ActualVGNameOnTheNode: secondVgName,
-					VGUuid:                secondVgUuid,
+					VGUuid:                secondVgUUID,
 				},
 			},
 		}
 
 		expected := map[string][]v1alpha1.BlockDevice{
-			firstVgName + firstVgUuid:   {bds["first"]},
-			secondVgName + secondVgUuid: {bds["second"]},
+			firstVgName + firstVgUUID:   {bds["first"]},
+			secondVgName + secondVgUUID: {bds["second"]},
 		}
 
 		actual := sortBlockDevicesByVG(bds, vgs)
@@ -209,35 +210,35 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("sortLVsByVG_returns_sorted_LVs", func(t *testing.T) {
 		const (
 			firstVgName  = "firstVg"
-			firstVgUuid  = "firstUUID"
+			firstVgUUID  = "firstUUID"
 			secondVgName = "secondVg"
-			secondVgUuid = "secondUUID"
+			secondVgUUID = "secondUUID"
 		)
 		vgs := []internal.VGData{
 			{
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUUID: firstVgUUID,
 			},
 			{
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUUID: secondVgUUID,
 			},
 		}
 		lvs := []internal.LVData{
 			{
 				LVName: "first",
 				VGName: firstVgName,
-				VGUuid: firstVgUuid,
+				VGUuid: firstVgUUID,
 			},
 			{
 				LVName: "second",
 				VGName: secondVgName,
-				VGUuid: secondVgUuid,
+				VGUuid: secondVgUUID,
 			},
 		}
 		expected := map[string][]internal.LVData{
-			firstVgName + firstVgUuid:   {lvs[0]},
-			secondVgName + secondVgUuid: {lvs[1]},
+			firstVgName + firstVgUUID:   {lvs[0]},
+			secondVgName + secondVgUUID: {lvs[1]},
 		}
 
 		actual := sortThinPoolsByVG(lvs, vgs)
@@ -247,16 +248,19 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("configureCandidateNodesDevices_returns_candidates_nodes", func(t *testing.T) {
 		const (
 			vgName   = "test_vg"
-			vgUuid   = "vg_uuid"
+			vgUUID   = "vg_uuid"
 			nodeName = "test_node"
 		)
 
 		vg := internal.VGData{
 			VGName: vgName,
-			VGUuid: vgUuid,
+			VGUUID: vgUUID,
 		}
 
 		size10G, err := resource.ParseQuantity("10G")
+		if err != nil {
+			t.Error(err)
+		}
 		size1G, err := resource.ParseQuantity("1G")
 		if err != nil {
 			t.Error(err)
@@ -268,13 +272,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				PVSize: size10G,
 				PVUuid: "pv_uuid1",
 				VGName: vgName,
-				VGUuid: vgUuid,
+				VGUuid: vgUUID,
 			},
 			{
 				PVName: "test_pv2",
 				PVSize: size1G,
 				PVUuid: "pv_uuid2",
-				VGUuid: vgUuid,
+				VGUuid: vgUUID,
 				VGName: vgName,
 			},
 		}
@@ -285,7 +289,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Status: v1alpha1.BlockDeviceStatus{
 					Path:                  "test_pv1",
 					Size:                  resource.MustParse("10G"),
-					VGUuid:                vgUuid,
+					VGUuid:                vgUUID,
 					ActualVGNameOnTheNode: vgName,
 				},
 			},
@@ -294,7 +298,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Status: v1alpha1.BlockDeviceStatus{
 					Path:                  "test_pv2",
 					Size:                  resource.MustParse("1G"),
-					VGUuid:                vgUuid,
+					VGUuid:                vgUUID,
 					ActualVGNameOnTheNode: vgName,
 				},
 			},
@@ -306,20 +310,20 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 					Path:        "test_pv1",
 					PVSize:      *resource.NewQuantity(size10G.Value(), resource.BinarySI),
 					DevSize:     *resource.NewQuantity(size10G.Value(), resource.BinarySI),
-					PVUuid:      "pv_uuid1",
+					PVUUID:      "pv_uuid1",
 					BlockDevice: "block_device1",
 				},
 				{
 					Path:        "test_pv2",
 					PVSize:      *resource.NewQuantity(size1G.Value(), resource.BinarySI),
 					DevSize:     *resource.NewQuantity(size1G.Value(), resource.BinarySI),
-					PVUuid:      "pv_uuid2",
+					PVUUID:      "pv_uuid2",
 					BlockDevice: "block_device2",
 				},
 			},
 		}
-		mp := map[string][]v1alpha1.BlockDevice{vgName + vgUuid: bds}
-		ar := map[string][]internal.PVData{vgName + vgUuid: pvs}
+		mp := map[string][]v1alpha1.BlockDevice{vgName + vgUUID: bds}
+		ar := map[string][]internal.PVData{vgName + vgUUID: pvs}
 
 		actual := configureCandidateNodeDevices(ar, mp, vg, nodeName)
 
@@ -347,7 +351,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 		vgs := []internal.VGData{
 			{
 				VGName: "firstVG",
-				VGUuid: "firstUUID",
+				VGUUID: "firstUUID",
 			},
 		}
 		actual := sortBlockDevicesByVG(bds, vgs)
@@ -378,19 +382,22 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("getSpecThinPools_returns_LVName_LVSize_map", func(t *testing.T) {
 		const (
 			vgName = "test_vg"
-			vgUuid = "test_uuid"
+			vgUUID = "test_uuid"
 		)
 
-		vg := internal.VGData{VGName: vgName, VGUuid: vgUuid}
+		vg := internal.VGData{VGName: vgName, VGUUID: vgUUID}
 
 		firstSize, err := resource.ParseQuantity("1G")
+		if err != nil {
+			t.Error(err)
+		}
 		secondSize, err := resource.ParseQuantity("2G")
 		if err != nil {
 			t.Error(err)
 		}
 
 		thinPools := map[string][]internal.LVData{
-			vgName + vgUuid: {
+			vgName + vgUUID: {
 				{
 					LVName: "first",
 					LVSize: firstSize,
@@ -419,10 +426,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Type                  = "local"
 			Health                = internal.LVMVGHealthOperational
 			Message               = "No problems detected"
-			VGUuid                = "test_uuid"
+			VGUUID                = "test_uuid"
 		)
 
 		size10G, err := resource.ParseQuantity("10G")
+		if err != nil {
+			t.Error(err)
+		}
 		size1G, err := resource.ParseQuantity("1G")
 		if err != nil {
 			t.Error(err)
@@ -448,7 +458,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 						Path:        "test/path",
 						PVSize:      size1G,
 						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid",
+						PVUUID:      "test-pv-uuid",
 						BlockDevice: "test-device",
 					},
 				},
@@ -466,7 +476,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			Message:               Message,
 			StatusThinPools:       statusThinPools,
 			VGSize:                size10G,
-			VGUuid:                VGUuid,
+			VGUUID:                VGUUID,
 			Nodes:                 nodes,
 		}
 
@@ -491,7 +501,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 				Nodes:         convertLVMVGNodes(nodes),
 				ThinPools:     thinPools,
 				VGSize:        size10G,
-				VGUuid:        VGUuid,
+				VGUuid:        VGUUID,
 			},
 		}
 
@@ -503,302 +513,98 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 
 	t.Run("GetLVMVolumeGroup", func(t *testing.T) {
 		const (
-			LVMVGName             = "test_lvm"
-			ActualVGNameOnTheNode = "test-vg"
-			Type                  = "local"
-			Health                = internal.LVMVGHealthOperational
-			Message               = "No problems detected"
-			VGUuid                = "test_uuid"
-		)
-
-		size10G, err := resource.ParseQuantity("10G")
-		size1G, err := resource.ParseQuantity("1G")
-		if err != nil {
-			t.Error(err)
-		}
-
-		var (
-			cl                = NewFakeClient()
-			testMetrics       = monitoring.GetMetrics("")
-			testLogger        = logger.Logger{}
-			ctx               = context.Background()
-			blockDevicesNames = []string{"first", "second"}
-			specThinPools     = map[string]resource.Quantity{"first": size10G}
-			statusThinPools   = []internal.LVMVGStatusThinPool{
-				{
-					Name:       "first_status_pool",
-					ActualSize: size10G,
-					UsedSize:   resource.MustParse("4G"),
-				},
-			}
-			nodes = map[string][]internal.LVMVGDevice{
-				"test-node-1": {
-					{
-						Path:        "test/path",
-						PVSize:      size1G,
-						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid",
-						BlockDevice: "test-device",
-					},
-				},
-			}
+			LVMVGName = "test_lvm"
 		)
 
-		candidate := internal.LVMVolumeGroupCandidate{
-			LVMVGName:             LVMVGName,
-			ActualVGNameOnTheNode: ActualVGNameOnTheNode,
-			BlockDevicesNames:     blockDevicesNames,
-			SpecThinPools:         specThinPools,
-			Type:                  Type,
-			AllocatedSize:         size10G,
-			Health:                Health,
-			Message:               Message,
-			StatusThinPools:       statusThinPools,
-			VGSize:                size10G,
-			VGUuid:                VGUuid,
-			Nodes:                 nodes,
+		lvg := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: LVMVGName,
+			},
 		}
-
-		thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, statusThinPools)
+		err := cl.Create(ctx, lvg)
 		if err != nil {
 			t.Error(err)
 		}
 
-		expected := map[string]v1alpha1.LvmVolumeGroup{
-			LVMVGName: {
-				ObjectMeta: metav1.ObjectMeta{
-					Name:            LVMVGName,
-					ResourceVersion: "1",
-					OwnerReferences: nil,
-				},
-				Spec: v1alpha1.LvmVolumeGroupSpec{
-					ActualVGNameOnTheNode: ActualVGNameOnTheNode,
-					BlockDeviceNames:      blockDevicesNames,
-					ThinPools:             convertSpecThinPools(specThinPools),
-					Type:                  Type,
-				},
-				Status: v1alpha1.LvmVolumeGroupStatus{
-					AllocatedSize: size10G,
-					Nodes:         convertLVMVGNodes(nodes),
-					ThinPools:     thinPools,
-					VGSize:        size10G,
-					VGUuid:        VGUuid,
-				},
-			},
-		}
-
-		created, err := CreateLVMVolumeGroupByCandidate(ctx, testLogger, testMetrics, cl, candidate)
-		if assert.NoError(t, err) && assert.NotNil(t, created) {
-			actual, err := GetAPILVMVolumeGroups(ctx, cl, testMetrics)
-			if assert.NoError(t, err) && assert.Equal(t, 1, len(actual)) {
-				assert.Equal(t, expected, actual)
+		defer func() {
+			err = cl.Delete(ctx, lvg)
+			if err != nil {
+				t.Error(err)
 			}
+		}()
+
+		actual, err := GetAPILVMVolumeGroups(ctx, cl, monitoring.GetMetrics("test-node"))
+		if assert.NoError(t, err) {
+			_, ok := actual[LVMVGName]
+			assert.True(t, ok)
 		}
 	})
 
 	t.Run("DeleteLVMVolumeGroup", func(t *testing.T) {
 		const (
-			LVMVGName             = "test_lvm"
-			ActualVGNameOnTheNode = "test-vg"
-			Type                  = "local"
-			Health                = internal.LVMVGHealthOperational
-			Message               = "No problems detected"
-			VGUuid                = "test_uuid"
+			LVMVGName = "test_lvm-2"
 		)
 
-		size10G, err := resource.ParseQuantity("10G")
-		size1G, err := resource.ParseQuantity("1G")
+		metrics := monitoring.GetMetrics("test-node")
+
+		lvg := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: LVMVGName,
+			},
+		}
+		err := cl.Create(ctx, lvg)
 		if err != nil {
 			t.Error(err)
 		}
 
-		var (
-			cl                = NewFakeClient()
-			ctx               = context.Background()
-			testMetrics       = monitoring.GetMetrics("")
-			testLogger        = logger.Logger{}
-			blockDevicesNames = []string{"first", "second"}
-			specThinPools     = map[string]resource.Quantity{"first": size10G}
-			statusThinPools   = []internal.LVMVGStatusThinPool{
-				{
-					Name:       "first_status_pool",
-					ActualSize: size10G,
-					UsedSize:   resource.MustParse("4G"),
-				},
-			}
-			nodes = map[string][]internal.LVMVGDevice{
-				"test-node-1": {
-					{
-						Path:        "test/path",
-						PVSize:      size1G,
-						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid",
-						BlockDevice: "test-device",
-					},
-				},
-			}
-		)
-
-		candidate := internal.LVMVolumeGroupCandidate{
-			LVMVGName:             LVMVGName,
-			ActualVGNameOnTheNode: ActualVGNameOnTheNode,
-			BlockDevicesNames:     blockDevicesNames,
-			SpecThinPools:         specThinPools,
-			Type:                  Type,
-			AllocatedSize:         size10G,
-			Health:                Health,
-			Message:               Message,
-			StatusThinPools:       statusThinPools,
-			VGSize:                size10G,
-			VGUuid:                VGUuid,
-			Nodes:                 nodes,
+		actual, err := GetAPILVMVolumeGroups(ctx, cl, metrics)
+		if assert.NoError(t, err) {
+			_, ok := actual[LVMVGName]
+			assert.True(t, ok)
 		}
 
-		created, err := CreateLVMVolumeGroupByCandidate(ctx, testLogger, testMetrics, cl, candidate)
-		if assert.NoError(t, err) && assert.NotNil(t, created) {
-			actual, err := GetAPILVMVolumeGroups(ctx, cl, testMetrics)
-			if assert.NoError(t, err) && assert.Equal(t, 1, len(actual)) {
-				err := DeleteLVMVolumeGroup(ctx, cl, testMetrics, &v1alpha1.LvmVolumeGroup{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: LVMVGName,
-					},
-				})
-				if assert.NoError(t, err) {
-					actual, err := GetAPILVMVolumeGroups(ctx, cl, testMetrics)
-					if assert.NoError(t, err) {
-						assert.Equal(t, 0, len(actual))
-					}
-				}
-			}
+		err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, "test-node")
+		if assert.NoError(t, err) {
+			actual, err = GetAPILVMVolumeGroups(ctx, cl, metrics)
+			assert.NoError(t, err)
+			assert.Equal(t, 0, len(actual))
 		}
 	})
 
 	t.Run("UpdateLVMVolumeGroup", func(t *testing.T) {
 		const (
-			LVMVGName             = "test_lvm"
-			ActualVGNameOnTheNode = "test-vg"
-			Type                  = "local"
-			Health                = internal.LVMVGHealthOperational
-			Message               = "No problems detected"
-			VGUuid                = "test_uuid"
+			LVMVGName = "test_lvm"
 		)
 
-		size10G, err := resource.ParseQuantity("10G")
-		size1G, err := resource.ParseQuantity("1G")
-		if err != nil {
-			t.Error(err)
-		}
-
-		var (
-			cl                = NewFakeClient()
-			ctx               = context.Background()
-			testMetrics       = monitoring.GetMetrics("")
-			testLogger        = logger.Logger{}
-			BlockDevicesNames = []string{"first", "second"}
-			SpecThinPools     = map[string]resource.Quantity{"first": size1G}
-			StatusThinPools   = []internal.LVMVGStatusThinPool{
-				{
-					Name:       "first_status_pool",
-					ActualSize: size10G,
-					UsedSize:   resource.MustParse("4G"),
-				},
-			}
-			oldNodes = map[string][]internal.LVMVGDevice{
-				"test-node-1": {
-					{
-						Path:        "test/path",
-						PVSize:      size1G,
-						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid",
-						BlockDevice: "test-device",
-					},
-				},
-			}
-			newNodes = map[string][]internal.LVMVGDevice{
-				"test-node-1": {
-					{
-						Path:        "test/path",
-						PVSize:      size1G,
-						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid",
-						BlockDevice: "test-device",
-					},
-					{
-						Path:        "test/path2",
-						PVSize:      size1G,
-						DevSize:     size1G,
-						PVUuid:      "test-pv-uuid2",
-						BlockDevice: "test-device2",
-					},
-				},
-			}
-		)
+		metrics := monitoring.GetMetrics("test-node")
 
-		oldCandidate := internal.LVMVolumeGroupCandidate{
-			LVMVGName:             LVMVGName,
-			ActualVGNameOnTheNode: ActualVGNameOnTheNode,
-			BlockDevicesNames:     BlockDevicesNames,
-			SpecThinPools:         SpecThinPools,
-			Type:                  Type,
-			AllocatedSize:         size10G,
-			Health:                Health,
-			Message:               Message,
-			StatusThinPools:       StatusThinPools,
-			VGSize:                size10G,
-			VGUuid:                VGUuid,
-			Nodes:                 oldNodes,
-		}
-
-		newCandidate := internal.LVMVolumeGroupCandidate{
-			LVMVGName:             LVMVGName,
-			ActualVGNameOnTheNode: ActualVGNameOnTheNode,
-			BlockDevicesNames:     BlockDevicesNames,
-			SpecThinPools:         SpecThinPools,
-			Type:                  Type,
-			AllocatedSize:         size10G,
-			Health:                Health,
-			Message:               Message,
-			StatusThinPools:       StatusThinPools,
-			VGSize:                size10G,
-			VGUuid:                VGUuid,
-			Nodes:                 newNodes,
+		lvg := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: LVMVGName,
+			},
 		}
-
-		thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, StatusThinPools)
+		err := cl.Create(ctx, lvg)
 		if err != nil {
 			t.Error(err)
 		}
 
-		expected := v1alpha1.LvmVolumeGroup{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:            LVMVGName,
-				ResourceVersion: "2",
-				OwnerReferences: nil,
-			},
-			Spec: v1alpha1.LvmVolumeGroupSpec{
-				ActualVGNameOnTheNode: ActualVGNameOnTheNode,
-				BlockDeviceNames:      BlockDevicesNames,
-				ThinPools:             convertSpecThinPools(SpecThinPools),
-				Type:                  Type,
-			},
-			Status: v1alpha1.LvmVolumeGroupStatus{
-				AllocatedSize: size10G,
-				Nodes:         convertLVMVGNodes(newNodes),
-				ThinPools:     thinPools,
-				VGSize:        size10G,
-				VGUuid:        VGUuid,
-			},
-		}
-
-		created, err := CreateLVMVolumeGroupByCandidate(ctx, testLogger, testMetrics, cl, oldCandidate)
+		actual, err := GetAPILVMVolumeGroups(ctx, cl, metrics)
 		if assert.NoError(t, err) {
-			err := UpdateLVMVolumeGroupByCandidate(ctx, cl, testMetrics, log, created, newCandidate)
+			createdLvg, ok := actual[LVMVGName]
+			assert.True(t, ok)
 
+			candidate := internal.LVMVolumeGroupCandidate{
+				LVMVGName:     LVMVGName,
+				AllocatedSize: *resource.NewQuantity(1000, resource.BinarySI),
+			}
+			err = UpdateLVMVolumeGroupByCandidate(ctx, cl, metrics, log, &createdLvg, candidate)
 			if assert.NoError(t, err) {
-				lmvs, err := GetAPILVMVolumeGroups(ctx, cl, testMetrics)
+				updated, err := GetAPILVMVolumeGroups(ctx, cl, metrics)
 				if assert.NoError(t, err) {
-					actual := lmvs[LVMVGName]
-					assert.Equal(t, expected, actual)
+					updatedLvg, ok := updated[LVMVGName]
+					assert.True(t, ok)
+
+					assert.Equal(t, candidate.AllocatedSize.Value(), updatedLvg.Status.AllocatedSize.Value())
 				}
 			}
 		}
@@ -923,7 +729,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 	t.Run("hasLVMVolumeGroupDiff", func(t *testing.T) {
 		t.Run("should_return_false", func(t *testing.T) {
 			size10G, err := resource.ParseQuantity("10G")
+			if err != nil {
+				t.Error(err)
+			}
 			size1G, err := resource.ParseQuantity("1G")
+			if err != nil {
+				t.Error(err)
+			}
 			size13G, err := resource.ParseQuantity("13G")
 			if err != nil {
 				t.Error(err)
@@ -960,7 +772,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 							Path:        "/test/ds",
 							PVSize:      size1G,
 							DevSize:     size13G,
-							PVUuid:      "testUUID",
+							PVUUID:      "testUUID",
 							BlockDevice: "something",
 						},
 					},
@@ -1000,25 +812,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 		})
 
 		t.Run("should_return_true", func(t *testing.T) {
-			size10G, err := resource.ParseQuantity("10G")
-			size1G, err := resource.ParseQuantity("1G")
-			size13G, err := resource.ParseQuantity("13G")
-			if err != nil {
-				t.Error(err)
-			}
+			size10G := resource.MustParse("10G")
+			size1G := resource.MustParse("1G")
+			size13G := resource.MustParse("13G")
+			vgFree := resource.MustParse("5G")
 
 			var (
-				blockDevicesNames = []string{
-					"first",
-					"second",
-				}
-				specThinPools = map[string]resource.Quantity{
-					"first":  size10G,
-					"second": size1G,
-				}
-				specType        = "type"
 				allocatedSize   = resource.MustParse("10G")
-				health          = internal.LVMVGHealthOperational
 				statusThinPools = []internal.LVMVGStatusThinPool{
 					{
 						Name:       "first",
@@ -1038,29 +838,25 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 							Path:        "/test/ds",
 							PVSize:      size1G,
 							DevSize:     size13G,
-							PVUuid:      "testUUID",
+							PVUUID:      "testUUID",
 							BlockDevice: "something",
 						},
 						{
 							Path:        "/test/ds2",
 							PVSize:      size1G,
 							DevSize:     size13G,
-							PVUuid:      "testUUID2",
+							PVUUID:      "testUUID2",
 							BlockDevice: "something2",
 						},
 					},
 				}
 			)
 			candidate := internal.LVMVolumeGroupCandidate{
-				BlockDevicesNames: blockDevicesNames,
-				SpecThinPools:     specThinPools,
-				Type:              specType,
-				AllocatedSize:     size10G,
-				Health:            health,
-				Message:           "NewMessage",
-				StatusThinPools:   statusThinPools,
-				VGSize:            size10G,
-				Nodes:             nodes,
+				AllocatedSize:   size10G,
+				StatusThinPools: statusThinPools,
+				VGSize:          size10G,
+				VGFree:          vgFree,
+				Nodes:           nodes,
 			}
 
 			thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, statusThinPools)
@@ -1069,16 +865,12 @@ func TestLVMVolumeGroupDiscover(t *testing.T) {
 			}
 
 			lvmVolumeGroup := v1alpha1.LvmVolumeGroup{
-				Spec: v1alpha1.LvmVolumeGroupSpec{
-					BlockDeviceNames: blockDevicesNames,
-					ThinPools:        convertSpecThinPools(specThinPools),
-					Type:             specType,
-				},
 				Status: v1alpha1.LvmVolumeGroupStatus{
 					AllocatedSize: allocatedSize,
 					Nodes:         convertLVMVGNodes(nodes),
 					ThinPools:     thinPools,
 					VGSize:        vgSize,
+					VGFree:        *resource.NewQuantity(vgFree.Value()+10000, resource.BinarySI),
 				},
 			}
 
@@ -1126,8 +918,7 @@ func NewFakeClient() client.WithWatch {
 	s := scheme.Scheme
 	_ = metav1.AddMetaToScheme(s)
 	_ = v1alpha1.AddToScheme(s)
-
-	builder := fake.NewClientBuilder().WithScheme(s).WithStatusSubresource(&v1alpha1.LvmVolumeGroup{})
+	builder := fake.NewClientBuilder().WithScheme(s).WithStatusSubresource(&v1alpha1.LvmVolumeGroup{}, &v1alpha1.LVMLogicalVolume{})
 
 	cl := builder.Build()
 	return cl
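The builder change above registers the status subresource for both CRDs (`WithStatusSubresource` is variadic, so both can go in one call). The rewritten subtests rely on this: `Status().Update()` against the fake client then behaves like a real API server, writing only the status stanza. A hypothetical usage sketch follows; `Status.Phase` is assumed to be a plain string field of the v1alpha1 types, and `NewFakeClient` is the helper defined above.

```go
// Hypothetical subtest body illustrating why the status subresource matters.
func demoStatusWrite(ctx context.Context, t *testing.T) {
	cl := NewFakeClient()

	lvg := &v1alpha1.LvmVolumeGroup{ObjectMeta: metav1.ObjectMeta{Name: "status-demo"}}
	if err := cl.Create(ctx, lvg); err != nil {
		t.Fatal(err)
	}

	// Because the status subresource is registered, this writes only .status,
	// mirroring the spec/status split enforced by a real API server.
	lvg.Status.Phase = "Ready"
	if err := cl.Status().Update(ctx, lvg); err != nil {
		t.Fatal(err)
	}
}
```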
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_test.go b/images/agent/src/pkg/controller/lvm_volume_group_test.go
index 2cb0f1f6..f055f60f 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_test.go
@@ -18,11 +18,11 @@ package controller
 
 import (
 	"encoding/json"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"testing"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -115,11 +115,11 @@ func TestLvmVolumeGroupAPIObjects(t *testing.T) {
 				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
 					{
 						Name: "test-name",
-						Size: *convertSize("10G", t),
+						Size: "10G",
 					},
 					{
 						Name: "test-name2",
-						Size: *convertSize("1G", t),
+						Size: "1G",
 					},
 				},
 			},
@@ -176,164 +176,6 @@ func TestLvmVolumeGroupAPIObjects(t *testing.T) {
 			assert.Equal(t, expected, actual)
 		}
 	})
-
-	t.Run("Marshal_LvmVolumeGroup_struct_to_json", func(t *testing.T) {
-		expected := `{
-   "apiVersion": "storage.deckhouse.io/v1alpha1",
-   "kind": "LvmVolumeGroup",
-   "metadata": {
-       "creationTimestamp": null,
-       "name": "lvg-test-1"
-   },
-   "spec": {
-       "actualVGNameOnTheNode": "testVGname",
-       "blockDeviceNames": [
-           "test-bd",
-           "test-bd2"
-       ],
-       "thinPools": [
-           {
-               "name": "test-name",
-               "size": "10G"
-           },
-           {
-               "name": "test-name2",
-               "size": "1G"
-           }
-       ],
-       "type": "local"
-   },
-   "status": {
-       "conditions": null,
-       "allocatedSize": "20G",
-       "health": "operational",
-       "message": "all-good",
-       "nodes": [
-           {
-               "devices": [
-                   {
-                       "blockDevice": "test/BD",
-                       "devSize": "1G",
-                       "path": "test/path1",
-                       "pvSize": "1G",
-                       "pvUUID": "testPV1"
-                   },
-                   {
-                       "blockDevice": "test/BD2",
-                       "devSize": "1G",
-                       "path": "test/path2",
-                       "pvSize": "2G",
-                       "pvUUID": "testPV2"
-                   }
-               ],
-               "name": "node1"
-           },
-           {
-               "devices": [
-                   {
-                       "blockDevice": "test/DB3",
-                       "devSize": "2G",
-                       "path": "test/path3",
-                       "pvSize": "3G",
-                       "pvUUID": "testPV3"
-                   }
-               ],
-               "name": "node2"
-           }
-       ],
-       "phase": "",
-       "thinPools": [
-           {
-               "name": "test-name",
-               "actualSize": "1G",
-				"usedSize": "500M",
-               "ready": true,
-               "message": ""
-           }
-       ],
-       "vgSize": "30G",
-       "vgUUID": "test-vg-uuid"
-   }
-}`
-		testObj := v1alpha1.LvmVolumeGroup{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:              "lvg-test-1",
-				CreationTimestamp: metav1.Time{},
-			},
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "LvmVolumeGroup",
-				APIVersion: "storage.deckhouse.io/v1alpha1",
-			},
-			Spec: v1alpha1.LvmVolumeGroupSpec{
-				ActualVGNameOnTheNode: "testVGname",
-				BlockDeviceNames:      []string{"test-bd", "test-bd2"},
-				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
-					{
-						Name: "test-name",
-						Size: *convertSize("10G", t),
-					},
-					{
-						Name: "test-name2",
-						Size: *convertSize("1G", t),
-					},
-				},
-				Type: "local",
-			},
-			Status: v1alpha1.LvmVolumeGroupStatus{
-				AllocatedSize: resource.MustParse("20G"),
-				Nodes: []v1alpha1.LvmVolumeGroupNode{
-					{
-						Devices: []v1alpha1.LvmVolumeGroupDevice{
-							{
-								BlockDevice: "test/BD",
-								DevSize:     *convertSize("1G", t),
-								PVSize:      resource.MustParse("1G"),
-								PVUuid:      "testPV1",
-								Path:        "test/path1",
-							},
-							{
-								BlockDevice: "test/BD2",
-								DevSize:     *convertSize("1G", t),
-								PVSize:      resource.MustParse("2G"),
-								PVUuid:      "testPV2",
-								Path:        "test/path2",
-							},
-						},
-						Name: "node1",
-					},
-					{
-						Devices: []v1alpha1.LvmVolumeGroupDevice{
-							{
-								BlockDevice: "test/DB3",
-								DevSize:     *convertSize("2G", t),
-								PVSize:      resource.MustParse("3G"),
-								PVUuid:      "testPV3",
-								Path:        "test/path3",
-							},
-						},
-						Name: "node2",
-					},
-				},
-				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{
-					{
-						Name:       "test-name",
-						ActualSize: *convertSize("1G", t),
-						UsedSize:   resource.MustParse("500M"),
-						Ready:      true,
-						Message:    "",
-					},
-				},
-				VGSize: resource.MustParse("30G"),
-				VGUuid: "test-vg-uuid",
-			},
-		}
-
-		actual, err := json.Marshal(testObj)
-
-		if assert.NoError(t, err) {
-			assert.JSONEq(t, expected, string(actual))
-		}
-	})
 }
 
 func convertSize(size string, t *testing.T) *resource.Quantity {
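The `Size` field of `LvmVolumeGroupThinPoolSpec` is now a plain string (`"10G"`) rather than a `resource.Quantity`, which is what lets the watcher accept percentage values and resolve them against the VG size via `getRequestedSizeFromString`. The sketch below shows that kind of parser under the assumption that absolute quantities and percentages are the two supported forms; the real helper may differ. It uses `k8s.io/apimachinery/pkg/api/resource` plus `fmt`, `strconv`, and `strings`.

```go
// Hypothetical stand-in for getRequestedSizeFromString; only the two forms
// visible in this diff (absolute quantity, percentage of the VG) are handled.
func parseThinPoolSize(size string, vgSize resource.Quantity) (resource.Quantity, error) {
	if strings.HasSuffix(size, "%") {
		pct, err := strconv.ParseFloat(strings.TrimSuffix(size, "%"), 64)
		if err != nil {
			return resource.Quantity{}, fmt.Errorf("invalid percentage %q: %w", size, err)
		}
		bytes := int64(float64(vgSize.Value()) * pct / 100)
		return *resource.NewQuantity(bytes, resource.BinarySI), nil
	}
	// Absolute sizes like "10G" parse directly.
	return resource.ParseQuantity(size)
}
```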
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
index e80ea32b..484fa0eb 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go
@@ -17,19 +17,12 @@ limitations under the License.
 package controller
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
-	"errors"
 	"fmt"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	errors2 "k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -39,6 +32,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 const (
@@ -143,9 +143,8 @@ func RunLVMVolumeGroupWatcherController(
 				if err != nil {
 					log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name))
 					return reconcile.Result{}, err
-				} else {
-					log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name))
 				}
+				log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name))
 
 				return reconcile.Result{}, nil
 			}
@@ -192,8 +191,8 @@ func RunLVMVolumeGroupWatcherController(
 		return nil, err
 	}
 
-	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup]{
-		CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+	err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc got a create event for the LVMVolumeGroup, name: %s", e.Object.GetName()))
 
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
@@ -201,7 +200,7 @@ func RunLVMVolumeGroupWatcherController(
 
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] UpdateFunc got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			if !shouldLVGWatcherReconcileUpdateEvent(log, e.ObjectOld, e.ObjectNew) {
 				log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update event for the LVMVolumeGroup %s should not be reconciled as not target changed were made", e.ObjectNew.Name))
@@ -279,7 +278,7 @@ func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Lo
 	}
 
 	log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] check if VG %s of the LVMVolumeGroup %s uses LVs", lvg.Spec.ActualVGNameOnTheNode, lvg.Name))
-	usedLVs := checkIfVGHasLV(sdsCache, lvg.Spec.ActualVGNameOnTheNode)
+	usedLVs := getLVForVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode)
 	if len(usedLVs) > 0 {
 		err := fmt.Errorf("VG %s uses LVs: %v. Delete used LVs first", lvg.Spec.ActualVGNameOnTheNode, usedLVs)
 		log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to reconcile LVG %s", lvg.Name))
@@ -337,7 +336,7 @@ func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Lo
 
 	log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name))
 	pvs, _ := sdsCache.GetPVs()
-	valid, reason := validateLVGForUpdateFunc(log, sdsCache, lvg, blockDevices, pvs)
+	valid, reason := validateLVGForUpdateFunc(log, sdsCache, lvg, blockDevices)
 	if !valid {
 		log.Warning(fmt.Sprintf("[reconcileLVGUpdateFunc] the LVMVolumeGroup %s is not valid", lvg.Name))
 		err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason)
@@ -352,7 +351,7 @@ func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Lo
 	log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to get VG %s for the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name))
 	found, vg := tryGetVG(sdsCache, lvg.Spec.ActualVGNameOnTheNode)
 	if !found {
-		err := errors.New(fmt.Sprintf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode))
+		err := fmt.Errorf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode)
 		log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile the LVMVolumeGroup %s", lvg.Name))
 		err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error())
 		if err != nil {
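The `Watch` registration in this file moves to controller-runtime's generic handler API: `TypedFuncs` gains the request type as a second type parameter, and the work queue becomes `TypedRateLimitingInterface[reconcile.Request]`, so `q.Add` is checked at compile time. A condensed sketch of the new shape, reusing the names from the hunk above; the controller, cache, and imports are assumed from the surrounding file, and `cache` here means controller-runtime's cache package.

```go
// Condensed from the hunk above: a typed watch on LvmVolumeGroup objects.
func watchLVGs(ctrl controller.Controller, mgrCache cache.Cache) error {
	return ctrl.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{},
		handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{
			CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup],
				q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
				// q is typed: Add takes a reconcile.Request, not interface{}.
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
					Namespace: e.Object.GetNamespace(),
					Name:      e.Object.GetName(),
				}})
			},
		}))
}
```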
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
index a87e907c..a28f9ecb 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go
@@ -17,24 +17,25 @@ limitations under the License.
 package controller
 
 import (
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/logger"
-	"agent/pkg/monitoring"
-	"agent/pkg/utils"
 	"context"
 	"errors"
 	"fmt"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/utils/strings/slices"
 	"reflect"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/strings/slices"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+	"agent/pkg/utils"
 )
 
 func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, currentNode string) error {
@@ -51,10 +52,10 @@ func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logg
 	if len(lvg.Status.Nodes) == 0 {
 		start := time.Now()
 		err := cl.Delete(ctx, lvg)
-		metrics.ApiMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
-		metrics.ApiMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
+		metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start))
+		metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
 		if err != nil {
-			metrics.ApiMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
+			metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "delete").Inc()
 			return err
 		}
 		log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name))
@@ -106,11 +107,7 @@ func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1a
 }
 
 func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LvmVolumeGroup) bool {
-	if lvg.DeletionTimestamp != nil {
-		return true
-	}
-
-	return false
+	return lvg.DeletionTimestamp != nil
 }
 
 func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, status v1.ConditionStatus, conType, reason, message string) error {
@@ -260,11 +257,7 @@ func validateSpecBlockDevices(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[str
 
 func checkIfLVGBelongsToNode(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, nodeName string) bool {
 	bd := blockDevices[lvg.Spec.BlockDeviceNames[0]]
-	if bd.Status.NodeName != nodeName {
-		return false
-	}
-
-	return true
+	return bd.Status.NodeName == nodeName
 }
 
 func extractPathsFromBlockDevices(blockDevicesNames []string, blockDevices map[string]v1alpha1.BlockDevice) []string {
@@ -366,9 +359,9 @@ func validateLVGForCreateFunc(log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, b
 	return true, ""
 }
 
-func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, pvs []internal.PVData) (bool, string) {
+func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) {
 	reason := strings.Builder{}
-
+	pvs, _ := sdsCache.GetPVs()
 	log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] check if every new BlockDevice of the LVMVolumeGroup %s is comsumable", lvg.Name))
 	actualPVPaths := make(map[string]struct{}, len(pvs))
 	for _, pv := range pvs {
@@ -440,9 +433,10 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a
 			return false, reason.String()
 		}
 
+		newTotalVGSize := resource.NewQuantity(vg.VGSize.Value()+additionBlockDeviceSpace, resource.BinarySI)
 		for _, specTp := range lvg.Spec.ThinPools {
 			// might be a case when Thin-pool is already created, but is not shown in status
-			tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, vg.VGSize)
+			tpRequestedSize, err := getRequestedSizeFromString(specTp.Size, *newTotalVGSize)
 			if err != nil {
 				reason.WriteString(err.Error())
 				continue
@@ -454,7 +448,7 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a
 			}
 
 			log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s thin-pool %s requested size %s, Status VG size %s", lvg.Name, specTp.Name, tpRequestedSize.String(), lvg.Status.VGSize.String()))
-			switch utils.AreSizesEqualWithinDelta(tpRequestedSize, lvg.Status.VGSize, internal.ResizeDelta) {
+			switch utils.AreSizesEqualWithinDelta(tpRequestedSize, *newTotalVGSize, internal.ResizeDelta) {
 			// means a user wants 100% of VG space
 			case true:
 				hasFullThinPool = true
@@ -484,7 +478,8 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a
 		}
 
 		if !hasFullThinPool {
-			totalFreeSpace := lvg.Status.VGSize.Value() - lvg.Status.AllocatedSize.Value() + additionBlockDeviceSpace
+			allocatedSize := getVGAllocatedSize(*vg)
+			totalFreeSpace := newTotalVGSize.Value() - allocatedSize.Value()
 			log.Trace(fmt.Sprintf("[validateLVGForUpdateFunc] new LVMVolumeGroup %s thin-pools requested %d size, additional BlockDevices space %d, total: %d", lvg.Name, addingThinPoolSize, additionBlockDeviceSpace, totalFreeSpace))
 			if addingThinPoolSize != 0 && addingThinPoolSize+internal.ResizeDelta.Value() > totalFreeSpace {
 				reason.WriteString("Added thin-pools requested sizes are more than allowed free space in VG.")
@@ -520,11 +515,8 @@ func shouldReconcileLVGByCreateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cach
 		return false
 	}
 
-	if vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode); vg != nil {
-		return false
-	}
-
-	return true
+	vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode)
+	return vg == nil
 }
 
 func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cache) bool {
@@ -532,11 +524,8 @@ func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cach
 		return false
 	}
 
-	if vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode); vg != nil {
-		return true
-	}
-
-	return false
+	vg := ch.FindVG(lvg.Spec.ActualVGNameOnTheNode)
+	return vg != nil
 }
 
 func ReconcileThinPoolsIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, vg internal.VGData, lvs []internal.LVData) error {
@@ -737,7 +726,7 @@ func removeLVGFinalizerIfExist(ctx context.Context, cl client.Client, lvg *v1alp
 	return true, nil
 }
 
-func checkIfVGHasLV(ch *cache.Cache, vgName string) []string {
+func getLVForVG(ch *cache.Cache, vgName string) []string {
 	lvs, _ := ch.GetLVs()
 	usedLVs := make([]string, 0, len(lvs))
 	for _, lv := range lvs {
@@ -755,10 +744,10 @@ func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring
 	err := cl.Get(ctx, client.ObjectKey{
 		Name: name,
 	}, obj)
-	metrics.ApiMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc()
+	metrics.APIMethodsDuration(LVMVolumeGroupWatcherCtrlName, "get").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(LVMVolumeGroupWatcherCtrlName, "get").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc()
+		metrics.APIMethodsErrors(LVMVolumeGroupWatcherCtrlName, "get").Inc()
 		return nil, err
 	}
 	return obj, nil
@@ -811,7 +800,7 @@ func DeleteVGIfExist(log logger.Logger, metrics monitoring.Metrics, sdsCache *ca
 	return nil
 }
 
-func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, VGName string, log logger.Logger) error {
+func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, vgName string, log logger.Logger) error {
 	for _, pvPath := range extendPVs {
 		start := time.Now()
 		command, err := utils.CreatePV(pvPath)
@@ -826,7 +815,7 @@ func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, VGName stri
 	}
 
 	start := time.Now()
-	command, err := utils.ExtendVG(VGName, extendPVs)
+	command, err := utils.ExtendVG(vgName, extendPVs)
 	metrics.UtilsCommandsDuration(LVMVolumeGroupWatcherCtrlName, "vgextend").Observe(metrics.GetEstimatedTimeInSeconds(start))
 	metrics.UtilsCommandsExecutionCount(LVMVolumeGroupWatcherCtrlName, "vgextend").Inc()
 	log.Debug(command)
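The validation changes above re-anchor the thin-pool checks: requested sizes are compared against the prospective VG size (the current VG plus the block devices being added), and free space is derived from the VG's actual allocation rather than the possibly stale `Status` fields. The arithmetic, condensed into a standalone sketch with illustrative names; `resizeDelta` stands in for `internal.ResizeDelta`.

```go
// fitsAfterExtend condenses the free-space check from validateLVGForUpdateFunc.
func fitsAfterExtend(vgSize, allocated resource.Quantity, addedDeviceBytes, requestedThinPoolBytes int64, resizeDelta resource.Quantity) bool {
	newTotal := vgSize.Value() + addedDeviceBytes // newTotalVGSize in the diff
	totalFree := newTotal - allocated.Value()     // totalFreeSpace in the diff
	if requestedThinPoolBytes == 0 {
		return true // nothing new requested, nothing to check
	}
	return requestedThinPoolBytes+resizeDelta.Value() <= totalFree
}
```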
diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go
index f2f7ce33..3427d098 100644
--- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go
+++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go
@@ -1,809 +1,1164 @@
 package controller
 
-//
-//import (
-//	"context"
-//	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
-//	"agent/pkg/monitoring"
-//	"testing"
-//
-//	"github.com/stretchr/testify/assert"
-//	v1 "k8s.io/api/core/v1"
-//	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-//)
-//
-//func TestLVMVolumeGroupWatcherCtrl(t *testing.T) {
-//	e2eCL := NewFakeClient()
-//	ctx := context.Background()
-//	metrics := monitoring.GetMetrics("")
-//	namespace := "test"
-//
-//	t.Run("getLVMVolumeGroup_lvg_exists_returns_correct", func(t *testing.T) {
-//		const name = "test_name"
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		actual, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//
-//		if assert.NoError(t, err) {
-//			assert.NotNil(t, actual)
-//			assert.Equal(t, name, actual.Name)
-//			assert.Equal(t, namespace, actual.Namespace)
-//		}
-//	})
-//
-//	t.Run("getLVMVolumeGroup_lvg_doesnt_exist_returns_nil", func(t *testing.T) {
-//		const name = "test_name"
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		actual, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, "another-name")
-//
-//		if assert.EqualError(t, err, "lvmvolumegroups.storage.deckhouse.io \"another-name\" not found") {
-//			assert.Nil(t, actual)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_new_old_health_is_operational_doesnt_update_returns_nil", func(t *testing.T) {
-//		const (
-//			name    = "test_name"
-//			message = "All good"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  Operational,
-//				Message: message,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, Operational, oldLvg.Status.Health)
-//			assert.Equal(t, message, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, "new message", Operational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, Operational, updatedLvg.Status.Health)
-//			assert.Equal(t, message, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_new_old_health_and_messages_are_the_same_doesnt_updates_returns_nil", func(t *testing.T) {
-//		const (
-//			name    = "test_name"
-//			message = "All bad"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  NonOperational,
-//				Message: message,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, NonOperational, oldLvg.Status.Health)
-//			assert.Equal(t, message, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, message, NonOperational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, NonOperational, updatedLvg.Status.Health)
-//			assert.Equal(t, message, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_new_old_health_are_nonoperational_different_messages_are_updates_message_returns_nil", func(t *testing.T) {
-//		const (
-//			name       = "test_name"
-//			oldMessage = "All bad1"
-//			newMessage = "All bad2"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  NonOperational,
-//				Message: oldMessage,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, NonOperational, oldLvg.Status.Health)
-//			assert.Equal(t, oldMessage, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, newMessage, NonOperational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, NonOperational, updatedLvg.Status.Health)
-//			assert.Equal(t, newMessage, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("updateLVMVolumeGroupHealthStatus_old_health_is_nonoperational_new_health_is_operational_updates_health_and_message_returns_nil", func(t *testing.T) {
-//		const (
-//			name       = "test_name"
-//			oldMessage = "All bad"
-//			newMessage = "All good"
-//		)
-//		testObj := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Status: v1alpha1.LvmVolumeGroupStatus{
-//				Health:  NonOperational,
-//				Message: oldMessage,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		oldLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, oldLvg.Name)
-//			assert.Equal(t, NonOperational, oldLvg.Status.Health)
-//			assert.Equal(t, oldMessage, oldLvg.Status.Message)
-//		}
-//
-//		err = updateLVMVolumeGroupHealthStatus(ctx, e2eCL, metrics, name, namespace, newMessage, Operational)
-//		assert.Nil(t, err)
-//
-//		updatedLvg, err := getLVMVolumeGroup(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, updatedLvg.Name)
-//			assert.Equal(t, Operational, updatedLvg.Status.Health)
-//			assert.Equal(t, newMessage, updatedLvg.Status.Message)
-//		}
-//	})
-//
-//	t.Run("getBlockDevice_bd_exists_returns_correct_one", func(t *testing.T) {
-//		const name = "test_name"
-//
-//		testObj := &v1alpha1.BlockDevice{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		bd, err := getBlockDevice(ctx, e2eCL, metrics, namespace, name)
-//		if assert.NoError(t, err) {
-//			assert.Equal(t, name, bd.Name)
-//			assert.Equal(t, namespace, bd.Namespace)
-//		}
-//	})
-//
-//	t.Run("getBlockDevice_bd_doesnt_exists_returns_nil", func(t *testing.T) {
-//		const name = "test_name"
-//
-//		testObj := &v1alpha1.BlockDevice{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, testObj)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testObj)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		bd, err := getBlockDevice(ctx, e2eCL, metrics, namespace, "another-name")
-//		if assert.EqualError(t, err, "blockdevices.storage.deckhouse.io \"another-name\" not found") {
-//			assert.Nil(t, bd)
-//		}
-//	})
-//
-//	t.Run("ValidateLVMGroup_lvg_is_nil_returns_error", func(t *testing.T) {
-//		valid, obj, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, nil, "test_ns", "test_node")
-//		assert.False(t, valid)
-//		assert.Nil(t, obj)
-//		assert.EqualError(t, err, "lvmVolumeGroup is nil")
-//	})
-//
-//	t.Run("ValidateLVMGroup_type_local_selected_absent_bds_validation_fails", func(t *testing.T) {
-//		const lvgName = "test_name"
-//
-//		lvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      lvgName,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{"test_bd"},
-//				Type:             Local,
-//			},
-//		}
-//
-//		err := e2eCL.Create(ctx, lvg)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, lvg)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		valid, status, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, lvg, namespace, "test_node")
-//		assert.False(t, valid)
-//		if assert.NotNil(t, status) {
-//			assert.Equal(t, NonOperational, status.Health)
-//			assert.EqualError(t, err, "error getBlockDevice: blockdevices.storage.deckhouse.io \"test_bd\" not found")
-//		}
-//	})
-//
-//	t.Run("ValidateLVMGroup_type_local_selected_bds_from_different_nodes_validation_fails", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: testNode,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: "another_node",
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Local,
-//			},
-//		}
-//
-//		err = e2eCL.Create(ctx, testLvg)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testLvg)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		valid, status, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, testLvg, namespace, testNode)
-//		assert.False(t, valid)
-//		if assert.NotNil(t, status) {
-//			assert.Equal(t, NonOperational, status.Health)
-//			assert.Equal(t, "selected block devices are from different nodes for local LVMVolumeGroup", status.Message)
-//		}
-//		assert.Nil(t, err)
-//	})
-//
-//	t.Run("ValidateLVMGroup_type_local_validation_passes", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: testNode,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName: testNode,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames:      []string{firstBd, secondBd},
-//				Type:                  Local,
-//				ActualVGNameOnTheNode: "some-vg",
-//			},
-//		}
-//
-//		err = e2eCL.Create(ctx, testLvg)
-//		if err != nil {
-//			t.Error(err)
-//		} else {
-//			defer func() {
-//				err = e2eCL.Delete(ctx, testLvg)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}()
-//		}
-//
-//		valid, status, err := CheckLVMVGNodeOwnership(ctx, e2eCL, metrics, testLvg, namespace, testNode)
-//		assert.True(t, valid)
-//		if assert.NotNil(t, status) {
-//			assert.Equal(t, "", status.Health)
-//			assert.Equal(t, "", status.Message)
-//		}
-//		assert.Nil(t, err)
-//	})
-//
-//	t.Run("CreateEventLVMVolumeGroup_creates_event", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			nodeName = "test_node"
-//		)
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			TypeMeta: metav1.TypeMeta{
-//				Kind: "test_kind",
-//			},
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//				UID:       "test_UUID",
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{"absent_bd"},
-//				Type:             Local,
-//			},
-//		}
-//
-//		err := CreateEventLVMVolumeGroup(ctx, e2eCL, metrics, EventReasonDeleting, EventActionDeleting, nodeName, testLvg)
-//		if assert.NoError(t, err) {
-//			events := &v1.EventList{}
-//			err = e2eCL.List(ctx, events)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//
-//			if assert.Equal(t, 1, len(events.Items)) {
-//				event := events.Items[0]
-//
-//				assert.Equal(t, testLvg.Name+"-", event.GenerateName)
-//				assert.Equal(t, nameSpaceEvent, event.Namespace)
-//				assert.Equal(t, EventReasonDeleting, event.Reason)
-//				assert.Equal(t, testLvg.Name, event.InvolvedObject.Name)
-//				assert.Equal(t, testLvg.Kind, event.InvolvedObject.Kind)
-//				assert.Equal(t, testLvg.UID, event.InvolvedObject.UID)
-//				assert.Equal(t, "apiextensions.k8s.io/v1", event.InvolvedObject.APIVersion)
-//				assert.Equal(t, v1.EventTypeNormal, event.Type)
-//				assert.Equal(t, EventActionDeleting, event.Action)
-//				assert.Equal(t, nodeName, event.ReportingInstance)
-//				assert.Equal(t, LVMVolumeGroupWatcherCtrlName, event.ReportingController)
-//				assert.Equal(t, "Event Message", event.Message)
-//
-//				err = e2eCL.Delete(ctx, &event)
-//				if err != nil {
-//					t.Error(err)
-//				}
-//			}
-//		}
-//	})
-//
-//	t.Run("ValidateConsumableDevices_validation_passes", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Shared,
-//			},
-//		}
-//
-//		passed, err := ValidateConsumableDevices(ctx, e2eCL, metrics, testLvg)
-//		if assert.NoError(t, err) {
-//			assert.True(t, passed)
-//		}
-//	})
-//
-//	t.Run("ValidateConsumableDevices_validation_fails", func(t *testing.T) {
-//		const (
-//			name     = "test_name"
-//			firstBd  = "first"
-//			secondBd = "second"
-//			testNode = "test_node"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: false,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Shared,
-//			},
-//		}
-//
-//		passed, err := ValidateConsumableDevices(ctx, e2eCL, metrics, testLvg)
-//		if assert.NoError(t, err) {
-//			assert.False(t, passed)
-//		}
-//	})
-//
-//	t.Run("ValidateConsumableDevices_lvg_is_nil_validation_fails", func(t *testing.T) {
-//		passed, err := ValidateConsumableDevices(ctx, e2eCL, metrics, nil)
-//		if assert.EqualError(t, err, "lvmVolumeGroup is nil") {
-//			assert.False(t, passed)
-//		}
-//	})
-//
-//	t.Run("GetPathsConsumableDevicesFromLVMVG_lvg_is_nil_returns_error", func(t *testing.T) {
-//		paths, err := GetPathsConsumableDevicesFromLVMVG(ctx, e2eCL, metrics, nil)
-//
-//		if assert.EqualError(t, err, "lvmVolumeGroup is nil") {
-//			assert.Nil(t, paths)
-//		}
-//	})
-//
-//	t.Run("GetPathsConsumableDevicesFromLVMVG_lvg_is_nil_returns_error", func(t *testing.T) {
-//		const (
-//			name       = "test_name"
-//			firstBd    = "first"
-//			secondBd   = "second"
-//			testNode   = "test_node"
-//			firstPath  = "first_path"
-//			secondPath = "second_path"
-//		)
-//
-//		bds := &v1alpha1.BlockDeviceList{
-//			Items: []v1alpha1.BlockDevice{
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      firstBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: false,
-//						Path:       firstPath,
-//					},
-//				},
-//				{
-//					ObjectMeta: metav1.ObjectMeta{
-//						Name:      secondBd,
-//						Namespace: namespace,
-//					},
-//					Status: v1alpha1.BlockDeviceStatus{
-//						NodeName:   testNode,
-//						Consumable: true,
-//						Path:       secondPath,
-//					},
-//				},
-//			},
-//		}
-//
-//		var err error
-//		for _, bd := range bds.Items {
-//			err = e2eCL.Create(ctx, &bd)
-//			if err != nil {
-//				t.Error(err)
-//			}
-//		}
-//
-//		if err == nil {
-//			defer func() {
-//				for _, bd := range bds.Items {
-//					err = e2eCL.Delete(ctx, &bd)
-//					if err != nil {
-//						t.Error(err)
-//					}
-//				}
-//			}()
-//		}
-//
-//		testLvg := &v1alpha1.LvmVolumeGroup{
-//			ObjectMeta: metav1.ObjectMeta{
-//				Name:      name,
-//				Namespace: namespace,
-//			},
-//			Spec: v1alpha1.LvmVolumeGroupSpec{
-//				BlockDeviceNames: []string{firstBd, secondBd},
-//				Type:             Shared,
-//			},
-//		}
-//
-//		expected := []string{firstPath, secondPath}
-//
-//		actual, err := GetPathsConsumableDevicesFromLVMVG(ctx, e2eCL, metrics, testLvg)
-//		if assert.NoError(t, err) {
-//			assert.ElementsMatch(t, expected, actual)
-//		}
-//
-//	})
-//}
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/strings/slices"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/logger"
+	"agent/pkg/monitoring"
+)
+
+func TestLVMVolumeGroupWatcherCtrl(t *testing.T) {
+	cl := NewFakeClient()
+	ctx := context.Background()
+	log := logger.Logger{}
+	metrics := monitoring.GetMetrics("")
+
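+	// validateLVGForUpdateFunc is expected to approve an update only when every
+	// block device that is new to the VG is consumable and, when thin pools are
+	// requested, the VG plus the new devices can fit the requested sizes.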
+	t.Run("validateLVGForUpdateFunc", func(t *testing.T) {
+		t.Run("without_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+				Status: v1alpha1.LvmVolumeGroupStatus{
+					Phase:                "",
+					Conditions:           nil,
+					ThinPoolReady:        "",
+					ConfigurationApplied: "",
+					VGFree:               resource.Quantity{},
+				},
+			}
+
+			// only the first device is cached as a PV, so the second block device counts as new
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+
+			valid, reason := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("without_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: false,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+				Status: v1alpha1.LvmVolumeGroupStatus{
+					Phase:                "",
+					Conditions:           nil,
+					ThinPoolReady:        "",
+					ConfigurationApplied: "",
+					VGFree:               resource.Quantity{},
+				},
+			}
+
+			// only the first device is cached as a PV, so the second block device counts as new
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+
+			// the new block device is not consumable, so validation must fail
+			valid, _ := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			assert.False(t, valid)
+		})
+
+		t.Run("with_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+
+				vgName = "test-vg"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("2G"),
+						Consumable: true,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Name:            "new-thin",
+							Size:            "2.5G",
+							AllocationLimit: "150%",
+						},
+					},
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+
+			// only the first device is cached as a PV, so the second block device counts as new
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+					VGSize: resource.MustParse("1G"),
+					VGFree: resource.MustParse("1G"),
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			valid, reason := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("with_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+
+				firstPath  = "first-path"
+				secondPath = "second-path"
+
+				vgName = "test-vg"
+			)
+
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+						Path:       firstPath,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("2G"),
+						Consumable: true,
+						Path:       secondPath,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Name:            "new-thin",
+							Size:            "4G",
+							AllocationLimit: "150%",
+						},
+					},
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+
+			// only the first device is cached as a PV, so the second block device counts as new
+			pvs := []internal.PVData{
+				{
+					PVName: firstPath,
+				},
+			}
+
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+					VGSize: resource.MustParse("1G"),
+					VGFree: resource.MustParse("1G"),
+				},
+			}
+
+			ch := cache.New()
+			ch.StorePVs(pvs, bytes.Buffer{})
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			valid, _ := validateLVGForUpdateFunc(log, ch, lvg, bds)
+			assert.False(t, valid)
+		})
+	})
+
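+	// validateLVGForCreateFunc is expected to approve creation only when every
+	// referenced block device exists and the devices can fit the requested thin pools.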
+	t.Run("validateLVGForCreateFunc", func(t *testing.T) {
+		t.Run("without_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+			}
+
+			valid, reason := validateLVGForCreateFunc(log, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("without_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+				},
+			}
+
+			valid, _ := validateLVGForCreateFunc(log, lvg, bds)
+			assert.False(t, valid)
+		})
+
+		t.Run("with_thin_pools_returns_true", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Size: "1G",
+						},
+					},
+				},
+			}
+
+			valid, reason := validateLVGForCreateFunc(log, lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("with_thin_pools_returns_false", func(t *testing.T) {
+			const (
+				firstBd  = "first"
+				secondBd = "second"
+			)
+			bds := map[string]v1alpha1.BlockDevice{
+				firstBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: firstBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+				secondBd: {
+					ObjectMeta: v1.ObjectMeta{
+						Name: secondBd,
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						Size:       resource.MustParse("1G"),
+						Consumable: true,
+					},
+				},
+			}
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{firstBd, secondBd},
+					ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+						{
+							Size: "3G",
+						},
+					},
+				},
+			}
+
+			valid, _ := validateLVGForCreateFunc(log, lvg, bds)
+			assert.False(t, valid)
+		})
+	})
+
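+	// identifyLVGReconcileFunc should map the cache state to a reconcile type:
+	// no VG in the cache -> Create, VG present -> Update, VG present while the
+	// resource carries a deletion timestamp -> Delete.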
+	t.Run("identifyLVGReconcileFunc", func(t *testing.T) {
+		t.Run("returns_create", func(t *testing.T) {
+			const vgName = "test-vg"
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+
+			ch := cache.New()
+
+			actual := identifyLVGReconcileFunc(lvg, ch)
+			assert.Equal(t, CreateReconcile, actual)
+		})
+
+		t.Run("returns_update", func(t *testing.T) {
+			const vgName = "test-vg"
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+				},
+			}
+
+			ch := cache.New()
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			actual := identifyLVGReconcileFunc(lvg, ch)
+			assert.Equal(t, UpdateReconcile, actual)
+		})
+
+		t.Run("returns_delete", func(t *testing.T) {
+			const vgName = "test-vg"
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					ActualVGNameOnTheNode: vgName,
+				},
+			}
+			lvg.DeletionTimestamp = &v1.Time{}
+			vgs := []internal.VGData{
+				{
+					VGName: vgName,
+				},
+			}
+
+			ch := cache.New()
+			ch.StoreVGs(vgs, bytes.Buffer{})
+
+			actual := identifyLVGReconcileFunc(lvg, ch)
+			assert.Equal(t, DeleteReconcile, actual)
+		})
+	})
+
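+	// removeLVGFinalizerIfExist should strip the SdsNodeConfigurator finalizer
+	// and report whether anything was actually removed.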
+	t.Run("removeLVGFinalizerIfExist", func(t *testing.T) {
+		t.Run("not_exist_no_remove", func(t *testing.T) {
+			lvg := &v1alpha1.LvmVolumeGroup{}
+
+			removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			assert.False(t, removed)
+		})
+
+		t.Run("does_exist_remove", func(t *testing.T) {
+			const lvgName = "test-lvg"
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Finalizers = append(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer)
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			defer func() {
+				err = cl.Delete(ctx, lvg)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+
+			removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			if assert.True(t, removed) {
+				updatedLVG := &v1alpha1.LvmVolumeGroup{}
+				err = cl.Get(ctx, client.ObjectKey{
+					Name: lvgName,
+				}, updatedLVG)
+				if err != nil {
+					t.Error(err)
+				}
+
+				assert.False(t, slices.Contains(updatedLVG.Finalizers, internal.SdsNodeConfiguratorFinalizer))
+			}
+		})
+	})
+
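+	// getLVForVG should return only the LVs that belong to the requested VG.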
+	t.Run("getLVForVG", func(t *testing.T) {
+		const (
+			firstLV  = "first"
+			secondLV = "second"
+			vgName   = "test-vg"
+		)
+		lvs := []internal.LVData{
+			{
+				LVName: firstLV,
+				VGName: vgName,
+			},
+			{
+				LVName: secondLV,
+				VGName: "other",
+			},
+		}
+
+		ch := cache.New()
+		ch.StoreLVs(lvs, bytes.Buffer{})
+		expected := []string{firstLV}
+
+		actual := getLVForVG(ch, vgName)
+
+		assert.ElementsMatch(t, expected, actual)
+	})
+
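+	// countVGSizeByBlockDevices should sum the sizes of all block devices
+	// referenced by the LVG spec.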
+	t.Run("countVGSizeByBlockDevices", func(t *testing.T) {
+		const (
+			firstBd  = "first"
+			secondBd = "second"
+		)
+		bds := map[string]v1alpha1.BlockDevice{
+			firstBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: firstBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Size: resource.MustParse("1G"),
+				},
+			},
+			secondBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: secondBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Size: resource.MustParse("1G"),
+				},
+			},
+		}
+		lvg := &v1alpha1.LvmVolumeGroup{
+			Spec: v1alpha1.LvmVolumeGroupSpec{
+				BlockDeviceNames: []string{firstBd, secondBd},
+			},
+		}
+
+		expected := resource.MustParse("2G")
+
+		actual := countVGSizeByBlockDevices(lvg, bds)
+		assert.Equal(t, expected.Value(), actual.Value())
+	})
+
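+	// getRequestedSizeFromString should resolve both percentage-based and
+	// absolute size strings against the total size.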
+	t.Run("getRequestedSizeFromString", func(t *testing.T) {
+		t.Run("for_percent_size", func(t *testing.T) {
+			actual, err := getRequestedSizeFromString("50%", resource.MustParse("10G"))
+			if err != nil {
+				t.Error(err)
+			}
+
+			expected := resource.MustParse("5G")
+			assert.Equal(t, expected.Value(), actual.Value())
+		})
+
+		t.Run("for_number_size", func(t *testing.T) {
+			actual, err := getRequestedSizeFromString("5G", resource.MustParse("10G"))
+			if err != nil {
+				t.Error(err)
+			}
+
+			expected := resource.MustParse("5G")
+			assert.Equal(t, expected.Value(), actual.Value())
+		})
+	})
+
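+	// extractPathsFromBlockDevices should collect the device paths for the
+	// requested block device names.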
+	t.Run("extractPathsFromBlockDevices", func(t *testing.T) {
+		const (
+			firstBd  = "first"
+			secondBd = "second"
+
+			firstPath  = "first-path"
+			secondPath = "second-path"
+		)
+		bdNames := []string{firstBd, secondBd}
+		bds := map[string]v1alpha1.BlockDevice{
+			firstBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: firstBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Path: firstPath,
+				},
+			},
+			secondBd: {
+				ObjectMeta: v1.ObjectMeta{
+					Name: secondBd,
+				},
+				Status: v1alpha1.BlockDeviceStatus{
+					Path: secondPath,
+				},
+			},
+		}
+
+		expected := []string{firstPath, secondPath}
+		actual := extractPathsFromBlockDevices(bdNames, bds)
+		assert.ElementsMatch(t, expected, actual)
+	})
+
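+	// validateSpecBlockDevices should fail when a referenced block device is
+	// missing or belongs to a different node.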
+	t.Run("validateSpecBlockDevices", func(t *testing.T) {
+		t.Run("validation_passes", func(t *testing.T) {
+			const (
+				nodeName = "nodeName"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{
+						"first", "second",
+					},
+				},
+			}
+
+			bds := map[string]v1alpha1.BlockDevice{
+				"first": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "first",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+
+				"second": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "second",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+			}
+
+			valid, reason := validateSpecBlockDevices(lvg, bds)
+			if assert.True(t, valid) {
+				assert.Equal(t, "", reason)
+			}
+		})
+
+		t.Run("validation_fails_due_to_bd_does_not_exist", func(t *testing.T) {
+			const (
+				nodeName = "nodeName"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{
+						"first", "second",
+					},
+				},
+			}
+
+			bds := map[string]v1alpha1.BlockDevice{
+				"first": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "first",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+			}
+
+			valid, _ := validateSpecBlockDevices(lvg, bds)
+			assert.False(t, valid)
+		})
+
+		t.Run("validation_fails_due_to_bd_has_dif_node", func(t *testing.T) {
+			const (
+				nodeName = "nodeName"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{
+				Spec: v1alpha1.LvmVolumeGroupSpec{
+					BlockDeviceNames: []string{
+						"first", "second",
+					},
+				},
+			}
+
+			bds := map[string]v1alpha1.BlockDevice{
+				"first": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "first",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: nodeName,
+					},
+				},
+				"second": {
+					ObjectMeta: v1.ObjectMeta{
+						Name: "second",
+					},
+					Status: v1alpha1.BlockDeviceStatus{
+						NodeName: "another-node",
+					},
+				},
+			}
+
+			valid, _ := validateSpecBlockDevices(lvg, bds)
+			assert.False(t, valid)
+		})
+	})
+
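+	// syncThinPoolsAllocationLimit should copy the AllocationLimit from the spec
+	// into the matching thin pool in the status.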
+	t.Run("syncThinPoolsAllocationLimit", func(t *testing.T) {
+		const lvgName = "test"
+		lvg := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: lvgName,
+			},
+			Spec: v1alpha1.LvmVolumeGroupSpec{
+				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{
+					{
+						Name:            "first",
+						Size:            "1G",
+						AllocationLimit: "200%",
+					},
+				},
+			},
+			Status: v1alpha1.LvmVolumeGroupStatus{
+				ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{
+					{
+						Name:            "first",
+						AllocationLimit: "150%",
+					},
+				},
+			},
+		}
+
+		err := cl.Create(ctx, lvg)
+		if err != nil {
+			t.Error(err)
+		}
+
+		defer func() {
+			err = cl.Delete(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		err = syncThinPoolsAllocationLimit(ctx, cl, log, lvg)
+		if err != nil {
+			t.Error(err)
+		}
+
+		updatedLVG := &v1alpha1.LvmVolumeGroup{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: lvgName,
+		}, updatedLVG)
+		if err != nil {
+			t.Error(err)
+		}
+
+		assert.Equal(t, lvg.Spec.ThinPools[0].AllocationLimit, updatedLVG.Status.ThinPools[0].AllocationLimit)
+	})
+
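+	// addLVGFinalizerIfNotExist should add the finalizer at most once and report
+	// whether it was added.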
+	t.Run("addLVGFinalizerIfNotExist", func(t *testing.T) {
+		t.Run("not_exist_adds", func(t *testing.T) {
+			const (
+				lvgName = "test"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Finalizers = []string{}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			defer func() {
+				err = cl.Delete(ctx, lvg)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+
+			added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			if assert.True(t, added) {
+				updatedLVG := &v1alpha1.LvmVolumeGroup{}
+				err = cl.Get(ctx, client.ObjectKey{
+					Name: lvgName,
+				}, updatedLVG)
+				if err != nil {
+					t.Error(err)
+				}
+
+				assert.True(t, slices.Contains(updatedLVG.Finalizers, internal.SdsNodeConfiguratorFinalizer))
+			}
+		})
+
+		t.Run("does_exist_no_adds", func(t *testing.T) {
+			const (
+				lvgName = "test-1"
+			)
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Finalizers = []string{
+				internal.SdsNodeConfiguratorFinalizer,
+			}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			defer func() {
+				err = cl.Delete(ctx, lvg)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+
+			added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			if assert.False(t, added) {
+				updatedLVG := &v1alpha1.LvmVolumeGroup{}
+				err = cl.Get(ctx, client.ObjectKey{
+					Name: lvgName,
+				}, updatedLVG)
+				if err != nil {
+					t.Error(err)
+				}
+
+				assert.True(t, slices.Contains(updatedLVG.Finalizers, internal.SdsNodeConfiguratorFinalizer))
+			}
+		})
+	})
+
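+	// updateLVGConditionIfNeeded should rewrite the condition (including its
+	// LastTransitionTime) only when the desired state differs from the current one.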
+	t.Run("updateLVGConditionIfNeeded", func(t *testing.T) {
+		t.Run("diff_states_updates", func(t *testing.T) {
+			const (
+				lvgName   = "test-name"
+				badReason = "bad"
+			)
+			curTime := v1.NewTime(time.Now())
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Generation = 1
+			lvg.Status.Conditions = []v1.Condition{
+				{
+					Type:               internal.TypeVGConfigurationApplied,
+					Status:             v1.ConditionTrue,
+					ObservedGeneration: 1,
+					LastTransitionTime: curTime,
+					Reason:             "",
+					Message:            "",
+				},
+			}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, badReason, "")
+			if err != nil {
+				t.Error(err)
+			}
+
+			updatedLVG := &v1alpha1.LvmVolumeGroup{}
+			err = cl.Get(ctx, client.ObjectKey{
+				Name: lvgName,
+			}, updatedLVG)
+			if err != nil {
+				t.Error(err)
+			}
+
+			assert.Equal(t, v1.ConditionFalse, updatedLVG.Status.Conditions[0].Status)
+			assert.Equal(t, badReason, updatedLVG.Status.Conditions[0].Reason)
+
+			assert.NotEqual(t, curTime, lvg.Status.Conditions[0].LastTransitionTime)
+		})
+
+		t.Run("same_states_does_not_update", func(t *testing.T) {
+			const (
+				lvgName = "test-name-2"
+			)
+			curTime := v1.NewTime(time.Now())
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.Name = lvgName
+			lvg.Generation = 1
+			lvg.Status.Conditions = []v1.Condition{
+				{
+					Type:               internal.TypeVGConfigurationApplied,
+					Status:             v1.ConditionTrue,
+					ObservedGeneration: 1,
+					LastTransitionTime: curTime,
+					Reason:             "",
+					Message:            "",
+				},
+			}
+
+			err := cl.Create(ctx, lvg)
+			if err != nil {
+				t.Error(err)
+			}
+
+			err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, "", "")
+			if err != nil {
+				t.Error(err)
+			}
+
+			assert.Equal(t, curTime, lvg.Status.Conditions[0].LastTransitionTime)
+		})
+	})
+
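+	// shouldReconcileLVGByDeleteFunc should return true only for resources that
+	// are marked for deletion.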
+	t.Run("shouldReconcileLVGByDeleteFunc", func(t *testing.T) {
+		t.Run("returns_true", func(t *testing.T) {
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.DeletionTimestamp = &v1.Time{}
+
+			assert.True(t, shouldReconcileLVGByDeleteFunc(lvg))
+		})
+
+		t.Run("returns_false", func(t *testing.T) {
+			lvg := &v1alpha1.LvmVolumeGroup{}
+			lvg.DeletionTimestamp = nil
+
+			assert.False(t, shouldReconcileLVGByDeleteFunc(lvg))
+		})
+	})
+
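+	// shouldLVGWatcherReconcileUpdateEvent should skip resources whose
+	// configuration is still being applied (Updating/Creating reasons) and react
+	// to spec changes, deletion marks, and device/PV size mismatches.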
+	t.Run("shouldLVGWatcherReconcileUpdateEvent", func(t *testing.T) {
+		t.Run("deletion_timestamp_not_nil_returns_true", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.DeletionTimestamp = &v1.Time{}
+			assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("spec_is_diff_returns_true", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			oldLVG.Spec.BlockDeviceNames = []string{"first"}
+			newLVG.Spec.BlockDeviceNames = []string{"first", "second"}
+			assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("condition_vg_configuration_applied_is_updating_returns_false", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.Status.Conditions = []v1.Condition{
+				{
+					Type:   internal.TypeVGConfigurationApplied,
+					Reason: internal.ReasonUpdating,
+				},
+			}
+			assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("condition_vg_configuration_applied_is_creating_returns_false", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.Status.Conditions = []v1.Condition{
+				{
+					Type:   internal.TypeVGConfigurationApplied,
+					Reason: internal.ReasonCreating,
+				},
+			}
+			assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+
+		t.Run("dev_size_and_pv_size_are_diff_returns_true", func(t *testing.T) {
+			oldLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG := &v1alpha1.LvmVolumeGroup{}
+			newLVG.Status.Nodes = []v1alpha1.LvmVolumeGroupNode{
+				{
+					Devices: []v1alpha1.LvmVolumeGroupDevice{
+						{
+							BlockDevice: "test",
+							DevSize:     resource.MustParse("1G"),
+							PVSize:      resource.MustParse("2G"),
+						},
+					},
+					Name: "some-node",
+				},
+			}
+			assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG))
+		})
+	})
+
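+	// checkIfVGExist should report whether the given VG name is present in the
+	// scanned VG list.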
+	t.Run("checkIfVGExist", func(t *testing.T) {
+		const targetName = "test"
+		vgs := []internal.VGData{
+			{
+				VGName: targetName,
+			},
+			{
+				VGName: "another-name",
+			},
+		}
+
+		t.Run("returns_true", func(t *testing.T) {
+			assert.True(t, checkIfVGExist(targetName, vgs))
+		})
+
+		t.Run("returns_false", func(t *testing.T) {
+			assert.False(t, checkIfVGExist("not-existed", vgs))
+		})
+	})
+
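+	// DeleteLVMVolumeGroup should remove the resource from the cluster.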
+	t.Run("DeleteLVMVolumeGroup", func(t *testing.T) {
+		const (
+			lvgName  = "test-lvg"
+			nodeName = "test-node"
+		)
+
+		lvgToDelete := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: lvgName,
+			},
+			Status: v1alpha1.LvmVolumeGroupStatus{
+				Nodes: []v1alpha1.LvmVolumeGroupNode{
+					{
+						Name: nodeName,
+					},
+				},
+			},
+		}
+
+		err := cl.Create(ctx, lvgToDelete)
+		if err != nil {
+			t.Error(err)
+		}
+
+		defer func() {
+			_ = cl.Delete(ctx, lvgToDelete)
+		}()
+
+		lvgCheck := &v1alpha1.LvmVolumeGroup{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: lvgName,
+		}, lvgCheck)
+		if err != nil {
+			t.Error(err)
+		}
+		assert.Equal(t, lvgName, lvgCheck.Name)
+
+		err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvgToDelete, nodeName)
+		if err != nil {
+			t.Error(err)
+		}
+
+		lvgNewCheck := &v1alpha1.LvmVolumeGroup{}
+		err = cl.Get(ctx, client.ObjectKey{
+			Name: lvgName,
+		}, lvgNewCheck)
+		if assert.True(t, apierrors.IsNotFound(err)) {
+			assert.Equal(t, "", lvgNewCheck.Name)
+		}
+	})
+
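+	// getLVMVolumeGroup should return the resource by name and a NotFound error
+	// when no such resource exists.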
+	t.Run("getLVMVolumeGroup_lvg_exists_returns_correct", func(t *testing.T) {
+		const name = "test_name"
+		lvgToCreate := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: name,
+			},
+		}
+
+		err := cl.Create(ctx, lvgToCreate)
+		if err != nil {
+			t.Error(err)
+		} else {
+			defer func() {
+				err = cl.Delete(ctx, lvgToCreate)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+		}
+
+		actual, err := getLVMVolumeGroup(ctx, cl, metrics, name)
+		if assert.NoError(t, err) {
+			assert.NotNil(t, actual)
+			assert.Equal(t, name, actual.Name)
+		}
+	})
+
+	t.Run("getLVMVolumeGroup_lvg_doesnt_exist_returns_nil", func(t *testing.T) {
+		const name = "test_name"
+		testObj := &v1alpha1.LvmVolumeGroup{
+			ObjectMeta: v1.ObjectMeta{
+				Name: name,
+			},
+		}
+
+		err := cl.Create(ctx, testObj)
+		if err != nil {
+			t.Error(err)
+		} else {
+			defer func() {
+				err = cl.Delete(ctx, testObj)
+				if err != nil {
+					t.Error(err)
+				}
+			}()
+		}
+
+		actual, err := getLVMVolumeGroup(ctx, cl, metrics, "another-name")
+
+		if assert.EqualError(t, err, "lvmvolumegroups.storage.deckhouse.io \"another-name\" not found") {
+			assert.Nil(t, actual)
+		}
+	})
+}
diff --git a/images/agent/src/pkg/kubutils/kubernetes.go b/images/agent/src/pkg/kubutils/kubernetes.go
index b80d6a36..bcd95a6c 100644
--- a/images/agent/src/pkg/kubutils/kubernetes.go
+++ b/images/agent/src/pkg/kubutils/kubernetes.go
@@ -18,12 +18,12 @@ package kubutils
 
 import (
 	"fmt"
+
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
 func KubernetesDefaultConfigCreate() (*rest.Config, error) {
-	//todo validate empty
 	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
 		clientcmd.NewDefaultClientConfigLoadingRules(),
 		&clientcmd.ConfigOverrides{},
diff --git a/images/agent/src/pkg/logger/logger.go b/images/agent/src/pkg/logger/logger.go
index 34b94245..164a2059 100644
--- a/images/agent/src/pkg/logger/logger.go
+++ b/images/agent/src/pkg/logger/logger.go
@@ -17,11 +17,11 @@ limitations under the License.
 package logger
 
 import (
-	"flag"
 	"fmt"
+	"strconv"
+
 	"github.com/go-logr/logr"
-	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/klogr"
+	"k8s.io/klog/v2/textlogger"
 )
 
 const (
@@ -50,13 +50,12 @@ type Logger struct {
 }
 
 func NewLogger(level Verbosity) (*Logger, error) {
-	klog.InitFlags(nil)
-	if err := flag.Set("v", string(level)); err != nil {
+	v, err := strconv.Atoi(string(level))
+	if err != nil {
 		return nil, err
 	}
-	flag.Parse()
 
-	log := klogr.New().WithCallDepth(1)
+	log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)
 
 	return &Logger{log: log}, nil
 }
diff --git a/images/agent/src/pkg/monitoring/monitoring.go b/images/agent/src/pkg/monitoring/monitoring.go
index b0158c35..fa638a46 100644
--- a/images/agent/src/pkg/monitoring/monitoring.go
+++ b/images/agent/src/pkg/monitoring/monitoring.go
@@ -17,11 +17,12 @@ limitations under the License.
 package monitoring
 
 import (
+	"strings"
+	"time"
+
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/utils/clock"
 	"sigs.k8s.io/controller-runtime/pkg/metrics"
-	"strings"
-	"time"
 )
 
 const (
@@ -133,15 +134,15 @@ func (m Metrics) UtilsCommandsErrorsCount(controllerName, command string) promet
 	return utilsCommandsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(command))
 }
 
-func (m Metrics) ApiMethodsDuration(controllerName, method string) prometheus.Observer {
+func (m Metrics) APIMethodsDuration(controllerName, method string) prometheus.Observer {
 	return apiMethodsDuration.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
-func (m Metrics) ApiMethodsExecutionCount(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsExecutionCount(controllerName, method string) prometheus.Counter {
 	return apiMethodsExecutionCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
-func (m Metrics) ApiMethodsErrors(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsErrors(controllerName, method string) prometheus.Counter {
 	return apiMethodsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
diff --git a/images/agent/src/pkg/scanner/scanner.go b/images/agent/src/pkg/scanner/scanner.go
index f5b2f60f..7610576a 100644
--- a/images/agent/src/pkg/scanner/scanner.go
+++ b/images/agent/src/pkg/scanner/scanner.go
@@ -1,21 +1,23 @@
 package scanner
 
 import (
-	"agent/config"
-	"agent/internal"
-	"agent/pkg/cache"
-	"agent/pkg/controller"
-	"agent/pkg/logger"
-	"agent/pkg/throttler"
-	"agent/pkg/utils"
 	"bytes"
 	"context"
 	"errors"
 	"fmt"
+	"time"
+
 	"github.com/pilebones/go-udev/netlink"
 	kubeCtrl "sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	"time"
+
+	"agent/config"
+	"agent/internal"
+	"agent/pkg/cache"
+	"agent/pkg/controller"
+	"agent/pkg/logger"
+	"agent/pkg/throttler"
+	"agent/pkg/utils"
 )
 
 func RunScanner(ctx context.Context, log logger.Logger, cfg config.Options, sdsCache *cache.Cache, bdCtrl, lvgDiscoverCtrl kubeCtrl.Controller) error {
@@ -115,7 +117,7 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", controller.BlockDeviceCtrlName))
 	bdRes, err := bdCtrl.Reconcile(ctx, reconcile.Request{})
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occured while %s reconcile", controller.BlockDeviceCtrlName))
+		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", controller.BlockDeviceCtrlName))
 		return err
 	}
 
@@ -128,7 +130,6 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 			}
 
 			log.Info("[runControllersReconcile] successfully reconciled BlockDevices after a retry")
-			return
 		}()
 	}
 
@@ -137,7 +138,7 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 	log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
 	lvgRes, err := lvgDiscoverCtrl.Reconcile(ctx, reconcile.Request{})
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occured while %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
+		log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", controller.LVMVolumeGroupDiscoverCtrlName))
 		return err
 	}
 	if lvgRes.RequeueAfter > 0 {
@@ -149,7 +150,6 @@ func runControllersReconcile(ctx context.Context, log logger.Logger, bdCtrl, lvg
 			}
 
 			log.Info("[runControllersReconcile] successfully reconciled LVMVolumeGroups after a retry")
-			return
 		}()
 	}
 	log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", controller.LVMVolumeGroupDiscoverCtrlName))
diff --git a/images/agent/src/pkg/utils/commands.go b/images/agent/src/pkg/utils/commands.go
index e39806f4..f00b84a1 100644
--- a/images/agent/src/pkg/utils/commands.go
+++ b/images/agent/src/pkg/utils/commands.go
@@ -17,16 +17,16 @@ limitations under the License.
 package utils
 
 import (
-	"agent/internal"
 	"bufio"
 	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
+	golog "log"
 	"os/exec"
 	"regexp"
 
-	golog "log"
+	"agent/internal"
 )
 
 func GetBlockDevices(ctx context.Context) ([]internal.Device, string, bytes.Buffer, error) {
@@ -243,8 +243,8 @@ func CreateVGShared(vgName, lvmVolumeGroupName string, pvNames []string) (string
 	return cmd.String(), nil
 }
 
-func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) {
-	args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)}
+func CreateThinPool(thinPoolName, vgName string, size int64) (string, error) {
+	args := []string{"lvcreate", "-L", fmt.Sprintf("%dk", size/1024), "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)}
 	extendedArgs := lvmStaticExtendedArgs(args)
 	cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
 
@@ -257,8 +257,8 @@ func CreateThinPool(thinPoolName, VGName string, size int64) (string, error) {
 	return cmd.String(), nil
 }
 
-func CreateThinPoolFullVGSpace(thinPoolName, VGName string) (string, error) {
-	args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", VGName, thinPoolName)}
+func CreateThinPoolFullVGSpace(thinPoolName, vgName string) (string, error) {
+	args := []string{"lvcreate", "-l", "100%FREE", "-T", fmt.Sprintf("%s/%s", vgName, thinPoolName)}
 	extendedArgs := lvmStaticExtendedArgs(args)
 	cmd := exec.Command(internal.NSENTERCmd, extendedArgs...)
 
@@ -476,12 +476,9 @@ func unmarshalPVs(out []byte) ([]internal.PVData, error) {
 		return nil, err
 	}
 
-	var pvs []internal.PVData
-
+	pvs := make([]internal.PVData, 0, len(pvR.Report))
 	for _, rep := range pvR.Report {
-		for _, pv := range rep.PV {
-			pvs = append(pvs, pv)
-		}
+		pvs = append(pvs, rep.PV...)
 	}
 
 	return pvs, nil
@@ -494,12 +491,9 @@ func unmarshalVGs(out []byte) ([]internal.VGData, error) {
 		return nil, err
 	}
 
-	var vgs []internal.VGData
-
+	vgs := make([]internal.VGData, 0, len(vgR.Report))
 	for _, rep := range vgR.Report {
-		for _, vg := range rep.VG {
-			vgs = append(vgs, vg)
-		}
+		vgs = append(vgs, rep.VG...)
 	}
 
 	return vgs, nil
@@ -512,27 +506,19 @@ func unmarshalLVs(out []byte) ([]internal.LVData, error) {
 		return nil, err
 	}
 
-	var lvs []internal.LVData
-
+	lvs := make([]internal.LVData, 0, len(lvR.Report))
 	for _, rep := range lvR.Report {
-		for _, lv := range rep.LV {
-			lvs = append(lvs, lv)
-		}
+		lvs = append(lvs, rep.LV...)
 	}
 
 	return lvs, nil
 }
 
-func extendArgs(args []string) []string {
-	nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"}
-	return append(nsenterArgs, args...)
-}
-
 func lvmStaticExtendedArgs(args []string) []string {
 	nsenterArgs := []string{"-t", "1", "-m", "-u", "-i", "-n", "-p"}
 	lvmStaticBin := []string{"--", internal.LVMCmd}
-	result := append(nsenterArgs, lvmStaticBin...)
-	return append(result, args...)
+	nsenterArgs = append(nsenterArgs, lvmStaticBin...)
+	return append(nsenterArgs, args...)
 }
 
 // filterStdErr processes a bytes.Buffer containing stderr output and filters out specific
@@ -560,7 +546,13 @@ func filterStdErr(command string, stdErr bytes.Buffer) bytes.Buffer {
 	// will try to resize the Thin-pool with 100%VG space and will get the error.
 	regexpNoSizeChangeError := ` No size change.+`
 	regex1, err := regexp.Compile(regexpPattern)
+	if err != nil {
+		return stdErr
+	}
 	regex2, err := regexp.Compile(regexpSocketError)
+	if err != nil {
+		return stdErr
+	}
 	regex3, err := regexp.Compile(regexpNoSizeChangeError)
 	if err != nil {
 		return stdErr
diff --git a/images/agent/src/pkg/utils/commands_test.go b/images/agent/src/pkg/utils/commands_test.go
index 5bae6ecf..98e447d5 100644
--- a/images/agent/src/pkg/utils/commands_test.go
+++ b/images/agent/src/pkg/utils/commands_test.go
@@ -17,12 +17,12 @@ limitations under the License.
 package utils
 
 import (
-	"agent/internal"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/api/resource"
 
-	"github.com/stretchr/testify/assert"
+	"agent/internal"
 )
 
 func TestCommands(t *testing.T) {
@@ -63,6 +63,9 @@ func TestCommands(t *testing.T) {
 }`
 
 			size30G, err := resource.ParseQuantity("30G")
+			if err != nil {
+				t.Error(err)
+			}
 			size1M, err := resource.ParseQuantity("1M")
 			if err != nil {
 				t.Error(err)
@@ -216,7 +219,7 @@ func TestCommands(t *testing.T) {
 			expectedVGs := internal.VG{VG: []internal.VGData{
 				{
 					VGName:   "test-vg",
-					VGUuid:   "P14t8J-nfUE-hryT-LiTv-JdFD-Wqxg-R8taCa",
+					VGUUID:   "P14t8J-nfUE-hryT-LiTv-JdFD-Wqxg-R8taCa",
 					VGTags:   "test-tag",
 					VGSize:   size2G,
 					VGShared: "test-shared",
diff --git a/images/sds-health-watcher-controller/src/cmd/main.go b/images/sds-health-watcher-controller/src/cmd/main.go
index 676387c8..9d750539 100644
--- a/images/sds-health-watcher-controller/src/cmd/main.go
+++ b/images/sds-health-watcher-controller/src/cmd/main.go
@@ -19,17 +19,11 @@ package main
 import (
 	"context"
 	"fmt"
-	dh "github.com/deckhouse/deckhouse/deckhouse-controller/pkg/apis/deckhouse.io/v1alpha1"
-	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"os"
 	goruntime "runtime"
-	"sds-health-watcher-controller/config"
-	"sds-health-watcher-controller/pkg/controller"
-	"sds-health-watcher-controller/pkg/kubutils"
-	"sds-health-watcher-controller/pkg/logger"
-	"sds-health-watcher-controller/pkg/monitoring"
-	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
+	dh "github.com/deckhouse/deckhouse/deckhouse-controller/pkg/apis/deckhouse.io/v1alpha1"
+	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	v1 "k8s.io/api/core/v1"
 	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -37,6 +31,13 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
+	"sds-health-watcher-controller/config"
+	"sds-health-watcher-controller/pkg/controller"
+	"sds-health-watcher-controller/pkg/kubutils"
+	"sds-health-watcher-controller/pkg/logger"
+	"sds-health-watcher-controller/pkg/monitoring"
 )
 
 var (
@@ -60,7 +61,7 @@ func main() {
 
 	log, err := logger.NewLogger(cfgParams.Loglevel)
 	if err != nil {
-		fmt.Println(fmt.Sprintf("unable to create NewLogger, err: %v", err))
+		fmt.Printf("unable to create NewLogger, err: %v\n", err)
 		os.Exit(1)
 	}
 
@@ -71,7 +72,6 @@ func main() {
 	log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevel, cfgParams.Loglevel))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.MetricsPort, cfgParams.MetricsPort))
 	log.Info(fmt.Sprintf("[main] %s = %s", config.ScanInterval, cfgParams.ScanIntervalSec))
-	log.Info(fmt.Sprintf("[main] %s = %s", config.NodeName, cfgParams.NodeName))
 
 	kConfig, err := kubutils.KubernetesDefaultConfigCreate()
 	if err != nil {
diff --git a/images/sds-health-watcher-controller/src/config/config.go b/images/sds-health-watcher-controller/src/config/config.go
index fa0b865c..14ee89e2 100644
--- a/images/sds-health-watcher-controller/src/config/config.go
+++ b/images/sds-health-watcher-controller/src/config/config.go
@@ -19,15 +19,14 @@ package config
 import (
 	"fmt"
 	"os"
-	"sds-health-watcher-controller/pkg/logger"
-
 	"strconv"
 	"time"
+
+	"sds-health-watcher-controller/pkg/logger"
 )
 
 const (
 	ScanInterval                         = "SCAN_INTERVAL"
-	NodeName                             = "NODE_NAME"
 	LogLevel                             = "LOG_LEVEL"
 	MetricsPort                          = "METRICS_PORT"
 	DefaultHealthProbeBindAddressEnvName = "HEALTH_PROBE_BIND_ADDRESS"
diff --git a/images/sds-health-watcher-controller/src/config/config_test.go b/images/sds-health-watcher-controller/src/config/config_test.go
index 8885fda6..e5a26177 100644
--- a/images/sds-health-watcher-controller/src/config/config_test.go
+++ b/images/sds-health-watcher-controller/src/config/config_test.go
@@ -17,117 +17,38 @@ limitations under the License.
 package config
 
 import (
-	"fmt"
-	"github.com/stretchr/testify/assert"
 	"os"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestNewConfig(t *testing.T) {
 	t.Run("AllValuesSet_ReturnsNoError", func(t *testing.T) {
-		expNodeName := "test-node"
 		expMetricsPort := ":0000"
-		expMachineId := "test-id"
 
-		err := os.Setenv(NodeName, expNodeName)
-		if err != nil {
-			t.Error(err)
-		}
-		err = os.Setenv(MetricsPort, expMetricsPort)
+		err := os.Setenv(MetricsPort, expMetricsPort)
 		if err != nil {
 			t.Error(err)
 		}
-		err = os.Setenv(MachineID, expMachineId)
 		defer os.Clearenv()
 
 		opts, err := NewConfig()
 
 		if assert.NoError(t, err) {
-			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
 		}
 	})
 
-	t.Run("NodeNameNotSet_ReturnsError", func(t *testing.T) {
-		machineIdFile := "./host-root/etc/machine-id"
-		expMetricsPort := ":0000"
-		expErrorMsg := fmt.Sprintf("[NewConfig] required %s env variable is not specified", NodeName)
-
-		err := os.Setenv(MetricsPort, expMetricsPort)
-		if err != nil {
-			t.Error(err)
-		}
-		defer os.Clearenv()
-
-		err = os.MkdirAll("./host-root/etc", 0750)
-		if err != nil {
-			t.Error(err)
-		}
-
-		file, err := os.Create(machineIdFile)
-		if err != nil {
-			t.Error(err)
-		}
-		defer func() {
-			err = file.Close()
-			if err != nil {
-				t.Error(err)
-			}
-
-			err = os.RemoveAll("./host-root")
-			if err != nil {
-				t.Error(err)
-			}
-		}()
-
-		_, err = NewConfig()
-		assert.EqualError(t, err, expErrorMsg)
-	})
-
-	t.Run("MachineIdNotSet_ReturnsError", func(t *testing.T) {
-		expMetricsPort := ":0000"
-		expNodeName := "test-node"
-		expErrorMsg := fmt.Sprintf("[NewConfig] unable to get %s, error: %s",
-			MachineID, "open /host-root/etc/machine-id: no such file or directory")
-
-		err := os.Setenv(MetricsPort, expMetricsPort)
-		if err != nil {
-			t.Error(err)
-		}
-		err = os.Setenv(NodeName, expNodeName)
-		if err != nil {
-			t.Error(err)
-		}
-		defer os.Clearenv()
-
-		_, err = NewConfig()
-		assert.EqualError(t, err, expErrorMsg)
-	})
-
 	t.Run("MetricsPortNotSet_ReturnsDefaultPort", func(t *testing.T) {
-		expNodeName := "test-node"
 		expMetricsPort := ":8080"
-		expMachineId := "test-id"
-
-		err := os.Setenv(NodeName, expNodeName)
-		if err != nil {
-			t.Error(err)
-		}
-		err = os.Setenv(MachineID, expMachineId)
-		if err != nil {
-			t.Error(err)
-		}
 
 		defer os.Clearenv()
 
 		opts, err := NewConfig()
 
 		if assert.NoError(t, err) {
-			assert.Equal(t, expNodeName, opts.NodeName)
 			assert.Equal(t, expMetricsPort, opts.MetricsPort)
-			assert.Equal(t, expMachineId, opts.MachineId)
 		}
 	})
-
 }
diff --git a/images/sds-health-watcher-controller/src/go.mod b/images/sds-health-watcher-controller/src/go.mod
index e8137b95..f38cead1 100644
--- a/images/sds-health-watcher-controller/src/go.mod
+++ b/images/sds-health-watcher-controller/src/go.mod
@@ -6,28 +6,28 @@ require (
 	github.com/cloudflare/cfssl v1.5.0
 	github.com/deckhouse/deckhouse v1.62.4
 	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b
-	github.com/go-logr/logr v1.4.1
-	github.com/prometheus/client_golang v1.19.0
+	github.com/go-logr/logr v1.4.2
+	github.com/prometheus/client_golang v1.19.1
 	github.com/stretchr/testify v1.9.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.30.1
-	k8s.io/apiextensions-apiserver v0.29.4
-	k8s.io/apimachinery v0.30.2
-	k8s.io/client-go v0.29.4
-	k8s.io/klog/v2 v2.120.1
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
-	sigs.k8s.io/controller-runtime v0.17.3
+	k8s.io/api v0.31.0
+	k8s.io/apiextensions-apiserver v0.31.0
+	k8s.io/apimachinery v0.31.0
+	k8s.io/client-go v0.31.0
+	k8s.io/klog/v2 v2.130.1
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+	sigs.k8s.io/controller-runtime v0.19.0
 )
 
 require (
 	github.com/Masterminds/semver/v3 v3.2.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.5 // indirect
@@ -46,24 +46,24 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.48.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/oauth2 v0.17.0 // indirect
-	golang.org/x/sys v0.19.0 // indirect
-	golang.org/x/term v0.19.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/component-base v0.29.4 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
diff --git a/images/sds-health-watcher-controller/src/go.sum b/images/sds-health-watcher-controller/src/go.sum
index dcea3d15..65829803 100644
--- a/images/sds-health-watcher-controller/src/go.sum
+++ b/images/sds-health-watcher-controller/src/go.sum
@@ -7,8 +7,8 @@ github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkK
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY=
 github.com/cloudflare/cfssl v1.5.0 h1:vFJDAvQgFSRbCn9zg8KpSrrEZrBAQ4KO5oNK7SXEyb0=
 github.com/cloudflare/cfssl v1.5.0/go.mod h1:sPPkBS5L8l8sRc/IOO1jG51Xb34u+TYhL6P//JdODMQ=
@@ -17,8 +17,9 @@ github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6S
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/deckhouse/deckhouse v1.62.4 h1:Jgd9TSGLRE/0nYsg3KabMvguVp+d6oqt/GTxO7EHgO4=
 github.com/deckhouse/deckhouse v1.62.4/go.mod h1:uJICbx/itedld6N9uv3srI6Hdt+m4P6IQyocUrtySVY=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw=
@@ -31,9 +32,11 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -43,29 +46,26 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
 github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
@@ -108,26 +108,27 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
-github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -148,9 +149,10 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
 github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
 github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
 github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
 github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
 github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
@@ -170,13 +172,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200124225646-8b5121be2f68/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
@@ -184,37 +184,26 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
-golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
-golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -222,9 +211,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
-golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -232,17 +220,15 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
 gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
 gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -253,24 +239,22 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY=
-k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM=
-k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk=
-k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8=
-k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk=
-k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo=
-k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
+k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
+k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
+k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
+k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
+k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
+k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk=
-sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
+sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go
index 5e8f4501..7effe7ff 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go
@@ -18,17 +18,15 @@ package controller
 
 import (
 	"context"
-	"errors"
 	"fmt"
+	"reflect"
+	"strings"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/utils/strings/slices"
-	"reflect"
-	"sds-health-watcher-controller/config"
-	"sds-health-watcher-controller/internal"
-	"sds-health-watcher-controller/pkg/logger"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -36,7 +34,10 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
-	"strings"
+
+	"sds-health-watcher-controller/config"
+	"sds-health-watcher-controller/internal"
+	"sds-health-watcher-controller/pkg/logger"
 )
 
 const (
@@ -94,8 +95,8 @@ func RunLVGConditionsWatcher(
 		return err
 	}
 
-	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LvmVolumeGroup{}), handler.Funcs{
-		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] got a create event for the LVMVolumeGroup %s", e.Object.GetName()))
 
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
@@ -103,35 +104,17 @@ func RunLVGConditionsWatcher(
 
 			log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
-
-			oldLVG, ok := e.ObjectOld.(*v1alpha1.LvmVolumeGroup)
-			if !ok {
-				err = errors.New("unable to cast event object to a given type")
-				log.Error(err, "[RunLVGConditionsWatcher] an error occurred while handling a update event")
-				return
-			}
-			log.Debug(fmt.Sprintf("[RunLVGConditionsWatcher] successfully casted an old state of the LVMVolumeGroup %s", oldLVG.Name))
-
-			newLVG, ok := e.ObjectNew.(*v1alpha1.LvmVolumeGroup)
-			if !ok {
-				err = errors.New("unable to cast event object to a given type")
-				log.Error(err, "[RunLVGConditionsWatcher] an error occurred while handling a update event")
-				return
-			}
-			log.Debug(fmt.Sprintf("[RunLVGConditionsWatcher] successfully casted a new state of the LVMVolumeGroup %s", newLVG.Name))
-
-			if reflect.DeepEqual(oldLVG.Status.Conditions, newLVG.Status.Conditions) {
-				log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] no condition changes for the LVMVolumeGroup %s. No need to reconcile", newLVG.Name))
+			if reflect.DeepEqual(e.ObjectOld.Status.Conditions, e.ObjectNew.Status.Conditions) {
+				log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] no condition changes for the LVMVolumeGroup %s. No need to reconcile", e.ObjectNew.Name))
 				return
 			}
 
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}
 			q.Add(request)
-
 		},
-	})
+	}))
 	if err != nil {
 		log.Error(err, "[RunLVGConditionsWatcher] unable to watch the events")
 		return err
@@ -148,7 +131,7 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo
 		return true, nil
 	}
 
-	crd, err := getLVGCRD(ctx, cl)
+	crd, err := getCRD(ctx, cl, lvgCrdName)
 	if err != nil {
 		log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to get crd %s", lvgCrdName))
 		return true, err
@@ -156,7 +139,7 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo
 
 	targetConCount, err := getTargetConditionsCount(crd)
 	if err != nil {
-		log.Error(err, fmt.Sprintf("[reconcileLVGConditions]"))
+		log.Error(err, "[reconcileLVGConditions] unable to get target conditions count")
 		return true, err
 	}
 
@@ -187,9 +170,15 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo
 		log.Trace(fmt.Sprintf("[reconcileLVGConditions] check condition %+v of the LVMVolumeGroup %s", c, lvg.Name))
 		if c.Type == internal.TypeReady {
 			log.Debug(fmt.Sprintf("[reconcileLVGConditions] the condition %s of the LVMVolumeGroup %s is ours, skip it", c.Type, lvg.Name))
-		} else if c.Status == metav1.ConditionTrue {
+			continue
+		}
+
+		if c.Status == metav1.ConditionTrue {
 			log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s condition %s has status True", lvg.Name, c.Type))
-		} else if c.Reason == internal.ReasonCreating {
+			continue
+		}
+
+		if c.Reason == internal.ReasonCreating {
 			ready = false
 			falseConditions = nil
 			log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s condition %s has Creating reason. Turn the LVMVolumeGroup Ready condition and phase to Pending", lvg.Name, c.Type))
@@ -206,7 +195,9 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo
 			}
 
 			break
-		} else if c.Reason == internal.ReasonTerminating {
+		}
+
+		if c.Reason == internal.ReasonTerminating {
 			ready = false
 			falseConditions = nil
 			log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s condition %s has Terminating reason. Turn the LVMVolumeGroup Ready condition and phase to Terminating", lvg.Name, c.Type))
@@ -222,7 +213,9 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo
 				return true, err
 			}
 			break
-		} else if c.Status == metav1.ConditionFalse &&
+		}
+
+		if c.Status == metav1.ConditionFalse &&
 			!slices.Contains(acceptableReasons, c.Reason) {
 			log.Warning(fmt.Sprintf("[reconcileLVGConditions] the condition %s of the LVMVolumeGroup %s has status False and its reason is not acceptable", c.Type, lvg.Name))
 			falseConditions = append(falseConditions, c.Type)
@@ -259,7 +252,6 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo
 		err = updateLVMVolumeGroupPhaseIfNeeded(ctx, cl, lvg, internal.PhaseReady)
 		if err != nil {
 			log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to update the LVMVolumeGroup %s phase", lvg.Name))
-
 		}
 		log.Info(fmt.Sprintf("[reconcileLVGConditions] successfully reconciled the LVMVolumeGroup %s phase to Ready", lvg.Name))
 		log.Info(fmt.Sprintf("[reconcileLVGConditions] successfully reconciled conditions of the LVMVolumeGroup %s", lvg.Name))
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go
index e6ab1f0b..05be62d1 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go
@@ -2,6 +2,7 @@ package controller
 
 import (
 	"context"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"gopkg.in/yaml.v3"
 	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -34,7 +35,6 @@ func getTargetConditionsCount(lvgCrd *v1.CustomResourceDefinition) (int, error)
 		} `json:"properties"`
 	}
 	i := item{}
-
 	json, err := lvgCrd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["status"].Properties["conditions"].Items.MarshalJSON()
 	if err != nil {
 		return 0, err
@@ -48,10 +48,10 @@ func getTargetConditionsCount(lvgCrd *v1.CustomResourceDefinition) (int, error)
 	return len(i.Properties.Type.Enum), nil
 }
 
-func getLVGCRD(ctx context.Context, cl client.Client) (*v1.CustomResourceDefinition, error) {
+func getCRD(ctx context.Context, cl client.Client, crdName string) (*v1.CustomResourceDefinition, error) {
 	crd := &v1.CustomResourceDefinition{}
 	err := cl.Get(ctx, client.ObjectKey{
-		Name: lvgCrdName,
+		Name: crdName,
 	}, crd)
 
 	return crd, err
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go
new file mode 100644
index 00000000..2ec4b0c9
--- /dev/null
+++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_test.go
@@ -0,0 +1,107 @@
+package controller
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestLVGConditionsWatcher(t *testing.T) {
+	cl := NewFakeClient()
+	ctx := context.Background()
+
+	t.Run("getCRD", func(t *testing.T) {
+		targetName := "target"
+		crds := []v1.CustomResourceDefinition{
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: targetName,
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "other-name",
+				},
+			},
+		}
+
+		for _, crd := range crds {
+			err := cl.Create(ctx, &crd)
+			if err != nil {
+				t.Error(err)
+			}
+		}
+
+		crd, err := getCRD(ctx, cl, targetName)
+		if err != nil {
+			t.Error(err)
+		}
+
+		assert.Equal(t, targetName, crd.Name)
+	})
+
+	t.Run("getTargetConditionsCount", func(t *testing.T) {
+		first, err := json.Marshal("first")
+		if err != nil {
+			t.Error(err)
+		}
+		second, err := json.Marshal("second")
+		if err != nil {
+			t.Error(err)
+		}
+		third, err := json.Marshal("third")
+		if err != nil {
+			t.Error(err)
+		}
+		crd := &v1.CustomResourceDefinition{
+			Spec: v1.CustomResourceDefinitionSpec{
+				Versions: []v1.CustomResourceDefinitionVersion{
+					{
+						Schema: &v1.CustomResourceValidation{
+							OpenAPIV3Schema: &v1.JSONSchemaProps{
+								Properties: map[string]v1.JSONSchemaProps{
+									"status": {
+										Properties: map[string]v1.JSONSchemaProps{
+											"conditions": {
+												Items: &v1.JSONSchemaPropsOrArray{
+													Schema: &v1.JSONSchemaProps{
+														Properties: map[string]v1.JSONSchemaProps{
+															"type": {
+																Enum: []v1.JSON{
+																	{
+																		Raw: first,
+																	},
+																	{
+																		Raw: second,
+																	},
+																	{
+																		Raw: third,
+																	},
+																},
+															},
+														},
+													},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		}
+
+		count, err := getTargetConditionsCount(crd)
+		if err != nil {
+			t.Error(err)
+		}
+
+		assert.Equal(t, 3, count)
+	})
+}
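One caveat in the new test: cl.Create(ctx, &crd) takes the address of the range variable. The fake client copies the object, so the test behaves correctly, but on Go versions before 1.22 (where the loop variable is reused across iterations) linters commonly flag the pattern (gosec G601, implicit memory aliasing). A per-iteration copy would silence it — a hedged tweak, not required for correctness:

```go
for _, crd := range crds {
	crd := crd // shadow the loop variable before taking its address (Go < 1.22)
	if err := cl.Create(ctx, &crd); err != nil {
		t.Error(err)
	}
}
```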
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go
index c020e6c0..e6674706 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go
@@ -3,13 +3,12 @@ package controller
 import (
 	"context"
 	"fmt"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	errors2 "k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"sds-health-watcher-controller/internal"
-	"sds-health-watcher-controller/pkg/logger"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -17,6 +16,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"sds-health-watcher-controller/internal"
+	"sds-health-watcher-controller/pkg/logger"
 )
 
 const (
@@ -65,20 +67,20 @@ func RunLVGStatusWatcher(
 		return err
 	}
 
-	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LvmVolumeGroup{}), handler.Funcs{
-		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+	err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVGStatusWatcher] got a create event for the LVMVolumeGroup %s", e.Object.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVGStatusWatcher] CreateFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVGStatusWatcher] got an update event for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}
 			q.Add(request)
 			log.Info(fmt.Sprintf("[RunLVGStatusWatcher] UpdateFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.ObjectNew.GetName()))
 		},
-	})
+	}))
 	if err != nil {
 		log.Error(err, "[RunLVGStatusWatcher] unable to watch the events")
 		return err
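RunLVGStatusWatcher gets the same typed-watch treatment. Its create and update funcs are pure enqueues, so it is a direct candidate for the generic enqueueFuncs helper sketched after the lvg_conditions_watcher diff above.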
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/mc_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/mc_watcher.go
index beeecbbb..3a8032ba 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/mc_watcher.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/mc_watcher.go
@@ -20,11 +20,11 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+
 	"github.com/cloudflare/cfssl/log"
 	dh "github.com/deckhouse/deckhouse/deckhouse-controller/pkg/apis/deckhouse.io/v1alpha1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"sds-health-watcher-controller/pkg/logger"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -32,6 +32,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"sds-health-watcher-controller/pkg/logger"
 )
 
 const (
@@ -49,28 +51,33 @@ func RunMCWatcher(
 
 	c, err := controller.New(MCWatcherCtrlName, mgr, controller.Options{
 		Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
-			log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] Reconciler got a request %s", request.String()))
+			log.Info(fmt.Sprintf("[RunMCWatcher] Reconciler got a request %s", request.String()))
+			checkMCThinPoolsEnabled(ctx, cl)
 			return reconcile.Result{}, nil
 		}),
 	})
 
 	if err != nil {
-		log.Error(err, "[MCWatcherCtrlName] unable to create a controller")
+		log.Error(err, "[RunMCWatcher] unable to create a controller")
 		return err
 	}
 
-	err = c.Watch(source.Kind(mgr.GetCache(), &dh.ModuleConfig{}), handler.Funcs{
-		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
-			log.Info(fmt.Sprintf("[MCWatcherCtrlName] got a create event for the ModuleConfig %s", e.Object.GetName()))
-			checkMCThinPoolsEnabled(ctx, cl)
+	err = c.Watch(source.Kind(mgr.GetCache(), &dh.ModuleConfig{}, handler.TypedFuncs[*dh.ModuleConfig, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*dh.ModuleConfig], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+			log.Info(fmt.Sprintf("[RunMCWatcher] got a create event for the ModuleConfig %s", e.Object.GetName()))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}
+			q.Add(request)
+			log.Info(fmt.Sprintf("[RunMCWatcher] added the ModuleConfig %s to the Reconcilers queue", e.Object.GetName()))
 		},
-		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
-			log.Info(fmt.Sprintf("[MCWatcherCtrlName] got a update event for the ModuleConfig %s", e.ObjectNew.GetName()))
-			checkMCThinPoolsEnabled(ctx, cl)
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*dh.ModuleConfig], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+			log.Info(fmt.Sprintf("[RunMCWatcher] got a update event for the ModuleConfig %s", e.ObjectNew.GetName()))
+			request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}
+			q.Add(request)
+			log.Info(fmt.Sprintf("[RunMCWatcher] added the ModuleConfig %s to the Reconcilers queue", e.ObjectNew.GetName()))
 		},
-	})
+	}))
 	if err != nil {
-		log.Error(err, "[MCWatcherCtrlName] unable to watch the events")
+		log.Error(err, "[RunMCWatcher] unable to watch the events")
 		return err
 	}
 
@@ -91,16 +98,13 @@ func checkMCThinPoolsEnabled(ctx context.Context, cl client.Client) {
 		}
 
 		if value, exists := moduleItem.Spec.Settings["enableThinProvisioning"]; exists && value == true {
-			ctx := context.Background()
-
 			sncModuleConfig := &dh.ModuleConfig{}
-
 			err = cl.Get(ctx, types.NamespacedName{Name: sdsNodeConfiguratorModuleName, Namespace: ""}, sncModuleConfig)
 			if err != nil {
 				log.Fatal(err)
 			}
 
-			if value, exists := sncModuleConfig.Spec.Settings["enableThinProvisioning"]; exists && value == true {
+			if value, exists = sncModuleConfig.Spec.Settings["enableThinProvisioning"]; exists && value == true {
 				log.Info("Thin pools support is enabled")
 			} else {
 				log.Info("Enabling thin pools support")
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go
index 67a33aa6..3c813043 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher.go
@@ -19,12 +19,15 @@ package controller
 import (
 	"context"
 	"fmt"
+	"strings"
+	"time"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+
 	"sds-health-watcher-controller/config"
 	"sds-health-watcher-controller/pkg/logger"
 	"sds-health-watcher-controller/pkg/monitoring"
-	"sigs.k8s.io/controller-runtime/pkg/manager"
-	"time"
 )
 
 const (
@@ -59,7 +62,7 @@ func RunSdsInfraWatcher(
 				log.Error(err, "[RunSdsInfraWatcher] unable to get LVMVolumeGroups")
 				continue
 			}
-			log.Debug(fmt.Sprint("[RunSdsInfraWatcher] successfully got LVMVolumeGroups"))
+			log.Debug("[RunSdsInfraWatcher] successfully got LVMVolumeGroups")
 			if len(lvgs) == 0 {
 				log.Info("[RunSdsInfraWatcher] no LVMVolumeGroups found")
 				continue
@@ -73,13 +76,13 @@ func RunSdsInfraWatcher(
 			lvgNodes := getNodeNamesFromLVGs(lvgs)
 			log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] used nodes %v", lvgNodes))
 
-			log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] tries to collect nodes used by LVMVolumeGroups"))
+			log.Debug("[RunSdsInfraWatcher] tries to collect nodes used by LVMVolumeGroups")
 			usedNodes, missedNodes, err := getNodesByNames(ctx, cl, lvgNodes)
 			if err != nil {
 				log.Error(err, "[RunSdsInfraWatcher] unable to get nodes")
 				continue
 			}
-			log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] successfully collected nodes used by LVMVolumeGroups"))
+			log.Debug("[RunSdsInfraWatcher] successfully collected nodes used by LVMVolumeGroups")
 
 			if len(missedNodes) > 0 {
 				log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] some LVMVolumeGroups use missing nodes: %v. Turn those LVMVolumeGroups condition NodeReady to False", missedNodes))
@@ -159,8 +162,7 @@ func RunSdsInfraWatcher(
 				log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] found a pod: %s", p.Name))
 			}
 
-			var unmanagedNodes []string
-			unmanagedNodes = getNodeNamesWithoutAgent(usedNodes, sdsPods)
+			unmanagedNodes := getNodeNamesWithoutAgent(usedNodes, sdsPods)
 			if len(unmanagedNodes) > 0 {
 				log.Warning("[RunSdsInfraWatcher] some LVMVolumeGroups are not managed due to corresponding sds-node-configurator agent's pods are not running. Turn such LVMVolumeGroups to NotReady phase")
 				log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] nodes without the agent: %v", unmanagedNodes))
@@ -182,11 +184,17 @@ func RunSdsInfraWatcher(
 			log.Debug("[RunSdsInfraWatcher] check if every agent's pod is in a Ready state")
 			notReadyPods := getNotReadyPods(sdsPods)
 			if len(notReadyPods) > 0 {
-				log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] there is some sds-node-configurator agent's pods that is not Ready, pods: %v. Turn the LVMVolumeGroups condition AgentReady to False", notReadyPods))
+				podsNames := make([]string, 0, len(notReadyPods))
+				for name := range notReadyPods {
+					podsNames = append(podsNames, name)
+				}
+
+				log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] there is some sds-node-configurator agent's pods that is not Ready, pods: %s. Turn the LVMVolumeGroups condition AgentReady to False", strings.Join(podsNames, ",")))
 				nodeNames := getNodeNamesFromPods(notReadyPods)
 				log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] node names with not Ready sds-node-configurator agent's pods: %v", nodeNames))
 				lvgsNotReady := findLVMVolumeGroupsByNodeNames(lvgs, nodeNames)
 				for _, lvg := range lvgsNotReady {
+					log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] the LVMVolumeGroup %s is managed by not Ready pod, turns the condition %s to False", lvg.Name, agentReadyType))
 					err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, agentReadyType, "PodNotReady", "the pod is not Ready")
 					if err != nil {
 						log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name))
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go
index 91864cac..936e4055 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go
@@ -19,15 +19,17 @@ package controller
 import (
 	"context"
 	"fmt"
+	"time"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
 	"sds-health-watcher-controller/internal"
 	"sds-health-watcher-controller/pkg/logger"
 	"sds-health-watcher-controller/pkg/monitoring"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"time"
 )
 
 func getNodeNamesFromPods(pods map[string]v1.Pod) []string {
@@ -47,7 +49,6 @@ func getNotReadyPods(pods map[string]v1.Pod) map[string]v1.Pod {
 		for _, c := range p.Status.Conditions {
 			if c.Type == internal.TypeReady && c.Status != v1.ConditionTrue {
 				result[p.Name] = p
-
 			}
 		}
 	}
@@ -155,10 +156,10 @@ func GetLVMVolumeGroups(ctx context.Context, cl client.Client, metrics monitorin
 
 	start := time.Now()
 	err := cl.List(ctx, lvgList)
-	metrics.ApiMethodsDuration(SdsInfraWatcherCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
-	metrics.ApiMethodsExecutionCount(SdsInfraWatcherCtrlName, "list").Inc()
+	metrics.APIMethodsDuration(SdsInfraWatcherCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start))
+	metrics.APIMethodsExecutionCount(SdsInfraWatcherCtrlName, "list").Inc()
 	if err != nil {
-		metrics.ApiMethodsErrors(SdsInfraWatcherCtrlName, "list").Inc()
+		metrics.APIMethodsErrors(SdsInfraWatcherCtrlName, "list").Inc()
 		return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LvmVolumeGroups, err: %w", err)
 	}
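The ApiMethods* → APIMethods* call-site updates here match the method renames in monitoring.go below and follow Go's initialism convention (API, not Api; see stylecheck ST1003). Note that the [GetApiLVMVolumeGroups] tag in the error message above is a leftover the rename did not touch — the function is named GetLVMVolumeGroups.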
 
diff --git a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go
index 88c36035..f33ddf7b 100644
--- a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go
+++ b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go
@@ -2,21 +2,23 @@ package controller
 
 import (
 	"context"
+	"testing"
+
 	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
 	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/utils/strings/slices"
-	"sds-health-watcher-controller/pkg/monitoring"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"testing"
+
+	"sds-health-watcher-controller/pkg/monitoring"
 )
 
 func TestHealthWatcher(t *testing.T) {
 	cl := NewFakeClient()
 	ctx := context.Background()
-	//log := logger.Logger{}
 	metrics := monitoring.GetMetrics("")
 
 	t.Run("GetLVMVolumeGroups_returns_lvgs", func(t *testing.T) {
@@ -42,7 +44,7 @@ func TestHealthWatcher(t *testing.T) {
 		}
 
 		lvgs, err := GetLVMVolumeGroups(ctx, cl, metrics)
-
+		assert.NoError(t, err)
 		assert.Equal(t, 2, len(lvgs))
 	})
 
@@ -126,6 +128,7 @@ func NewFakeClient() client.WithWatch {
 	s := scheme.Scheme
 	_ = metav1.AddMetaToScheme(s)
 	_ = v1alpha1.AddToScheme(s)
+	_ = v1.AddToScheme(s)
 
 	builder := fake.NewClientBuilder().WithScheme(s)
 
diff --git a/images/sds-health-watcher-controller/src/pkg/kubutils/kubernetes.go b/images/sds-health-watcher-controller/src/pkg/kubutils/kubernetes.go
index 7d478b0b..20e1c16e 100644
--- a/images/sds-health-watcher-controller/src/pkg/kubutils/kubernetes.go
+++ b/images/sds-health-watcher-controller/src/pkg/kubutils/kubernetes.go
@@ -18,12 +18,12 @@ package kubutils
 
 import (
 	"fmt"
+
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
 func KubernetesDefaultConfigCreate() (*rest.Config, error) {
-	//todo validate empty
 	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
 		clientcmd.NewDefaultClientConfigLoadingRules(),
 		&clientcmd.ConfigOverrides{},
diff --git a/images/sds-health-watcher-controller/src/pkg/logger/logger.go b/images/sds-health-watcher-controller/src/pkg/logger/logger.go
index 10b435ee..37444699 100644
--- a/images/sds-health-watcher-controller/src/pkg/logger/logger.go
+++ b/images/sds-health-watcher-controller/src/pkg/logger/logger.go
@@ -17,11 +17,11 @@ limitations under the License.
 package logger
 
 import (
-	"flag"
 	"fmt"
+	"strconv"
+
 	"github.com/go-logr/logr"
-	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/klogr"
+	"k8s.io/klog/v2/textlogger"
 )
 
 const (
@@ -50,13 +50,12 @@ type Logger struct {
 }
 
 func NewLogger(level Verbosity) (*Logger, error) {
-	klog.InitFlags(nil)
-	if err := flag.Set("v", string(level)); err != nil {
+	v, err := strconv.Atoi(string(level))
+	if err != nil {
 		return nil, err
 	}
-	flag.Parse()
 
-	log := klogr.New().WithCallDepth(1)
+	log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)
 
 	return &Logger{log: log}, nil
 }
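Replacing klogr with klog's textlogger removes the global flag mutation: klog.InitFlags plus flag.Parse touched process-wide state and could collide with other flag users, while textlogger takes its verbosity through a local config. A minimal usage sketch, assuming the package's Verbosity values are numeric strings, as the strconv.Atoi call implies:

```go
package main

import (
	"fmt"
	"os"

	"sds-health-watcher-controller/pkg/logger"
)

func main() {
	log, err := logger.NewLogger("2") // "2" is an illustrative verbosity level
	if err != nil {
		fmt.Printf("unable to create NewLogger, err: %v\n", err)
		os.Exit(1)
	}
	log.Info("logger has been successfully created")
}
```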
diff --git a/images/sds-health-watcher-controller/src/pkg/monitoring/monitoring.go b/images/sds-health-watcher-controller/src/pkg/monitoring/monitoring.go
index a282d9fd..f4f1da6c 100644
--- a/images/sds-health-watcher-controller/src/pkg/monitoring/monitoring.go
+++ b/images/sds-health-watcher-controller/src/pkg/monitoring/monitoring.go
@@ -17,11 +17,12 @@ limitations under the License.
 package monitoring
 
 import (
+	"strings"
+	"time"
+
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/utils/clock"
 	"sigs.k8s.io/controller-runtime/pkg/metrics"
-	"strings"
-	"time"
 )
 
 const (
@@ -133,15 +134,15 @@ func (m Metrics) UtilsCommandsErrorsCount(controllerName, command string) promet
 	return utilsCommandsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(command))
 }
 
-func (m Metrics) ApiMethodsDuration(controllerName, method string) prometheus.Observer {
+func (m Metrics) APIMethodsDuration(controllerName, method string) prometheus.Observer {
 	return apiMethodsDuration.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
-func (m Metrics) ApiMethodsExecutionCount(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsExecutionCount(controllerName, method string) prometheus.Counter {
 	return apiMethodsExecutionCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
-func (m Metrics) ApiMethodsErrors(controllerName, method string) prometheus.Counter {
+func (m Metrics) APIMethodsErrors(controllerName, method string) prometheus.Counter {
 	return apiMethodsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(method))
 }
 
diff --git a/images/sds-utils-installer/src/cmd/main.go b/images/sds-utils-installer/src/cmd/main.go
index fb2d0a02..c2d4ff4b 100644
--- a/images/sds-utils-installer/src/cmd/main.go
+++ b/images/sds-utils-installer/src/cmd/main.go
@@ -97,7 +97,6 @@ func copyPerm(srcPath, dstPath string) (err error) {
 }
 
 func arePermissionsEqual(srcPath, dstPath string) (equal bool, err error) {
-
 	srcInfo, err := os.Stat(srcPath)
 	if err != nil {
 		return false, err
@@ -110,9 +109,9 @@ func arePermissionsEqual(srcPath, dstPath string) (equal bool, err error) {
 	log.Printf("file %s mode %s", dstPath, dstInfo.Mode())
 	if srcInfo.Mode() == dstInfo.Mode() {
 		return true, nil
-	} else {
-		return false, nil
 	}
+
+	return false, nil
 }
 
 func getChecksum(filePath string) (checksum string, err error) {
@@ -172,7 +171,7 @@ func copyFilesRecursive(srcDir, dstDir string) error {
 				if err != nil {
 					return err
 				}
-				if equal == false {
+				if !equal {
 					err = copyPerm(srcPath, dstPath)
 					if err != nil {
 						return err
@@ -184,7 +183,6 @@ func copyFilesRecursive(srcDir, dstDir string) error {
 				return nil
 			}
 			log.Printf("Copying %s: Checksum is different\n", srcPath)
-
 		}
 
 		err = copyFile(srcPath, dstPath)