diff --git a/.github/workflows/go_modules_check.yaml b/.github/workflows/go_modules_check.yaml index 8365dd53..5a5f7e02 100644 --- a/.github/workflows/go_modules_check.yaml +++ b/.github/workflows/go_modules_check.yaml @@ -37,16 +37,20 @@ jobs: echo "Processing $go_mod_file" while IFS= read -r line; do + if [[ "$line" =~ ^replace ]]; then + continue + fi + if [[ "$line" == *github.com/deckhouse/sds-* || "$line" == *github.com/deckhouse/csi-* || "$line" == *github.com/deckhouse/virtualization ]]; then repository=$(echo "$line" | awk '{print $1}' | awk -F'/' '{ print "https://"$1"/"$2"/"$3".git" }') pseudo_tag=$(echo "$line" | awk '{print $2}') echo "Cloning repo $repository into $temp_dir" - + git clone "$repository" "$temp_dir/$repository" >/dev/null 2>&1 - + if [ -d "$temp_dir/$repository/api" ]; then cd "$temp_dir/$repository" || continue - + commit_info=$(git log -1 --pretty=format:"%H %cd" --date=iso-strict -- api/*) short_hash=$(echo "$commit_info" | awk '{print substr($1,1,12)}') commit_date=$(echo "$commit_info" | awk '{print $2}') @@ -54,17 +58,17 @@ jobs: actual_pseudo_tag="v0.0.0-"$commit_date"-"$short_hash pseudo_tag_date=$(echo $pseudo_tag | awk -F'-' '{ print $2 }') echo "Latest commit in $repository: $short_hash $commit_date" - + if [[ "$pseudo_tag_date" < "$commit_date" ]]; then echo "Incorrect pseudo tag for repo $repository in file "$go_mod_file" (current: "$pseudo_tag", actual:"$actual_pseudo_tag")" echo "Incorrect pseudo tag for repo $repository in file "$go_mod_file" (current: "$pseudo_tag", actual:"$actual_pseudo_tag")" >> $temp_dir"/incorrect_alert" fi - + cd - >/dev/null 2>&1 else echo "No api directory in $repository" fi - + rm -rf "$temp_dir/$repository" fi done < "$go_mod_file" @@ -75,4 +79,4 @@ jobs: if [ $alert_lines_count != 0 ]; then echo "We have non-actual pseudo-tags in repository's go.mod files" exit 1 - fi + fi \ No newline at end of file diff --git a/.werf/bundle.yaml b/.werf/bundle.yaml index 4b00eee0..72c5d96f 100644 --- a/.werf/bundle.yaml +++ b/.werf/bundle.yaml @@ -2,7 +2,7 @@ --- image: bundle from: registry.deckhouse.io/base_images/scratch@sha256:b054705fcc9f2205777d80a558d920c0b4209efdc3163c22b5bfcb5dda1db5fc -fromCacheVersion: 20240821092325 +fromCacheVersion: 20240830154106 import: # Rendering .werf/images-digests.yaml is required!
- image: images-digests diff --git a/.werf/python-deps.yaml b/.werf/python-deps.yaml index 94742cff..ad711986 100644 --- a/.werf/python-deps.yaml +++ b/.werf/python-deps.yaml @@ -2,7 +2,7 @@ --- image: python-dependencies from: registry.deckhouse.io/base_images/alpine:3.16.3 -fromCacheVersion: 20240821092325 +fromCacheVersion: 20240830154106 git: - add: / to: / diff --git a/api/v1alpha1/block_device.go b/api/v1alpha1/block_device.go index c6ddb4b4..dc7e22a0 100644 --- a/api/v1alpha1/block_device.go +++ b/api/v1alpha1/block_device.go @@ -44,7 +44,7 @@ type BlockDeviceStatus struct { PVUuid string `json:"pvUUID"` VGUuid string `json:"vgUUID"` PartUUID string `json:"partUUID"` - LvmVolumeGroupName string `json:"lvmVolumeGroupName"` + LVMVolumeGroupName string `json:"lvmVolumeGroupName"` ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"` Wwn string `json:"wwn"` Serial string `json:"serial"` diff --git a/api/v1alpha1/lvm_logical_volume.go b/api/v1alpha1/lvm_logical_volume.go index f3eb69b8..dca01c8e 100644 --- a/api/v1alpha1/lvm_logical_volume.go +++ b/api/v1alpha1/lvm_logical_volume.go @@ -40,7 +40,7 @@ type LVMLogicalVolumeSpec struct { ActualLVNameOnTheNode string `json:"actualLVNameOnTheNode"` Type string `json:"type"` Size string `json:"size"` - LvmVolumeGroupName string `json:"lvmVolumeGroupName"` + LVMVolumeGroupName string `json:"lvmVolumeGroupName"` Thin *LVMLogicalVolumeThinSpec `json:"thin"` Thick *LVMLogicalVolumeThickSpec `json:"thick"` } diff --git a/api/v1alpha1/lvm_volume_group.go b/api/v1alpha1/lvm_volume_group.go index fc8090af..0346f7f7 100644 --- a/api/v1alpha1/lvm_volume_group.go +++ b/api/v1alpha1/lvm_volume_group.go @@ -21,32 +21,33 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -type LvmVolumeGroupList struct { +type LVMVolumeGroupList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` - Items []LvmVolumeGroup `json:"items"` + Items []LVMVolumeGroup `json:"items"` } -type LvmVolumeGroup struct { +type LVMVolumeGroup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec LvmVolumeGroupSpec `json:"spec"` - Status LvmVolumeGroupStatus `json:"status,omitempty"` + Spec LVMVolumeGroupSpec `json:"spec"` + Status LVMVolumeGroupStatus `json:"status,omitempty"` } -type LvmVolumeGroupSpec struct { +type LVMVolumeGroupSpec struct { ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"` - BlockDeviceNames []string `json:"blockDeviceNames"` - ThinPools []LvmVolumeGroupThinPoolSpec `json:"thinPools"` + BlockDeviceSelector *metav1.LabelSelector `json:"blockDeviceSelector"` + ThinPools []LVMVolumeGroupThinPoolSpec `json:"thinPools"` Type string `json:"type"` + Local LVMVolumeGroupLocalSpec `json:"local"` } -type LvmVolumeGroupStatus struct { +type LVMVolumeGroupStatus struct { AllocatedSize resource.Quantity `json:"allocatedSize"` - Nodes []LvmVolumeGroupNode `json:"nodes"` - ThinPools []LvmVolumeGroupThinPoolStatus `json:"thinPools"` + Nodes []LVMVolumeGroupNode `json:"nodes"` + ThinPools []LVMVolumeGroupThinPoolStatus `json:"thinPools"` VGSize resource.Quantity `json:"vgSize"` VGUuid string `json:"vgUUID"` Phase string `json:"phase"` @@ -56,7 +57,7 @@ type LvmVolumeGroupStatus struct { VGFree resource.Quantity `json:"vgFree"` } -type LvmVolumeGroupDevice struct { +type LVMVolumeGroupDevice struct { BlockDevice string `json:"blockDevice"` DevSize resource.Quantity `json:"devSize"` PVSize resource.Quantity `json:"pvSize"` @@ -64,12 +65,12 @@ type LvmVolumeGroupDevice struct { Path string 
`json:"path"` } -type LvmVolumeGroupNode struct { - Devices []LvmVolumeGroupDevice `json:"devices"` +type LVMVolumeGroupNode struct { + Devices []LVMVolumeGroupDevice `json:"devices"` Name string `json:"name"` } -type LvmVolumeGroupThinPoolStatus struct { +type LVMVolumeGroupThinPoolStatus struct { Name string `json:"name"` ActualSize resource.Quantity `json:"actualSize"` UsedSize resource.Quantity `json:"usedSize"` @@ -80,8 +81,12 @@ type LvmVolumeGroupThinPoolStatus struct { Message string `json:"message"` } -type LvmVolumeGroupThinPoolSpec struct { +type LVMVolumeGroupThinPoolSpec struct { Name string `json:"name"` Size string `json:"size"` AllocationLimit string `json:"allocationLimit"` } + +type LVMVolumeGroupLocalSpec struct { + NodeName string `json:"nodeName"` +} diff --git a/api/v1alpha1/register.go b/api/v1alpha1/register.go index 1d94779a..55f47986 100644 --- a/api/v1alpha1/register.go +++ b/api/v1alpha1/register.go @@ -42,8 +42,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &BlockDevice{}, &BlockDeviceList{}, - &LvmVolumeGroup{}, - &LvmVolumeGroupList{}, + &LVMVolumeGroup{}, + &LVMVolumeGroupList{}, &LVMLogicalVolume{}, &LVMLogicalVolumeList{}, ) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 270e883e..18da16ae 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -76,24 +76,24 @@ func (in *BlockDeviceList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LvmVolumeGroup) DeepCopyInto(out *LvmVolumeGroup) { +func (in *LVMVolumeGroup) DeepCopyInto(out *LVMVolumeGroup) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. -func (in *LvmVolumeGroup) DeepCopy() *LvmVolumeGroup { +func (in *LVMVolumeGroup) DeepCopy() *LVMVolumeGroup { if in == nil { return nil } - out := new(LvmVolumeGroup) + out := new(LVMVolumeGroup) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LvmVolumeGroup) DeepCopyObject() runtime.Object { +func (in *LVMVolumeGroup) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -101,13 +101,13 @@ func (in *LvmVolumeGroup) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LvmVolumeGroupList) DeepCopyInto(out *LvmVolumeGroupList) { +func (in *LVMVolumeGroupList) DeepCopyInto(out *LVMVolumeGroupList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]LvmVolumeGroup, len(*in)) + *out = make([]LVMVolumeGroup, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -115,17 +115,17 @@ func (in *LvmVolumeGroupList) DeepCopyInto(out *LvmVolumeGroupList) { } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. 
-func (in *LvmVolumeGroupList) DeepCopy() *LvmVolumeGroupList { +func (in *LVMVolumeGroupList) DeepCopy() *LVMVolumeGroupList { if in == nil { return nil } - out := new(LvmVolumeGroupList) + out := new(LVMVolumeGroupList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LvmVolumeGroupList) DeepCopyObject() runtime.Object { +func (in *LVMVolumeGroupList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } diff --git a/crds/doc-ru-lvmvolumegroup.yaml b/crds/doc-ru-lvmvolumegroup.yaml index f967c8c1..23c0b779 100644 --- a/crds/doc-ru-lvmvolumegroup.yaml +++ b/crds/doc-ru-lvmvolumegroup.yaml @@ -15,9 +15,16 @@ spec: description: | Тип Volume Group. Может быть: - Local, то есть локальным, если используемые девайсы не являются распределенными (не Shared LUN). - blockDeviceNames: + local: description: | - Список имен ресурсов BlockDevice для создания Volume Group. + Желаемая конфигурация для локальной Volume Group. + properties: + nodeName: + description: | + Имя ноды, на которой планируется размещение Volume Group. + blockDeviceSelector: + description: | + Желаемый селектор для выбора BlockDevice ресурсов. > Обратите внимание, что выбранные блок-девайсы должны принадлежать одному узлу для Volume Group с типом 'Local'. actualVGNameOnTheNode: diff --git a/crds/doc-ru-lvmvolumegroupbackup.yaml b/crds/doc-ru-lvmvolumegroupbackup.yaml new file mode 100644 index 00000000..2e53629f --- /dev/null +++ b/crds/doc-ru-lvmvolumegroupbackup.yaml @@ -0,0 +1,7 @@ +spec: + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: | + Ресурс, сохраняющий состояние lvmvolumegroups ресурсов для совершения миграции kind с LvmVolumeGroup до LVMVolumeGroup. \ No newline at end of file diff --git a/crds/lvmvolumegroup.yaml b/crds/lvmvolumegroup.yaml index b8b60aa6..bc99e489 100644 --- a/crds/lvmvolumegroup.yaml +++ b/crds/lvmvolumegroup.yaml @@ -10,7 +10,7 @@ spec: group: storage.deckhouse.io scope: Cluster names: - kind: LvmVolumeGroup + kind: LVMVolumeGroup plural: lvmvolumegroups singular: lvmvolumegroup shortNames: @@ -33,32 +33,81 @@ spec: properties: spec: type: object + x-kubernetes-validations: + - rule: | + (self.type == "Local" && has(self.local)) || self.type != "Local" + message: "The 'local' field is required when the 'type' field is 'Local'." required: - type - - blockDeviceNames + - blockDeviceSelector - actualVGNameOnTheNode properties: type: type: string description: | - The type of a Volume Group. Might be: + The type of a VolumeGroup. Might be: - Local, that is, local if the devices used are not distributed (not Shared LUN). enum: - Local - blockDeviceNames: - type: array + x-kubernetes-validations: + - rule: self == oldSelf + message: "The type field is immutable." + local: + type: object description: | - An array of block device resource names to create a Volume Group. - - > Note that the selected block devices must belong to the same node for a Local Volume Group. - items: - type: string + The desired configuration for a local Volume Group. + required: + - nodeName + properties: + nodeName: + type: string + description: | + The desired node for the LVM Volume Group. + + > This field is immutable. + x-kubernetes-validations: + - rule: self == oldSelf + message: "The nodeName field is immutable." + blockDeviceSelector: + type: object + description: | + The desired block device selector. 
+ properties: + matchLabels: + type: object + description: | + The desired block device selector labels. + additionalProperties: + type: string + matchExpressions: + type: array + description: | + The desired block device selector expressions. + items: + type: object + properties: + key: + type: string + operator: + type: string + enum: + - In + - NotIn + - Exists + - DoesNotExist + values: + type: array + items: + type: string actualVGNameOnTheNode: type: string description: | The desired name of a Volume Group. Must be unique for the node it is on. > This field is immutable. + x-kubernetes-validations: + - rule: self == oldSelf + message: "The actualVGNameOnTheNode field is immutable." thinPools: type: array description: | diff --git a/crds/lvmvolumegroupbackup.yaml b/crds/lvmvolumegroupbackup.yaml new file mode 100644 index 00000000..a1df3085 --- /dev/null +++ b/crds/lvmvolumegroupbackup.yaml @@ -0,0 +1,286 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: lvmvolumegroupbackups.storage.deckhouse.io + labels: + heritage: deckhouse + module: storage +spec: + group: storage.deckhouse.io + scope: Cluster + names: + kind: LvmVolumeGroupBackup + plural: lvmvolumegroupbackups + singular: lvmvolumegroupbackup + shortNames: + - lvgbk + preserveUnknownFields: false + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + description: | + Stores the information from lvmvolumegroups resources to migrate their kind from LvmVolumeGroup to LVMVolumeGroup. + required: + - spec + properties: + spec: + type: object + required: + - type + - blockDeviceNames + - actualVGNameOnTheNode + properties: + type: + type: string + description: | + The type of a Volume Group. Might be: + - Local, that is, local if the devices used are not distributed (not Shared LUN). + enum: + - Local + blockDeviceNames: + type: array + description: | + An array of block device resource names to create a Volume Group. + + > Note that the selected block devices must belong to the same node for a Local Volume Group. + items: + type: string + actualVGNameOnTheNode: + type: string + description: | + The desired name of a Volume Group. Must be unique for the node it is on. + + > This field is immutable. + thinPools: + type: array + description: | + The desired Thin-pool configuration. + items: + type: object + properties: + name: + type: string + description: | + The desired thin pool name. + + > This field is immutable. + size: + x-kubernetes-int-or-string: true + pattern: '^[0-9]+(\.[0-9]+)?(E|P|T|G|M|k|Ei|Pi|Ti|Gi|Mi|Ki)?$|^[1-9][0-9]?%$|100%' + description: | + The desired thin pool size. Might be specified as number or percent size of total VG space. + + > Note, that if you specify the percent size, the thin pool will be automatically extended when VG is extended. + allocationLimit: + type: string + pattern: '^[1-9][0-9]{2,3}%$' + default: "150%" + description: | + Thin pool oversize limit. Default is 150%. + required: + - name + - size + status: + type: object + properties: + thinPoolReady: + type: string + description: | + Shows the healthy and total Thin-pool count. + configurationApplied: + type: string + description: | + Show if last configuration has been successfully applied. + phase: + type: string + description: | + The general LVMVolumeGroup condition. + enum: + - Pending + - Ready + - NotReady + - Terminating + - "" + conditions: + description: | + The LVMVolumeGroup conditions. 
+ type: array + items: + type: object + properties: + type: + type: string + description: | + The condition type. + enum: + - VGConfigurationApplied + - VGReady + - NodeReady + - AgentReady + - Ready + status: + type: string + description: | + The condition status. + reason: + type: string + description: | + The reason of the current status. + message: + type: string + description: | + The information about the current status. + lastTransitionTime: + type: string + description: | + The time the condition was updated. + observedGeneration: + type: integer + description: | + The resource version when the condition applied. + vgUUID: + type: string + description: | + The Volume Group UUID. + vgSize: + type: string + description: | + The Volume Group capacity. + vgFree: + type: string + description: | + The Volume Group free space. + allocatedSize: + type: string + description: | + The amount of space currently occupied on the Volume Group. + thinPools: + type: array + description: | + The current state of the Volume Group's Thin-pool. + + > Might be empty if there is no any Thin-pool in the Volume Group. + items: + type: object + required: + - name + - actualSize + - usedSize + properties: + name: + type: string + description: | + The Thin-pool name. + actualSize: + type: string + description: | + The Thin-pool capacity. + usedSize: + type: string + description: | + The Thin-pool used size. + allocatedSize: + type: string + description: | + Total LVs request size in the thin-pool. + allocationLimit: + type: string + description: | + Thin pool oversize limit. Default is 150%. + default: "150%" + pattern: '^[1-9][0-9]{2,3}%$' + availableSpace: + type: string + description: | + Thin pool free space available. + ready: + type: boolean + description: | + The Thin-pool health status. + message: + type: string + description: | + Information about the status. + nodes: + type: array + description: | + Information about the nodes the Volume Group is on. + items: + type: object + properties: + name: + type: string + description: | + The node name. + devices: + type: array + description: | + Information about the devices used in the Volume Group on the current node. + items: + type: object + properties: + path: + type: string + description: | + The device path on the node (e.g., /dev/sda). + pvSize: + type: string + description: | + The Physical Volume size. + devSize: + type: string + description: | + The block device size. + pvUUID: + type: string + description: | + The LVM Physical Volume UUID. + blockDevice: + type: string + description: | + The name of the corresponding block device resource. + subresources: + status: { } + additionalPrinterColumns: + - jsonPath: .status.thinPoolReady + name: ThinPools + type: string + description: Current and total Thin pools count. + - jsonPath: .status.configurationApplied + name: Configuration Applied + type: string + description: If last configuration has been successfully applied. + - jsonPath: .status.phase + name: phase + type: string + description: Resource phase. + - jsonPath: .status.nodes..name + name: node + type: string + description: The node the VG is on. + - jsonPath: .status.vgSize + name: size + type: string + description: Total VG size. + - jsonPath: .status.allocatedSize + name: allocated size + type: string + description: Actual VG size. + - jsonPath: .spec.actualVGNameOnTheNode + name: VG + type: string + description: Actual VG name. + - jsonPath: .spec.type + name: type + type: string + description: Volume Group type. 
+ priority: 1 + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + description: The age of this resource diff --git a/docs/USAGE.md b/docs/USAGE.md index 707dd702..0c5974ad 100644 --- a/docs/USAGE.md +++ b/docs/USAGE.md @@ -67,40 +67,87 @@ There are two ways to create an `LVMVolumeGroup` resource: ```yaml apiVersion: storage.deckhouse.io/v1alpha1 - kind: LvmVolumeGroup + kind: LVMVolumeGroup metadata: name: "vg-0-on-node-0" spec: type: Local - blockDeviceNames: - - dev-c1de9f9b534bf5c0b44e8b1cd39da80d5cda7c3f - - dev-f3269d92a99e1f668255a47d5d3500add1462711 - actualVGNameOnTheNode: "vg-0" + local: + nodeName: "node-0" + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - dev-07ad52cef2348996b72db262011f1b5f896bb68f + - dev-e90e8915902bd6c371e59f89254c0fd644126da7 + actualVGNameOnTheNode: "vg-0" + ``` + + ```yaml + apiVersion: storage.deckhouse.io/v1alpha1 + kind: LVMVolumeGroup + metadata: + name: "vg-0-on-node-0" + spec: + type: Local + local: + nodeName: "node-0" + blockDeviceSelector: + matchLabels: + kubernetes.io/hostname: node-0 + actualVGNameOnTheNode: "vg-0" ``` * An example of a resource for creating a local `LVM Volume Group` and a `Thin-pool` on it from multiple `BlockDevices`: ```yaml apiVersion: storage.deckhouse.io/v1alpha1 - kind: LvmVolumeGroup + kind: LVMVolumeGroup metadata: - name: "vg-thin-on-node-0" + name: "vg-0-on-node-0" spec: type: Local - blockDeviceNames: - - dev-0428672e39334e545eb96c85f8760fd59dcf15f1 - - dev-456977ded72ef804dd7cec90eec94b10acdf99b7 - actualVGNameOnTheNode: "vg-thin" + local: + nodeName: "node-0" + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - dev-07ad52cef2348996b72db262011f1b5f896bb68f + - dev-e90e8915902bd6c371e59f89254c0fd644126da7 + actualVGNameOnTheNode: "vg-0" + thinPools: + - name: thin-1 + size: 250Gi + ``` + + ```yaml + apiVersion: storage.deckhouse.io/v1alpha1 + kind: LVMVolumeGroup + metadata: + name: "vg-0-on-node-0" + spec: + type: Local + local: + nodeName: "node-0" + blockDeviceSelector: + matchLabels: + kubernetes.io/hostname: node-0 + actualVGNameOnTheNode: "vg-0" thinPools: - name: thin-1 size: 250Gi ``` - > Please note that the resource does not specify the node on which the `Volume Group` will be created. The node is picked from the `BlockDevice` resources whose names are listed in `spec.blockDeviceNames`. + > You can specify any selectors that are convenient for you for `BlockDevice` resources. For example, you can select all devices on a node (using, for instance, `matchLabels`), or choose a subset by additionally specifying their names (or other parameters). + > Please note that the `spec.local` field is mandatory for the `Local` type. If there's a discrepancy between the name in the `spec.local.nodeName` field and the selectors, the creation of the LVMVolumeGroup will not proceed. > **Caution!** All the selected block devices must belong to the same node for a 'Local' `LVMVolumeGroup`. ### Updating an `LVMVolumeGroup` resource and a `Volume Group` +You can change the desired state of a `VolumeGroup` or `thin pool` on nodes by modifying the `spec` field of the corresponding `LVMVolumeGroup` resource. The controller will automatically validate the new data and, if it is in a valid state, apply the necessary changes to the entities on the node. 
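For readers who build these resources from Go rather than YAML, the short sketch below constructs the same kind of `Local` LVMVolumeGroup with the new `local`, `blockDeviceSelector`, and `thinPools` fields introduced by this change. It is only an illustration: the import path follows the `github.com/deckhouse/sds-node-configurator/api` module referenced elsewhere in this diff, and the object is merely constructed and printed, not submitted to a cluster.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
)

func main() {
	// Roughly the Go equivalent of the matchLabels YAML example above:
	// a Local VG on node-0 whose member BlockDevices are picked by label.
	lvg := v1alpha1.LVMVolumeGroup{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "storage.deckhouse.io/v1alpha1",
			Kind:       "LVMVolumeGroup",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "vg-0-on-node-0"},
		Spec: v1alpha1.LVMVolumeGroupSpec{
			Type:                  "Local",
			Local:                 v1alpha1.LVMVolumeGroupLocalSpec{NodeName: "node-0"},
			ActualVGNameOnTheNode: "vg-0",
			BlockDeviceSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"kubernetes.io/hostname": "node-0"},
			},
			ThinPools: []v1alpha1.LVMVolumeGroupThinPoolSpec{
				{Name: "thin-1", Size: "250Gi"},
			},
		},
	}

	fmt.Printf("spec: %+v\n", lvg.Spec)
}
```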
The controller automatically updates the `status` field of the `LVMVolumeGroup` resource to display up-to-date data about the corresponding `LVM Volume Group` on the node. We do **not recommend** making manual changes to the `status` field. @@ -117,6 +164,9 @@ A user can delete an `LVM Volume Group` and its associated `LVM Physical Volume` kubectl delete lvg %lvg-name% ``` +### Extracting the `BlockDevice` Resource from the `LVMVolumeGroup` Resource +To extract the `BlockDevice` resource from the `LVMVolumeGroup` resource, you need to either modify the `spec.blockDeviceSelector` field of the `LVMVolumeGroup` resource (by adding other selectors) or change the corresponding labels on the `BlockDevice` resource, so they no longer match the selectors of the `LVMVolumeGroup`. After this, you need to manually execute the commands `pvmove`, `vgreduce`, and `pvremove` on the node. + > **Caution!** If the deleting `LVM Volume Group` resource contains any `Logical Volume` (even if it is only the `Thin-pool` that is specified in `spec`), a user must delete all those `Logical Volumes` manually. Otherwise, the `LVMVolumeGroup` resource and its `Volume Group` will not be deleted. > A user can forbid to delete the `LVMVolumeGroup` resource by annotate it with `storage.deckhouse.io/deletion-protection`. If the controller finds the annotation, it will not delete nether the resource or the `Volume Group` till the annotation removal. diff --git a/docs/USAGE_RU.md b/docs/USAGE_RU.md index ab88b171..72a90841 100644 --- a/docs/USAGE_RU.md +++ b/docs/USAGE_RU.md @@ -67,40 +67,87 @@ description: Использование и примеры работы конт ```yaml apiVersion: storage.deckhouse.io/v1alpha1 - kind: LvmVolumeGroup + kind: LVMVolumeGroup metadata: name: "vg-0-on-node-0" spec: type: Local - blockDeviceNames: - - dev-c1de9f9b534bf5c0b44e8b1cd39da80d5cda7c3f - - dev-f3269d92a99e1f668255a47d5d3500add1462711 - actualVGNameOnTheNode: "vg-0" + local: + nodeName: "node-0" + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - dev-07ad52cef2348996b72db262011f1b5f896bb68f + - dev-e90e8915902bd6c371e59f89254c0fd644126da7 + actualVGNameOnTheNode: "vg-0" + ``` + + ```yaml + apiVersion: storage.deckhouse.io/v1alpha1 + kind: LVMVolumeGroup + metadata: + name: "vg-0-on-node-0" + spec: + type: Local + local: + nodeName: "node-0" + blockDeviceSelector: + matchLabels: + kubernetes.io/hostname: node-0 + actualVGNameOnTheNode: "vg-0" ``` * Пример ресурса для создания локальной `LVM Volume Group` и `Thin-pool` на ней из нескольких `BlockDevice`: ```yaml apiVersion: storage.deckhouse.io/v1alpha1 - kind: LvmVolumeGroup + kind: LVMVolumeGroup + metadata: + name: "vg-0-on-node-0" + spec: + type: Local + local: + nodeName: "node-0" + blockDeviceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - dev-07ad52cef2348996b72db262011f1b5f896bb68f + - dev-e90e8915902bd6c371e59f89254c0fd644126da7 + actualVGNameOnTheNode: "vg-0" + thinPools: + - name: thin-1 + size: 250Gi + ``` + + ```yaml + apiVersion: storage.deckhouse.io/v1alpha1 + kind: LVMVolumeGroup metadata: - name: "vg-thin-on-node-0" + name: "vg-0-on-node-0" spec: type: Local - blockDeviceNames: - - dev-0428672e39334e545eb96c85f8760fd59dcf15f1 - - dev-456977ded72ef804dd7cec90eec94b10acdf99b7 - actualVGNameOnTheNode: "vg-thin" + local: + nodeName: "node-0" + blockDeviceSelector: + matchLabels: + kubernetes.io/hostname: node-0 + actualVGNameOnTheNode: "vg-0" thinPools: - name: thin-1 size: 250Gi ``` - > 
Обратите внимание, что в ресурсе не указывается узел, на котором будет создана `Volume Group`. Узел берется из ресурсов `BlockDevice`, имена которых указаны в `spec.blockDeviceNames`. + > Вы можете указать любые удобные для Вас селекторы для ресурсов `BlockDevice`. Так, например, Вы можете выбрать все девайсы на этом узле (используя, например, `matchLabels`), либо выбрать часть, дополнительно указав их имена (или иные другие параметры). + > Обратите внимание, что поле `spec.local` является обязательным для типа `Local`. В случае расхождения имени в поле `spec.local.nodeName` и селекторах создание LVMVolumeGroup выполнено не будет. > **Внимание!** Все выбранные блок-девайсы должны принадлежать одному узлу для `LVMVolumeGroup` с типом 'Local'. ### Обновление ресурса `LVMVolumeGroup` +Вы можете изменить желаемое состояние `VolumeGroup` или `thin pool` на узлах с помощью изменения поля `spec` соответствующего ресурса `LVMVolumeGroup`. Контроллер автоматически провалидирует новые данные и, в случае их валидного состояния, внесет необходимые изменения в сущности на узле. Контроллер в автоматическом режиме обновляет поле `status` ресурса `LVMVolumeGroup`, отображая актуальные данные о соответствующей `LVM Volume Group` на узле. Пользователю **не рекомендуется** собственноручно вносить изменения в поле `status`. @@ -117,6 +164,9 @@ description: Использование и примеры работы конт kubectl delete lvg %lvg-name% ``` +### Вывод ресурса `BlockDevice` из `LVMVolumeGroup` ресурса +Для того чтобы вывести `BlockDevice` ресурс из `LVMVolumeGroup` ресурса, необходимо либо изменить поле `spec.blockDeviceSelector` `LVMVolumeGroup` ресурса (добавить другие селекторы), либо изменить соответствующие лейблы у `BlockDevice` ресурса, чтобы они больше не попадали под селекторы `LVMVolumeGroup`. После этого вам необходимо вручную выполнить команды `pvmove`, `vgreduce`, и `pvremove` на узле. + > **Внимание!** Если удаляемый ресурс `LVMVolumeGroup` содержит `Logical Volume` (даже если это только `Thin-pool`, который указан в `spec`) пользователю необходимо самостоятельно удалить все `Logical Volume`, которые содержит удаляемая `Volume Group`. В противном случае ни ресурс, ни `Volume Group` удалены не будут. > Пользователь может запретить удаление `LVMVolumeGroup` ресурса, повесив на ресурс специальную аннотацию `storage.deckhouse.io/deletion-protection`. При наличии данной аннотации контроллер не будет удалять ни ресурс, ни соответствующую `Volume Group` до тех пор, пока аннотация не будет снята с ресурса. diff --git a/hooks/convert_bd_names_to_selector.py b/hooks/convert_bd_names_to_selector.py new file mode 100755 index 00000000..b775e1bf --- /dev/null +++ b/hooks/convert_bd_names_to_selector.py @@ -0,0 +1,635 @@ +#!/usr/bin/env python3 +# +# Copyright 2024 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import datetime +import os +import time +from typing import Any, List + +import kubernetes +import yaml +from deckhouse import hook + +# This webhook ensures the migration of LVMVolumeGroup resources from the old CRD version to the new one: +# - Removes field spec.blockDeviceNames +# - Adds spec.Local field and fills its value 'nodeName' with the resource's node. +# - Adds spec.blockDeviceSelector field and fills it with the LVMVolumeGroup blockDeviceNames + +# The expecting result of the hook is: +# - LvmVolumeGroup kind is replaced by LVMVolumeGroup one for lvmvolumegroups CRD and CRs +# - LVMVolumeGroup CRD has the condition 'MigrationCompleted' with status 'True' +# - LvmVolumeGroupBackup resources are created if needed and their labels 'migration-completed' have value 'true' +# - new LVMVolumeGroup resources are in Ready state + +config = """ +configVersion: v1 +onStartup: 1 +""" +migrate_script = '[lvg_migration]' + +group = 'storage.deckhouse.io' +lvmvolumegroup_plural = 'lvmvolumegroups' +version = 'v1alpha1' +ds_name = 'sds-node-configurator' +ds_ns = 'd8-sds-node-configurator' +lvg_crd_name = "lvmvolumegroups.storage.deckhouse.io" + +migration_completed_label = 'migration-completed' +migration_condition_type = 'MigrationCompleted' + +retries = 5 + + +# need the param as it is given from a running context above +def main(ctx: hook.Context): + kubernetes.config.load_incluster_config() + api_v1 = kubernetes.client.AppsV1Api() + custom_api = kubernetes.client.CustomObjectsApi() + api_extension = kubernetes.client.ApiextensionsV1Api() + + print(f"{migrate_script} check if LvmVolumeGroup migration has been completed") + print(f"{migrate_script} tries to find lvmvolumegroup CRD") + try: + lvg_crd = api_extension.read_custom_resource_definition(lvg_crd_name) + print(f"{migrate_script} successfully found lvmvolumegroup CRD") + except kubernetes.client.exceptions.ApiException as ae: + # that means no LvmVolumeGroup or LVMVolumeGroup CRD was found, so just create LVMVolumeGroup CRD + if ae.status == 404: + print(f"{migrate_script} no {lvg_crd_name} was found, creates LVMVolumeGroup CRD") + try: + create_new_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to create LVMVolumeGroup CRD, error: {e}") + raise e + + # check if we have some backups + print(f"{migrate_script} tries to list LvmVolumeGroupBackups") + lvg_backup_list = None + try: + lvg_backup_list = custom_api.list_cluster_custom_object(group=group, + version=version, + plural='lvmvolumegroupbackups') + except kubernetes.client.exceptions.ApiException as ae2: + if ae2.status == 404: + print(f"{migrate_script} lvmvolumegroupbackups CRD was not found") + pass + except Exception as e: + print(f"{migrate_script} unable to list LvmVolumeGroupBackups, error: {e}") + raise e + + # if we do not have any of them, we are good, so just end the migration + if lvg_backup_list is None or len(lvg_backup_list.get('items', [])) == 0: + print(f"{migrate_script} no LvmVolumeGroup backups were found") + try: + add_condition_to_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to patch LVMVolumeGroup CRD, error: {e}") + raise e + print(f"{migrate_script} successfully migrated LvmVolumeGroup to LVMVolumeGroup CRD") + return + + # if there are some backups, we just create a LVMVolumeGroup for each of them + print( + f"{migrate_script} some LvmVolumeGroup backups were found, tries to create LVMVolumeGroups from backups") + for backup in lvg_backup_list.get('items', []): + if 
backup['metadata']['labels']['migration-completed'] == 'true': + print(f"{migrate_script} backup {backup['metadata']['name']} has been already migrated") + continue + + lvg = configure_new_lvg_from_backup(backup) + try: + create_or_update_custom_resource( + group=group, + version=version, + plural='lvmvolumegroups', + resource=lvg) + except Exception as e: + print( + f"{migrate_script} unable to create or update the LVMVolumeGroup {lvg['metadata']['name']}, error: {e}") + raise e + + try: + patch_backup_as_migrated(backup) + except Exception as e: + print( + f"{migrate_script} unable to patch the LvmVolumeGroupBackup {backup['metadata']['name']}, error: {e}") + raise e + + print(f"{migrate_script} all LVMVolumeGroup were created from the backups") + try: + add_condition_to_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to add a condition to the LVMVolumeGroup CRD, error: {e}") + raise e + print(f"{migrate_script} successfully migrated LvmVolumeGroup to LVMVolumeGroup CRD") + return + except Exception as e: + print(f"{migrate_script} error occurred, error: {e}") + raise e + + # check if migration has been already done + for condition in lvg_crd.status.conditions: + if condition.type == migration_condition_type: + print(f"{migrate_script} LvmVolumeGroup CRD has been already migrated to LVMVolumeGroup one") + return + + print( + f"{migrate_script} migration has not been completed, starts to migrate LvmVolumeGroup kind to LVMVolumeGroup new version") + + print(f"{migrate_script} tries to scale down the sds-node-configurator daemon set") + try: + api_v1.delete_namespaced_daemon_set(name=ds_name, namespace=ds_ns) + except kubernetes.client.exceptions.ApiException as e: + # if we are retrying the ds is already deleted + if e.status == 404: + pass + except Exception as e: + raise e + print(f"{migrate_script} daemon set has been successfully scaled down") + + # LvmVolumeGroup CRD flow + if lvg_crd.spec.names.kind == 'LvmVolumeGroup': + print(f"{migrate_script} found LvmVolumeGroup CRD") + print(f"{migrate_script} tries to list lvmvolumegroup resources") + lvg_list = {} + try: + lvg_list = custom_api.list_cluster_custom_object(group=group, + plural=lvmvolumegroup_plural, + version=version) + print(f"{migrate_script} successfully listed lvmvolumegroup resources") + + # as we have no LvmVolumeGroup resources, we do not need to back up anything, so just delete old CRD and create a new one + if len(lvg_list.get('items', [])) == 0: + print(f"{migrate_script} no lvmvolumegroup resources found, tries to delete LvmVolumeGroup CRD") + try: + delete_old_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to delete LvmVolumeGroup CRD, error: {e}") + raise e + + print(f"{migrate_script} successfully deleted the LvmVolumeGroup CRD") + + print(f"{migrate_script} tries to create LVMVolumeGroup CRD") + try: + create_new_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to create the LVMVolumeGroup CRD, error: {e}") + raise e + try: + add_condition_to_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to add a condition to the LVMVolumeGroup CRD, error: {e}") + raise e + + print(f"{migrate_script} successfully migrated LvmVolumeGroup to LVMVolumeGroup CRD") + return + except Exception as e: + raise e + + # as we have some LvmVolumeGroup resources, we need to back up them and then delete old CRD and create a new one + print(f"{migrate_script} some lvmvolumegroup resource were found, tries to create LvmVolumeGroupBackup CRD") + for dirpath, _, 
filenames in os.walk(top=find_crds_root(__file__)): + found = False + for filename in filenames: + if filename == 'lvmvolumegroupbackup.yaml': + crd_path = os.path.join(dirpath, filename) + print(f"{migrate_script} CRD path: {crd_path}") + with open(crd_path, "r", encoding="utf-8") as f: + for manifest in yaml.safe_load_all(f): + if manifest is None: + print(f"{migrate_script} LvmVolumeGroupBackup manifest is None, skip it") + continue + try: + found = True + print(f"{migrate_script} LvmVolumeGroupBackup manifest found, tries to create it") + api_extension.create_custom_resource_definition(manifest) + + # as the next step is creating the LvmVolumeGroupBackup resources, we need to be sure, that + # LvmVolumeGroupBackup CRD has been appeared in a cluster + for count in range(retries): + try: + api_extension.read_custom_resource_definition( + "lvmvolumegroupbackups.storage.deckhouse.io") + break + except kubernetes.client.exceptions.ApiException as ae: + if ae.status == 404: + if count == retries - 1: + return ae + + time.sleep(1) + pass + else: + print(f"{migrate_script} unable to read LvmVolumeGroupBackup CRD") + print(f"{migrate_script} successfully created LvmVolumeGroupBackup CRD") + break + except kubernetes.client.exceptions.ApiException as ae2: + if ae2.status == 409: + print(f"{migrate_script} LvmVolumeGroupBackup CRD has been already created") + pass + except Exception as e: + print(f"{migrate_script} unable to create LVMVolumeGroupBackup CRD, error: {e}") + raise e + if found: + break + + # we store the using node name as a label due to we will lose the status for the backup resource + print(f"{migrate_script} starts to create backups and add 'kubernetes.io/hostname' to store the node name") + for lvg in lvg_list.get('items', []): + lvg_backup = {'apiVersion': lvg['apiVersion'], + 'kind': 'LvmVolumeGroupBackup', + 'metadata': { + 'name': + lvg['metadata'][ + 'name'], + 'labels': + lvg['metadata'][ + 'labels'], + 'finalizers': + lvg['metadata'][ + 'finalizers']}, + 'spec': lvg['spec']} + lvg_backup['metadata']['labels']['kubernetes.io/hostname'] = lvg['status']['nodes'][0]['name'] + lvg_backup['metadata']['labels'][migration_completed_label] = 'false' + try: + create_or_update_custom_resource(group=group, + version=version, + plural='lvmvolumegroupbackups', + resource=lvg_backup) + except Exception as e: + print(f"{migrate_script} unable to create or update LvmVolumeGroupBackups, error {e}") + raise e + print(f"{migrate_script} every backup was successfully created for lvmvolumegroups") + + # before we are going to remove finalizers from LvmVolumeGroup resources and delete LvmVolumeGroup CRD, we need + # to be sure there are all backups created in the cluster + print(f"{migrate_script} check if every LvmVolumeGroupBackup is ready to be used") + lvg_backup_list = {} + try: + for count in range(retries): + lvg_backup_list = custom_api.list_cluster_custom_object(group=group, + plural='lvmvolumegroupbackups', + version=version) + print(f"{migrate_script} successfully got LvmVolumeGroupBackups") + + if len(lvg_backup_list.get('items', [])) < len(lvg_list.get('items', [])): + if count == retries - 1: + raise Exception('unable to find some LvmVolumeGroup backups') + + print(f"{migrate_script} some backups were not ready yet, retry in 1s") + time.sleep(1) + else: + print(f"{migrate_script} every backup is ready, continue") + break + except Exception as e: + print(f"{migrate_script} unable to list lvmvolumegroupbackups, error: {e}") + raise e + + print(f"{migrate_script} remove finalizers 
from old LvmVolumeGroup CRs") + for lvg in lvg_list.get('items', []): + try: + custom_api.patch_cluster_custom_object(group=group, + plural='lvmvolumegroups', + version=version, + name=lvg['metadata']['name'], + body={'metadata': {'finalizers': []}}) + print(f"{migrate_script} successfully removed finalizer from LvmVolumeGroup {lvg['metadata']['name']}") + except kubernetes.client.exceptions.ApiException as ae: + print(f"{migrate_script} unable to patch LvmVolumeGroup {lvg['metadata']['name']}, error: {ae}") + raise ae + except Exception as e: + print(f"{migrate_script} unable to remove finalizers from LvmVolumeGroups, error: {e}") + raise e + print(f"{migrate_script} successfully removed finalizers from all old LvmVolumeGroup resources") + + print(f"{migrate_script} tries to delete LvmVolumeGroup CRD") + try: + delete_old_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to delete LvmVolumeGroup CRD") + raise e + print(f"{migrate_script} successfully removed LvmVolumeGroup CRD") + + print(f"{migrate_script} tries to create LVMVolumeGroup CRD") + try: + create_new_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to create the LVMVolumeGroup CRD, error: {e}") + raise e + + print(f"{migrate_script} create new LVMVolumeGroup CRs from backups") + for lvg_backup in lvg_backup_list.get('items', []): + lvg = configure_new_lvg_from_backup(lvg_backup) + try: + create_or_update_custom_resource(group=group, + plural='lvmvolumegroups', + version=version, + resource=lvg) + except Exception as e: + print(f"{migrate_script} unable to create LVMVolumeGroup {lvg['metadata']['name']}, error: {e}") + raise e + try: + patch_backup_as_migrated(lvg_backup) + except Exception as e: + print( + f"{migrate_script} unable to update LvmVolumeGroupBackup {lvg_backup['metadata']['name']}, error: {e}") + raise e + + print(f"{migrate_script} successfully created every LVMVolumeGroup CR from backup") + try: + add_condition_to_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to add a condition to the LVMVolumeGroup CRD, error: {e}") + raise e + print(f"{migrate_script} successfully migrated LvmVolumeGroup to LVMVolumeGroup CRD") + return + # End of LvmVolumeGroup CRD flow + + # LVMVolumeGroup CRD flow + print(f"{migrate_script} found LVMVolumeGroup CRD") + print(f"{migrate_script} tries to list lvmvolumegroupbackups") + lvg_backup_list = None + try: + lvg_backup_list = custom_api.list_cluster_custom_object(group=group, + plural='lvmvolumegroupbackups', + version=version) + except kubernetes.client.exceptions.ApiException as ae: + if ae.status == 404: + print(f"{migrate_script} lvmvolumegroupbackups CRD was not found") + pass + except Exception as e: + print(f"{migrate_script} unable to list lvmvolumegroupbackups, error: {e}") + raise e + print(f"{migrate_script} successfully listed lvmvolumegroupbackups") + + # if we already have the new LVMVolumeGroup CRD and there is no any backup, we are good and just end the migration + if lvg_backup_list is None or len(lvg_backup_list.get('items', [])) == 0: + print(f"{migrate_script} no LvmVolumeGroupBackups found") + try: + add_condition_to_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to add a condition to the LVMVolumeGroup CRD, error: {e}") + raise e + print(f"{migrate_script} successfully migrated LvmVolumeGroup to LVMVolumeGroup CRD") + return + + # as we found some backups, we need to check for each of them if new LVMVolumeGroup resources should be created + print(f"{migrate_script} some 
LvmVolumeGroupBackups were found") + print(f"{migrate_script} tries to list LVMVolumeGroup resources") + try: + lvg_list: Any = custom_api.list_cluster_custom_object(group=group, + plural='lvmvolumegroups', + version=version) + except Exception as e: + print(f"{migrate_script} unable to list LVMVolumeGroup resources") + raise e + print(f"{migrate_script} successfully listed LVMVolumeGroup resources") + + # we store already created LVMVolumeGroups as ones which should not be migrated + actual_lvgs = {} + for lvg in lvg_list.get('items', []): + actual_lvgs[lvg['metadata']['name']] = '' + + for backup in lvg_backup_list.get('items', []): + if backup['metadata']['name'] in actual_lvgs: + print(f"{migrate_script} LVMVolumeGroup {backup['metadata']['name']} has been already migrated") + continue + + # this means that the corresponding LVMVolumeGroup has been already created from the backup, but also got + # deleted by a user. + if backup['metadata']['labels'][migration_completed_label] == 'true': + print(f"{migrate_script} the LvmVolumeGroup {backup['metadata']['name']} has been already migrated") + continue + + print(f"{migrate_script} tries to create LVMVolumeGroup {backup['metadata']['name']} from backup") + lvg = configure_new_lvg_from_backup(backup) + try: + create_or_update_custom_resource(group=group, + plural='lvmvolumegroups', + version=version, + resource=lvg) + except Exception as e: + print(f"{migrate_script} unable to create LVMVolumeGroup {lvg['metadata']['name']}, error: {e}") + raise e + + print(f"{migrate_script} tries to update LvmVolumeGroupBackup {backup['metadata']['name']}") + try: + patch_backup_as_migrated(backup) + except Exception as e: + print( + f"{migrate_script} unable to update LVMVolumeGroupBackup {backup['metadata']['name']}, error: {e}") + raise e + + print(f"{migrate_script} every LVMVolumeGroup resources has been migrated") + try: + add_condition_to_lvg_crd() + except Exception as e: + print(f"{migrate_script} unable to add a condition to the LVMVolumeGroup CRD, error: {e}") + raise e + print(f"{migrate_script} successfully migrated LvmVolumeGroup to LVMVolumeGroup CRD") + ### End of LVMVolumeGroup CRD flow + + +def patch_backup_as_migrated(backup): + try: + kubernetes.client.CustomObjectsApi().patch_cluster_custom_object(group=group, + version=version, + plural='lvmvolumegroupbackups', + name=backup['metadata']['name'], + body={ + 'metadata': {'labels': { + migration_completed_label: 'true'}}}) + print( + f"{migrate_script} the LVMVolumeGroupBackup {backup['metadata']['name']} {migration_completed_label} was updated to true") + except Exception as e: + print( + f"{migrate_script} unable to update LVMVolumeGroupBackup {backup['metadata']['name']}, error: {e}") + raise e + + +def delete_old_lvg_crd(): + try: + kubernetes.client.ApiextensionsV1Api().delete_custom_resource_definition(lvg_crd_name) + + # we need to be sure that old CRD was really removed before creating the new one + for count in range(retries): + try: + kubernetes.client.ApiextensionsV1Api().read_custom_resource_definition(lvg_crd_name) + if count == retries - 1: + raise Exception('LvmVolumeGroup CRD still exists in a cluster') + time.sleep(1) + except kubernetes.client.exceptions.ApiException as ae: + if ae.status == 404: + return + except Exception as e: + print(f"{migrate_script} unable to read {lvg_crd_name}, error: {e}") + raise e + except Exception as e: + raise e + + +def add_condition_to_lvg_crd(): + try: + status: Any = 
kubernetes.client.ApiextensionsV1Api().read_custom_resource_definition_status(lvg_crd_name) + status.status.conditions.append({ + 'lastTransitionTime': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), + 'message': 'LvmVolumeGroup CRD has been migrated to LVMVolumeGroup one', + 'reason': 'ResourcesMigrated', + 'type': migration_condition_type, + 'status': "True", + }) + + kubernetes.client.ApiextensionsV1Api().patch_custom_resource_definition_status(name=lvg_crd_name, + body={ + 'status': status.status + }) + except kubernetes.client.api_client as ae: + raise ae + + +def create_new_lvg_crd(): + print(f"{migrate_script} creates the new LVMVolumeGroup CRD") + for dirpath, _, filenames in os.walk(top=find_crds_root(__file__)): + for filename in filenames: + if filename == 'lvmvolumegroup.yaml': + crd_path = os.path.join(dirpath, filename) + with open(crd_path, "r", encoding="utf-8") as f: + for manifest in yaml.safe_load_all(f): + if manifest is None: + continue + try: + kubernetes.client.ApiextensionsV1Api().create_custom_resource_definition(manifest) + + # we need to be sure that the new LVMVolumeGroup CRD was created before creating LVMVolumeGroup resources + for count in range(retries): + try: + kubernetes.client.ApiextensionsV1Api().read_custom_resource_definition(lvg_crd_name) + break + except kubernetes.client.exceptions.ApiException as ae: + if ae.status == 404: + if count == retries - 1: + raise ae + + time.sleep(1) + pass + else: + print(f"{migrate_script} unable to read LvmVolumeGroupBackup CRD") + raise ae + + print(f"{migrate_script} {filename} was successfully created") + break + except Exception as e: + print(f"{migrate_script} unable to create LVMVolumeGroup CRD, error: {e}") + raise e + + +def configure_new_lvg_from_backup(backup): + lvg = {'apiVersion': 'storage.deckhouse.io/v1alpha1', + 'kind': 'LVMVolumeGroup', + 'metadata': { + 'name': + backup['metadata'][ + 'name'], + 'labels': + backup['metadata'][ + 'labels'], + 'finalizers': + backup['metadata'][ + 'finalizers']}, + 'spec': backup['spec']} + + lvg_name = lvg['metadata']['name'] + bd_names: List[str] = backup['spec']['blockDeviceNames'] + print(f"{migrate_script} extracted BlockDevice names: {bd_names} from LVMVolumeGroup {lvg_name}") + del lvg['spec']['blockDeviceNames'] + lvg['spec']['local'] = {'nodeName': lvg['metadata']['labels']['kubernetes.io/hostname']} + del lvg['metadata']['labels']['kubernetes.io/hostname'] + del lvg['metadata']['labels'][migration_completed_label] + print(f"{migrate_script} LVMVolumeGroup {lvg_name} spec after adding the Local field: {lvg['spec']}") + lvg['spec']['blockDeviceSelector'] = { + 'matchExpressions': [ + {'key': 'kubernetes.io/metadata.name', 'operator': 'In', 'values': bd_names}] + } + print(f"{migrate_script} LVMVolumeGroup {lvg_name} spec after adding the Selector: {lvg['spec']}") + return lvg + + +def find_crds_root(hookpath): + hooks_root = os.path.dirname(hookpath) + module_root = os.path.dirname(hooks_root) + crds_root = os.path.join(module_root, "crds") + return crds_root + + +def create_or_update_custom_resource(group, plural, version, resource): + try: + kubernetes.client.CustomObjectsApi().create_cluster_custom_object(group=group, + plural=plural, + version=version, + body={ + 'apiVersion': f'{group}/{version}', + 'kind': resource['kind'], + 'metadata': + { + 'name': + resource['metadata'][ + 'name'], + 'labels': + resource['metadata'][ + 'labels'], + 'finalizers': + resource['metadata'][ + 'finalizers']}, + 'spec': resource['spec']}) + 
print(f"{migrate_script} successfully created {resource['kind']} {resource['metadata']['name']}") + except kubernetes.client.exceptions.ApiException as ae: + if ae.status == 409: + print( + f"{migrate_script} the {resource['kind']} {resource['metadata']['name']} has been already created, update it") + try: + kubernetes.client.CustomObjectsApi().patch_cluster_custom_object(group=group, + plural=plural, + version=version, + name=resource['metadata']['name'], + body={ + 'metadata': + { + 'name': + resource['metadata'][ + 'name'], + 'labels': + resource['metadata'][ + 'labels'], + 'finalizers': + resource['metadata'][ + 'finalizers'], + }, + 'spec': resource['spec']}) + print(f"{migrate_script} successfully updated {resource['kind']} {resource['metadata']['name']}") + except Exception as e: + print( + f"{migrate_script} Exception occurred while trying to update {resource['kind']} {resource['metadata']['name']}, error: {e}") + raise e + else: + print( + f"{migrate_script} unexpected error has been occurred while trying to create the {resource['kind']} {resource['metadata']['name']}, error: {ae}") + raise ae + except Exception as e: + print( + f"{migrate_script} failed to create {resource['kind']} {resource['metadata']['name']}") + raise e + + +if __name__ == "__main__": + hook.run(main, config=config) diff --git a/images/agent/src/go.mod b/images/agent/src/go.mod index 89d2db6c..21af1466 100644 --- a/images/agent/src/go.mod +++ b/images/agent/src/go.mod @@ -20,6 +20,8 @@ require ( sigs.k8s.io/controller-runtime v0.19.0 ) +replace github.com/deckhouse/sds-node-configurator/api => ../../../api + require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/images/agent/src/go.sum b/images/agent/src/go.sum index 5282c84f..28ef6755 100644 --- a/images/agent/src/go.sum +++ b/images/agent/src/go.sum @@ -7,8 +7,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw= -github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= diff --git a/images/agent/src/internal/type.go b/images/agent/src/internal/type.go index 172e1474..c2ac1bf3 100644 --- a/images/agent/src/internal/type.go +++ b/images/agent/src/internal/type.go @@ -23,7 +23,7 @@ type BlockDeviceCandidate struct { Consumable bool PVUuid string VGUuid string - LvmVolumeGroupName string + LVMVolumeGroupName string ActualVGNameOnTheNode string Wwn string Serial string diff --git a/images/agent/src/pkg/cache/cache.go b/images/agent/src/pkg/cache/cache.go index a4b0c89c..3c8a78df 100644 --- a/images/agent/src/pkg/cache/cache.go +++ b/images/agent/src/pkg/cache/cache.go @@ -3,6 +3,7 @@ package cache import ( "bytes" "fmt" + "reflect" "sync" "agent/internal" @@ -81,8 +82,7 @@ 
func (c *Cache) StoreLVs(lvs []internal.LVData, stdErr bytes.Buffer) { c.m.Lock() defer c.m.Unlock() - for _, lv := range lvsOnNode { - k := c.configureLVKey(lv.VGName, lv.LVName) + for k, lv := range lvsOnNode { if cachedLV, exist := c.lvs[k]; !exist || cachedLV.Exist { c.lvs[k] = &LVData{ Data: lv, @@ -92,7 +92,7 @@ func (c *Cache) StoreLVs(lvs []internal.LVData, stdErr bytes.Buffer) { } for key, lv := range c.lvs { - if lv.Exist { + if lv.Exist && reflect.ValueOf(lv.Data).IsZero() { continue } @@ -126,7 +126,7 @@ func (c *Cache) AddLV(vgName, lvName string) { c.m.Lock() defer c.m.Unlock() c.lvs[c.configureLVKey(vgName, lvName)] = &LVData{ - Data: internal.LVData{VGName: vgName, LVName: lvName}, + Data: internal.LVData{}, Exist: true, } } diff --git a/images/agent/src/pkg/cache/cache_test.go b/images/agent/src/pkg/cache/cache_test.go index d81988b0..e0b623e0 100644 --- a/images/agent/src/pkg/cache/cache_test.go +++ b/images/agent/src/pkg/cache/cache_test.go @@ -10,68 +10,139 @@ import ( ) func TestCache(t *testing.T) { - sdsCache := New() - devices := []internal.Device{ - { - Name: "test-1", - }, - { - Name: "test-2", - }, - { - Name: "test-3", - }, - } + t.Run("general_functionality", func(t *testing.T) { + sdsCache := New() + devices := []internal.Device{ + { + Name: "test-1", + }, + { + Name: "test-2", + }, + { + Name: "test-3", + }, + } - pvs := []internal.PVData{ - { - PVName: "pv-1", - }, - { - PVName: "pv-2", - }, - { - PVName: "pv-3", - }, - } + pvs := []internal.PVData{ + { + PVName: "pv-1", + }, + { + PVName: "pv-2", + }, + { + PVName: "pv-3", + }, + } - vgs := []internal.VGData{ - { - VGName: "vg-1", - }, - { - VGName: "vg-2", - }, - { - VGName: "vg-3", - }, - } + vgs := []internal.VGData{ + { + VGName: "vg-1", + }, + { + VGName: "vg-2", + }, + { + VGName: "vg-3", + }, + } - lvs := []internal.LVData{ - { - LVName: "lv-1", - }, - { - LVName: "lv-2", - }, - { - LVName: "lv-3", - }, - } + lvs := []internal.LVData{ + { + LVName: "lv-1", + }, + { + LVName: "lv-2", + }, + { + LVName: "lv-3", + }, + } + + sdsCache.StoreDevices(devices, bytes.Buffer{}) + sdsCache.StorePVs(pvs, bytes.Buffer{}) + sdsCache.StoreVGs(vgs, bytes.Buffer{}) + sdsCache.StoreLVs(lvs, bytes.Buffer{}) - sdsCache.StoreDevices(devices, bytes.Buffer{}) - sdsCache.StorePVs(pvs, bytes.Buffer{}) - sdsCache.StoreVGs(vgs, bytes.Buffer{}) - sdsCache.StoreLVs(lvs, bytes.Buffer{}) - - actualDev, _ := sdsCache.GetDevices() - actualPVs, _ := sdsCache.GetPVs() - actualVGs, _ := sdsCache.GetVGs() - actualLVs, _ := sdsCache.GetLVs() - assert.ElementsMatch(t, devices, actualDev) - assert.ElementsMatch(t, pvs, actualPVs) - assert.ElementsMatch(t, vgs, actualVGs) - assert.ElementsMatch(t, lvs, actualLVs) + actualDev, _ := sdsCache.GetDevices() + actualPVs, _ := sdsCache.GetPVs() + actualVGs, _ := sdsCache.GetVGs() + actualLVs, _ := sdsCache.GetLVs() + assert.ElementsMatch(t, devices, actualDev) + assert.ElementsMatch(t, pvs, actualPVs) + assert.ElementsMatch(t, vgs, actualVGs) + assert.ElementsMatch(t, lvs, actualLVs) + }) + + t.Run("StoreLVs_LV_is_empty_and_exist_is_true_do_not_delete_it", func(t *testing.T) { + const ( + key = "some-vg/some-lv" + ) + cache := New() + cache.AddLV("some-vg", "some-lv") + cache.StoreLVs([]internal.LVData{}, bytes.Buffer{}) + + _, exist := cache.lvs[key] + assert.True(t, exist) + }) + + t.Run("StoreLVs_LV_is_empty_and_exist_is_false_delete_it", func(t *testing.T) { + const ( + key = "some-vg/some-lv" + ) + cache := New() + cache.lvs = map[string]*LVData{ + key: {Exist: false}, + } + + 
cache.StoreLVs([]internal.LVData{}, bytes.Buffer{}) + + _, exist := cache.lvs[key] + assert.False(t, exist) + }) + + t.Run("StoreLVs_LV_is_not_empty_and_exist_is_true_do_not_delete_it", func(t *testing.T) { + const ( + key = "some-vg/some-lv" + ) + lv := internal.LVData{ + LVName: "some-lv", + VGName: "some-vg", + } + cache := New() + cache.lvs = map[string]*LVData{ + key: { + Exist: true, + Data: lv}, + } + + cache.StoreLVs([]internal.LVData{lv}, bytes.Buffer{}) + + _, exist := cache.lvs[key] + assert.True(t, exist) + }) + + t.Run("StoreLVs_LV_is_not_empty_and_exist_is_true_delete_it_due_to_not_on_the_node", func(t *testing.T) { + const ( + key = "some-vg/some-lv" + ) + lv := internal.LVData{ + LVName: "some-lv", + VGName: "some-vg", + } + cache := New() + cache.lvs = map[string]*LVData{ + key: { + Exist: true, + Data: lv}, + } + + cache.StoreLVs([]internal.LVData{}, bytes.Buffer{}) + + _, exist := cache.lvs[key] + assert.False(t, exist) + }) } func BenchmarkCache(b *testing.B) { diff --git a/images/agent/src/pkg/controller/block_device.go b/images/agent/src/pkg/controller/block_device.go index 6ccd3794..d86ebeb4 100644 --- a/images/agent/src/pkg/controller/block_device.go +++ b/images/agent/src/pkg/controller/block_device.go @@ -31,7 +31,8 @@ import ( "github.com/gosimple/slug" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kclient "sigs.k8s.io/controller-runtime/pkg/client" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -81,7 +82,7 @@ func RunBlockDeviceController( return c, err } -func BlockDeviceReconcile(ctx context.Context, cl kclient.Client, log logger.Logger, metrics monitoring.Metrics, cfg config.Options, sdsCache *cache.Cache) bool { +func BlockDeviceReconcile(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, cfg config.Options, sdsCache *cache.Cache) bool { reconcileStart := time.Now() log.Info("[RunBlockDeviceController] START reconcile of block devices") @@ -92,7 +93,7 @@ func BlockDeviceReconcile(ctx context.Context, cl kclient.Client, log logger.Log return false } - apiBlockDevices, err := GetAPIBlockDevices(ctx, cl, metrics) + apiBlockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil) if err != nil { log.Error(err, "[RunBlockDeviceController] unable to GetAPIBlockDevices") return true @@ -147,7 +148,7 @@ func hasBlockDeviceDiff(blockDevice v1alpha1.BlockDevice, candidate internal.Blo candidate.PVUuid != blockDevice.Status.PVUuid || candidate.VGUuid != blockDevice.Status.VGUuid || candidate.PartUUID != blockDevice.Status.PartUUID || - candidate.LvmVolumeGroupName != blockDevice.Status.LvmVolumeGroupName || + candidate.LVMVolumeGroupName != blockDevice.Status.LVMVolumeGroupName || candidate.ActualVGNameOnTheNode != blockDevice.Status.ActualVGNameOnTheNode || candidate.Wwn != blockDevice.Status.Wwn || candidate.Serial != blockDevice.Status.Serial || @@ -162,28 +163,37 @@ func hasBlockDeviceDiff(blockDevice v1alpha1.BlockDevice, candidate internal.Blo !reflect.DeepEqual(ConfigureBlockDeviceLabels(blockDevice), blockDevice.Labels) } -func GetAPIBlockDevices(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics) (map[string]v1alpha1.BlockDevice, error) { - listDevice := &v1alpha1.BlockDeviceList{} - +// GetAPIBlockDevices returns map of BlockDevice resources with BlockDevice as a key. 
You might specify a selector to get a subset or +// leave it as nil to get all the resources. +func GetAPIBlockDevices(ctx context.Context, cl client.Client, metrics monitoring.Metrics, selector *metav1.LabelSelector) (map[string]v1alpha1.BlockDevice, error) { + list := &v1alpha1.BlockDeviceList{} + s, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + if s == labels.Nothing() { + s = nil + } start := time.Now() - err := kc.List(ctx, listDevice) + err = cl.List(ctx, list, &client.ListOptions{LabelSelector: s}) metrics.APIMethodsDuration(BlockDeviceCtrlName, "list").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.APIMethodsExecutionCount(BlockDeviceCtrlName, "list").Inc() if err != nil { metrics.APIMethodsErrors(BlockDeviceCtrlName, "list").Inc() - return nil, fmt.Errorf("unable to kc.List, error: %w", err) + return nil, err } - devices := make(map[string]v1alpha1.BlockDevice, len(listDevice.Items)) - for _, blockDevice := range listDevice.Items { - devices[blockDevice.Name] = blockDevice + result := make(map[string]v1alpha1.BlockDevice, len(list.Items)) + for _, item := range list.Items { + result[item.Name] = item } - return devices, nil + + return result, nil } func RemoveDeprecatedAPIDevices( ctx context.Context, - cl kclient.Client, + cl client.Client, log logger.Logger, metrics monitoring.Metrics, candidates []internal.BlockDeviceCandidate, @@ -293,7 +303,7 @@ func GetBlockDeviceCandidates(log logger.Logger, cfg config.Options, sdsCache *c candidate.PVUuid = pv.PVUuid candidate.VGUuid = pv.VGUuid candidate.ActualVGNameOnTheNode = pv.VGName - candidate.LvmVolumeGroupName = lvmVGName + candidate.LVMVolumeGroupName = lvmVGName } else { if len(pv.VGName) != 0 { log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] The device is a PV with VG named %s that lacks our tag %s. 
Removing it from Kubernetes", pv.VGName, internal.LVMTags[0])) @@ -502,7 +512,7 @@ func readSerialBlockDevice(deviceName string, isMdRaid bool) (string, error) { return string(serial), nil } -func UpdateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics, blockDevice v1alpha1.BlockDevice, candidate internal.BlockDeviceCandidate) error { +func UpdateAPIBlockDevice(ctx context.Context, kc client.Client, metrics monitoring.Metrics, blockDevice v1alpha1.BlockDevice, candidate internal.BlockDeviceCandidate) error { blockDevice.Status = v1alpha1.BlockDeviceStatus{ Type: candidate.Type, FsType: candidate.FSType, @@ -511,7 +521,7 @@ func UpdateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito PVUuid: candidate.PVUuid, VGUuid: candidate.VGUuid, PartUUID: candidate.PartUUID, - LvmVolumeGroupName: candidate.LvmVolumeGroupName, + LVMVolumeGroupName: candidate.LVMVolumeGroupName, ActualVGNameOnTheNode: candidate.ActualVGNameOnTheNode, Wwn: candidate.Wwn, Serial: candidate.Serial, @@ -538,39 +548,39 @@ func UpdateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito } func ConfigureBlockDeviceLabels(blockDevice v1alpha1.BlockDevice) map[string]string { - var labels map[string]string + var lbls map[string]string if blockDevice.Labels == nil { - labels = make(map[string]string, 16) + lbls = make(map[string]string, 16) } else { - labels = make(map[string]string, len(blockDevice.Labels)) + lbls = make(map[string]string, len(blockDevice.Labels)) } for key, value := range blockDevice.Labels { - labels[key] = value + lbls[key] = value } slug.Lowercase = false - labels[internal.MetadataNameLabelKey] = slug.Make(blockDevice.ObjectMeta.Name) - labels[internal.HostNameLabelKey] = slug.Make(blockDevice.Status.NodeName) - labels[internal.BlockDeviceTypeLabelKey] = slug.Make(blockDevice.Status.Type) - labels[internal.BlockDeviceFSTypeLabelKey] = slug.Make(blockDevice.Status.FsType) - labels[internal.BlockDevicePVUUIDLabelKey] = blockDevice.Status.PVUuid - labels[internal.BlockDeviceVGUUIDLabelKey] = blockDevice.Status.VGUuid - labels[internal.BlockDevicePartUUIDLabelKey] = blockDevice.Status.PartUUID - labels[internal.BlockDeviceLVMVolumeGroupNameLabelKey] = slug.Make(blockDevice.Status.LvmVolumeGroupName) - labels[internal.BlockDeviceActualVGNameLabelKey] = slug.Make(blockDevice.Status.ActualVGNameOnTheNode) - labels[internal.BlockDeviceWWNLabelKey] = slug.Make(blockDevice.Status.Wwn) - labels[internal.BlockDeviceSerialLabelKey] = slug.Make(blockDevice.Status.Serial) - labels[internal.BlockDeviceSizeLabelKey] = blockDevice.Status.Size.String() - labels[internal.BlockDeviceModelLabelKey] = slug.Make(blockDevice.Status.Model) - labels[internal.BlockDeviceRotaLabelKey] = strconv.FormatBool(blockDevice.Status.Rota) - labels[internal.BlockDeviceHotPlugLabelKey] = strconv.FormatBool(blockDevice.Status.HotPlug) - labels[internal.BlockDeviceMachineIDLabelKey] = slug.Make(blockDevice.Status.MachineID) - - return labels + lbls[internal.MetadataNameLabelKey] = slug.Make(blockDevice.ObjectMeta.Name) + lbls[internal.HostNameLabelKey] = slug.Make(blockDevice.Status.NodeName) + lbls[internal.BlockDeviceTypeLabelKey] = slug.Make(blockDevice.Status.Type) + lbls[internal.BlockDeviceFSTypeLabelKey] = slug.Make(blockDevice.Status.FsType) + lbls[internal.BlockDevicePVUUIDLabelKey] = blockDevice.Status.PVUuid + lbls[internal.BlockDeviceVGUUIDLabelKey] = blockDevice.Status.VGUuid + lbls[internal.BlockDevicePartUUIDLabelKey] = blockDevice.Status.PartUUID + 
lbls[internal.BlockDeviceLVMVolumeGroupNameLabelKey] = slug.Make(blockDevice.Status.LVMVolumeGroupName) + lbls[internal.BlockDeviceActualVGNameLabelKey] = slug.Make(blockDevice.Status.ActualVGNameOnTheNode) + lbls[internal.BlockDeviceWWNLabelKey] = slug.Make(blockDevice.Status.Wwn) + lbls[internal.BlockDeviceSerialLabelKey] = slug.Make(blockDevice.Status.Serial) + lbls[internal.BlockDeviceSizeLabelKey] = blockDevice.Status.Size.String() + lbls[internal.BlockDeviceModelLabelKey] = slug.Make(blockDevice.Status.Model) + lbls[internal.BlockDeviceRotaLabelKey] = strconv.FormatBool(blockDevice.Status.Rota) + lbls[internal.BlockDeviceHotPlugLabelKey] = strconv.FormatBool(blockDevice.Status.HotPlug) + lbls[internal.BlockDeviceMachineIDLabelKey] = slug.Make(blockDevice.Status.MachineID) + + return lbls } -func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics, candidate internal.BlockDeviceCandidate) (*v1alpha1.BlockDevice, error) { +func CreateAPIBlockDevice(ctx context.Context, kc client.Client, metrics monitoring.Metrics, candidate internal.BlockDeviceCandidate) (*v1alpha1.BlockDevice, error) { blockDevice := &v1alpha1.BlockDevice{ ObjectMeta: metav1.ObjectMeta{ Name: candidate.Name, @@ -583,7 +593,7 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito PVUuid: candidate.PVUuid, VGUuid: candidate.VGUuid, PartUUID: candidate.PartUUID, - LvmVolumeGroupName: candidate.LvmVolumeGroupName, + LVMVolumeGroupName: candidate.LVMVolumeGroupName, ActualVGNameOnTheNode: candidate.ActualVGNameOnTheNode, Wwn: candidate.Wwn, Serial: candidate.Serial, @@ -608,7 +618,7 @@ func CreateAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monito return blockDevice, nil } -func DeleteAPIBlockDevice(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics, device *v1alpha1.BlockDevice) error { +func DeleteAPIBlockDevice(ctx context.Context, kc client.Client, metrics monitoring.Metrics, device *v1alpha1.BlockDevice) error { start := time.Now() err := kc.Delete(ctx, device) metrics.APIMethodsDuration(BlockDeviceCtrlName, "delete").Observe(metrics.GetEstimatedTimeInSeconds(start)) diff --git a/images/agent/src/pkg/controller/block_device_test.go b/images/agent/src/pkg/controller/block_device_test.go index bf534edc..497836b3 100644 --- a/images/agent/src/pkg/controller/block_device_test.go +++ b/images/agent/src/pkg/controller/block_device_test.go @@ -40,12 +40,255 @@ import ( func TestBlockDeviceCtrl(t *testing.T) { ctx := context.Background() + cl := NewFakeClient() + metrics := monitoring.GetMetrics("") log, _ := logger.NewLogger("1") cfg := config.Options{ NodeName: "test-node", MachineID: "test-id", } + t.Run("GetAPIBlockDevices", func(t *testing.T) { + t.Run("bds_exist_match_labels_and_expressions_return_bds", func(t *testing.T) { + const ( + name1 = "name1" + name2 = "name2" + name3 = "name3" + hostName = "test-host" + ) + + bds := []v1alpha1.BlockDevice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: name1, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name1, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: name2, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name2, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: name3, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name3, + }, + }, + }, + } + + for _, bd := range bds { + err := cl.Create(ctx, &bd) 
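			// Note on these GetAPIBlockDevices sub-tests: matchLabels and matchExpressions of a
			// LabelSelector are ANDed, so with the selector built further down only name1 and name2
			// (hostname label plus the In expression) are expected back, while name3 must be filtered out.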
+ if err != nil { + t.Error(err) + } + } + + defer func() { + for _, bd := range bds { + err := cl.Delete(ctx, &bd) + if err != nil { + t.Error(err) + } + } + }() + + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + BlockDeviceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/hostname": hostName, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: []string{name1, name2}, + }, + }, + }, + }, + } + + actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector) + if assert.NoError(t, err) { + assert.Equal(t, 2, len(actualBd)) + + _, ok := actualBd[name1] + assert.True(t, ok) + _, ok = actualBd[name2] + assert.True(t, ok) + _, ok = actualBd[name3] + assert.False(t, ok) + } + }) + + t.Run("bds_exist_only_match_labels_return_bds", func(t *testing.T) { + const ( + name1 = "name11" + name2 = "name22" + name3 = "name33" + hostName = "test-host" + ) + + bds := []v1alpha1.BlockDevice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: name1, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name1, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: name2, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name2, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: name3, + Labels: map[string]string{ + "kubernetes.io/hostname": "other-host", + "kubernetes.io/metadata.name": name3, + }, + }, + }, + } + + for _, bd := range bds { + err := cl.Create(ctx, &bd) + if err != nil { + t.Error(err) + } + } + + defer func() { + for _, bd := range bds { + err := cl.Delete(ctx, &bd) + if err != nil { + t.Error(err) + } + } + }() + + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + BlockDeviceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/hostname": hostName}, + }, + }, + } + + actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector) + if assert.NoError(t, err) { + assert.Equal(t, 2, len(actualBd)) + + _, ok := actualBd[name1] + assert.True(t, ok) + _, ok = actualBd[name2] + assert.True(t, ok) + _, ok = actualBd[name3] + assert.False(t, ok) + } + }) + + t.Run("bds_exist_only_match_expressions_return_bds", func(t *testing.T) { + const ( + name1 = "name111" + name2 = "name222" + name3 = "name333" + hostName = "test-host" + ) + + bds := []v1alpha1.BlockDevice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: name1, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name1, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: name2, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name2, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: name3, + Labels: map[string]string{ + "kubernetes.io/hostname": hostName, + "kubernetes.io/metadata.name": name3, + }, + }, + }, + } + + for _, bd := range bds { + err := cl.Create(ctx, &bd) + if err != nil { + t.Error(err) + } + } + + defer func() { + for _, bd := range bds { + err := cl.Delete(ctx, &bd) + if err != nil { + t.Error(err) + } + } + }() + + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + BlockDeviceSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: 
[]string{name1, name2}, + }, + }, + }, + }, + } + + actualBd, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector) + if assert.NoError(t, err) { + assert.Equal(t, 2, len(actualBd)) + _, ok := actualBd[name1] + assert.True(t, ok) + _, ok = actualBd[name2] + assert.True(t, ok) + _, ok = actualBd[name3] + assert.False(t, ok) + } + }) + }) + t.Run("shouldDeleteBlockDevice", func(t *testing.T) { t.Run("returns_true", func(t *testing.T) { bd := v1alpha1.BlockDevice{ @@ -107,14 +350,14 @@ func TestBlockDeviceCtrl(t *testing.T) { goodName = "test-candidate1" badName = "test-candidate2" ) - cl := NewFakeClient() + candidates := []internal.BlockDeviceCandidate{ { NodeName: cfg.NodeName, Consumable: false, PVUuid: "142412421", VGUuid: "123123123", - LvmVolumeGroupName: "test-lvg", + LVMVolumeGroupName: "test-lvg", ActualVGNameOnTheNode: "test-vg", Wwn: "12414212", Serial: "1412412412412", @@ -328,7 +571,7 @@ func TestBlockDeviceCtrl(t *testing.T) { Consumable: false, PVUuid: "testPV", VGUuid: "testVGUID", - LvmVolumeGroupName: "testLVGName", + LVMVolumeGroupName: "testLVGName", ActualVGNameOnTheNode: "testNameOnNode", Wwn: "testWWN", Serial: "testSERIAL", @@ -353,7 +596,7 @@ func TestBlockDeviceCtrl(t *testing.T) { internal.BlockDevicePVUUIDLabelKey: blockDevice.Status.PVUuid, internal.BlockDeviceVGUUIDLabelKey: blockDevice.Status.VGUuid, internal.BlockDevicePartUUIDLabelKey: blockDevice.Status.PartUUID, - internal.BlockDeviceLVMVolumeGroupNameLabelKey: blockDevice.Status.LvmVolumeGroupName, + internal.BlockDeviceLVMVolumeGroupNameLabelKey: blockDevice.Status.LVMVolumeGroupName, internal.BlockDeviceActualVGNameLabelKey: blockDevice.Status.ActualVGNameOnTheNode, internal.BlockDeviceWWNLabelKey: blockDevice.Status.Wwn, internal.BlockDeviceSerialLabelKey: blockDevice.Status.Serial, @@ -377,7 +620,7 @@ func TestBlockDeviceCtrl(t *testing.T) { Consumable: false, PVUuid: "testPV", VGUuid: "testVGUID", - LvmVolumeGroupName: "testLVGName", + LVMVolumeGroupName: "testLVGName", ActualVGNameOnTheNode: "testNameOnNode", Wwn: "testWWN", Serial: "testSERIAL", @@ -399,7 +642,7 @@ func TestBlockDeviceCtrl(t *testing.T) { Consumable: true, PVUuid: "testPV2", VGUuid: "testVGUID2", - LvmVolumeGroupName: "testLVGName2", + LVMVolumeGroupName: "testLVGName2", ActualVGNameOnTheNode: "testNameOnNode2", Wwn: "testWWN2", Serial: "testSERIAL2", @@ -424,7 +667,7 @@ func TestBlockDeviceCtrl(t *testing.T) { Consumable: false, PVUuid: "testPV", VGUuid: "testVGUID", - LvmVolumeGroupName: "testLVGName", + LVMVolumeGroupName: "testLVGName", ActualVGNameOnTheNode: "testNameOnNode", Wwn: "testWWN", Serial: "testSERIAL", diff --git a/images/agent/src/pkg/controller/controller_reconcile_test.go b/images/agent/src/pkg/controller/controller_reconcile_test.go index 9bfc27bd..603cc6ed 100644 --- a/images/agent/src/pkg/controller/controller_reconcile_test.go +++ b/images/agent/src/pkg/controller/controller_reconcile_test.go @@ -41,7 +41,7 @@ var _ = Describe("Storage Controller", func() { Consumable: true, PVUuid: "123", VGUuid: "123", - LvmVolumeGroupName: "testLvm", + LVMVolumeGroupName: "testLvm", ActualVGNameOnTheNode: "testVG", Wwn: "WW12345678", Serial: "test", @@ -68,7 +68,7 @@ var _ = Describe("Storage Controller", func() { Expect(blockDevice.Status.Consumable).To(Equal(candidate.Consumable)) Expect(blockDevice.Status.PVUuid).To(Equal(candidate.PVUuid)) Expect(blockDevice.Status.VGUuid).To(Equal(candidate.VGUuid)) - Expect(blockDevice.Status.LvmVolumeGroupName).To(Equal(candidate.LvmVolumeGroupName)) + 
Expect(blockDevice.Status.LVMVolumeGroupName).To(Equal(candidate.LVMVolumeGroupName)) Expect(blockDevice.Status.ActualVGNameOnTheNode).To(Equal(candidate.ActualVGNameOnTheNode)) Expect(blockDevice.Status.Wwn).To(Equal(candidate.Wwn)) Expect(blockDevice.Status.Serial).To(Equal(candidate.Serial)) @@ -82,7 +82,7 @@ var _ = Describe("Storage Controller", func() { }) It("GetAPIBlockDevices", func() { - listDevice, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics) + listDevice, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil) Expect(err).NotTo(HaveOccurred()) Expect(listDevice).NotTo(BeNil()) Expect(len(listDevice)).To(Equal(1)) @@ -98,7 +98,7 @@ var _ = Describe("Storage Controller", func() { Consumable: false, PVUuid: "123", VGUuid: "123", - LvmVolumeGroupName: "updatedField", + LVMVolumeGroupName: "updatedField", ActualVGNameOnTheNode: "testVG", Wwn: "WW12345678", Serial: "test", @@ -115,7 +115,7 @@ var _ = Describe("Storage Controller", func() { MachineID: "1234", } - resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics) + resources, err := controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil) Expect(err).NotTo(HaveOccurred()) Expect(resources).NotTo(BeNil()) Expect(len(resources)).To(Equal(1)) @@ -127,7 +127,7 @@ var _ = Describe("Storage Controller", func() { err = controller.UpdateAPIBlockDevice(ctx, cl, testMetrics, oldResource, newCandidate) Expect(err).NotTo(HaveOccurred()) - resources, err = controller.GetAPIBlockDevices(ctx, cl, testMetrics) + resources, err = controller.GetAPIBlockDevices(ctx, cl, testMetrics, nil) Expect(err).NotTo(HaveOccurred()) Expect(resources).NotTo(BeNil()) Expect(len(resources)).To(Equal(1)) @@ -136,7 +136,7 @@ var _ = Describe("Storage Controller", func() { Expect(newResource).NotTo(BeNil()) Expect(newResource.Status.NodeName).To(Equal(candidate.NodeName)) Expect(newResource.Status.Consumable).To(BeFalse()) - Expect(newResource.Status.LvmVolumeGroupName).To(Equal("updatedField")) + Expect(newResource.Status.LVMVolumeGroupName).To(Equal("updatedField")) }) It("DeleteAPIBlockDevice", func() { @@ -147,7 +147,7 @@ var _ = Describe("Storage Controller", func() { }) Expect(err).NotTo(HaveOccurred()) - devices, err := controller.GetAPIBlockDevices(context.Background(), cl, testMetrics) + devices, err := controller.GetAPIBlockDevices(context.Background(), cl, testMetrics, nil) Expect(err).NotTo(HaveOccurred()) for name := range devices { Expect(name).NotTo(Equal(deviceName)) diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go index 39da5367..904f9d56 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_extender_watcher.go @@ -80,14 +80,14 @@ func RunLVMLogicalVolumeExtenderWatcherController( return err } - err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + err = c.Watch(source.Kind(mgrCache, &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ + CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got a 
Create event for the LVMVolumeGroup %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] added the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) }, - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] got an Update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} q.Add(request) @@ -102,9 +102,9 @@ func RunLVMLogicalVolumeExtenderWatcherController( return nil } -func shouldLLVExtenderReconcileEvent(log logger.Logger, newLVG *v1alpha1.LvmVolumeGroup, nodeName string) bool { +func shouldLLVExtenderReconcileEvent(log logger.Logger, newLVG *v1alpha1.LVMVolumeGroup, nodeName string) bool { // for new LVMVolumeGroups - if reflect.DeepEqual(newLVG.Status, v1alpha1.LvmVolumeGroupStatus{}) { + if reflect.DeepEqual(newLVG.Status, v1alpha1.LVMVolumeGroupStatus{}) { log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status is not initialized yet", newLVG.Name)) return false } @@ -122,7 +122,7 @@ func shouldLLVExtenderReconcileEvent(log logger.Logger, newLVG *v1alpha1.LvmVolu return true } -func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, metrics monitoring.Metrics, log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup) bool { +func ReconcileLVMLogicalVolumeExtension(ctx context.Context, cl client.Client, metrics monitoring.Metrics, log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup) bool { log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name)) llvs, err := getAllLLVsWithPercentSize(ctx, cl, lvg.Name) if err != nil { @@ -254,7 +254,7 @@ func getAllLLVsWithPercentSize(ctx context.Context, cl client.Client, lvgName st result := make([]v1alpha1.LVMLogicalVolume, 0, len(llvList.Items)) for _, llv := range llvList.Items { - if llv.Spec.LvmVolumeGroupName == lvgName && isPercentSize(llv.Spec.Size) { + if llv.Spec.LVMVolumeGroupName == lvgName && isPercentSize(llv.Spec.Size) { result = append(result, llv) } } diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go index 3216e21b..05da56e4 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher.go @@ -71,11 +71,11 @@ func RunLVMLogicalVolumeWatcherController( return reconcile.Result{}, err } - lvg, err := getLVMVolumeGroup(ctx, cl, metrics, llv.Spec.LvmVolumeGroupName) + lvg, err := getLVMVolumeGroup(ctx, cl, metrics, llv.Spec.LVMVolumeGroupName) if err != nil { if k8serr.IsNotFound(err) { - log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMVolumeGroup %s not found for LVMLogicalVolume %s. 
Retry in %s", llv.Spec.LvmVolumeGroupName, llv.Name, cfg.VolumeGroupScanIntervalSec.String())) - err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, fmt.Sprintf("LVMVolumeGroup %s not found", llv.Spec.LvmVolumeGroupName)) + log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] LVMVolumeGroup %s not found for LVMLogicalVolume %s. Retry in %s", llv.Spec.LVMVolumeGroupName, llv.Name, cfg.VolumeGroupScanIntervalSec.String())) + err = updateLVMLogicalVolumePhaseIfNeeded(ctx, cl, log, metrics, llv, LLVStatusPhaseFailed, fmt.Sprintf("LVMVolumeGroup %s not found", llv.Spec.LVMVolumeGroupName)) if err != nil { log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolume] unable to update the LVMLogicalVolume %s", llv.Name)) return reconcile.Result{}, err @@ -192,7 +192,7 @@ func RunLVMLogicalVolumeWatcherController( return c, err } -func ReconcileLVMLogicalVolume(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func ReconcileLVMLogicalVolume(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts the reconciliation for the LVMLogicalVolume %s", llv.Name)) log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to identify the reconciliation type for the LVMLogicalVolume %s", llv.Name)) @@ -226,7 +226,7 @@ func reconcileLLVCreateFunc( metrics monitoring.Metrics, sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LvmVolumeGroup, + lvg *v1alpha1.LVMVolumeGroup, ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) @@ -308,7 +308,7 @@ func reconcileLLVUpdateFunc( metrics monitoring.Metrics, sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LvmVolumeGroup, + lvg *v1alpha1.LVMVolumeGroup, ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) @@ -435,7 +435,7 @@ func reconcileLLVDeleteFunc( metrics monitoring.Metrics, sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, - lvg *v1alpha1.LvmVolumeGroup, + lvg *v1alpha1.LVMVolumeGroup, ) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go index 4ceb4301..b14ca251 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_func.go @@ -42,7 +42,7 @@ func shouldReconcileByDeleteFunc(llv *v1alpha1.LVMLogicalVolume) bool { } //nolint:unparam -func checkIfConditionIsTrue(lvg *v1alpha1.LvmVolumeGroup, conType string) bool { +func checkIfConditionIsTrue(lvg *v1alpha1.LVMVolumeGroup, conType string) bool { // this check prevents infinite resource updating after a retry for _, c := range lvg.Status.Conditions { if c.Type == conType && c.Status == v1.ConditionTrue { @@ -57,7 +57,7 @@ func isPercentSize(size string) bool { return strings.Contains(size, "%") } -func getLLVRequestedSize(llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LvmVolumeGroup) (resource.Quantity, error) { +func getLLVRequestedSize(llv 
*v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (resource.Quantity, error) { switch llv.Spec.Type { case Thick: return getRequestedSizeFromString(llv.Spec.Size, lvg.Status.VGSize) @@ -212,7 +212,7 @@ func shouldReconcileByCreateFunc(sdsCache *cache.Cache, vgName string, llv *v1al return lv == nil } -func getFreeLVGSpaceForLLV(lvg *v1alpha1.LvmVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity { +func getFreeLVGSpaceForLLV(lvg *v1alpha1.LVMVolumeGroup, llv *v1alpha1.LVMLogicalVolume) resource.Quantity { switch llv.Spec.Type { case Thick: return lvg.Status.VGFree @@ -233,7 +233,7 @@ func subtractQuantity(currentQuantity, quantityToSubtract resource.Quantity) res return resultingQuantity } -func belongsToNode(lvg *v1alpha1.LvmVolumeGroup, nodeName string) bool { +func belongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool { var belongs bool for _, node := range lvg.Status.Nodes { if node.Name == nodeName { @@ -244,7 +244,7 @@ func belongsToNode(lvg *v1alpha1.LvmVolumeGroup, nodeName string) bool { return belongs } -func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LvmVolumeGroup) (bool, string) { +func validateLVMLogicalVolume(sdsCache *cache.Cache, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, string) { if llv.DeletionTimestamp != nil { // as the configuration doesn't matter if we want to delete it return true, "" diff --git a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go index 486653ed..4bb0f453 100644 --- a/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go +++ b/images/agent/src/pkg/controller/lvm_logical_volume_watcher_test.go @@ -98,7 +98,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { t.Run("thick_all_good_returns_true", func(t *testing.T) { const lvgName = "test-lvg" - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, }, @@ -121,7 +121,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { ActualLVNameOnTheNode: "test-lv", Type: Thick, Size: "10M", - LvmVolumeGroupName: lvgName, + LVMVolumeGroupName: lvgName, }, } @@ -139,7 +139,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { ActualLVNameOnTheNode: lvName, Type: Thick, Size: "0M", - LvmVolumeGroupName: "some-lvg", + LVMVolumeGroupName: "some-lvg", Thin: &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: "some-lvg"}, }, } @@ -151,7 +151,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { }, }, bytes.Buffer{}) - v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{}) + v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LVMVolumeGroup{}) if assert.False(t, v) { assert.Equal(t, "Zero size for LV. Thin pool specified for Thick LV. 
", r) } @@ -163,12 +163,12 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { tpName = "test-tp" ) - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, }, - Status: v1alpha1.LvmVolumeGroupStatus{ - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{ + Status: v1alpha1.LVMVolumeGroupStatus{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolStatus{ { Name: tpName, AllocationLimit: internal.AllocationLimitDefaultValue, @@ -182,7 +182,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { ActualLVNameOnTheNode: "test-lv", Type: Thin, Size: "10M", - LvmVolumeGroupName: lvgName, + LVMVolumeGroupName: lvgName, Thin: &v1alpha1.LVMLogicalVolumeThinSpec{PoolName: tpName}, }, } @@ -199,7 +199,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { ActualLVNameOnTheNode: "", Type: Thin, Size: "0M", - LvmVolumeGroupName: "some-lvg", + LVMVolumeGroupName: "some-lvg", }, } @@ -210,7 +210,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { }, }, bytes.Buffer{}) - v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LvmVolumeGroup{}) + v, r := validateLVMLogicalVolume(sdsCache, llv, &v1alpha1.LVMVolumeGroup{}) if assert.False(t, v) { assert.Equal(t, "No LV name specified. Zero size for LV. No thin pool specified. ", r) } @@ -219,7 +219,7 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { t.Run("getThinPoolAvailableSpace", func(t *testing.T) { const tpName = "test-tp" - tp := v1alpha1.LvmVolumeGroupThinPoolStatus{ + tp := v1alpha1.LVMVolumeGroupThinPoolStatus{ Name: tpName, ActualSize: resource.MustParse("10Gi"), UsedSize: resource.MustParse("1Gi"), @@ -240,9 +240,9 @@ func TestLVMLogicaVolumeWatcher(t *testing.T) { const ( nodeName = "test_node" ) - lvg := &v1alpha1.LvmVolumeGroup{ - Status: v1alpha1.LvmVolumeGroupStatus{ - Nodes: []v1alpha1.LvmVolumeGroupNode{ + lvg := &v1alpha1.LVMVolumeGroup{ + Status: v1alpha1.LVMVolumeGroupStatus{ + Nodes: []v1alpha1.LVMVolumeGroupNode{ { Name: nodeName, }, diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover.go b/images/agent/src/pkg/controller/lvm_volume_group_discover.go index 6ae11e0c..4a0fafce 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_discover.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_discover.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" - kclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -78,7 +78,7 @@ func RunLVMVolumeGroupDiscoverController( return c, err } -func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, metrics monitoring.Metrics, log logger.Logger, cfg config.Options, sdsCache *cache.Cache) bool { +func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl client.Client, metrics monitoring.Metrics, log logger.Logger, cfg config.Options, sdsCache *cache.Cache) bool { reconcileStart := time.Now() log.Info("[RunLVMVolumeGroupDiscoverController] starts the reconciliation") @@ -92,7 +92,7 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met log.Debug("[RunLVMVolumeGroupDiscoverController] no current LVMVolumeGroups found") } - blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics) + blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, nil) if err != nil { log.Error(err, 
"[RunLVMVolumeGroupDiscoverController] unable to GetAPIBlockDevices") for _, lvg := range currentLVMVGs { @@ -109,7 +109,7 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met return false } - filteredLVGs := filterLVGsByNode(ctx, cl, log, currentLVMVGs, blockDevices, cfg.NodeName) + filteredLVGs := filterLVGsByNode(currentLVMVGs, cfg.NodeName) log.Debug("[RunLVMVolumeGroupDiscoverController] tries to get LVMVolumeGroup candidates") candidates, err := GetLVMVolumeGroupCandidates(log, sdsCache, blockDevices, cfg.NodeName) @@ -144,7 +144,7 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] lvg: %+v", lvg)) if !hasLVMVolumeGroupDiff(log, lvg, candidate) { - log.Debug(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] no data to update for LvmVolumeGroup, name: "%s"`, lvg.Name)) + log.Debug(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] no data to update for LVMVolumeGroup, name: "%s"`, lvg.Name)) err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) @@ -153,18 +153,18 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met continue } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LvmVolumeGroup %s should be updated", lvg.Name)) + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s should be updated", lvg.Name)) if err = UpdateLVMVolumeGroupByCandidate(ctx, cl, metrics, log, &lvg, candidate); err != nil { - log.Error(err, fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] unable to update LvmVolumeGroup, name: "%s". Requeue the request in %s`, + log.Error(err, fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] unable to update LVMVolumeGroup, name: "%s". Requeue the request in %s`, lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) shouldRequeue = true continue } - log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LvmVolumeGroup, name: "%s"`, lvg.Name)) + log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LVMVolumeGroup, name: "%s"`, lvg.Name)) } else { log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", lvg.Name)) - lvm, err := CreateLVMVolumeGroupByCandidate(ctx, log, metrics, cl, candidate) + lvm, err := CreateLVMVolumeGroupByCandidate(ctx, log, metrics, cl, candidate, cfg.NodeName) if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to CreateLVMVolumeGroupByCandidate %s. 
Requeue the request in %s", candidate.LVMVGName, cfg.VolumeGroupScanIntervalSec.String())) shouldRequeue = true @@ -200,61 +200,10 @@ func LVMVolumeGroupDiscoverReconcile(ctx context.Context, cl kclient.Client, met return false } -func filterLVGsByNode( - ctx context.Context, - cl kclient.Client, - log logger.Logger, - lvgs map[string]v1alpha1.LvmVolumeGroup, - blockDevices map[string]v1alpha1.BlockDevice, - currentNode string, -) map[string]v1alpha1.LvmVolumeGroup { - filtered := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgs)) - blockDevicesNodes := make(map[string]string, len(blockDevices)) - - for _, bd := range blockDevices { - blockDevicesNodes[bd.Name] = bd.Status.NodeName - } - +func filterLVGsByNode(lvgs map[string]v1alpha1.LVMVolumeGroup, currentNode string) map[string]v1alpha1.LVMVolumeGroup { + filtered := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgs)) for _, lvg := range lvgs { - switch lvg.Spec.Type { - case Local: - currentNodeDevices := 0 - for _, bdName := range lvg.Spec.BlockDeviceNames { - if blockDevicesNodes[bdName] == currentNode { - currentNodeDevices++ - } - } - - // If we did not add every block device of local VG, that means a mistake, and we turn the resource's health to Nonoperational. - if currentNodeDevices > 0 && currentNodeDevices < len(lvg.Spec.BlockDeviceNames) { - if err := updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, internal.TypeVGConfigurationApplied, "InvalidBlockDevices", "there are block devices from different nodes for local volume group"); err != nil { - log.Error(err, `[filterLVGsByNode] unable to update resource, name: "%s"`, lvg.Name) - continue - } - } - - // If we did not find any block device for our node, we skip the resource. - if currentNodeDevices == 0 { - continue - } - - // Otherwise, we add the resource to the filtered ones. - filtered[lvg.Spec.ActualVGNameOnTheNode] = lvg - case Shared: - if len(lvg.Spec.BlockDeviceNames) != 1 { - if err := updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, internal.TypeVGConfigurationApplied, "InvalidBlockDevices", "there are more than one block devices for shared volume group"); err != nil { - log.Error(err, `[filterLVGsByNode] unable to update resource, name: "%s"`, lvg.Name) - continue - } - } - - // If the only one block devices does not belong to our node, we skip the resource. - singleBD := lvg.Spec.BlockDeviceNames[0] - if blockDevicesNodes[singleBD] != currentNode { - continue - } - - // Otherwise, we add the resource to the filtered ones. 
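	// With the owning node now recorded in Spec.Local.NodeName, filtering no longer needs to
	// resolve BlockDevice resources at all: an LVMVolumeGroup belongs to this agent whenever
	// its Local.NodeName matches the current node, as the simplified check below shows.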
+ if lvg.Spec.Local.NodeName == currentNode { filtered[lvg.Spec.ActualVGNameOnTheNode] = lvg } } @@ -262,7 +211,7 @@ func filterLVGsByNode( return filtered } -func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LvmVolumeGroup, candidate internal.LVMVolumeGroupCandidate) bool { +func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LVMVolumeGroup, candidate internal.LVMVolumeGroupCandidate) bool { convertedStatusPools, err := convertStatusThinPools(lvg, candidate.StatusThinPools) if err != nil { log.Error(err, fmt.Sprintf("[hasLVMVolumeGroupDiff] unable to identify candidate difference for the LVMVolumeGroup %s", lvg.Name)) @@ -288,7 +237,7 @@ func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LvmVolumeGroup, candi hasStatusNodesDiff(log, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes) } -func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LvmVolumeGroupNode) bool { +func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LVMVolumeGroupNode) bool { if len(first) != len(second) { return true } @@ -318,7 +267,7 @@ func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LvmVolumeGro return false } -func hasStatusPoolDiff(first, second []v1alpha1.LvmVolumeGroupThinPoolStatus) bool { +func hasStatusPoolDiff(first, second []v1alpha1.LVMVolumeGroupThinPoolStatus) bool { if len(first) != len(second) { return true } @@ -341,10 +290,10 @@ func hasStatusPoolDiff(first, second []v1alpha1.LvmVolumeGroupThinPoolStatus) bo // ReconcileUnhealthyLVMVolumeGroups turns LVMVolumeGroup resources without VG or ThinPools to NotReady. func ReconcileUnhealthyLVMVolumeGroups( ctx context.Context, - cl kclient.Client, + cl client.Client, log logger.Logger, candidates []internal.LVMVolumeGroupCandidate, - lvgs map[string]v1alpha1.LvmVolumeGroup, + lvgs map[string]v1alpha1.LVMVolumeGroup, ) ([]internal.LVMVolumeGroupCandidate, error) { candidateMap := make(map[string]internal.LVMVolumeGroupCandidate, len(candidates)) for _, candidate := range candidates { @@ -369,16 +318,17 @@ func ReconcileUnhealthyLVMVolumeGroups( } // take thin-pools from status instead of spec to prevent miss never-created ones - for _, thinPool := range lvg.Status.ThinPools { - if candidateTp, exist := candidateTPs[thinPool.Name]; !exist { - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, thinPool.Name)) - messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. ", thinPool.Name)) - } else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, thinPool.ActualSize, internal.ResizeDelta) && - candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < thinPool.ActualSize.Value() { + for i, statusTp := range lvg.Status.ThinPools { + if candidateTp, exist := candidateTPs[statusTp.Name]; !exist { + log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, statusTp.Name)) + messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. 
", statusTp.Name)) + lvg.Status.ThinPools[i].Ready = false + } else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, statusTp.ActualSize, internal.ResizeDelta) && + candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < statusTp.ActualSize.Value() { // that means thin-pool is not 100%VG space // use candidate VGSize as lvg.Status.VGSize might not be updated yet - log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String())) - messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", thinPool.Name, candidateTp.ActualSize.String(), thinPool.ActualSize.String())) + log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) + messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) } } } @@ -807,27 +757,29 @@ func CreateLVMVolumeGroupByCandidate( ctx context.Context, log logger.Logger, metrics monitoring.Metrics, - kc kclient.Client, + cl client.Client, candidate internal.LVMVolumeGroupCandidate, -) (*v1alpha1.LvmVolumeGroup, error) { - thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, candidate.StatusThinPools) + nodeName string, +) (*v1alpha1.LVMVolumeGroup, error) { + thinPools, err := convertStatusThinPools(v1alpha1.LVMVolumeGroup{}, candidate.StatusThinPools) if err != nil { return nil, err } - lvmVolumeGroup := &v1alpha1.LvmVolumeGroup{ + lvmVolumeGroup := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: candidate.LVMVGName, OwnerReferences: []metav1.OwnerReference{}, Finalizers: candidate.Finalizers, }, - Spec: v1alpha1.LvmVolumeGroupSpec{ + Spec: v1alpha1.LVMVolumeGroupSpec{ ActualVGNameOnTheNode: candidate.ActualVGNameOnTheNode, - BlockDeviceNames: candidate.BlockDevicesNames, + BlockDeviceSelector: configureBlockDeviceSelector(candidate), ThinPools: convertSpecThinPools(candidate.SpecThinPools), Type: candidate.Type, + Local: v1alpha1.LVMVolumeGroupLocalSpec{NodeName: nodeName}, }, - Status: v1alpha1.LvmVolumeGroupStatus{ + Status: v1alpha1.LVMVolumeGroupStatus{ AllocatedSize: candidate.AllocatedSize, Nodes: convertLVMVGNodes(candidate.Nodes), ThinPools: thinPools, @@ -848,7 +800,7 @@ func CreateLVMVolumeGroupByCandidate( } start := time.Now() - err = kc.Create(ctx, lvmVolumeGroup) + err = cl.Create(ctx, lvmVolumeGroup) metrics.APIMethodsDuration(LVMVolumeGroupDiscoverCtrlName, "create").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "create").Inc() if err != nil { @@ -861,10 +813,10 @@ func CreateLVMVolumeGroupByCandidate( func UpdateLVMVolumeGroupByCandidate( ctx context.Context, - cl kclient.Client, + cl client.Client, metrics monitoring.Metrics, log logger.Logger, - lvg *v1alpha1.LvmVolumeGroup, + lvg *v1alpha1.LVMVolumeGroup, candidate internal.LVMVolumeGroupCandidate, ) error { // Check if VG has some problems @@ -879,7 +831,7 @@ func UpdateLVMVolumeGroupByCandidate( // The resource.Status.Nodes can not be just re-written, it needs to be updated directly by a node. // We take all current resources nodes and convert them to map for better performance further. 
- resourceNodes := make(map[string][]v1alpha1.LvmVolumeGroupDevice, len(lvg.Status.Nodes)) + resourceNodes := make(map[string][]v1alpha1.LVMVolumeGroupDevice, len(lvg.Status.Nodes)) for _, node := range lvg.Status.Nodes { resourceNodes[node.Name] = node.Devices } @@ -927,11 +879,23 @@ func UpdateLVMVolumeGroupByCandidate( return err } -func convertLVMVGNodes(nodes map[string][]internal.LVMVGDevice) []v1alpha1.LvmVolumeGroupNode { - lvmvgNodes := make([]v1alpha1.LvmVolumeGroupNode, 0, len(nodes)) +func configureBlockDeviceSelector(candidate internal.LVMVolumeGroupCandidate) *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: internal.MetadataNameLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: candidate.BlockDevicesNames, + }, + }, + } +} + +func convertLVMVGNodes(nodes map[string][]internal.LVMVGDevice) []v1alpha1.LVMVolumeGroupNode { + lvmvgNodes := make([]v1alpha1.LVMVolumeGroupNode, 0, len(nodes)) for nodeName, nodeDevices := range nodes { - lvmvgNodes = append(lvmvgNodes, v1alpha1.LvmVolumeGroupNode{ + lvmvgNodes = append(lvmvgNodes, v1alpha1.LVMVolumeGroupNode{ Devices: convertLVMVGDevices(nodeDevices), Name: nodeName, }) @@ -940,11 +904,11 @@ func convertLVMVGNodes(nodes map[string][]internal.LVMVGDevice) []v1alpha1.LvmVo return lvmvgNodes } -func convertLVMVGDevices(devices []internal.LVMVGDevice) []v1alpha1.LvmVolumeGroupDevice { - convertedDevices := make([]v1alpha1.LvmVolumeGroupDevice, 0, len(devices)) +func convertLVMVGDevices(devices []internal.LVMVGDevice) []v1alpha1.LVMVolumeGroupDevice { + convertedDevices := make([]v1alpha1.LVMVolumeGroupDevice, 0, len(devices)) for _, dev := range devices { - convertedDevices = append(convertedDevices, v1alpha1.LvmVolumeGroupDevice{ + convertedDevices = append(convertedDevices, v1alpha1.LVMVolumeGroupDevice{ BlockDevice: dev.BlockDevice, DevSize: dev.DevSize, PVSize: dev.PVSize, @@ -956,10 +920,10 @@ func convertLVMVGDevices(devices []internal.LVMVGDevice) []v1alpha1.LvmVolumeGro return convertedDevices } -func convertSpecThinPools(thinPools map[string]resource.Quantity) []v1alpha1.LvmVolumeGroupThinPoolSpec { - result := make([]v1alpha1.LvmVolumeGroupThinPoolSpec, 0, len(thinPools)) +func convertSpecThinPools(thinPools map[string]resource.Quantity) []v1alpha1.LVMVolumeGroupThinPoolSpec { + result := make([]v1alpha1.LVMVolumeGroupThinPoolSpec, 0, len(thinPools)) for name, size := range thinPools { - result = append(result, v1alpha1.LvmVolumeGroupThinPoolSpec{ + result = append(result, v1alpha1.LVMVolumeGroupThinPoolSpec{ Name: name, AllocationLimit: "150%", Size: size.String(), @@ -969,13 +933,13 @@ func convertSpecThinPools(thinPools map[string]resource.Quantity) []v1alpha1.Lvm return result } -func convertStatusThinPools(lvg v1alpha1.LvmVolumeGroup, thinPools []internal.LVMVGStatusThinPool) ([]v1alpha1.LvmVolumeGroupThinPoolStatus, error) { +func convertStatusThinPools(lvg v1alpha1.LVMVolumeGroup, thinPools []internal.LVMVGStatusThinPool) ([]v1alpha1.LVMVolumeGroupThinPoolStatus, error) { tpLimits := make(map[string]string, len(lvg.Spec.ThinPools)) for _, tp := range lvg.Spec.ThinPools { tpLimits[tp.Name] = tp.AllocationLimit } - result := make([]v1alpha1.LvmVolumeGroupThinPoolStatus, 0, len(thinPools)) + result := make([]v1alpha1.LVMVolumeGroupThinPoolStatus, 0, len(thinPools)) for _, tp := range thinPools { limit := tpLimits[tp.Name] if len(limit) == 0 { @@ -987,7 +951,7 @@ func convertStatusThinPools(lvg v1alpha1.LvmVolumeGroup, thinPools []internal.LV 
return nil, err } - result = append(result, v1alpha1.LvmVolumeGroupThinPoolStatus{ + result = append(result, v1alpha1.LVMVolumeGroupThinPoolStatus{ Name: tp.Name, ActualSize: tp.ActualSize, AllocationLimit: limit, @@ -1028,8 +992,8 @@ func generateLVMVGName() string { return "vg-" + string(uuid.NewUUID()) } -func GetAPILVMVolumeGroups(ctx context.Context, kc kclient.Client, metrics monitoring.Metrics) (map[string]v1alpha1.LvmVolumeGroup, error) { - lvgList := &v1alpha1.LvmVolumeGroupList{} +func GetAPILVMVolumeGroups(ctx context.Context, kc client.Client, metrics monitoring.Metrics) (map[string]v1alpha1.LVMVolumeGroup, error) { + lvgList := &v1alpha1.LVMVolumeGroupList{} start := time.Now() err := kc.List(ctx, lvgList) @@ -1037,10 +1001,10 @@ func GetAPILVMVolumeGroups(ctx context.Context, kc kclient.Client, metrics monit metrics.APIMethodsExecutionCount(LVMVolumeGroupDiscoverCtrlName, "list").Inc() if err != nil { metrics.APIMethodsErrors(LVMVolumeGroupDiscoverCtrlName, "list").Inc() - return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LvmVolumeGroups, err: %w", err) + return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LVMVolumeGroups, err: %w", err) } - lvgs := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgList.Items)) + lvgs := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgList.Items)) for _, lvg := range lvgList.Items { lvgs[lvg.Name] = lvg } diff --git a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go index 32d87d4e..07e419a4 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_discover_test.go @@ -427,21 +427,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { Health = internal.LVMVGHealthOperational Message = "No problems detected" VGUUID = "test_uuid" + NodeName = "test-node" ) - size10G, err := resource.ParseQuantity("10G") - if err != nil { - t.Error(err) - } - size1G, err := resource.ParseQuantity("1G") - if err != nil { - t.Error(err) - } + size10G := resource.MustParse("10G") + size1G := resource.MustParse("1G") var ( - cl = NewFakeClient() - ctx = context.Background() - testLogger = logger.Logger{} testMetrics = monitoring.GetMetrics("") blockDevicesNames = []string{"first", "second"} specThinPools = map[string]resource.Quantity{"first": size10G} @@ -480,23 +472,24 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { Nodes: nodes, } - thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, statusThinPools) + thinPools, err := convertStatusThinPools(v1alpha1.LVMVolumeGroup{}, statusThinPools) if err != nil { t.Error(err) } - expected := v1alpha1.LvmVolumeGroup{ + expected := v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, ResourceVersion: "1", OwnerReferences: []metav1.OwnerReference{}, }, - Spec: v1alpha1.LvmVolumeGroupSpec{ + Spec: v1alpha1.LVMVolumeGroupSpec{ ActualVGNameOnTheNode: ActualVGNameOnTheNode, - BlockDeviceNames: blockDevicesNames, ThinPools: convertSpecThinPools(specThinPools), Type: Type, + Local: v1alpha1.LVMVolumeGroupLocalSpec{NodeName: NodeName}, + BlockDeviceSelector: configureBlockDeviceSelector(candidate), }, - Status: v1alpha1.LvmVolumeGroupStatus{ + Status: v1alpha1.LVMVolumeGroupStatus{ AllocatedSize: size10G, Nodes: convertLVMVGNodes(nodes), ThinPools: thinPools, @@ -505,7 +498,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { }, } - created, err := CreateLVMVolumeGroupByCandidate(ctx, testLogger, testMetrics, cl, 
candidate) + created, err := CreateLVMVolumeGroupByCandidate(ctx, log, testMetrics, cl, candidate, NodeName) if assert.NoError(t, err) { assert.Equal(t, &expected, created) } @@ -513,10 +506,10 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { t.Run("GetLVMVolumeGroup", func(t *testing.T) { const ( - LVMVGName = "test_lvm" + LVMVGName = "test_lvm-1" ) - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, }, @@ -547,7 +540,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { metrics := monitoring.GetMetrics("test-node") - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, }, @@ -566,19 +559,22 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, "test-node") if assert.NoError(t, err) { actual, err = GetAPILVMVolumeGroups(ctx, cl, metrics) - assert.NoError(t, err) - assert.Equal(t, 0, len(actual)) + if err != nil { + t.Error(err) + } + _, ok := actual[LVMVGName] + assert.False(t, ok) } }) t.Run("UpdateLVMVolumeGroup", func(t *testing.T) { const ( - LVMVGName = "test_lvm" + LVMVGName = "test_lvm_x" ) metrics := monitoring.GetMetrics("test-node") - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: LVMVGName, }, @@ -612,66 +608,49 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { t.Run("filterResourcesByNode_returns_current_node_resources", func(t *testing.T) { var ( - ctx = context.Background() - cl = NewFakeClient() - testLogger = logger.Logger{} currentNode = "test_node" vgName = "test_vg" - firstBDName = "first_device" - secondBDName = "second_device" firstLVName = "first_lv" secondLVName = "second_lv" - blockDevices = map[string]v1alpha1.BlockDevice{ - firstBDName: { - ObjectMeta: metav1.ObjectMeta{ - Name: firstBDName, - }, - Status: v1alpha1.BlockDeviceStatus{ - NodeName: currentNode, - }, - }, - secondBDName: { - ObjectMeta: metav1.ObjectMeta{ - Name: secondBDName, - }, - Status: v1alpha1.BlockDeviceStatus{ - NodeName: "another_node", - }, - }, - } - lvs = map[string]v1alpha1.LvmVolumeGroup{ + lvs = map[string]v1alpha1.LVMVolumeGroup{ firstLVName: { ObjectMeta: metav1.ObjectMeta{Name: firstLVName}, - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBDName}, + Spec: v1alpha1.LVMVolumeGroupSpec{ Type: Local, ActualVGNameOnTheNode: vgName, + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: "other-node", + }, }, }, secondLVName: { ObjectMeta: metav1.ObjectMeta{Name: secondLVName}, - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{secondBDName}, + Spec: v1alpha1.LVMVolumeGroupSpec{ Type: Local, ActualVGNameOnTheNode: vgName, + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: currentNode, + }, }, }, } ) - expected := map[string]v1alpha1.LvmVolumeGroup{ + expected := map[string]v1alpha1.LVMVolumeGroup{ vgName: { - ObjectMeta: metav1.ObjectMeta{Name: firstLVName}, - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBDName}, + ObjectMeta: metav1.ObjectMeta{Name: secondLVName}, + Spec: v1alpha1.LVMVolumeGroupSpec{ Type: Local, ActualVGNameOnTheNode: vgName, + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: currentNode, + }, }, }, } - actual := filterLVGsByNode(ctx, cl, testLogger, lvs, blockDevices, currentNode) + actual := filterLVGsByNode(lvs, currentNode) assert.Equal(t, expected, actual) }) @@ -680,48 +659,32 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { var ( currentNode = "test_node" 
anotherNode = "another_node" - firstBDName = "first_device" - secondBDName = "second_device" firstLVName = "first_lv" secondLVName = "second_lv" - blockDevices = map[string]v1alpha1.BlockDevice{ - firstBDName: { - ObjectMeta: metav1.ObjectMeta{ - Name: firstBDName, - }, - Status: v1alpha1.BlockDeviceStatus{ - NodeName: anotherNode, - }, - }, - secondBDName: { - ObjectMeta: metav1.ObjectMeta{ - Name: secondBDName, - }, - Status: v1alpha1.BlockDeviceStatus{ - NodeName: anotherNode, - }, - }, - } - lvs = map[string]v1alpha1.LvmVolumeGroup{ + lvs = map[string]v1alpha1.LVMVolumeGroup{ firstLVName: { ObjectMeta: metav1.ObjectMeta{Name: firstLVName}, - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBDName}, - Type: Local, + Spec: v1alpha1.LVMVolumeGroupSpec{ + Type: Local, + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: anotherNode, + }, }, }, secondLVName: { ObjectMeta: metav1.ObjectMeta{Name: secondLVName}, - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{secondBDName}, - Type: Local, + Spec: v1alpha1.LVMVolumeGroupSpec{ + Type: Local, + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: anotherNode, + }, }, }, } ) - actual := filterLVGsByNode(ctx, cl, log, lvs, blockDevices, currentNode) + actual := filterLVGsByNode(lvs, currentNode) assert.Equal(t, 0, len(actual)) }) @@ -790,17 +753,16 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { Nodes: nodes, } - thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, statusThinPools) + thinPools, err := convertStatusThinPools(v1alpha1.LVMVolumeGroup{}, statusThinPools) if err != nil { t.Error(err) } - lvmVolumeGroup := v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: blockDevicesNames, - ThinPools: convertSpecThinPools(specThinPools), - Type: specType, + lvmVolumeGroup := v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + ThinPools: convertSpecThinPools(specThinPools), + Type: specType, }, - Status: v1alpha1.LvmVolumeGroupStatus{ + Status: v1alpha1.LVMVolumeGroupStatus{ AllocatedSize: resource.MustParse("9765625Ki"), Nodes: convertLVMVGNodes(nodes), ThinPools: thinPools, @@ -859,13 +821,13 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { Nodes: nodes, } - thinPools, err := convertStatusThinPools(v1alpha1.LvmVolumeGroup{}, statusThinPools) + thinPools, err := convertStatusThinPools(v1alpha1.LVMVolumeGroup{}, statusThinPools) if err != nil { t.Error(err) } - lvmVolumeGroup := v1alpha1.LvmVolumeGroup{ - Status: v1alpha1.LvmVolumeGroupStatus{ + lvmVolumeGroup := v1alpha1.LVMVolumeGroup{ + Status: v1alpha1.LVMVolumeGroupStatus{ AllocatedSize: allocatedSize, Nodes: convertLVMVGNodes(nodes), ThinPools: thinPools, @@ -885,7 +847,7 @@ func TestLVMVolumeGroupDiscover(t *testing.T) { reason = "test-reason" message = "test-message" ) - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: lvgName, }, @@ -918,7 +880,7 @@ func NewFakeClient() client.WithWatch { s := scheme.Scheme _ = metav1.AddMetaToScheme(s) _ = v1alpha1.AddToScheme(s) - builder := fake.NewClientBuilder().WithScheme(s).WithStatusSubresource(&v1alpha1.LvmVolumeGroup{}).WithStatusSubresource(&v1alpha1.LVMLogicalVolume{}) + builder := fake.NewClientBuilder().WithScheme(s).WithStatusSubresource(&v1alpha1.LVMVolumeGroup{}).WithStatusSubresource(&v1alpha1.LVMLogicalVolume{}) cl := builder.Build() return cl diff --git a/images/agent/src/pkg/controller/lvm_volume_group_test.go b/images/agent/src/pkg/controller/lvm_volume_group_test.go deleted file 
mode 100644 index f055f60f..00000000 --- a/images/agent/src/pkg/controller/lvm_volume_group_test.go +++ /dev/null @@ -1,188 +0,0 @@ -/* -Copyright 2023 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "encoding/json" - "testing" - - "github.com/deckhouse/sds-node-configurator/api/v1alpha1" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestLvmVolumeGroupAPIObjects(t *testing.T) { - t.Run("Unmarshal_LvmVolumeGroup_json_to_struct", func(t *testing.T) { - js := `{ - "apiVersion": "storage.deckhouse.io/v1alpha1", - "kind": "LvmVolumeGroup", - "metadata": { - "name": "lvg-test-1" - }, - "spec": { - "actualVGNameOnTheNode": "testVGname", - "blockDeviceNames": [ - "test-bd", - "test-bd2" - ], - "thinPools": [ - { - "name": "test-name", - "size": "10G" - }, - { - "name": "test-name2", - "size": "1G" - } - ], - "type": "local" - }, - "status": { - "allocatedSize": "20G", - "health": "operational", - "message": "all-good", - "nodes": [ - { - "devices": [ - { - "blockDevice": "test/BD", - "devSize": "1G", - "path": "test/path1", - "pvSize": "1G", - "pvUUID": "testPV1" - }, - { - "blockDevice": "test/BD2", - "devSize": "1G", - "path": "test/path2", - "pvSize": "2G", - "pvUUID": "testPV2" - } - ], - "name": "node1" - }, - { - "devices": [ - { - "blockDevice": "test/DB3", - "devSize": "2G", - "path": "test/path3", - "pvSize": "3G", - "pvUUID": "testPV3" - } - ], - "name": "node2" - } - ], - "thinPools": [ - { - "name": "test-name", - "actualSize": "1G" - } - ], - "vgSize": "30G", - "vgUUID": "test-vg-uuid" - } -}` - - expected := v1alpha1.LvmVolumeGroup{ - TypeMeta: metav1.TypeMeta{ - Kind: "LvmVolumeGroup", - APIVersion: "storage.deckhouse.io/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "lvg-test-1", - }, - Spec: v1alpha1.LvmVolumeGroupSpec{ - Type: "local", - BlockDeviceNames: []string{"test-bd", "test-bd2"}, - ActualVGNameOnTheNode: "testVGname", - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{ - { - Name: "test-name", - Size: "10G", - }, - { - Name: "test-name2", - Size: "1G", - }, - }, - }, - Status: v1alpha1.LvmVolumeGroupStatus{ - VGUuid: "test-vg-uuid", - VGSize: resource.MustParse("30G"), - AllocatedSize: resource.MustParse("20G"), - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{ - { - Name: "test-name", - ActualSize: *convertSize("1G", t), - }, - }, - Nodes: []v1alpha1.LvmVolumeGroupNode{ - { - Name: "node1", - Devices: []v1alpha1.LvmVolumeGroupDevice{ - { - Path: "test/path1", - PVSize: resource.MustParse("1G"), - DevSize: *convertSize("1G", t), - PVUuid: "testPV1", - BlockDevice: "test/BD", - }, - { - Path: "test/path2", - PVSize: resource.MustParse("2G"), - DevSize: *convertSize("1G", t), - PVUuid: "testPV2", - BlockDevice: "test/BD2", - }, - }, - }, - { - Name: "node2", - Devices: []v1alpha1.LvmVolumeGroupDevice{ - { - Path: "test/path3", - PVSize: resource.MustParse("3G"), - DevSize: *convertSize("2G", t), - PVUuid: "testPV3", - BlockDevice: "test/DB3", 
- }, - }, - }, - }, - }, - } - - var actual v1alpha1.LvmVolumeGroup - err := json.Unmarshal([]byte(js), &actual) - - if assert.NoError(t, err) { - assert.Equal(t, expected, actual) - } - }) -} - -func convertSize(size string, t *testing.T) *resource.Quantity { - sizeQTB, err := resource.ParseQuantity(size) - if err != nil { - t.Error(err) - } - - return &sizeQTB -} diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go index 380c2e50..fe13d1c5 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go @@ -60,7 +60,7 @@ func RunLVMVolumeGroupWatcherController( Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler starts to reconcile the request %s", request.NamespacedName.String())) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} err := cl.Get(ctx, request.NamespacedName, lvg) if err != nil { if errors2.IsNotFound(err) { @@ -77,6 +77,13 @@ func RunLVMVolumeGroupWatcherController( return reconcile.Result{}, nil } + belongs := checkIfLVGBelongsToNode(lvg, cfg.NodeName) + if !belongs { + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s does not belong to the node %s", lvg.Name, cfg.NodeName)) + return reconcile.Result{}, nil + } + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s belongs to the node %s. Starts to reconcile", lvg.Name, cfg.NodeName)) + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add the finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) added, err := addLVGFinalizerIfNotExist(ctx, cl, lvg) if err != nil { @@ -90,7 +97,19 @@ func RunLVMVolumeGroupWatcherController( log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add a finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) } - blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics) + // this case handles the situation when a user decides to remove LVMVolumeGroup resource without created VG + deleted, err := deleteLVGIfNeeded(ctx, cl, log, metrics, cfg, sdsCache, lvg) + if err != nil { + return reconcile.Result{}, err + } + + if deleted { + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s was deleted, stop the reconciliation", lvg.Name)) + return reconcile.Result{}, nil + } + + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to get block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels)) + blockDevices, err := GetAPIBlockDevices(ctx, cl, metrics, lvg.Spec.BlockDeviceSelector) if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get BlockDevices. Retry in %s", cfg.BlockDeviceScanIntervalSec.String())) err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error())) @@ -98,16 +117,14 @@ func RunLVMVolumeGroupWatcherController( log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. 
Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.BlockDeviceScanIntervalSec.String())) } - return reconcile.Result{ - RequeueAfter: cfg.BlockDeviceScanIntervalSec, - }, nil + return reconcile.Result{RequeueAfter: cfg.BlockDeviceScanIntervalSec}, nil } - log.Debug("[RunLVMVolumeGroupController] successfully got BlockDevices") + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully got block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector.MatchLabels)) valid, reason := validateSpecBlockDevices(lvg, blockDevices) if !valid { log.Warning(fmt.Sprintf("[RunLVMVolumeGroupController] validation failed for the LVMVolumeGroup %s, reason: %s", lvg.Name, reason)) - err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "InvalidSpec", reason) + err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) if err != nil { log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, cfg.VolumeGroupScanIntervalSec.String())) return reconcile.Result{}, err @@ -117,13 +134,6 @@ func RunLVMVolumeGroupWatcherController( } log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully validated BlockDevices of the LVMVolumeGroup %s", lvg.Name)) - belongs := checkIfLVGBelongsToNode(lvg, blockDevices, cfg.NodeName) - if !belongs { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s does not belong to the node %s", lvg.Name, cfg.NodeName)) - return reconcile.Result{}, nil - } - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s belongs to the node %s. Starts to reconcile", lvg.Name, cfg.NodeName)) - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, cfg.NodeName)) added, err = addLVGLabelIfNeeded(ctx, cl, log, lvg, LVGMetadateNameLabelKey, lvg.Name) if err != nil { @@ -137,31 +147,19 @@ func RunLVMVolumeGroupWatcherController( log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) } - // this case handles the situation when a user decides to remove LVMVolumeGroup resource without created VG - vgs, _ := sdsCache.GetVGs() - if !checkIfVGExist(lvg.Spec.ActualVGNameOnTheNode, vgs) && lvg.DeletionTimestamp != nil { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] VG %s was not yet created for the LVMVolumeGroup %s and the resource is marked as deleting. 
Delete the resource", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) - removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - return reconcile.Result{}, err - } - - if removed { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } else { - log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) - } - - err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, cfg.NodeName) - if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name)) - return reconcile.Result{}, err - } - log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name)) + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, cfg.NodeName)) + added, err = addLVGLabelIfNeeded(ctx, cl, log, lvg, LVGMetadateNameLabelKey, lvg.Name) + if err != nil { + log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) + return reconcile.Result{}, err + } - return reconcile.Result{}, nil + if added { + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully added label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) + } else { + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) } + // We do this after BlockDevices validation and node belonging check to prevent multiple updates by all agents pods bds, _ := sdsCache.GetDevices() if len(bds) == 0 { @@ -205,8 +203,8 @@ func RunLVMVolumeGroupWatcherController( return nil, err } - err = c.Watch(source.Kind(mgrCache, &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + err = c.Watch(source.Kind(mgrCache, &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ + CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc got a create event for the LVMVolumeGroup, name: %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} @@ -214,7 +212,7 @@ func RunLVMVolumeGroupWatcherController( log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) }, - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { 
log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] UpdateFunc got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) if !shouldLVGWatcherReconcileUpdateEvent(log, e.ObjectOld, e.ObjectNew) { log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] update event for the LVMVolumeGroup %s should not be reconciled as not target changed were made", e.ObjectNew.Name)) @@ -242,7 +240,7 @@ func runEventReconcile( metrics monitoring.Metrics, sdsCache *cache.Cache, cfg config.Options, - lvg *v1alpha1.LvmVolumeGroup, + lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, error) { recType := identifyLVGReconcileFunc(lvg, sdsCache) @@ -263,7 +261,7 @@ func runEventReconcile( return false, nil } -func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, cfg config.Options, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, cfg config.Options, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status false to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) @@ -345,7 +343,7 @@ func reconcileLVGDeleteFunc(ctx context.Context, cl client.Client, log logger.Lo return false, nil } -func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, error) { +func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name)) @@ -445,7 +443,7 @@ func reconcileLVGUpdateFunc(ctx context.Context, cl client.Client, log logger.Lo return false, nil } -func reconcileLVGCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, error) { +func reconcileLVGCreateFunc(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) // this check prevents the LVMVolumeGroup resource's infinity updating after a retry @@ -495,7 +493,7 @@ func reconcileLVGCreateFunc(ctx context.Context, cl client.Client, log logger.Lo log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] the LVMVolumeGroup %s has thin-pools. 
Tries to create them", lvg.Name)) for _, tp := range lvg.Spec.ThinPools { - vgSize := countVGSizeByBlockDevices(lvg, blockDevices) + vgSize := countVGSizeByBlockDevices(blockDevices) tpRequestedSize, err := getRequestedSizeFromString(tp.Size, vgSize) if err != nil { log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to get thin-pool %s requested size of the LVMVolumeGroup %s", tp.Name, lvg.Name)) diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go index d5c8288f..eec25171 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_func.go @@ -31,6 +31,7 @@ import ( "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" + "agent/config" "agent/internal" "agent/pkg/cache" "agent/pkg/logger" @@ -38,7 +39,7 @@ import ( "agent/pkg/utils" ) -func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, currentNode string) error { +func DeleteLVMVolumeGroup(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, currentNode string) error { log.Debug(fmt.Sprintf(`[DeleteLVMVolumeGroup] Node "%s" does not belong to VG "%s". It will be removed from LVM resource, name "%s"'`, currentNode, lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) for i, node := range lvg.Status.Nodes { if node.Name == currentNode { @@ -74,7 +75,7 @@ func checkIfVGExist(vgName string, vgs []internal.VGData) bool { return false } -func shouldUpdateLVGLabels(log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, labelKey, labelValue string) bool { +func shouldUpdateLVGLabels(log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, labelKey, labelValue string) bool { if lvg.Labels == nil { log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has no labels.", lvg.Name)) return true @@ -94,7 +95,7 @@ func shouldUpdateLVGLabels(log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, labe return false } -func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1alpha1.LvmVolumeGroup) bool { +func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1alpha1.LVMVolumeGroup) bool { if newLVG.DeletionTimestamp != nil { log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s has deletionTimestamp", newLVG.Name)) return true @@ -131,11 +132,11 @@ func shouldLVGWatcherReconcileUpdateEvent(log logger.Logger, oldLVG, newLVG *v1a return false } -func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LvmVolumeGroup) bool { +func shouldReconcileLVGByDeleteFunc(lvg *v1alpha1.LVMVolumeGroup) bool { return lvg.DeletionTimestamp != nil } -func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, status v1.ConditionStatus, conType, reason, message string) error { +func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, status v1.ConditionStatus, conType, reason, message string) error { exist := false index := 0 newCondition := v1.Condition{ @@ -191,7 +192,7 @@ func checkIfEqualConditions(first, second v1.Condition) bool { first.ObservedGeneration == second.ObservedGeneration } -func addLVGFinalizerIfNotExist(ctx context.Context, cl client.Client, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func 
addLVGFinalizerIfNotExist(ctx context.Context, cl client.Client, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { if slices.Contains(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) { return false, nil } @@ -205,7 +206,7 @@ func addLVGFinalizerIfNotExist(ctx context.Context, cl client.Client, lvg *v1alp return true, nil } -func syncThinPoolsAllocationLimit(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup) error { +func syncThinPoolsAllocationLimit(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup) error { updated := false tpSpecLimits := make(map[string]string, len(lvg.Spec.ThinPools)) @@ -252,45 +253,88 @@ func syncThinPoolsAllocationLimit(ctx context.Context, cl client.Client, log log return nil } -func validateSpecBlockDevices(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { - reason := strings.Builder{} - - targetNodeName := "" - for _, bdName := range lvg.Spec.BlockDeviceNames { - bd, exist := blockDevices[bdName] +func validateSpecBlockDevices(lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { + if len(blockDevices) == 0 { + return false, "none of specified BlockDevices were found" + } - if !exist { - reason.WriteString(fmt.Sprintf("the BlockDevice %s does not exist", bdName)) - continue - } + for _, me := range lvg.Spec.BlockDeviceSelector.MatchExpressions { + if me.Key == internal.MetadataNameLabelKey { + if len(me.Values) != len(blockDevices) { + missedBds := make([]string, 0, len(me.Values)) + for _, bdName := range me.Values { + if _, exist := blockDevices[bdName]; !exist { + missedBds = append(missedBds, bdName) + } + } - if targetNodeName == "" { - targetNodeName = bd.Status.NodeName + return false, fmt.Sprintf("unable to find specified BlockDevices: %s", strings.Join(missedBds, ",")) + } } + } - if bd.Status.NodeName != targetNodeName { - reason.WriteString(fmt.Sprintf("the BlockDevice %s has the node %s though the target node %s", bd.Name, bd.Status.NodeName, targetNodeName)) + bdFromOtherNode := make([]string, 0, len(blockDevices)) + for _, bd := range blockDevices { + if bd.Status.NodeName != lvg.Spec.Local.NodeName { + bdFromOtherNode = append(bdFromOtherNode, bd.Name) } } - if reason.Len() != 0 { - return false, reason.String() + if len(bdFromOtherNode) != 0 { + return false, fmt.Sprintf("block devices %s have different node names from LVMVolumeGroup Local.NodeName", strings.Join(bdFromOtherNode, ",")) } return true, "" } -func checkIfLVGBelongsToNode(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, nodeName string) bool { - bd := blockDevices[lvg.Spec.BlockDeviceNames[0]] - return bd.Status.NodeName == nodeName +func deleteLVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, cfg config.Options, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { + if lvg.DeletionTimestamp == nil { + return false, nil + } + + vgs, _ := sdsCache.GetVGs() + if !checkIfVGExist(lvg.Spec.ActualVGNameOnTheNode, vgs) { + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] VG %s was not yet created for the LVMVolumeGroup %s and the resource is marked as deleting. 
Delete the resource", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) + if err != nil { + log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + return false, err + } + + if removed { + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + } else { + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + } + + err = DeleteLVMVolumeGroup(ctx, cl, log, metrics, lvg, cfg.NodeName) + if err != nil { + log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name)) + return false, err + } + log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name)) + return true, nil + } + return false, nil } -func extractPathsFromBlockDevices(blockDevicesNames []string, blockDevices map[string]v1alpha1.BlockDevice) []string { - paths := make([]string, 0, len(blockDevicesNames)) +func checkIfLVGBelongsToNode(lvg *v1alpha1.LVMVolumeGroup, nodeName string) bool { + return lvg.Spec.Local.NodeName == nodeName +} - for _, bdName := range blockDevicesNames { - bd := blockDevices[bdName] - paths = append(paths, bd.Status.Path) +func extractPathsFromBlockDevices(targetDevices []string, blockDevices map[string]v1alpha1.BlockDevice) []string { + var paths []string + if len(targetDevices) > 0 { + paths = make([]string, 0, len(targetDevices)) + for _, bdName := range targetDevices { + bd := blockDevices[bdName] + paths = append(paths, bd.Status.Path) + } + } else { + paths = make([]string, 0, len(blockDevices)) + for _, bd := range blockDevices { + paths = append(paths, bd.Status.Path) + } } return paths @@ -313,27 +357,25 @@ func getRequestedSizeFromString(size string, targetSpace resource.Quantity) (res return resource.Quantity{}, nil } -func countVGSizeByBlockDevices(lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) resource.Quantity { +func countVGSizeByBlockDevices(blockDevices map[string]v1alpha1.BlockDevice) resource.Quantity { var totalVGSize int64 - for _, bdName := range lvg.Spec.BlockDeviceNames { - bd := blockDevices[bdName] + for _, bd := range blockDevices { totalVGSize += bd.Status.Size.Value() } return *resource.NewQuantity(totalVGSize, resource.BinarySI) } -func validateLVGForCreateFunc(log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { +func validateLVGForCreateFunc(log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { reason := strings.Builder{} log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] check if every selected BlockDevice of the LVMVolumeGroup %s is consumable", lvg.Name)) // totalVGSize needs to count if there is enough space for requested thin-pools - totalVGSize := countVGSizeByBlockDevices(lvg, blockDevices) - for _, bdName := range lvg.Spec.BlockDeviceNames { - bd := blockDevices[bdName] + totalVGSize := countVGSizeByBlockDevices(blockDevices) + for _, bd := range blockDevices { if !bd.Status.Consumable { - log.Warning(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice %s is not consumable", bdName)) - 
log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice name: %s, status: %+v", bdName, bd.Status)) - reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. ", bdName)) + log.Warning(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice %s is not consumable", bd.Name)) + log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice name: %s, status: %+v", bd.Name, bd.Status)) + reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. ", bd.Name)) } } @@ -384,7 +426,7 @@ func validateLVGForCreateFunc(log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, b return true, "" } -func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { +func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) (bool, string) { reason := strings.Builder{} pvs, _ := sdsCache.GetPVs() log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] check if every new BlockDevice of the LVMVolumeGroup %s is comsumable", lvg.Name)) @@ -398,15 +440,14 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a // Check if added BlockDevices are consumable // additionBlockDeviceSpace value is needed to count if VG will have enough space for thin-pools var additionBlockDeviceSpace int64 - for _, bdName := range lvg.Spec.BlockDeviceNames { - specBd := blockDevices[bdName] - if _, found := actualPVPaths[specBd.Status.Path]; !found { - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] unable to find the PV %s for BlockDevice %s. Check if the BlockDevice is already used", specBd.Status.Path, specBd.Name)) + for _, bd := range blockDevices { + if _, found := actualPVPaths[bd.Status.Path]; !found { + log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] unable to find the PV %s for BlockDevice %s. Check if the BlockDevice is already used", bd.Status.Path, bd.Name)) for _, n := range lvg.Status.Nodes { for _, d := range n.Devices { - if d.BlockDevice == specBd.Name { - log.Warning(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s misses the PV %s. That might be because the corresponding device was removed from the node. Unable to validate BlockDevices", specBd.Name, specBd.Status.Path)) - reason.WriteString(fmt.Sprintf("BlockDevice %s misses the PV %s (that might be because the device was removed from the node). ", specBd.Name, specBd.Status.Path)) + if d.BlockDevice == bd.Name { + log.Warning(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s misses the PV %s. That might be because the corresponding device was removed from the node. Unable to validate BlockDevices", bd.Name, bd.Status.Path)) + reason.WriteString(fmt.Sprintf("BlockDevice %s misses the PV %s (that might be because the device was removed from the node). 
", bd.Name, bd.Status.Path)) } if reason.Len() == 0 { @@ -415,19 +456,19 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a } } - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] PV %s for BlockDevice %s of the LVMVolumeGroup %s is not created yet, check if the BlockDevice is consumable", specBd.Status.Path, bdName, lvg.Name)) + log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] PV %s for BlockDevice %s of the LVMVolumeGroup %s is not created yet, check if the BlockDevice is consumable", bd.Status.Path, bd.Name, lvg.Name)) if reason.Len() > 0 { log.Debug("[validateLVGForUpdateFunc] some BlockDevices misses its PVs, unable to check if they are consumable") continue } - if !blockDevices[bdName].Status.Consumable { - reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. ", bdName)) + if !bd.Status.Consumable { + reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. ", bd.Name)) continue } - log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s is consumable", bdName)) - additionBlockDeviceSpace += specBd.Status.Size.Value() + log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s is consumable", bd.Name)) + additionBlockDeviceSpace += bd.Status.Size.Value() } } @@ -519,7 +560,7 @@ func validateLVGForUpdateFunc(log logger.Logger, sdsCache *cache.Cache, lvg *v1a return true, "" } -func identifyLVGReconcileFunc(lvg *v1alpha1.LvmVolumeGroup, sdsCache *cache.Cache) reconcileType { +func identifyLVGReconcileFunc(lvg *v1alpha1.LVMVolumeGroup, sdsCache *cache.Cache) reconcileType { if shouldReconcileLVGByCreateFunc(lvg, sdsCache) { return CreateReconcile } @@ -535,7 +576,7 @@ func identifyLVGReconcileFunc(lvg *v1alpha1.LvmVolumeGroup, sdsCache *cache.Cach return "none" } -func shouldReconcileLVGByCreateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cache) bool { +func shouldReconcileLVGByCreateFunc(lvg *v1alpha1.LVMVolumeGroup, ch *cache.Cache) bool { if lvg.DeletionTimestamp != nil { return false } @@ -544,7 +585,7 @@ func shouldReconcileLVGByCreateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cach return vg == nil } -func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cache) bool { +func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LVMVolumeGroup, ch *cache.Cache) bool { if lvg.DeletionTimestamp != nil { return false } @@ -553,7 +594,7 @@ func shouldReconcileLVGByUpdateFunc(lvg *v1alpha1.LvmVolumeGroup, ch *cache.Cach return vg != nil } -func ReconcileThinPoolsIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, vg internal.VGData, lvs []internal.LVData) error { +func ReconcileThinPoolsIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData, lvs []internal.LVData) error { actualThinPools := make(map[string]internal.LVData, len(lvs)) for _, lv := range lvs { if string(lv.LVAttr[0]) == "t" { @@ -629,7 +670,7 @@ func ReconcileThinPoolsIfNeeded(ctx context.Context, cl client.Client, log logge return nil } -func ResizePVIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup) error { +func ResizePVIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup) error { if len(lvg.Status.Nodes) == 0 { log.Warning(fmt.Sprintf("[ResizePVIfNeeded] the LVMVolumeGroup %s nodes are empty. 
Wait for the next update", lvg.Name)) return nil @@ -674,7 +715,7 @@ func ResizePVIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, return nil } -func ExtendVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, vg internal.VGData, pvs []internal.PVData, blockDevices map[string]v1alpha1.BlockDevice) error { +func ExtendVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData, pvs []internal.PVData, blockDevices map[string]v1alpha1.BlockDevice) error { for _, n := range lvg.Status.Nodes { for _, d := range n.Devices { log.Trace(fmt.Sprintf("[ExtendVGIfNeeded] the LVMVolumeGroup %s status block device: %s", lvg.Name, d.BlockDevice)) @@ -686,12 +727,11 @@ func ExtendVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, pvsMap[pv.PVName] = struct{}{} } - devicesToExtend := make([]string, 0, len(lvg.Spec.BlockDeviceNames)) - for _, bdName := range lvg.Spec.BlockDeviceNames { - bd := blockDevices[bdName] + devicesToExtend := make([]string, 0, len(blockDevices)) + for _, bd := range blockDevices { if _, exist := pvsMap[bd.Status.Path]; !exist { - log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] the BlockDevice %s of LVMVolumeGroup %s Spec is not counted as used", bdName, lvg.Name)) - devicesToExtend = append(devicesToExtend, bdName) + log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] the BlockDevice %s of LVMVolumeGroup %s Spec is not counted as used", bd.Name, lvg.Name)) + devicesToExtend = append(devicesToExtend, bd.Name) } } @@ -731,7 +771,7 @@ func tryGetVG(sdsCache *cache.Cache, vgName string) (bool, internal.VGData) { return false, internal.VGData{} } -func removeLVGFinalizerIfExist(ctx context.Context, cl client.Client, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func removeLVGFinalizerIfExist(ctx context.Context, cl client.Client, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { if !slices.Contains(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) { return false, nil } @@ -763,8 +803,8 @@ func getLVForVG(ch *cache.Cache, vgName string) []string { return usedLVs } -func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, name string) (*v1alpha1.LvmVolumeGroup, error) { - obj := &v1alpha1.LvmVolumeGroup{} +func getLVMVolumeGroup(ctx context.Context, cl client.Client, metrics monitoring.Metrics, name string) (*v1alpha1.LVMVolumeGroup, error) { + obj := &v1alpha1.LVMVolumeGroup{} start := time.Now() err := cl.Get(ctx, client.ObjectKey{ Name: name, @@ -852,8 +892,8 @@ func ExtendVGComplex(metrics monitoring.Metrics, extendPVs []string, vgName stri return nil } -func CreateVGComplex(metrics monitoring.Metrics, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) error { - paths := extractPathsFromBlockDevices(lvg.Spec.BlockDeviceNames, blockDevices) +func CreateVGComplex(metrics monitoring.Metrics, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) error { + paths := extractPathsFromBlockDevices(nil, blockDevices) log.Trace(fmt.Sprintf("[CreateVGComplex] LVMVolumeGroup %s devices paths %v", lvg.Name, paths)) for _, path := range paths { @@ -901,7 +941,7 @@ func CreateVGComplex(metrics monitoring.Metrics, log logger.Logger, lvg *v1alpha return nil } -func UpdateVGTagIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, vg 
internal.VGData) (bool, error) { +func UpdateVGTagIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData) (bool, error) { found, tagName := CheckTag(vg.VGTags) if found && lvg.Name != tagName { if checkIfConditionIsTrue(lvg, internal.TypeVGConfigurationApplied) { @@ -940,7 +980,7 @@ func UpdateVGTagIfNeeded(ctx context.Context, cl client.Client, log logger.Logge return false, nil } -func ExtendThinPool(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LvmVolumeGroup, specThinPool v1alpha1.LvmVolumeGroupThinPoolSpec) error { +func ExtendThinPool(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1.LVMVolumeGroup, specThinPool v1alpha1.LVMVolumeGroupThinPoolSpec) error { volumeGroupFreeSpaceBytes := lvg.Status.VGSize.Value() - lvg.Status.AllocatedSize.Value() tpRequestedSize, err := getRequestedSizeFromString(specThinPool.Size, lvg.Status.VGSize) if err != nil { @@ -973,7 +1013,7 @@ func ExtendThinPool(log logger.Logger, metrics monitoring.Metrics, lvg *v1alpha1 return nil } -func addLVGLabelIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, labelKey, labelValue string) (bool, error) { +func addLVGLabelIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, labelKey, labelValue string) (bool, error) { if !shouldUpdateLVGLabels(log, lvg, labelKey, labelValue) { return false, nil } diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go index b1fa2bad..dfdeed20 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher_test.go @@ -58,11 +58,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - }, - Status: v1alpha1.LvmVolumeGroupStatus{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{}, + Status: v1alpha1.LVMVolumeGroupStatus{ Phase: "", Conditions: nil, ThinPoolReady: "", @@ -118,11 +116,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - }, - Status: v1alpha1.LvmVolumeGroupStatus{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{}, + Status: v1alpha1.LVMVolumeGroupStatus{ Phase: "", Conditions: nil, ThinPoolReady: "", @@ -179,10 +175,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolSpec{ { Name: "new-thin", Size: "2.5G", @@ -251,10 +246,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolSpec{ { Name: "new-thin", Size: "4G", @@ -315,10 +309,8 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := 
&v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - }, + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{}, } valid, reason := validateLVGForCreateFunc(log, lvg, bds) @@ -329,8 +321,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("without_thin_pools_returns_false", func(t *testing.T) { const ( - firstBd = "first" - secondBd = "second" + firstBd = "first" ) bds := map[string]v1alpha1.BlockDevice{ firstBd: { @@ -339,14 +330,12 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, Status: v1alpha1.BlockDeviceStatus{ Size: resource.MustParse("1G"), - Consumable: true, + Consumable: false, }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - }, + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{}, } valid, _ := validateLVGForCreateFunc(log, lvg, bds) @@ -378,10 +367,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolSpec{ { Size: "1G", }, @@ -420,10 +408,9 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolSpec{ { Size: "3G", }, @@ -439,8 +426,8 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("identifyLVGReconcileFunc", func(t *testing.T) { t.Run("returns_create", func(t *testing.T) { const vgName = "test-vg" - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ ActualVGNameOnTheNode: vgName, }, } @@ -453,8 +440,8 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("returns_update", func(t *testing.T) { const vgName = "test-vg" - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ ActualVGNameOnTheNode: vgName, }, } @@ -473,8 +460,8 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("returns_delete", func(t *testing.T) { const vgName = "test-vg" - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ ActualVGNameOnTheNode: vgName, }, } @@ -495,7 +482,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("removeLVGFinalizerIfExist", func(t *testing.T) { t.Run("not_exist_no_remove", func(t *testing.T) { - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} removed, err := removeLVGFinalizerIfExist(ctx, cl, lvg) if err != nil { @@ -507,7 +494,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("does_exist_remove", func(t *testing.T) { const lvgName = "test-lvg" - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Name = lvgName lvg.Finalizers = append(lvg.Finalizers, internal.SdsNodeConfiguratorFinalizer) @@ -529,7 +516,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } if assert.True(t, removed) { - updatedLVG := &v1alpha1.LvmVolumeGroup{} + updatedLVG 
:= &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, updatedLVG) @@ -591,15 +578,10 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, } - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{firstBd, secondBd}, - }, - } expected := resource.MustParse("2G") - actual := countVGSizeByBlockDevices(lvg, bds) + actual := countVGSizeByBlockDevices(bds) assert.Equal(t, expected.Value(), actual.Value()) }) @@ -626,36 +608,70 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }) t.Run("extractPathsFromBlockDevices", func(t *testing.T) { - const ( - firstBd = "first" - secondBd = "second" + t.Run("if_specified_returns_only_them", func(t *testing.T) { + const ( + firstBd = "first" + secondBd = "second" - firstPath = "first-path" - secondPath = "second-path" - ) - bdNames := []string{firstBd, secondBd} - bds := map[string]v1alpha1.BlockDevice{ - firstBd: { - ObjectMeta: v1.ObjectMeta{ - Name: firstBd, + firstPath = "first-path" + secondPath = "second-path" + ) + bdNames := []string{firstBd} + bds := map[string]v1alpha1.BlockDevice{ + firstBd: { + ObjectMeta: v1.ObjectMeta{ + Name: firstBd, + }, + Status: v1alpha1.BlockDeviceStatus{ + Path: firstPath, + }, }, - Status: v1alpha1.BlockDeviceStatus{ - Path: firstPath, + secondBd: { + ObjectMeta: v1.ObjectMeta{ + Name: secondBd, + }, + Status: v1alpha1.BlockDeviceStatus{ + Path: secondPath, + }, }, - }, - secondBd: { - ObjectMeta: v1.ObjectMeta{ - Name: secondBd, + } + + expected := []string{firstPath} + actual := extractPathsFromBlockDevices(bdNames, bds) + assert.ElementsMatch(t, expected, actual) + }) + + t.Run("if_nil_returns_all", func(t *testing.T) { + const ( + firstBd = "first" + secondBd = "second" + + firstPath = "first-path" + secondPath = "second-path" + ) + bds := map[string]v1alpha1.BlockDevice{ + firstBd: { + ObjectMeta: v1.ObjectMeta{ + Name: firstBd, + }, + Status: v1alpha1.BlockDeviceStatus{ + Path: firstPath, + }, }, - Status: v1alpha1.BlockDeviceStatus{ - Path: secondPath, + secondBd: { + ObjectMeta: v1.ObjectMeta{ + Name: secondBd, + }, + Status: v1alpha1.BlockDeviceStatus{ + Path: secondPath, + }, }, - }, - } + } - expected := []string{firstPath, secondPath} - actual := extractPathsFromBlockDevices(bdNames, bds) - assert.ElementsMatch(t, expected, actual) + expected := []string{firstPath, secondPath} + actual := extractPathsFromBlockDevices(nil, bds) + assert.ElementsMatch(t, expected, actual) + }) }) t.Run("validateSpecBlockDevices", func(t *testing.T) { @@ -663,10 +679,19 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { const ( nodeName = "nodeName" ) - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{ - "first", "second", + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: nodeName, + }, + BlockDeviceSelector: &v1.LabelSelector{ + MatchExpressions: []v1.LabelSelectorRequirement{ + { + Key: internal.MetadataNameLabelKey, + Operator: v1.LabelSelectorOpIn, + Values: []string{"first", "second"}, + }, + }, }, }, } @@ -697,14 +722,23 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } }) - t.Run("validation_fails_due_to_bd_does_not_exist", func(t *testing.T) { + t.Run("validation_fails_due_to_bd_has_dif_node", func(t *testing.T) { const ( nodeName = "nodeName" ) - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{ - "first", "second", + lvg := &v1alpha1.LVMVolumeGroup{ + 
Spec: v1alpha1.LVMVolumeGroupSpec{ + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: nodeName, + }, + BlockDeviceSelector: &v1.LabelSelector{ + MatchExpressions: []v1.LabelSelectorRequirement{ + { + Key: internal.MetadataNameLabelKey, + Operator: v1.LabelSelectorOpIn, + Values: []string{"first", "second"}, + }, + }, }, }, } @@ -718,20 +752,62 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { NodeName: nodeName, }, }, + "second": { + ObjectMeta: v1.ObjectMeta{ + Name: "second", + }, + Status: v1alpha1.BlockDeviceStatus{ + NodeName: "another-node", + }, + }, } valid, _ := validateSpecBlockDevices(lvg, bds) assert.False(t, valid) }) - t.Run("validation_fails_due_to_bd_has_dif_node", func(t *testing.T) { + t.Run("validation_fails_due_to_no_block_devices_were_found", func(t *testing.T) { + const ( + nodeName = "nodeName" + ) + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: nodeName, + }, + BlockDeviceSelector: &v1.LabelSelector{ + MatchExpressions: []v1.LabelSelectorRequirement{ + { + Key: internal.MetadataNameLabelKey, + Operator: v1.LabelSelectorOpIn, + Values: []string{"first", "second"}, + }, + }, + }, + }, + } + + valid, _ := validateSpecBlockDevices(lvg, nil) + assert.False(t, valid) + }) + + t.Run("validation_fails_due_to_some_blockdevice_were_not_found", func(t *testing.T) { const ( nodeName = "nodeName" ) - lvg := &v1alpha1.LvmVolumeGroup{ - Spec: v1alpha1.LvmVolumeGroupSpec{ - BlockDeviceNames: []string{ - "first", "second", + lvg := &v1alpha1.LVMVolumeGroup{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + Local: v1alpha1.LVMVolumeGroupLocalSpec{ + NodeName: nodeName, + }, + BlockDeviceSelector: &v1.LabelSelector{ + MatchExpressions: []v1.LabelSelectorRequirement{ + { + Key: internal.MetadataNameLabelKey, + Operator: v1.LabelSelectorOpIn, + Values: []string{"first", "second"}, + }, + }, }, }, } @@ -745,14 +821,6 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { NodeName: nodeName, }, }, - "second": { - ObjectMeta: v1.ObjectMeta{ - Name: "second", - }, - Status: v1alpha1.BlockDeviceStatus{ - NodeName: "another-node", - }, - }, } valid, _ := validateSpecBlockDevices(lvg, bds) @@ -762,12 +830,12 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("syncThinPoolsAllocationLimit", func(t *testing.T) { const lvgName = "test" - lvg := &v1alpha1.LvmVolumeGroup{ + lvg := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, }, - Spec: v1alpha1.LvmVolumeGroupSpec{ - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolSpec{ + Spec: v1alpha1.LVMVolumeGroupSpec{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolSpec{ { Name: "first", Size: "1G", @@ -775,8 +843,8 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }, }, }, - Status: v1alpha1.LvmVolumeGroupStatus{ - ThinPools: []v1alpha1.LvmVolumeGroupThinPoolStatus{ + Status: v1alpha1.LVMVolumeGroupStatus{ + ThinPools: []v1alpha1.LVMVolumeGroupThinPoolStatus{ { Name: "first", AllocationLimit: "150%", @@ -802,7 +870,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Error(err) } - updatedLVG := &v1alpha1.LvmVolumeGroup{} + updatedLVG := &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, updatedLVG) @@ -815,7 +883,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { const ( lvgName = "test" ) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Name = lvgName lvg.Finalizers = []string{} @@ -837,7 +905,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } if assert.True(t, added) { - updatedLVG 
:= &v1alpha1.LvmVolumeGroup{} + updatedLVG := &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, updatedLVG) @@ -850,7 +918,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { const ( lvgName = "test-1" ) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Name = lvgName lvg.Finalizers = []string{ internal.SdsNodeConfiguratorFinalizer, @@ -874,7 +942,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { } if assert.False(t, added) { - updatedLVG := &v1alpha1.LvmVolumeGroup{} + updatedLVG := &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, updatedLVG) @@ -891,7 +959,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { badReason = "bad" ) curTime := v1.NewTime(time.Now()) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Name = lvgName lvg.Generation = 1 lvg.Status.Conditions = []v1.Condition{ @@ -915,7 +983,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Error(err) } - notUpdatedLVG := &v1alpha1.LvmVolumeGroup{} + notUpdatedLVG := &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, notUpdatedLVG) @@ -934,7 +1002,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { lvgName = "test-name-2" ) curTime := v1.NewTime(time.Now()) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Name = lvgName lvg.Generation = 1 lvg.Status.Conditions = []v1.Condition{ @@ -964,14 +1032,14 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("shouldReconcileLVGByDeleteFunc", func(t *testing.T) { t.Run("returns_true", func(t *testing.T) { - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.DeletionTimestamp = &v1.Time{} assert.True(t, shouldReconcileLVGByDeleteFunc(lvg)) }) t.Run("returns_false", func(t *testing.T) { - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.DeletionTimestamp = nil assert.False(t, shouldReconcileLVGByDeleteFunc(lvg)) @@ -980,37 +1048,37 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("shouldLVGWatcherReconcileUpdateEvent", func(t *testing.T) { t.Run("deletion_timestamp_not_nil_returns_true", func(t *testing.T) { - oldLVG := &v1alpha1.LvmVolumeGroup{} - newLVG := &v1alpha1.LvmVolumeGroup{} + oldLVG := &v1alpha1.LVMVolumeGroup{} + newLVG := &v1alpha1.LVMVolumeGroup{} newLVG.DeletionTimestamp = &v1.Time{} assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) }) t.Run("spec_is_diff_returns_true", func(t *testing.T) { - oldLVG := &v1alpha1.LvmVolumeGroup{} - newLVG := &v1alpha1.LvmVolumeGroup{} - oldLVG.Spec.BlockDeviceNames = []string{"first"} - newLVG.Spec.BlockDeviceNames = []string{"first", "second"} + oldLVG := &v1alpha1.LVMVolumeGroup{} + newLVG := &v1alpha1.LVMVolumeGroup{} + oldLVG.Spec.BlockDeviceSelector = &v1.LabelSelector{MatchLabels: map[string]string{"first": "second"}} + newLVG.Spec.BlockDeviceSelector = &v1.LabelSelector{MatchLabels: map[string]string{"second": "second"}} assert.True(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) }) t.Run("condition_vg_configuration_applied_is_updating_returns_false", func(t *testing.T) { - oldLVG := &v1alpha1.LvmVolumeGroup{} - newLVG := &v1alpha1.LvmVolumeGroup{} + oldLVG := &v1alpha1.LVMVolumeGroup{} + newLVG := &v1alpha1.LVMVolumeGroup{} newLVG.Name = "test-name" + newLVG.Labels = map[string]string{LVGMetadateNameLabelKey: "test-name"} newLVG.Status.Conditions = []v1.Condition{ { Type: internal.TypeVGConfigurationApplied, Reason: internal.ReasonUpdating, }, 
} - newLVG.Labels = map[string]string{LVGMetadateNameLabelKey: newLVG.Name} assert.False(t, shouldLVGWatcherReconcileUpdateEvent(log, oldLVG, newLVG)) }) t.Run("condition_vg_configuration_applied_is_creating_returns_false", func(t *testing.T) { - oldLVG := &v1alpha1.LvmVolumeGroup{} - newLVG := &v1alpha1.LvmVolumeGroup{} + oldLVG := &v1alpha1.LVMVolumeGroup{} + newLVG := &v1alpha1.LVMVolumeGroup{} newLVG.Name = "test-name" newLVG.Status.Conditions = []v1.Condition{ { @@ -1023,8 +1091,8 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }) t.Run("label_is_not_the_same_returns_true", func(t *testing.T) { - oldLVG := &v1alpha1.LvmVolumeGroup{} - newLVG := &v1alpha1.LvmVolumeGroup{} + oldLVG := &v1alpha1.LVMVolumeGroup{} + newLVG := &v1alpha1.LVMVolumeGroup{} newLVG.Name = "test-name" newLVG.Status.Conditions = []v1.Condition{ { @@ -1037,11 +1105,11 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { }) t.Run("dev_size_and_pv_size_are_diff_returns_true", func(t *testing.T) { - oldLVG := &v1alpha1.LvmVolumeGroup{} - newLVG := &v1alpha1.LvmVolumeGroup{} - newLVG.Status.Nodes = []v1alpha1.LvmVolumeGroupNode{ + oldLVG := &v1alpha1.LVMVolumeGroup{} + newLVG := &v1alpha1.LVMVolumeGroup{} + newLVG.Status.Nodes = []v1alpha1.LVMVolumeGroupNode{ { - Devices: []v1alpha1.LvmVolumeGroupDevice{ + Devices: []v1alpha1.LVMVolumeGroupDevice{ { BlockDevice: "test", DevSize: resource.MustParse("1G"), @@ -1057,17 +1125,17 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("shouldUpdateLVGLabels", func(t *testing.T) { t.Run("labels_nil_returns_true", func(t *testing.T) { - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} assert.True(t, shouldUpdateLVGLabels(log, lvg, "key", "value")) }) t.Run("no_such_label_returns_true", func(t *testing.T) { - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Labels = map[string]string{"key": "value"} assert.True(t, shouldUpdateLVGLabels(log, lvg, "other-key", "value")) }) t.Run("key_exists_other_value_returns_true", func(t *testing.T) { const key = "key" - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Labels = map[string]string{key: "value"} assert.True(t, shouldUpdateLVGLabels(log, lvg, key, "other-value")) }) @@ -1076,7 +1144,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { key = "key" value = "value" ) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} lvg.Labels = map[string]string{key: value} assert.False(t, shouldUpdateLVGLabels(log, lvg, key, value)) }) @@ -1108,12 +1176,12 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { nodeName = "test-node" ) - lvgToDelete := &v1alpha1.LvmVolumeGroup{ + lvgToDelete := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: lvgName, }, - Status: v1alpha1.LvmVolumeGroupStatus{ - Nodes: []v1alpha1.LvmVolumeGroupNode{ + Status: v1alpha1.LVMVolumeGroupStatus{ + Nodes: []v1alpha1.LVMVolumeGroupNode{ { Name: nodeName, }, @@ -1130,7 +1198,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { _ = cl.Delete(ctx, lvgToDelete) }() - lvgCheck := &v1alpha1.LvmVolumeGroup{} + lvgCheck := &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, lvgCheck) @@ -1144,7 +1212,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Error(err) } - lvgNewCheck := &v1alpha1.LvmVolumeGroup{} + lvgNewCheck := &v1alpha1.LVMVolumeGroup{} err = cl.Get(ctx, client.ObjectKey{ Name: lvgName, }, lvgNewCheck) @@ -1155,7 +1223,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { 
t.Run("getLVMVolumeGroup_lvg_exists_returns_correct", func(t *testing.T) { const name = "test_name" - lvgToCreate := &v1alpha1.LvmVolumeGroup{ + lvgToCreate := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: name, }, @@ -1182,7 +1250,7 @@ func TestLVMVolumeGroupWatcherCtrl(t *testing.T) { t.Run("getLVMVolumeGroup_lvg_doesnt_exist_returns_nil", func(t *testing.T) { const name = "test_name" - testObj := &v1alpha1.LvmVolumeGroup{ + testObj := &v1alpha1.LVMVolumeGroup{ ObjectMeta: v1.ObjectMeta{ Name: name, }, diff --git a/images/agent/werf.inc.yaml b/images/agent/werf.inc.yaml index 55308330..ea3a4be1 100644 --- a/images/agent/werf.inc.yaml +++ b/images/agent/werf.inc.yaml @@ -53,15 +53,18 @@ from: {{ $.BASE_GOLANG }} final: false git: - - add: /images/agent/src - to: /src + - add: / + to: / + includePaths: + - api + - images/agent/src stageDependencies: setup: - "**/*" shell: setup: - - cd /src/cmd + - cd /images/agent/src/cmd - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s -w" -o /sds-node-configurator-agent - chmod +x /sds-node-configurator-agent diff --git a/images/sds-health-watcher-controller/src/go.mod b/images/sds-health-watcher-controller/src/go.mod index b0b0afd2..043a530e 100644 --- a/images/sds-health-watcher-controller/src/go.mod +++ b/images/sds-health-watcher-controller/src/go.mod @@ -4,7 +4,7 @@ go 1.22.3 require ( github.com/cloudflare/cfssl v1.5.0 - github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b + github.com/deckhouse/sds-node-configurator/api v0.0.0-20240905123334-64f17b70f035 github.com/go-logr/logr v1.4.2 github.com/prometheus/client_golang v1.19.1 github.com/stretchr/testify v1.9.0 @@ -18,6 +18,8 @@ require ( sigs.k8s.io/controller-runtime v0.19.0 ) +replace github.com/deckhouse/sds-node-configurator/api => ../../../api + require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/images/sds-health-watcher-controller/src/go.sum b/images/sds-health-watcher-controller/src/go.sum index 8a29ad3d..4d0847f3 100644 --- a/images/sds-health-watcher-controller/src/go.sum +++ b/images/sds-health-watcher-controller/src/go.sum @@ -20,6 +20,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240905123334-64f17b70f035 h1:2kluZX0T5gk8YgNRk2bzd+m/mSkNmcKKaDHd6sVHP8I= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240905123334-64f17b70f035/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go index 7effe7ff..f382e08e 100644 --- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go +++ 
b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher.go @@ -61,7 +61,7 @@ func RunLVGConditionsWatcher( Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] Reconciler got a request %s", request.String())) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} err := cl.Get(ctx, request.NamespacedName, lvg) if err != nil { log.Error(err, fmt.Sprintf("[RunLVGConditionsWatcher] unable to get the LVMVolumeGroup %s", request.Name)) @@ -95,8 +95,8 @@ func RunLVGConditionsWatcher( return err } - err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ + CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] got a create event for the LVMVolumeGroup %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} @@ -104,7 +104,7 @@ func RunLVGConditionsWatcher( log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) }, - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) if reflect.DeepEqual(e.ObjectOld.Status.Conditions, e.ObjectNew.Status.Conditions) { log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] no condition changes for the LVMVolumeGroup %s. 
No need to reconcile", e.ObjectNew.Name)) @@ -123,7 +123,7 @@ func RunLVGConditionsWatcher( return nil } -func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup) (bool, error) { +func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { log.Debug(fmt.Sprintf("[reconcileLVGConditions] starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) if lvg.Status.Conditions == nil { diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go index 05be62d1..c61a96cd 100644 --- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go +++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_conditions_watcher_funcs.go @@ -57,7 +57,7 @@ func getCRD(ctx context.Context, cl client.Client, crdName string) (*v1.CustomRe return crd, err } -func updateLVMVolumeGroupPhaseIfNeeded(ctx context.Context, cl client.Client, lvg *v1alpha1.LvmVolumeGroup, phase string) error { +func updateLVMVolumeGroupPhaseIfNeeded(ctx context.Context, cl client.Client, lvg *v1alpha1.LVMVolumeGroup, phase string) error { if lvg.Status.Phase == phase { return nil } diff --git a/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go b/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go index e6674706..d5045cc1 100644 --- a/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go +++ b/images/sds-health-watcher-controller/src/pkg/controller/lvg_status_watcher.go @@ -35,7 +35,7 @@ func RunLVGStatusWatcher( Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log.Info(fmt.Sprintf("[RunLVGStatusWatcher] Reconciler got a request %s", request.String())) - lvg := &v1alpha1.LvmVolumeGroup{} + lvg := &v1alpha1.LVMVolumeGroup{} err := cl.Get(ctx, request.NamespacedName, lvg) if err != nil { if errors2.IsNotFound(err) { @@ -67,14 +67,14 @@ func RunLVGStatusWatcher( return err } - err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LvmVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LvmVolumeGroup, reconcile.Request]{ - CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ + CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVGStatusWatcher] got a create event for the LVMVolumeGroup %s", e.Object.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) log.Info(fmt.Sprintf("[RunLVGStatusWatcher] CreateFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) }, - UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LvmVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { log.Info(fmt.Sprintf("[RunLVGStatusWatcher] got an update event for the 
LVMVolumeGroup %s", e.ObjectNew.GetName())) request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} q.Add(request) @@ -89,7 +89,7 @@ func RunLVGStatusWatcher( return nil } -func reconcileLVGStatus(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup) error { +func reconcileLVGStatus(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup) error { log.Debug(fmt.Sprintf("[reconcileLVGStatus] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) shouldUpdate := false @@ -118,7 +118,7 @@ func reconcileLVGStatus(ctx context.Context, cl client.Client, log logger.Logger return err } -func getActualThinPoolReadyCount(statusTp []v1alpha1.LvmVolumeGroupThinPoolStatus) int { +func getActualThinPoolReadyCount(statusTp []v1alpha1.LVMVolumeGroupThinPoolStatus) int { count := 0 for _, tp := range statusTp { @@ -130,7 +130,7 @@ func getActualThinPoolReadyCount(statusTp []v1alpha1.LvmVolumeGroupThinPoolStatu return count } -func getUniqueThinPoolCount(specTp []v1alpha1.LvmVolumeGroupThinPoolSpec, statusTp []v1alpha1.LvmVolumeGroupThinPoolStatus) int { +func getUniqueThinPoolCount(specTp []v1alpha1.LVMVolumeGroupThinPoolSpec, statusTp []v1alpha1.LVMVolumeGroupThinPoolStatus) int { unique := make(map[string]struct{}, len(specTp)+len(statusTp)) for _, tp := range specTp { @@ -144,7 +144,7 @@ func getUniqueThinPoolCount(specTp []v1alpha1.LvmVolumeGroupThinPoolSpec, status return len(unique) } -func getVGConfigurationAppliedStatus(lvg *v1alpha1.LvmVolumeGroup) v1.ConditionStatus { +func getVGConfigurationAppliedStatus(lvg *v1alpha1.LVMVolumeGroup) v1.ConditionStatus { for _, c := range lvg.Status.Conditions { if c.Type == internal.TypeVGConfigurationApplied { return c.Status diff --git a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go index 0b269a43..79b55869 100644 --- a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go +++ b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_funcs.go @@ -97,8 +97,8 @@ func getPodsBySelector(ctx context.Context, cl client.Client, selector map[strin return pods, nil } -func findLVMVolumeGroupsByNodeNames(lvgs map[string]v1alpha1.LvmVolumeGroup, nodeNames []string) map[string]v1alpha1.LvmVolumeGroup { - result := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgs)) +func findLVMVolumeGroupsByNodeNames(lvgs map[string]v1alpha1.LVMVolumeGroup, nodeNames []string) map[string]v1alpha1.LVMVolumeGroup { + result := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgs)) names := make(map[string]struct{}, len(nodeNames)) for _, n := range nodeNames { @@ -141,7 +141,7 @@ func getNodesByNames(ctx context.Context, cl client.Client, lvgNodeNames []strin return usedNodes, missedNodes, nil } -func getNodeNamesFromLVGs(lvgs map[string]v1alpha1.LvmVolumeGroup) []string { +func getNodeNamesFromLVGs(lvgs map[string]v1alpha1.LVMVolumeGroup) []string { nodes := make([]string, 0, len(lvgs)) for _, lvg := range lvgs { @@ -153,8 +153,8 @@ func getNodeNamesFromLVGs(lvgs map[string]v1alpha1.LvmVolumeGroup) []string { return nodes } -func GetLVMVolumeGroups(ctx context.Context, cl client.Client, metrics monitoring.Metrics) (map[string]v1alpha1.LvmVolumeGroup, error) { - lvgList := &v1alpha1.LvmVolumeGroupList{} +func GetLVMVolumeGroups(ctx context.Context, cl client.Client, metrics 
monitoring.Metrics) (map[string]v1alpha1.LVMVolumeGroup, error) { + lvgList := &v1alpha1.LVMVolumeGroupList{} start := time.Now() err := cl.List(ctx, lvgList) @@ -162,10 +162,10 @@ func GetLVMVolumeGroups(ctx context.Context, cl client.Client, metrics monitorin metrics.APIMethodsExecutionCount(SdsInfraWatcherCtrlName, "list").Inc() if err != nil { metrics.APIMethodsErrors(SdsInfraWatcherCtrlName, "list").Inc() - return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LvmVolumeGroups, err: %w", err) + return nil, fmt.Errorf("[GetApiLVMVolumeGroups] unable to list LVMVolumeGroups, err: %w", err) } - lvgs := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgList.Items)) + lvgs := make(map[string]v1alpha1.LVMVolumeGroup, len(lvgList.Items)) for _, lvg := range lvgList.Items { lvgs[lvg.Name] = lvg } @@ -173,7 +173,7 @@ func GetLVMVolumeGroups(ctx context.Context, cl client.Client, metrics monitorin return lvgs, nil } -func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LvmVolumeGroup, status metav1.ConditionStatus, conType, reason, message string) error { +func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, status metav1.ConditionStatus, conType, reason, message string) error { exist := false index := 0 newCondition := metav1.Condition{ diff --git a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go index 1894d8cd..cfa3899a 100644 --- a/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go +++ b/images/sds-health-watcher-controller/src/pkg/controller/sds_infra_watcher_test.go @@ -126,7 +126,7 @@ func TestHealthWatcher(t *testing.T) { }) t.Run("GetLVMVolumeGroups_returns_lvgs", func(t *testing.T) { - lvgsToCreate := []v1alpha1.LvmVolumeGroup{ + lvgsToCreate := []v1alpha1.LVMVolumeGroup{ { ObjectMeta: metav1.ObjectMeta{ Name: "test-lvg-1", @@ -157,10 +157,10 @@ func TestHealthWatcher(t *testing.T) { node1 = "node1" node2 = "node2" ) - lvgs := map[string]v1alpha1.LvmVolumeGroup{ + lvgs := map[string]v1alpha1.LVMVolumeGroup{ "first": { - Status: v1alpha1.LvmVolumeGroupStatus{ - Nodes: []v1alpha1.LvmVolumeGroupNode{ + Status: v1alpha1.LVMVolumeGroupStatus{ + Nodes: []v1alpha1.LVMVolumeGroupNode{ { Name: node1, }, @@ -168,8 +168,8 @@ func TestHealthWatcher(t *testing.T) { }, }, "second": { - Status: v1alpha1.LvmVolumeGroupStatus{ - Nodes: []v1alpha1.LvmVolumeGroupNode{ + Status: v1alpha1.LVMVolumeGroupStatus{ + Nodes: []v1alpha1.LVMVolumeGroupNode{ { Name: node2, }, @@ -190,13 +190,13 @@ func TestHealthWatcher(t *testing.T) { node1 = "node1" node2 = "node2" ) - lvgs := map[string]v1alpha1.LvmVolumeGroup{ + lvgs := map[string]v1alpha1.LVMVolumeGroup{ "first": { ObjectMeta: metav1.ObjectMeta{ Name: "first", }, - Status: v1alpha1.LvmVolumeGroupStatus{ - Nodes: []v1alpha1.LvmVolumeGroupNode{ + Status: v1alpha1.LVMVolumeGroupStatus{ + Nodes: []v1alpha1.LVMVolumeGroupNode{ { Name: node1, }, @@ -207,8 +207,8 @@ func TestHealthWatcher(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "second", }, - Status: v1alpha1.LvmVolumeGroupStatus{ - Nodes: []v1alpha1.LvmVolumeGroupNode{ + Status: v1alpha1.LVMVolumeGroupStatus{ + Nodes: []v1alpha1.LVMVolumeGroupNode{ { Name: node2, }, diff --git a/images/sds-health-watcher-controller/werf.inc.yaml b/images/sds-health-watcher-controller/werf.inc.yaml index afb236be..882bbae1 100644 --- 
a/images/sds-health-watcher-controller/werf.inc.yaml +++ b/images/sds-health-watcher-controller/werf.inc.yaml @@ -7,8 +7,11 @@ from: {{ $.BASE_GOLANG }} final: false git: - - add: /images/sds-health-watcher-controller/src - to: /src + - add: / + to: / + includePaths: + - api + - images/sds-health-watcher-controller/src stageDependencies: setup: - "**/*" @@ -17,7 +20,7 @@ mount: to: /go/pkg shell: setup: - - cd /src/cmd + - cd images/sds-health-watcher-controller/src/cmd - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s -w" -o /sds-health-watcher-controller - chmod +x /sds-health-watcher-controller diff --git a/lib/python/requirements.txt b/lib/python/requirements.txt index 31a1dc52..94943c43 100644 --- a/lib/python/requirements.txt +++ b/lib/python/requirements.txt @@ -1,4 +1,4 @@ deckhouse==0.4.9 dotmap==1.3.30 - PyYAML==6.0.1 +kubernetes==28.1.0
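
Note on the api rename: alongside LvmVolumeGroup -> LVMVolumeGroup, the tests above switch from spec.BlockDeviceNames to the new spec.BlockDeviceSelector label selector. A minimal sketch of constructing the renamed resource against the updated api package; the label key/value and object name below are illustrative only, and the program assumes the api module resolves (for example via the replace directive added in this diff).

// Sketch only: the renamed v1alpha1.LVMVolumeGroup with the new
// BlockDeviceSelector field (see the spec_is_diff_returns_true test above).
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
)

func main() {
	lvg := &v1alpha1.LVMVolumeGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "test-lvg-1"},
		Spec: v1alpha1.LVMVolumeGroupSpec{
			// Block devices are now matched by labels instead of being
			// listed explicitly by name.
			BlockDeviceSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"kubernetes.io/hostname": "node-1"},
			},
		},
	}
	fmt.Println(lvg.Name)
}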
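
Note on the dependency wiring in images/sds-health-watcher-controller/src: the api requirement is bumped to the new pseudo-version and, at the same time, pointed at the in-tree copy. The effective pair of go.mod directives, gathered from the hunks above:

require github.com/deckhouse/sds-node-configurator/api v0.0.0-20240905123334-64f17b70f035

replace github.com/deckhouse/sds-node-configurator/api => ../../../api

With the replace in place the build resolves the api package from the repository checkout rather than the published module, which is why the werf git section for this image now adds the repository root with includePaths for api and images/sds-health-watcher-controller/src: the relative ../../../api path has to exist inside the build context.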
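
Note on the watchers: RunLVGConditionsWatcher and RunLVGStatusWatcher keep the same controller-runtime v0.19 typed-watch wiring and only swap the object type. A compile-oriented sketch of that pattern in one place; buildLVGWatchSource is a made-up name for illustration and does not exist in the module, and the real watchers add their own filtering (for example, skipping updates whose status conditions did not change) before enqueueing.

// Sketch of the source.Kind + handler.TypedFuncs pattern used by the
// watchers above, parameterised on the renamed v1alpha1.LVMVolumeGroup.
package watchsketch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
)

func buildLVGWatchSource(mgr manager.Manager) source.TypedSyncingSource[reconcile.Request] {
	return source.Kind(mgr.GetCache(), &v1alpha1.LVMVolumeGroup{},
		handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{
			CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
				// Enqueue every created LVMVolumeGroup by namespace/name.
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
					Namespace: e.Object.GetNamespace(),
					Name:      e.Object.GetName(),
				}})
			},
			UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
				// Enqueue updates the same way; filtering is left to the caller.
				q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
					Namespace: e.ObjectNew.GetNamespace(),
					Name:      e.ObjectNew.GetName(),
				}})
			},
		})
}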
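
Note on the tests: TestLVMVolumeGroupWatcherCtrl and TestHealthWatcher keep using a fake client, only with the renamed kinds. A self-contained sketch of that setup; the storage.deckhouse.io/v1alpha1 group/version string is an assumption here, and the real tests obtain their client and scheme from the module's own test helpers rather than registering types inline.

// Sketch only: register the renamed kinds with a scheme (group/version is an
// assumption) and exercise them through controller-runtime's fake client.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/deckhouse/sds-node-configurator/api/v1alpha1"
)

func main() {
	gv := schema.GroupVersion{Group: "storage.deckhouse.io", Version: "v1alpha1"}

	scheme := runtime.NewScheme()
	scheme.AddKnownTypes(gv, &v1alpha1.LVMVolumeGroup{}, &v1alpha1.LVMVolumeGroupList{})
	metav1.AddToGroupVersion(scheme, gv)

	cl := fake.NewClientBuilder().WithScheme(scheme).Build()

	lvg := &v1alpha1.LVMVolumeGroup{ObjectMeta: metav1.ObjectMeta{Name: "test-lvg-1"}}
	if err := cl.Create(context.Background(), lvg); err != nil {
		panic(err)
	}

	got := &v1alpha1.LVMVolumeGroup{}
	if err := cl.Get(context.Background(), client.ObjectKey{Name: "test-lvg-1"}, got); err != nil {
		panic(err)
	}
	fmt.Println(got.Name)
}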