diff --git a/.github/workflows/build_dev.yml b/.github/workflows/build_dev.yml new file mode 100644 index 00000000..b0255cf7 --- /dev/null +++ b/.github/workflows/build_dev.yml @@ -0,0 +1,29 @@ +on: + pull_request: + +jobs: + dev_setup_build: + runs-on: ubuntu-latest + env: + MODULES_REGISTRY: ${{ vars.DEV_REGISTRY }} + CI_COMMIT_REF_NAME: ${{ github.ref_name }} + MODULES_MODULE_NAME: ${{ vars.MODULE_NAME }} + MODULES_MODULE_SOURCE: ${{ vars.DEV_MODULE_SOURCE }} + MODULES_REGISTRY_LOGIN: ${{ secrets.DEV_MODULES_REGISTRY_LOGIN }} + MODULES_REGISTRY_PASSWORD: ${{ secrets.DEV_MODULES_REGISTRY_PASSWORD }} + name: Build and Push images + steps: + - run: | + MODULES_MODULE_TAG="$(echo pr${{ github.ref_name }} | sed 's/\/.*//g')" + echo $MODULES_REGISTRY + echo $CI_COMMIT_REF_NAME + echo "MODULES_MODULE_TAG=$MODULES_MODULE_TAG" >> "$GITHUB_ENV" + echo $MODULES_MODULE_NAME + echo $MODULES_MODULE_SOURCE + echo $MODULES_MODULE_TAG + shell: bash + name: Show vars + + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/build@v1 diff --git a/.github/workflows/build_prod.yml b/.github/workflows/build_prod.yml new file mode 100644 index 00000000..f2286198 --- /dev/null +++ b/.github/workflows/build_prod.yml @@ -0,0 +1,73 @@ +name: Build and push for prod + +env: + MODULES_REGISTRY: ${{ vars.PROD_REGISTRY }} + CI_COMMIT_REF_NAME: ${{ github.ref_name }} + MODULES_MODULE_NAME: ${{ vars.MODULE_NAME }} + MODULE_SOURCE_NAME: ${{ vars.PROD_MODULE_SOURCE_NAME }} + MODULES_REGISTRY_LOGIN: ${{ secrets.PROD_MODULES_REGISTRY_LOGIN }} + MODULES_REGISTRY_PASSWORD: ${{ secrets.PROD_MODULES_REGISTRY_PASSWORD }} + MODULES_MODULE_TAG: ${{ github.ref_name }} + +on: + push: + tags: + - '**' + +jobs: + prod_ce_setup_build: + runs-on: ubuntu-latest + name: Build and Push CE + steps: + - name: SET VAR + run: | + echo "MODULES_MODULE_SOURCE=$MODULES_REGISTRY/$MODULE_SOURCE_NAME/ce/modules" >> "$GITHUB_ENV" + - run: | + echo $MODULES_REGISTRY + 
echo $MODULES_MODULE_NAME + echo $MODULES_MODULE_SOURCE + echo $MODULES_MODULE_TAG + shell: bash + name: Show vars + + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/build@v1 + + prod_ee_setup_build: + runs-on: ubuntu-latest + name: Build and Push EE + steps: + - name: SET VAR + run: | + echo "MODULES_MODULE_SOURCE=$MODULES_REGISTRY/$MODULE_SOURCE_NAME/ee/modules" >> "$GITHUB_ENV" + - run: | + echo $MODULES_REGISTRY + echo $MODULES_MODULE_NAME + echo $MODULES_MODULE_SOURCE + echo $MODULES_MODULE_TAG + shell: bash + name: Show vars + + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/build@v1 + + prod_fe_setup_build: + runs-on: ubuntu-latest + name: Build and Push FE + steps: + - name: SET VAR + run: | + echo "MODULES_MODULE_SOURCE=$MODULES_REGISTRY/$MODULE_SOURCE_NAME/fe/modules" >> "$GITHUB_ENV" + - run: | + echo $MODULES_REGISTRY + echo $MODULES_MODULE_NAME + echo $MODULES_MODULE_SOURCE + echo $MODULES_MODULE_TAG + shell: bash + name: Show vars + + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/build@v1 diff --git a/.github/workflows/deploy_dev.yml b/.github/workflows/deploy_dev.yml deleted file mode 100644 index 3ab7af5c..00000000 --- a/.github/workflows/deploy_dev.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Deploy Dev - -on: - workflow_dispatch: - inputs: - version: - description: "Select version" - type: choice - default: alpha - options: - - "alpha" - - "beta" - - "early-access" - - "stable" - - "rock-solid" - - tag: - description: "Tag of the module, exapmle v1.21.1" - type: string - required: true - -jobs: - deploy: - runs-on: ubuntu-latest - env: - MODULES_REGISTRY: ${{ vars.REGISTRY }} - CI_COMMIT_REF_NAME: ${{ github.ref_name }} - MODULES_MODULE_NAME: ${{ vars.MODULE_NAME }} - MODULES_MODULE_SOURCE: ${{ vars.MODULE_SOURCE }} - RELEASE_CHANNEL: ${{ github.event.inputs.version 
}} - MODULES_MODULE_TAG: ${{ github.event.inputs.tag }} - name: Build and Push images - steps: - - run: | - echo $MODULES_REGISTRY - echo $CI_COMMIT_REF_NAME - echo $MODULES_MODULE_NAME - echo $MODULES_MODULE_SOURCE - echo $RELEASE_CHANNEL - echo $MODULES_MODULE_TAG - shell: bash - name: Show vars - - - # steps: - # - uses: actions/checkout@v4 - # - uses: deckhouse/modules-actions/setup@v1 - # - uses: deckhouse/modules-actions/deploy@v1 diff --git a/.github/workflows/deploy_prod.yml b/.github/workflows/deploy_prod.yml index 0cfcc8da..5a3d9352 100644 --- a/.github/workflows/deploy_prod.yml +++ b/.github/workflows/deploy_prod.yml @@ -1,5 +1,15 @@ name: Deploy Prod +env: + MODULES_REGISTRY: ${{ vars.PROD_REGISTRY }} + MODULE_SOURCE_NAME: ${{ vars.PROD_MODULE_SOURCE_NAME }} + CI_COMMIT_REF_NAME: ${{ github.ref_name }} + MODULES_MODULE_NAME: ${{ vars.MODULE_NAME }} + RELEASE_CHANNEL: ${{ github.event.inputs.version }} + MODULES_REGISTRY_LOGIN: ${{ secrets.PROD_MODULES_REGISTRY_LOGIN }} + MODULES_REGISTRY_PASSWORD: ${{ secrets.PROD_MODULES_REGISTRY_PASSWORD }} + MODULES_MODULE_TAG: ${{ github.event.inputs.tag }} + on: workflow_dispatch: inputs: @@ -13,47 +23,81 @@ on: - "early-access" - "stable" - "rock-solid" - - edition: - description: "Select edition" - type: choice - default: fe - options: - - "ce" - - "ee" - - "fe" + + ce: + type: boolean + description: CE + ee: + type: boolean + description: EE + fe: + type: boolean + description: FE tag: - description: "Tag of the module, exapmle v1.21.1" + description: "Tag of the module, example v1.21.1" type: string required: true jobs: - deploy: + print-vars: runs-on: ubuntu-latest - env: - MODULES_REGISTRY: ${{ vars.PROD_REGISTRY }} - MODULE_SOURCE_NAME: ${{ vars.PROD_MODULE_SOURCE_NAME }} - CI_COMMIT_REF_NAME: ${{ github.ref_name }} - MODULES_MODULE_NAME: ${{ vars.MODULE_NAME }} - MODULES_MODULE_SOURCE: $MODULES_REGISTRY/$MODULE_SOURCE_NAME/${{ github.event.inputs.edition }}/modules - RELEASE_CHANNEL: ${{ 
github.event.inputs.version }} - MODULES_MODULE_TAG: ${{ github.event.inputs.tag }} - name: Build and Push images + name: Print vars steps: - - run: | + - name: PRINT VARS + run: | echo $MODULES_REGISTRY echo $MODULE_SOURCE_NAME echo $CI_COMMIT_REF_NAME echo $MODULES_MODULE_NAME - echo $MODULES_MODULE_SOURCE echo $RELEASE_CHANNEL echo $MODULES_MODULE_TAG shell: bash - name: Show vars - - - # steps: - # - uses: actions/checkout@v4 - # - uses: deckhouse/modules-actions/setup@v1 - # - uses: deckhouse/modules-actions/deploy@v1 + + job-CE: + name: Edition CE + runs-on: ubuntu-latest + if: github.event.inputs.ce == 'true' + steps: + - run: echo "CE" + - name: SET VAR + run: | + echo "MODULES_MODULE_SOURCE=$MODULES_REGISTRY/$MODULE_SOURCE_NAME/ce/modules" >> "$GITHUB_ENV" + - name: ECHO VAR + run: | + echo $MODULES_MODULE_SOURCE + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/deploy@v1 + + job-EE: + name: Edition EE + runs-on: ubuntu-latest + if: github.event.inputs.ee == 'true' + steps: + - run: echo "EE" + - name: SET VAR + run: | + echo "MODULES_MODULE_SOURCE=$MODULES_REGISTRY/$MODULE_SOURCE_NAME/ee/modules" >> "$GITHUB_ENV" + - name: ECHO VAR + run: | + echo $MODULES_MODULE_SOURCE + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/deploy@v1 + + job-FE: + name: Edition FE + runs-on: ubuntu-latest + if: github.event.inputs.fe == 'true' + steps: + - run: echo "FE" + - name: SET VAR + run: | + echo "MODULES_MODULE_SOURCE=$MODULES_REGISTRY/$MODULE_SOURCE_NAME/fe/modules" >> "$GITHUB_ENV" + - name: ECHO VAR + run: | + echo $MODULES_MODULE_SOURCE + - uses: actions/checkout@v4 + - uses: deckhouse/modules-actions/setup@v1 + - uses: deckhouse/modules-actions/deploy@v1 diff --git a/Chart.yaml b/Chart.yaml index b584bca9..d48b337e 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -1,5 +1,5 @@ name: sds-node-configurator -version: 0.0.25-dev.1701781997 +version: 
0.0.1 dependencies: - name: deckhouse_lib_helm version: 1.1.3 diff --git a/crds/blockdevices.yaml b/crds/blockdevices.yaml index b38901ff..b0184c27 100644 --- a/crds/blockdevices.yaml +++ b/crds/blockdevices.yaml @@ -132,7 +132,6 @@ spec: name: Path type: string description: Actual device path (name). - priority: 1 - jsonPath: .status.actualVGNameOnTheNode name: VG type: string diff --git a/crds/lvmvolumegroup.yaml b/crds/lvmvolumegroup.yaml index cacd84b7..950b8108 100644 --- a/crds/lvmvolumegroup.yaml +++ b/crds/lvmvolumegroup.yaml @@ -87,11 +87,11 @@ spec: type: string description: | The global state of the Volume Group. Might be: - - Operational (if everything is fine) - - NonOperational (if there are some problems in the volume group) - enum: - Operational (if everything is fine with the Volume Group) - - NonOperational (if there are some problems with the Volume Group) + - NonOperational (if there are some problems in the Volume Group) + enum: + - Operational + - NonOperational message: type: string description: | @@ -192,7 +192,6 @@ spec: name: VG type: string description: Actual VG name. - priority: 1 - jsonPath: .spec.type name: type type: string diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md new file mode 100644 index 00000000..b3ac6b5e --- /dev/null +++ b/docs/CONFIGURATION.md @@ -0,0 +1,14 @@ +--- +title: "The Sds-Node-Configurator module: settings" +--- +{% alert level="warning" %} +The module is guaranteed to work in the following cases only: +- if stock kernels shipped with [supported distributions](../../supported_versions.html#linux) are used; +- if a 10Gbps network is used. + +As for any other configurations, the module may work, but its smooth operation is not guaranteed. 
+{% endalert %} + +{% include module-bundle.liquid %} + +No configuration on the user's side is required. diff --git a/docs/FAQ.md b/docs/FAQ.md new file mode 100644 index 00000000..a4922ef3 --- /dev/null +++ b/docs/FAQ.md @@ -0,0 +1,70 @@ +--- +title: "The SDS-Node-Configurator module: FAQ" +description: "Common questions and answers." +--- +{% alert level="warning" %} +The module is guaranteed to work in the following cases only: +- if stock kernels shipped with the [supported distributions](../../supported_versions.html#linux) are used; +- if a 10Gbps network is used. + +As for any other configurations, the module may work, but its smooth operation is not guaranteed. +{% endalert %} + +## Why does creating `BlockDevice` and `LVMVolumeGroup` resources in a cluster fail? + +* In most cases, the creation of `BlockDevice` resources fails because the existing devices fail filtering by the controller. Please make sure that your devices meet the [requirements](link to requirements). + +* Creating `LVMVolumeGroup` resources may fail due to the missing `BlockDevice` resources, as they use them as the data source. + +* If the `BlockDevice` resources are present and the `LVMVolumeGroup` resources are not present, please make sure the existing `Volume Group` has a special tag `storage.deckhouse.io/enabled=true` attached. + +## I have deleted the `LVMVolumeGroup` resource, but the `Volume Group` is still there. What do I do? + +Deleting a `LVMVolumeGroup` resource does not delete the `Volume Group` it references. To delete it, +add a special `storage.deckhouse.io/sds-delete-vg: ""` annotation to trigger the deletion process. The controller will then automatically delete the +`Volume Group` and its associated resource. + +> Note that simply deleting the `LVMVolumeGroup` resource will result in the creation of a new resource with a generated name based on the existing `Volume Group`. 
+ +## I have attached the delete annotation, but the `LVMVolumeGroup` resource is still there as well as the `Volume Group` on the node. Why? + +The usual case is that there are `Logical Volumes` for the `Volume Group` the resource references. The controller does not delete `Logical Volumes` because these `volumes` may contain data and the user must purge them manually. + +Once the `Logical Volume` has been deleted, the controller will proceed to delete the `Volume Group` and its corresponding resource. + +> The time it takes to delete may be longer if the controller's reconcile queue is crowded with other events. To delete the `Volume Group` and its linked resource immediately, update the delete annotation, e.g., by adding any number to its value: `storage.deckhouse.io/sds-delete-vg: ""` -> `storage.deckhouse.io/sds-delete-vg: "1"`. +> In this case, it will be deleted immediately. + +## I'm trying to create a `Volume Group` using the `LVMVolumeGroup` resource, but I'm not getting anywhere. Why? + +Most likely, your resource fails controller validation. +The exact cause of the failure can be found in the `Status.Message` field of the resource itself, +or you can refer to the controller's logs. + +> The problem usually stems from incorrectly defined `BlockDevice` resources. Please make sure that these resources meet the following requirements: +> - The `Consumable` field is set to `true`. +> - For a `Volume Group` of type `Local`, the specified `BlockDevice` resources belong to the same node. +> - For a `Volume Group` of type `Shared`, the specified `BlockDevice` is the only resource. +> - The selected `BlockDevice` is not used by other `LVMVolumeGroup` resources (other `Volume Groups`). +> - The current names of the `BlockDevice` resources are specified. +> The full list of expected values can be found in the [CR reference](link to the reference) of the `LVMVolumeGroup` resource. + +## What happens if I unplug one of the devices in a `Volume Group`? 
Will the linked `LVMVolumeGroup` resource be deleted? + +The `LVMVolumeGroup` resource will persist as long as the corresponding `Volume Group` exists. As long as at least one device exists, the `Volume Group` will be there, albeit in an unhealthy state. +Note that these issues will be reflected in the resource's `Status`. + +When the unplugged device is reactivated, the `Volume Group` will recover while the linked `LVMVolumeGroup` resource will be brought to its current state as well. + +## How do I get the controller to stop monitoring the `Volume Group`? + +Delete the `storage.deckhouse.io/enabled=true` tag for the target `Volume Group`. The controller will then stop tracking the selected `Volume Group` and delete the associated `LVMVolumeGroup` resource automatically. + +## I haven't added the `storage.deckhouse.io/enabled=true` tag to the `Volume Group`, but it is there. How is this possible? + +This is possible if you have created the `Volume Group` using the `LVMVolumeGroup` resource (in this case, the controller will automatically add this tag to the created `Volume Group`) or if this `Volume Group` had the `Linstor` module tag (`linstor-*`). + +The `sds-node-configurator` module replaces some of the functionality of the `linstor-pools-importer` controller of the built-in `Linstor` module. +So when you switch from the `Linstor` module to the `sds-node-configurator` and `sds-drbd` modules, the `linstor-*` tags are automatically replaced with the `storage.deckhouse.io/enabled=true` tag in the `Volume Group`. This way, the `sds-node-configurator` gets control of these `Volume Groups`. + +> The controller performs a one-time re-tagging operation on all existing `Volume Groups` when it starts up. 
diff --git a/docs/USAGE.md b/docs/USAGE.md new file mode 100644 index 00000000..68ee9c0d --- /dev/null +++ b/docs/USAGE.md @@ -0,0 +1,88 @@ +--- +title: "The SDS-Node-Configurator module: usage examples" +description: Usage and examples of the SDS-Node-Configurator controller operation. +--- + +{% alert level="warning" %} +The module is guaranteed to work in the following cases only: +- if stock kernels shipped with the [supported distributions](../../supported_versions.html#linux) are used; +- if a 10Gbps network is used. + +As for any other configurations, the module may work, but its smooth operation is not guaranteed. +{% endalert %} + +The controller supports two types of resources: +* `BlockDevice`; +* `LVMVolumeGroup`. + +## [BlockDevice](block device) resources + +### Creating a `BlockDevice` resource + +The controller regularly scans the existing devices on the node. If a device meets all the conditions +imposed by the controller, a `BlockDevice` `custom resource` (CR) with a unique name is created. +It contains all the information about the device in question. + +#### The conditions the controller imposes on the device + +* The device is not a drbd device. +* The device is not a pseudo-device (i.e. not a loop device). +* The device is not a `Logical Volume`. +* File system is missing or matches LVM2_MEMBER. +* The block device has no partitions. +* The size of the block device is greater than 1 Gi. +* If the device is a virtual disk, it must have a serial number. + +The controller will use the information from the custom resource to handle `LVMVolumeGroup` resources going forward. + +### Updating a `BlockDevice` resource + +The controller updates the information in the custom resource independently if the state of the block device it refers to has changed. 
+ +### Deleting a `BlockDevice` resource + +The following are the cases in which the controller will automatically delete a resource if the block device it refers to has become unavailable: +* if the resource had a Consumable status; +* if the block device belongs to a `Volume Group` that does not have the tag `storage.deckhouse.io/enabled=true` attached to it (this `Volume Group` is not managed by our controller). + + +> The controller performs the above activities automatically and requires no user intervention. + +## [LVMVolumeGroup](lvmVolumeGroup) resources + +The `BlockDevice` resources are required to create and update `LVMVolumeGroup` resources. + +The `LVMVolumeGroup` resources are designed to communicate with the `Volume Group` and display up-to-date information about their state. + +### Creating a `LVMVolumeGroup` resource and a `Volume Group` + +There are two ways to create a `LVMVolumeGroup` resource: +* Automatically: + * The controller automatically scans for information about the existing `Volume Groups` on nodes and creates a resource + if a `Volume Group` is tagged with `storage.deckhouse.io/enabled=true` and there is no matching resource for it. + * In this case, the controller populates all fields of the resource on its own. +* By the user: + * The user manually creates the resource by filling in only the `Spec` field. In it, they specify the desired state of the new `Volume Group`. + * This information is then validated to ensure that the configuration provided is correct and can be implemented. + * After successful validation, the controller uses the provided information to create the specified `Volume Group` and update the user resource with the actual information about the state of the created `Volume Group`. + +### Updating a `LVMVolumeGroup` resource and a `Volume Group` + +The controller automatically updates the `Status` field of the `LVMVolumeGroup` with the current data about the `Volume Group` in question. 
+We do **not recommend** making manual changes to the `Status` field. + +> The controller does not update the `Spec` field since it represents the desired state of the `Volume Group`. The user can make changes to the `Spec` field to change the state of the `Volume Group`. + +### Deleting a `LVMVolumeGroup` resource and a `Volume Group` + +The controller will automatically delete a resource if the `Volume Group` it references has become unavailable. + +> The user may delete a resource manually. However, if the corresponding `Volume Group` still exists at the moment the resource is deleted, +> the controller will create a resource *automatically* based on the existing `Volume Group` +> and assign it a new generated name. + +To delete a `Volume Group` and its associated `Physical Volume`, append the `storage.deckhouse.io/sds-delete-vg: ""` annotation to the corresponding `LVMVolumeGroup` resource. + +The controller will detect that the annotation has been added and initiate the process of deleting the `Volume Group` and its parts. + +This will result in the `Volume Group` being deleted, as well as its associated `Physical Volume`, and the `LVMVolumeGroup` resource (**if there is no `Logical Volume` on the `Volume Group`**). If there is a `Logical Volume` on the `Volume Group`, the user must first manually delete the `Logical Volume` on the node. 
\ No newline at end of file diff --git a/images/agent/cmd/bc/main.go b/images/agent/cmd/bc/main.go index 14f14bb9..a8ec04fe 100644 --- a/images/agent/cmd/bc/main.go +++ b/images/agent/cmd/bc/main.go @@ -19,12 +19,6 @@ package main import ( "context" "fmt" - v1 "k8s.io/api/core/v1" - sv1 "k8s.io/api/storage/v1" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/runtime" - apiruntime "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" "os" goruntime "runtime" "sds-node-configurator/api/v1alpha1" @@ -33,6 +27,13 @@ import ( "sds-node-configurator/pkg/kubutils" "sds-node-configurator/pkg/logger" "sds-node-configurator/pkg/monitoring" + + v1 "k8s.io/api/core/v1" + sv1 "k8s.io/api/storage/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + apiruntime "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" ) diff --git a/images/agent/internal/const.go b/images/agent/internal/const.go index 735011e2..c0fcb609 100644 --- a/images/agent/internal/const.go +++ b/images/agent/internal/const.go @@ -25,7 +25,7 @@ const ( AvailableBlockDevice = "available_block_device" SdsNodeConfigurator = "storage.deckhouse.io/sds-node-configurator" LVMVGHealthOperational = "Operational" - LVMVGHealthNonOperational = "Nonoperational" + LVMVGHealthNonOperational = "NonOperational" ) var ( diff --git a/images/agent/pkg/controller/block_device.go b/images/agent/pkg/controller/block_device.go index 2bcb4bf3..7b20e534 100644 --- a/images/agent/pkg/controller/block_device.go +++ b/images/agent/pkg/controller/block_device.go @@ -192,8 +192,7 @@ func RemoveDeprecatedAPIDevices( } for name, device := range apiBlockDevices { - if checkAPIBlockDeviceDeprecated(name, actualCandidates) && - device.Status.NodeName == nodeName { + if 
shouldDeleteBlockDevice(device, actualCandidates, nodeName) { err := DeleteAPIBlockDevice(ctx, cl, metrics, name) if err != nil { log.Error(err, fmt.Sprintf("[RunBlockDeviceController] unable to delete APIBlockDevice, name: %s", name)) @@ -206,8 +205,18 @@ func RemoveDeprecatedAPIDevices( } } -func checkAPIBlockDeviceDeprecated(apiDeviceName string, actualCandidates map[string]struct{}) bool { - _, ok := actualCandidates[apiDeviceName] +func shouldDeleteBlockDevice(bd v1alpha1.BlockDevice, actualCandidates map[string]struct{}, nodeName string) bool { + if bd.Status.NodeName == nodeName && + bd.Status.Consumable && + isBlockDeviceDeprecated(bd.Name, actualCandidates) { + return true + } + + return false +} + +func isBlockDeviceDeprecated(blockDevice string, actualCandidates map[string]struct{}) bool { + _, ok := actualCandidates[blockDevice] return !ok } diff --git a/images/agent/pkg/controller/watcher_lvm_volume_group.go b/images/agent/pkg/controller/watcher_lvm_volume_group.go index dca70e37..384822e5 100644 --- a/images/agent/pkg/controller/watcher_lvm_volume_group.go +++ b/images/agent/pkg/controller/watcher_lvm_volume_group.go @@ -66,7 +66,7 @@ func RunWatcherLVMVGController( log.Warning(fmt.Sprintf(`Added request, namespace: "%s" name: "%s", to requeue`, request.Namespace, request.Name)) return reconcile.Result{ RequeueAfter: cfg.VolumeGroupScanInterval * time.Second, - }, err + }, nil } log.Info(fmt.Sprintf(`Reconcile of RunWatcherLVMVGController on request, name: "%s" ends`, request.Name)) diff --git a/images/agent/pkg/controller/watcher_lvm_volume_group_constants.go b/images/agent/pkg/controller/watcher_lvm_volume_group_constants.go index d5cc3bf5..677ca3c4 100644 --- a/images/agent/pkg/controller/watcher_lvm_volume_group_constants.go +++ b/images/agent/pkg/controller/watcher_lvm_volume_group_constants.go @@ -22,7 +22,7 @@ const ( Failed = "Failed" - NoOperational = "Nonoperational" + NoOperational = "NonOperational" Operational = "Operational" 
delAnnotation = "storage.deckhouse.io/sds-delete-vg" diff --git a/images/agent/werf.inc.yaml b/images/agent/werf.inc.yaml index 42604c7b..ae0a153f 100644 --- a/images/agent/werf.inc.yaml +++ b/images/agent/werf.inc.yaml @@ -46,6 +46,7 @@ ansible: name: - lvm2 - curl + - kmod update_cache: yes setup: - shell: sed -i 's/udev_rules.*=.*/udev_rules=0/ ; s/udev_sync.*=.*/udev_sync=0/ ; s/obtain_device_list_from_udev.*=.*/obtain_device_list_from_udev=0/' /etc/lvm/lvm.conf