diff --git a/hooks/convert_block_device_names_to_selector.py b/hooks/convert_block_device_names_to_selector.py index 90a5c89f..1eea902c 100644 --- a/hooks/convert_block_device_names_to_selector.py +++ b/hooks/convert_block_device_names_to_selector.py @@ -4,17 +4,34 @@ import kubernetes +config = """ +configVersion: v1 +afterHelm: 10 +""" + +group = "storage.deckhouse.io" +plural = "lvmvolumegroups" +version = "v1alpha1" + def main(ctx: hook.Context): kubernetes.config.load_incluster_config() - lvg_list: Any = kubernetes.client.CustomObjectsApi().list_cluster_custom_object(group="storage.deckhouse.io", - plural="lvmvolumegroups", - version="v1alpha1") + lvg_list: Any = kubernetes.client.CustomObjectsApi().list_cluster_custom_object(group=group, + plural=plural, + version=version) for lvg in lvg_list["items"]: - bdNames: List[str] = lvg["spec"]["blockDeviceNames"] - lvg["spec"].pop("blockDeviceNames") + bd_names: List[str] = lvg["spec"]["blockDeviceNames"] + del lvg["spec"]["blockDeviceNames"] lvg["spec"]["local"]["nodeName"] = lvg["status"]["nodes"][0]["name"] - lvg["spec"]["blockDeviceSelector"]["matchLabels"] = {""} + lvg["spec"]["blockDeviceSelector"] = { + "matchLabels": {"kubernetes.io/hostname": lvg["spec"]["local"]["nodeName"]}, + "matchExpressions": [{"key": "kubernetes.io/metadata.name", + "operator": "In", "values": bd_names}]} + + kubernetes.client.CustomObjectsApi().patch_cluster_custom_object(group=group, plural=plural, version=version, name=lvg["metadata"]["name"], body=lvg) + +if __name__ == "__main__": + hook.run(main, config=config) diff --git a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go index 7593824a..e68944b9 100644 --- a/images/agent/src/pkg/controller/lvm_volume_group_watcher.go +++ b/images/agent/src/pkg/controller/lvm_volume_group_watcher.go @@ -173,6 +173,19 
@@ func RunLVMVolumeGroupWatcherController( // return reconcile.Result{}, nil //} + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, cfg.NodeName)) + added, err = addLVGLabelIfNeeded(ctx, cl, log, lvg, LVGMetadateNameLabelKey, lvg.Name) + if err != nil { + log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) + return reconcile.Result{}, err + } + + if added { + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully added label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) + } else { + log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add label %s to the LVMVolumeGroup %s", LVGMetadateNameLabelKey, lvg.Name)) + } + // We do this after BlockDevices validation and node belonging check to prevent multiple updates by all agents pods bds, _ := sdsCache.GetDevices() if len(bds) == 0 {