diff --git a/internal/controllers/nodefeaturediscovery_reconciler.go b/internal/controllers/nodefeaturediscovery_reconciler.go
index 090fc36c..f29cc9f7 100644
--- a/internal/controllers/nodefeaturediscovery_reconciler.go
+++ b/internal/controllers/nodefeaturediscovery_reconciler.go
@@ -41,6 +41,7 @@ import (
 	"sigs.k8s.io/node-feature-discovery-operator/internal/daemonset"
 	"sigs.k8s.io/node-feature-discovery-operator/internal/deployment"
 	"sigs.k8s.io/node-feature-discovery-operator/internal/job"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/status"
 )
 
 const finalizerLabel = "nfd-finalizer"
@@ -51,8 +52,8 @@ type nodeFeatureDiscoveryReconciler struct {
 }
 
 func NewNodeFeatureDiscoveryReconciler(client client.Client, deploymentAPI deployment.DeploymentAPI, daemonsetAPI daemonset.DaemonsetAPI,
-	configmapAPI configmap.ConfigMapAPI, jobAPI job.JobAPI, scheme *runtime.Scheme) *nodeFeatureDiscoveryReconciler {
-	helper := newNodeFeatureDiscoveryHelperAPI(client, deploymentAPI, daemonsetAPI, configmapAPI, jobAPI, scheme)
+	configmapAPI configmap.ConfigMapAPI, jobAPI job.JobAPI, statusAPI status.StatusAPI, scheme *runtime.Scheme) *nodeFeatureDiscoveryReconciler {
+	helper := newNodeFeatureDiscoveryHelperAPI(client, deploymentAPI, daemonsetAPI, configmapAPI, jobAPI, statusAPI, scheme)
 	return &nodeFeatureDiscoveryReconciler{
 		helper: helper,
 	}
@@ -179,17 +180,19 @@ type nodeFeatureDiscoveryHelper struct {
 	daemonsetAPI  daemonset.DaemonsetAPI
 	configmapAPI  configmap.ConfigMapAPI
 	jobAPI        job.JobAPI
+	statusAPI     status.StatusAPI
 	scheme        *runtime.Scheme
 }
 
 func newNodeFeatureDiscoveryHelperAPI(client client.Client, deploymentAPI deployment.DeploymentAPI, daemonsetAPI daemonset.DaemonsetAPI,
-	configmapAPI configmap.ConfigMapAPI, jobAPI job.JobAPI, scheme *runtime.Scheme) nodeFeatureDiscoveryHelperAPI {
+	configmapAPI configmap.ConfigMapAPI, jobAPI job.JobAPI, statusAPI status.StatusAPI, scheme *runtime.Scheme) nodeFeatureDiscoveryHelperAPI {
 	return &nodeFeatureDiscoveryHelper{
 		client:        client,
 		deploymentAPI: deploymentAPI,
 		daemonsetAPI:  daemonsetAPI,
 		configmapAPI:  configmapAPI,
 		jobAPI:        jobAPI,
+		statusAPI:     statusAPI,
 		scheme:        scheme,
 	}
 }
@@ -345,5 +348,11 @@ func (nfdh *nodeFeatureDiscoveryHelper) handlePrune(ctx context.Context, nfdInst
 }
 
 func (nfdh *nodeFeatureDiscoveryHelper) handleStatus(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) error {
-	return nil
+	conditions := nfdh.statusAPI.GetConditions(ctx, nfdInstance)
+	if nfdh.statusAPI.AreConditionsEqual(nfdInstance.Status.Conditions, conditions) {
+		return nil
+	}
+	unmodifiedCR := nfdInstance.DeepCopy()
+	nfdInstance.Status.Conditions = conditions
+	return nfdh.client.Status().Patch(ctx, nfdInstance, client.MergeFrom(unmodifiedCR))
 }
diff --git a/internal/controllers/nodefeaturediscovery_reconciler_test.go b/internal/controllers/nodefeaturediscovery_reconciler_test.go
index 41c0aa83..94b7edee 100644
--- a/internal/controllers/nodefeaturediscovery_reconciler_test.go
+++ b/internal/controllers/nodefeaturediscovery_reconciler_test.go
@@ -39,6 +39,7 @@ import (
 	"sigs.k8s.io/node-feature-discovery-operator/internal/daemonset"
 	"sigs.k8s.io/node-feature-discovery-operator/internal/deployment"
 	"sigs.k8s.io/node-feature-discovery-operator/internal/job"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/status"
 )
 
 var _ = Describe("Reconcile", func() {
@@ -179,7 +180,7 @@ var _ = Describe("handleMaster", func() {
 		clnt = client.NewMockClient(ctrl)
 		mockDeployment = deployment.NewMockDeploymentAPI(ctrl)
 
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, mockDeployment, nil, nil, nil, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, mockDeployment, nil, nil, nil, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -248,7 +249,7 @@ var _ = Describe("handleWorker", func() {
 		mockDS = daemonset.NewMockDaemonsetAPI(ctrl)
 		mockCM = configmap.NewMockConfigMapAPI(ctrl)
 
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, mockDS, mockCM, nil, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, mockDS, mockCM, nil, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -344,7 +345,7 @@ var _ = Describe("handleTopology", func() {
 		clnt = client.NewMockClient(ctrl)
 		mockDS = daemonset.NewMockDaemonsetAPI(ctrl)
 
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, mockDS, nil, nil, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, mockDS, nil, nil, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -429,7 +430,7 @@ var _ = Describe("handleGC", func() {
 		clnt = client.NewMockClient(ctrl)
 		mockDeployment = deployment.NewMockDeploymentAPI(ctrl)
 
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, mockDeployment, nil, nil, nil, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, mockDeployment, nil, nil, nil, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -485,7 +486,7 @@ var _ = Describe("handleGC", func() {
 
 var _ = Describe("hasFinalizer", func() {
 	It("checking return status whether finalizer set or not", func() {
-		nfdh := newNodeFeatureDiscoveryHelperAPI(nil, nil, nil, nil, nil, nil)
+		nfdh := newNodeFeatureDiscoveryHelperAPI(nil, nil, nil, nil, nil, nil, nil)
 
 		By("finalizers was empty")
 		nfdCR := nfdv1.NodeFeatureDiscovery{
@@ -529,7 +530,7 @@ var _ = Describe("setFinalizer", func() {
 	BeforeEach(func() {
 		ctrl = gomock.NewController(GinkgoT())
 		clnt = client.NewMockClient(ctrl)
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, nil, nil, nil, nil)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, nil, nil, nil, nil, nil)
 	})
 
 	It("checking the return status of setFinalizer function", func() {
@@ -588,7 +589,7 @@ var _ = Describe("finalizeComponents", func() {
 		mockDS = daemonset.NewMockDaemonsetAPI(ctrl)
 		mockCM = configmap.NewMockConfigMapAPI(ctrl)
 
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, mockDeployment, mockDS, mockCM, nil, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, mockDeployment, mockDS, mockCM, nil, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -663,7 +664,7 @@ var _ = Describe("removeFinalizer", func() {
 		ctrl = gomock.NewController(GinkgoT())
 		clnt = client.NewMockClient(ctrl)
 
-		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, nil, nil, nil, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, nil, nil, nil, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -707,7 +708,7 @@ var _ = Describe("handlePrune", func() {
 	BeforeEach(func() {
 		ctrl = gomock.NewController(GinkgoT())
 		mockJob = job.NewMockJobAPI(ctrl)
-		nfdh = newNodeFeatureDiscoveryHelperAPI(nil, nil, nil, nil, mockJob, scheme)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(nil, nil, nil, nil, mockJob, nil, scheme)
 	})
 
 	ctx := context.Background()
@@ -788,3 +789,73 @@ var _ = Describe("handlePrune", func() {
 		Entry("job finished, its pod failed", true, false),
 	)
 })
+
+var _ = Describe("handleStatus", func() {
+	var (
+		ctrl       *gomock.Controller
+		clnt       *client.MockClient
+		mockStatus *status.MockStatusAPI
+		nfdh       nodeFeatureDiscoveryHelperAPI
+	)
+
+	BeforeEach(func() {
+		ctrl = gomock.NewController(GinkgoT())
+		clnt = client.NewMockClient(ctrl)
+		mockStatus = status.NewMockStatusAPI(ctrl)
+		nfdh = newNodeFeatureDiscoveryHelperAPI(clnt, nil, nil, nil, nil, mockStatus, scheme)
+	})
+
+	ctx := context.Background()
+	nfdCR := nfdv1.NodeFeatureDiscovery{
+		Status: nfdv1.NodeFeatureDiscoveryStatus{
+			Conditions: []metav1.Condition{},
+		},
+	}
+	newConditions := []metav1.Condition{}
+
+	It("conditions are equal, no status update is needed", func() {
+		gomock.InOrder(
+			mockStatus.EXPECT().GetConditions(ctx, &nfdCR).Return(newConditions),
+			mockStatus.EXPECT().AreConditionsEqual(nfdCR.Status.Conditions, newConditions).Return(true),
+		)
+
+		err := nfdh.handleStatus(ctx, &nfdCR)
+		Expect(err).To(BeNil())
+	})
+
+	It("conditions are not equal, status update is needed", func() {
+		statusWriter := client.NewMockStatusWriter(ctrl)
+		expectedNFD := nfdv1.NodeFeatureDiscovery{
+			Status: nfdv1.NodeFeatureDiscoveryStatus{
+				Conditions: newConditions,
+			},
+		}
+		gomock.InOrder(
+			mockStatus.EXPECT().GetConditions(ctx, &nfdCR).Return(newConditions),
+			mockStatus.EXPECT().AreConditionsEqual(nfdCR.Status.Conditions, newConditions).Return(false),
+			clnt.EXPECT().Status().Return(statusWriter),
+			statusWriter.EXPECT().Patch(ctx, &expectedNFD, gomock.Any()).Return(nil),
+		)
+
+		err := nfdh.handleStatus(ctx, &nfdCR)
+		Expect(err).To(BeNil())
+	})
+
+	It("conditions are not equal, status update failed", func() {
+		statusWriter := client.NewMockStatusWriter(ctrl)
+		expectedNFD := nfdv1.NodeFeatureDiscovery{
+			Status: nfdv1.NodeFeatureDiscoveryStatus{
+				Conditions: newConditions,
+			},
+		}
+		gomock.InOrder(
+			mockStatus.EXPECT().GetConditions(ctx, &nfdCR).Return(newConditions),
+			mockStatus.EXPECT().AreConditionsEqual(nfdCR.Status.Conditions, newConditions).Return(false),
+			clnt.EXPECT().Status().Return(statusWriter),
+			statusWriter.EXPECT().Patch(ctx, &expectedNFD, gomock.Any()).Return(fmt.Errorf("some error")),
+		)
+
+		err := nfdh.handleStatus(ctx, &nfdCR)
+		Expect(err).To(HaveOccurred())
+	})
+})
diff --git a/internal/status/mock_status.go b/internal/status/mock_status.go
new file mode 100644
index 00000000..7ba43ee1
--- /dev/null
+++ b/internal/status/mock_status.go
@@ -0,0 +1,148 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: status.go
+//
+// Generated by this command:
+//
+//	mockgen -source=status.go -package=status -destination=mock_status.go statusHelperAPI
+//
+// Package status is a generated GoMock package.
+package status
+
+import (
+	context "context"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v10 "sigs.k8s.io/node-feature-discovery-operator/api/v1"
+)
+
+// MockStatusAPI is a mock of StatusAPI interface.
+type MockStatusAPI struct {
+	ctrl     *gomock.Controller
+	recorder *MockStatusAPIMockRecorder
+}
+
+// MockStatusAPIMockRecorder is the mock recorder for MockStatusAPI.
+type MockStatusAPIMockRecorder struct {
+	mock *MockStatusAPI
+}
+
+// NewMockStatusAPI creates a new mock instance.
+func NewMockStatusAPI(ctrl *gomock.Controller) *MockStatusAPI {
+	mock := &MockStatusAPI{ctrl: ctrl}
+	mock.recorder = &MockStatusAPIMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockStatusAPI) EXPECT() *MockStatusAPIMockRecorder {
+	return m.recorder
+}
+
+// AreConditionsEqual mocks base method.
+func (m *MockStatusAPI) AreConditionsEqual(prevConditions, newConditions []v1.Condition) bool {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AreConditionsEqual", prevConditions, newConditions)
+	ret0, _ := ret[0].(bool)
+	return ret0
+}
+
+// AreConditionsEqual indicates an expected call of AreConditionsEqual.
+func (mr *MockStatusAPIMockRecorder) AreConditionsEqual(prevConditions, newConditions any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AreConditionsEqual", reflect.TypeOf((*MockStatusAPI)(nil).AreConditionsEqual), prevConditions, newConditions)
+}
+
+// GetConditions mocks base method.
+func (m *MockStatusAPI) GetConditions(ctx context.Context, nfdInstance *v10.NodeFeatureDiscovery) []v1.Condition {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetConditions", ctx, nfdInstance)
+	ret0, _ := ret[0].([]v1.Condition)
+	return ret0
+}
+
+// GetConditions indicates an expected call of GetConditions.
+func (mr *MockStatusAPIMockRecorder) GetConditions(ctx, nfdInstance any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConditions", reflect.TypeOf((*MockStatusAPI)(nil).GetConditions), ctx, nfdInstance)
+}
+
+// MockstatusHelperAPI is a mock of statusHelperAPI interface.
+type MockstatusHelperAPI struct {
+	ctrl     *gomock.Controller
+	recorder *MockstatusHelperAPIMockRecorder
+}
+
+// MockstatusHelperAPIMockRecorder is the mock recorder for MockstatusHelperAPI.
+type MockstatusHelperAPIMockRecorder struct {
+	mock *MockstatusHelperAPI
+}
+
+// NewMockstatusHelperAPI creates a new mock instance.
+func NewMockstatusHelperAPI(ctrl *gomock.Controller) *MockstatusHelperAPI {
+	mock := &MockstatusHelperAPI{ctrl: ctrl}
+	mock.recorder = &MockstatusHelperAPIMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockstatusHelperAPI) EXPECT() *MockstatusHelperAPIMockRecorder {
+	return m.recorder
+}
+
+// getGCNotAvailableConditions mocks base method.
+func (m *MockstatusHelperAPI) getGCNotAvailableConditions(ctx context.Context, nfdInstance *v10.NodeFeatureDiscovery) []v1.Condition {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "getGCNotAvailableConditions", ctx, nfdInstance)
+	ret0, _ := ret[0].([]v1.Condition)
+	return ret0
+}
+
+// getGCNotAvailableConditions indicates an expected call of getGCNotAvailableConditions.
+func (mr *MockstatusHelperAPIMockRecorder) getGCNotAvailableConditions(ctx, nfdInstance any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getGCNotAvailableConditions", reflect.TypeOf((*MockstatusHelperAPI)(nil).getGCNotAvailableConditions), ctx, nfdInstance)
+}
+
+// getMasterNotAvailableConditions mocks base method.
+func (m *MockstatusHelperAPI) getMasterNotAvailableConditions(ctx context.Context, nfdInstance *v10.NodeFeatureDiscovery) []v1.Condition {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "getMasterNotAvailableConditions", ctx, nfdInstance)
+	ret0, _ := ret[0].([]v1.Condition)
+	return ret0
+}
+
+// getMasterNotAvailableConditions indicates an expected call of getMasterNotAvailableConditions.
+func (mr *MockstatusHelperAPIMockRecorder) getMasterNotAvailableConditions(ctx, nfdInstance any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getMasterNotAvailableConditions", reflect.TypeOf((*MockstatusHelperAPI)(nil).getMasterNotAvailableConditions), ctx, nfdInstance)
+}
+
+// getTopologyNotAvailableConditions mocks base method.
+func (m *MockstatusHelperAPI) getTopologyNotAvailableConditions(ctx context.Context, nfdInstance *v10.NodeFeatureDiscovery) []v1.Condition {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "getTopologyNotAvailableConditions", ctx, nfdInstance)
+	ret0, _ := ret[0].([]v1.Condition)
+	return ret0
+}
+
+// getTopologyNotAvailableConditions indicates an expected call of getTopologyNotAvailableConditions.
+func (mr *MockstatusHelperAPIMockRecorder) getTopologyNotAvailableConditions(ctx, nfdInstance any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTopologyNotAvailableConditions", reflect.TypeOf((*MockstatusHelperAPI)(nil).getTopologyNotAvailableConditions), ctx, nfdInstance)
+}
+
+// getWorkerNotAvailableConditions mocks base method.
+func (m *MockstatusHelperAPI) getWorkerNotAvailableConditions(ctx context.Context, nfdInstance *v10.NodeFeatureDiscovery) []v1.Condition {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "getWorkerNotAvailableConditions", ctx, nfdInstance)
+	ret0, _ := ret[0].([]v1.Condition)
+	return ret0
+}
+
+// getWorkerNotAvailableConditions indicates an expected call of getWorkerNotAvailableConditions.
+func (mr *MockstatusHelperAPIMockRecorder) getWorkerNotAvailableConditions(ctx, nfdInstance any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getWorkerNotAvailableConditions", reflect.TypeOf((*MockstatusHelperAPI)(nil).getWorkerNotAvailableConditions), ctx, nfdInstance)
+}
diff --git a/internal/status/status.go b/internal/status/status.go
new file mode 100644
index 00000000..6f4b4768
--- /dev/null
+++ b/internal/status/status.go
@@ -0,0 +1,356 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+	"context"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	meta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	nfdv1 "sigs.k8s.io/node-feature-discovery-operator/api/v1"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/daemonset"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/deployment"
+)
+
+const (
+	conditionStatusProgressing = "progressing"
+	conditionStatusDegraded    = "degraded"
+	conditionStatusAvailable   = "available"
+
+	conditionFailedGettingNFDWorkerDaemonSet = "FailedGettingNFDWorkerDaemonSet"
+	conditionNFDWorkerDaemonSetDegraded      = "NFDWorkerDaemonSetDegraded"
+	conditionNFDWorkerDaemonSetProgressing   = "NFDWorkerDaemonSetProgressing"
+
+	conditionFailedGettingNFDTopologyDaemonSet = "FailedGettingNFDTopologyDaemonSet"
+	conditionNFDTopologyDaemonSetDegraded      = "NFDTopologyDaemonSetDegraded"
+	conditionNFDTopologyDaemonSetProgressing   = "NFDTopologyDaemonSetProgressing"
+
+	conditionFailedGettingNFDMasterDeployment = "FailedGettingNFDMasterDeployment"
+	conditionNFDMasterDeploymentDegraded      = "NFDMasterDeploymentDegraded"
+	conditionNFDMasterDeploymentProgressing   = "NFDMasterDeploymentProgressing"
+
+	conditionFailedGettingNFDGCDeployment = "FailedGettingNFDGCDeployment"
+	conditionNFDGCDeploymentDegraded      = "NFDGCDeploymentDegraded"
+	conditionNFDGCDeploymentProgressing   = "NFDGCDeploymentProgressing"
+
+	conditionIsFalseReason = "ConditionNotBeingMetCurrently"
+
+	// conditionAvailable indicates that the resources maintained by the operator
+	// are functional and available in the cluster.
+	conditionAvailable string = "Available"
+
+	// conditionProgressing indicates that the operator is actively making changes
+	// to the resources maintained by the operator.
+	conditionProgressing string = "Progressing"
+
+	// conditionDegraded indicates that the resources maintained by the operator are not functioning completely.
+	// An example of a degraded state would be if not all pods in a deployment were running.
+	// The resources may still be available, but they are degraded.
+	conditionDegraded string = "Degraded"
+
+	// conditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade.
+	// When `False`, the resources maintained by the operator should not be upgraded and the
+	// message field should contain a human-readable description of what the administrator should do to
+	// allow the operator to successfully update the resources maintained by the operator.
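+	// In this package, Upgradeable is reported as True only while every managed component is fully available (see getAvailableConditions).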
+	conditionUpgradeable string = "Upgradeable"
+)
+
+//go:generate mockgen -source=status.go -package=status -destination=mock_status.go StatusAPI
+
+type StatusAPI interface {
+	GetConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition
+	AreConditionsEqual(prevConditions, newConditions []metav1.Condition) bool
+}
+
+type status struct {
+	helper statusHelperAPI
+}
+
+func NewStatusAPI(deploymentAPI deployment.DeploymentAPI, daemonsetAPI daemonset.DaemonsetAPI) StatusAPI {
+	helper := newStatusHelperAPI(deploymentAPI, daemonsetAPI)
+	return &status{
+		helper: helper,
+	}
+}
+
+func (s *status) GetConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition {
+	// get worker daemonset conditions
+	nonAvailableConditions := s.helper.getWorkerNotAvailableConditions(ctx, nfdInstance)
+	if nonAvailableConditions != nil {
+		return nonAvailableConditions
+	}
+	// get master deployment conditions
+	nonAvailableConditions = s.helper.getMasterNotAvailableConditions(ctx, nfdInstance)
+	if nonAvailableConditions != nil {
+		return nonAvailableConditions
+	}
+	// get GC deployment conditions
+	nonAvailableConditions = s.helper.getGCNotAvailableConditions(ctx, nfdInstance)
+	if nonAvailableConditions != nil {
+		return nonAvailableConditions
+	}
+	// get topology, if needed
+	if nfdInstance.Spec.TopologyUpdater {
+		nonAvailableConditions := s.helper.getTopologyNotAvailableConditions(ctx, nfdInstance)
+		if nonAvailableConditions != nil {
+			return nonAvailableConditions
+		}
+	}
+
+	return getAvailableConditions()
+}
+
+// AreConditionsEqual compares conditions by Type, Status, Reason and Message only;
+// LastTransitionTime is ignored, since conditions are rebuilt with a fresh timestamp on every call.
+func (s *status) AreConditionsEqual(prevConditions, newConditions []metav1.Condition) bool {
+	for _, newCondition := range newConditions {
+		oldCondition := meta.FindStatusCondition(prevConditions, newCondition.Type)
+		if oldCondition == nil {
+			return false
+		}
+		// Ignore timestamps
+		if oldCondition.Status != newCondition.Status ||
+			oldCondition.Reason != newCondition.Reason ||
+			oldCondition.Message != newCondition.Message {
+			return false
+		}
+	}
+	return true
+}
+
+type statusHelperAPI interface {
+	getWorkerNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition
+	getTopologyNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition
+	getMasterNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition
+	getGCNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition
+}
+
+type statusHelper struct {
+	deploymentAPI deployment.DeploymentAPI
+	daemonsetAPI  daemonset.DaemonsetAPI
+}
+
+func newStatusHelperAPI(deploymentAPI deployment.DeploymentAPI, daemonsetAPI daemonset.DaemonsetAPI) statusHelperAPI {
+	return &statusHelper{
+		deploymentAPI: deploymentAPI,
+		daemonsetAPI:  daemonsetAPI,
+	}
+}
+
+func (sh *statusHelper) getWorkerNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition {
+	return sh.getDaemonSetNotAvailableConditions(ctx,
+		nfdInstance.Namespace,
+		"nfd-worker",
+		conditionFailedGettingNFDWorkerDaemonSet,
+		conditionNFDWorkerDaemonSetDegraded,
+		conditionNFDWorkerDaemonSetProgressing)
+}
+
+func (sh *statusHelper) getTopologyNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition {
+	return sh.getDaemonSetNotAvailableConditions(ctx,
+		nfdInstance.Namespace,
+		"nfd-topology-updater",
+		conditionFailedGettingNFDTopologyDaemonSet,
+		conditionNFDTopologyDaemonSetDegraded,
+		conditionNFDTopologyDaemonSetProgressing)
+}
+
+func (sh *statusHelper) getDaemonSetNotAvailableConditions(ctx context.Context,
+	dsNamespace,
+	dsName,
+	failedToGetDSReason,
+	dsDegradedReason,
+	dsProgressingReason string) []metav1.Condition {
+
+	ds, err := sh.daemonsetAPI.GetDaemonSet(ctx, dsNamespace, dsName)
+	if err != nil {
+		return getDegradedConditions(failedToGetDSReason, err.Error())
+	}
+	conditionsStatus, message := getDaemonSetConditions(ds)
+	if conditionsStatus == conditionStatusDegraded {
+		return getDegradedConditions(dsDegradedReason, message)
+	} else if conditionsStatus == conditionStatusProgressing {
+		return getProgressingConditions(dsProgressingReason, message)
+	}
+	return nil
+}
+
+func (sh *statusHelper) getMasterNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition {
+	return sh.getDeploymentNotAvailableConditions(ctx,
+		nfdInstance.Namespace,
+		"nfd-master",
+		conditionFailedGettingNFDMasterDeployment,
+		conditionNFDMasterDeploymentDegraded,
+		conditionNFDMasterDeploymentProgressing)
+}
+
+func (sh *statusHelper) getGCNotAvailableConditions(ctx context.Context, nfdInstance *nfdv1.NodeFeatureDiscovery) []metav1.Condition {
+	return sh.getDeploymentNotAvailableConditions(ctx,
+		nfdInstance.Namespace,
+		"nfd-gc",
+		conditionFailedGettingNFDGCDeployment,
+		conditionNFDGCDeploymentDegraded,
+		conditionNFDGCDeploymentProgressing)
+}
+
+func (sh *statusHelper) getDeploymentNotAvailableConditions(ctx context.Context,
+	deploymentNamespace,
+	deploymentName,
+	failedToGetDeploymentReason,
+	deploymentDegradedReason,
+	deploymentProgressingReason string) []metav1.Condition {
+
+	dep, err := sh.deploymentAPI.GetDeployment(ctx, deploymentNamespace, deploymentName)
+	if err != nil {
+		return getDegradedConditions(failedToGetDeploymentReason, err.Error())
+	}
+	conditionsStatus, message := getDeploymentConditions(dep)
+	if conditionsStatus == conditionStatusDegraded {
+		return getDegradedConditions(deploymentDegradedReason, message)
+	} else if conditionsStatus == conditionStatusProgressing {
+		return getProgressingConditions(deploymentProgressingReason, message)
+	}
+	return nil
+}
+
+// getDaemonSetConditions maps a DaemonSet's status to an aggregated condition status and a human-readable message.
+func getDaemonSetConditions(ds *appsv1.DaemonSet) (string, string) {
+	if ds.Status.DesiredNumberScheduled == 0 {
+		return conditionStatusDegraded, "number of desired nodes for scheduling is 0"
+	}
+	if ds.Status.CurrentNumberScheduled == 0 {
+		return conditionStatusDegraded, "0 nodes have pods scheduled"
+	}
+	if ds.Status.NumberReady == ds.Status.DesiredNumberScheduled {
+		return conditionStatusAvailable, ""
+	}
+	return conditionStatusProgressing, "ds is progressing"
+}
+
+// getDeploymentConditions maps a Deployment's status to an aggregated condition status and a human-readable message.
+func getDeploymentConditions(dep *appsv1.Deployment) (string, string) {
+	if dep.Status.AvailableReplicas == 0 {
+		return conditionStatusDegraded, "number of available pods is 0"
+	}
+	return conditionStatusAvailable, ""
+}
+
+// getAvailableConditions returns the full condition list with Available and
+// Upgradeable set to True and the remaining conditions set to False, so that
+// the reconciler can determine that the resource is fully available.
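+// LastTransitionTime is stamped with time.Now here and in the builders below; AreConditionsEqual deliberately ignores it when comparing.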
+func getAvailableConditions() []metav1.Condition {
+	now := time.Now()
+	return []metav1.Condition{
+		{
+			Type:               conditionAvailable,
+			Status:             metav1.ConditionTrue,
+			Reason:             "AllInstanceComponentsAreDeployedSuccessfully",
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionUpgradeable,
+			Status:             metav1.ConditionTrue,
+			Reason:             "CanBeUpgraded",
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionProgressing,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionDegraded,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+	}
+}
+
+// getDegradedConditions returns a list of metav1.Condition objects and marks
+// every condition as False except for conditionDegraded so that the
+// reconciler can determine that the resource is degraded.
+func getDegradedConditions(reason string, message string) []metav1.Condition {
+	now := time.Now()
+	return []metav1.Condition{
+		{
+			Type:               conditionAvailable,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionUpgradeable,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionProgressing,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionDegraded,
+			Status:             metav1.ConditionTrue,
+			LastTransitionTime: metav1.Time{Time: now},
+			Reason:             reason,
+			Message:            message,
+		},
+	}
+}
+
+// getProgressingConditions returns a list of metav1.Condition objects and marks
+// every condition as False except for conditionProgressing so that the
+// reconciler can determine that the resource is progressing.
+func getProgressingConditions(reason string, message string) []metav1.Condition {
+	now := time.Now()
+	return []metav1.Condition{
+		{
+			Type:               conditionAvailable,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionUpgradeable,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+		{
+			Type:               conditionProgressing,
+			Status:             metav1.ConditionTrue,
+			LastTransitionTime: metav1.Time{Time: now},
+			Reason:             reason,
+			Message:            message,
+		},
+		{
+			Type:               conditionDegraded,
+			Status:             metav1.ConditionFalse,
+			Reason:             conditionIsFalseReason,
+			LastTransitionTime: metav1.Time{Time: now},
+		},
+	}
+}
diff --git a/internal/status/status_test.go b/internal/status/status_test.go
new file mode 100644
index 00000000..f5a54aed
--- /dev/null
+++ b/internal/status/status_test.go
@@ -0,0 +1,366 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"go.uber.org/mock/gomock"
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	nfdv1 "sigs.k8s.io/node-feature-discovery-operator/api/v1"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/daemonset"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/deployment"
+)
+
+var _ = Describe("GetConditions", func() {
+	var (
+		ctrl       *gomock.Controller
+		mockHelper *MockstatusHelperAPI
+		st         *status
+	)
+
+	BeforeEach(func() {
+		ctrl = gomock.NewController(GinkgoT())
+		mockHelper = NewMockstatusHelperAPI(ctrl)
+		st = &status{
+			helper: mockHelper,
+		}
+	})
+
+	ctx := context.Background()
+	nfdCR := nfdv1.NodeFeatureDiscovery{
+		Spec: nfdv1.NodeFeatureDiscoverySpec{
+			TopologyUpdater: true,
+		},
+	}
+	progConds := getProgressingConditions("progressing reason", "progressing message")
+	degConds := getDegradedConditions("degraded reason", "degraded message")
+	availConds := getAvailableConditions()
+
+	DescribeTable("checking all the flows", func(workerAvailable, masterAvailable, gcAvailable, topologyAvailable bool) {
+		expectConds := availConds
+		if !workerAvailable {
+			mockHelper.EXPECT().getWorkerNotAvailableConditions(ctx, &nfdCR).Return(degConds)
+			expectConds = degConds
+			goto executeTestFunction
+		}
+		mockHelper.EXPECT().getWorkerNotAvailableConditions(ctx, &nfdCR).Return(nil)
+		if !masterAvailable {
+			mockHelper.EXPECT().getMasterNotAvailableConditions(ctx, &nfdCR).Return(progConds)
+			expectConds = progConds
+			goto executeTestFunction
+		}
+		mockHelper.EXPECT().getMasterNotAvailableConditions(ctx, &nfdCR).Return(nil)
+		if !gcAvailable {
+			mockHelper.EXPECT().getGCNotAvailableConditions(ctx, &nfdCR).Return(degConds)
+			expectConds = degConds
+			goto executeTestFunction
+		}
+		mockHelper.EXPECT().getGCNotAvailableConditions(ctx, &nfdCR).Return(nil)
+		if !topologyAvailable {
+			mockHelper.EXPECT().getTopologyNotAvailableConditions(ctx, &nfdCR).Return(progConds)
+			expectConds = progConds
+		} else {
+			mockHelper.EXPECT().getTopologyNotAvailableConditions(ctx, &nfdCR).Return(nil)
+		}
+
+	executeTestFunction:
+		conds := st.GetConditions(ctx, &nfdCR)
+		compareConditions(conds, expectConds)
+	},
+		Entry("worker is not available yet", false, false, false, false),
+		Entry("worker available, master is not yet", true, false, false, false),
+		Entry("worker and master available, gc is not yet", true, true, false, false),
+		Entry("worker, master and gc available, topology is not yet", true, true, true, false),
+		Entry("all components are available", true, true, true, true),
+	)
+})
+
+var _ = Describe("AreConditionsEqual", func() {
+	It("testing various use-cases", func() {
+		st := &status{}
+
+		By("progressing conditions, reason not equal")
+		firstCond := getProgressingConditions("reason1", "message1")
+		secondCond := getProgressingConditions("reason2", "message1")
+		res := st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeFalse())
+
+		By("progressing conditions, message not equal")
+		firstCond = getProgressingConditions("reason1", "message1")
+		secondCond = getProgressingConditions("reason1", "message2")
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeFalse())
+
+		By("progressing conditions equal")
+		firstCond = getProgressingConditions("reason1", "message1")
+		secondCond = getProgressingConditions("reason1", "message1")
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeTrue())
+
+		By("degraded conditions, reason not equal")
+		firstCond = getDegradedConditions("reason1", "message1")
+		secondCond = getDegradedConditions("reason2", "message1")
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeFalse())
+
+		By("degraded conditions, message not equal")
+		firstCond = getDegradedConditions("reason1", "message1")
+		secondCond = getDegradedConditions("reason1", "message2")
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeFalse())
+
+		By("degraded conditions equal")
+		firstCond = getDegradedConditions("reason1", "message1")
+		secondCond = getDegradedConditions("reason1", "message1")
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeTrue())
+
+		By("available conditions equal")
+		firstCond = getAvailableConditions()
+		secondCond = getAvailableConditions()
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeTrue())
+
+		By("degraded and progressing conditions are not equal")
+		firstCond = getDegradedConditions("reason1", "message1")
+		secondCond = getProgressingConditions("reason1", "message1")
+		res = st.AreConditionsEqual(firstCond, secondCond)
+		Expect(res).To(BeFalse())
+	})
+})
+
+var _ = Describe("getWorkerOrTopologyNotAvailableConditions", func() {
+	var (
+		ctrl   *gomock.Controller
+		mockDS *daemonset.MockDaemonsetAPI
+		h      statusHelperAPI
+	)
+
+	BeforeEach(func() {
+		ctrl = gomock.NewController(GinkgoT())
+		mockDS = daemonset.NewMockDaemonsetAPI(ctrl)
+		h = newStatusHelperAPI(nil, mockDS)
+	})
+
+	nfdCR := nfdv1.NodeFeatureDiscovery{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "test-namespace",
+		},
+	}
+
+	ctx := context.Background()
+
+	It("worker or topology ds not available", func() {
+		err := fmt.Errorf("some error")
+		By("checking worker")
+		expectedConds := getDegradedConditions(conditionFailedGettingNFDWorkerDaemonSet, err.Error())
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-worker").Return(nil, err)
+
+		resCond := h.getWorkerNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+
+		By("checking topology")
+		expectedConds = getDegradedConditions(conditionFailedGettingNFDTopologyDaemonSet, err.Error())
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-topology-updater").Return(nil, err)
+
+		resCond = h.getTopologyNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+	})
+
+	It("worker or topology ds number of scheduled is 0", func() {
+		ds := &appsv1.DaemonSet{
+			Status: appsv1.DaemonSetStatus{
+				DesiredNumberScheduled: 0,
+			},
+		}
+		By("checking worker")
+		expectedConds := getDegradedConditions(conditionNFDWorkerDaemonSetDegraded, "number of desired nodes for scheduling is 0")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-worker").Return(ds, nil)
+
+		resCond := h.getWorkerNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+
+		By("checking topology")
+		expectedConds = getDegradedConditions(conditionNFDTopologyDaemonSetDegraded, "number of desired nodes for scheduling is 0")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-topology-updater").Return(ds, nil)
+
+		resCond = h.getTopologyNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+	})
+
+	It("worker or topology ds current number of scheduled pods is 0", func() {
+		ds := &appsv1.DaemonSet{
+			Status: appsv1.DaemonSetStatus{
+				DesiredNumberScheduled: 2,
+				CurrentNumberScheduled: 0,
+			},
+		}
+		By("checking worker")
+		expectedConds := getDegradedConditions(conditionNFDWorkerDaemonSetDegraded, "0 nodes have pods scheduled")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-worker").Return(ds, nil)
+
+		resCond := h.getWorkerNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+
+		By("checking topology")
+		expectedConds = getDegradedConditions(conditionNFDTopologyDaemonSetDegraded, "0 nodes have pods scheduled")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-topology-updater").Return(ds, nil)
+
+		resCond = h.getTopologyNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+	})
+
+	It("worker or topology ds number of pods has not yet reached desired number", func() {
+		ds := &appsv1.DaemonSet{
+			Status: appsv1.DaemonSetStatus{
+				DesiredNumberScheduled: 2,
+				CurrentNumberScheduled: 2,
+				NumberReady:            1,
+			},
+		}
+		By("worker")
+		expectedConds := getProgressingConditions(conditionNFDWorkerDaemonSetProgressing, "ds is progressing")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-worker").Return(ds, nil)
+
+		resCond := h.getWorkerNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+
+		By("topology")
+		expectedConds = getProgressingConditions(conditionNFDTopologyDaemonSetProgressing, "ds is progressing")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-topology-updater").Return(ds, nil)
+
+		resCond = h.getTopologyNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+	})
+
+	It("worker or topology ds all pods are available", func() {
+		ds := &appsv1.DaemonSet{
+			Status: appsv1.DaemonSetStatus{
+				DesiredNumberScheduled: 2,
+				CurrentNumberScheduled: 2,
+				NumberReady:            2,
+			},
+		}
+		By("worker")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-worker").Return(ds, nil)
+
+		resCond := h.getWorkerNotAvailableConditions(ctx, &nfdCR)
+		Expect(resCond).To(BeNil())
+
+		By("topology")
+		mockDS.EXPECT().GetDaemonSet(ctx, nfdCR.Namespace, "nfd-topology-updater").Return(ds, nil)
+
+		resCond = h.getTopologyNotAvailableConditions(ctx, &nfdCR)
+		Expect(resCond).To(BeNil())
+	})
+})
+
+var _ = Describe("getMasterOrGCNotAvailableCondition", func() {
+	var (
+		ctrl           *gomock.Controller
+		mockDeployment *deployment.MockDeploymentAPI
+		h              statusHelperAPI
+	)
+
+	BeforeEach(func() {
+		ctrl = gomock.NewController(GinkgoT())
+		mockDeployment = deployment.NewMockDeploymentAPI(ctrl)
+		h = newStatusHelperAPI(mockDeployment, nil)
+	})
+
+	nfdCR := nfdv1.NodeFeatureDiscovery{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "test-namespace",
+		},
+	}
+	ctx := context.Background()
+
+	It("master or GC deployment not available", func() {
+		err := fmt.Errorf("some error")
+
+		By("master")
+		expectedConds := getDegradedConditions(conditionFailedGettingNFDMasterDeployment, err.Error())
+		mockDeployment.EXPECT().GetDeployment(ctx, nfdCR.Namespace, "nfd-master").Return(nil, err)
+
+		resCond := h.getMasterNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+
+		By("GC")
+		expectedConds = getDegradedConditions(conditionFailedGettingNFDGCDeployment, err.Error())
+		mockDeployment.EXPECT().GetDeployment(ctx, nfdCR.Namespace, "nfd-gc").Return(nil, err)
+
+		resCond = h.getGCNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+	})
+
+	It("master or GC deployment available replicas 0", func() {
+		dep := &appsv1.Deployment{
+			Status: appsv1.DeploymentStatus{
+				AvailableReplicas: 0,
+			},
+		}
+		By("master")
+		expectedConds := getDegradedConditions(conditionNFDMasterDeploymentDegraded, "number of available pods is 0")
+		mockDeployment.EXPECT().GetDeployment(ctx, nfdCR.Namespace, "nfd-master").Return(dep, nil)
+
+		resCond := h.getMasterNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+
+		By("GC")
+		expectedConds = getDegradedConditions(conditionNFDGCDeploymentDegraded, "number of available pods is 0")
+		mockDeployment.EXPECT().GetDeployment(ctx, nfdCR.Namespace, "nfd-gc").Return(dep, nil)
+
+		resCond = h.getGCNotAvailableConditions(ctx, &nfdCR)
+		compareConditions(resCond, expectedConds)
+	})
+
+	It("master or GC deployment all pods are available", func() {
+		dep := &appsv1.Deployment{
+			Status: appsv1.DeploymentStatus{
+				AvailableReplicas: 1,
+			},
+		}
+		By("master")
+		mockDeployment.EXPECT().GetDeployment(ctx, nfdCR.Namespace, "nfd-master").Return(dep, nil)
+
+		resCond := h.getMasterNotAvailableConditions(ctx, &nfdCR)
+		Expect(resCond).To(BeNil())
+
+		By("GC")
+		mockDeployment.EXPECT().GetDeployment(ctx, nfdCR.Namespace, "nfd-gc").Return(dep, nil)
+
+		resCond = h.getGCNotAvailableConditions(ctx, &nfdCR)
+		Expect(resCond).To(BeNil())
+	})
+})
+
+func compareConditions(first, second []metav1.Condition) {
+	Expect(len(first)).To(Equal(len(second)))
+	testTimestamp := metav1.Time{Time: time.Now()}
+	for i := 0; i < len(first); i++ {
+		first[i].LastTransitionTime = testTimestamp
+		second[i].LastTransitionTime = testTimestamp
+	}
+	Expect(first).To(Equal(second))
+}
diff --git a/internal/status/suite_test.go b/internal/status/suite_test.go
new file mode 100644
index 00000000..7da707a0
--- /dev/null
+++ b/internal/status/suite_test.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/test"
+	//+kubebuilder:scaffold:imports
+)
+
+var scheme *runtime.Scheme
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	var err error
+
+	scheme, err = test.TestScheme()
+	Expect(err).NotTo(HaveOccurred())
+
+	RunSpecs(t, "Status Suite")
+}
diff --git a/main.go b/main.go
index cbc7da1e..c8589f3a 100644
--- a/main.go
+++ b/main.go
@@ -39,6 +39,7 @@ import (
 	"sigs.k8s.io/node-feature-discovery-operator/internal/daemonset"
 	"sigs.k8s.io/node-feature-discovery-operator/internal/deployment"
 	"sigs.k8s.io/node-feature-discovery-operator/internal/job"
+	"sigs.k8s.io/node-feature-discovery-operator/internal/status"
 	"sigs.k8s.io/node-feature-discovery-operator/pkg/utils"
 	"sigs.k8s.io/node-feature-discovery-operator/pkg/version"
 	// +kubebuilder:scaffold:imports
@@ -132,12 +133,14 @@ func main() {
 	daemonsetAPI := daemonset.NewDaemonsetAPI(client, scheme)
 	configmapAPI := configmap.NewConfigMapAPI(client, scheme)
 	jobAPI := job.NewJobAPI(client, scheme)
+	statusAPI := status.NewStatusAPI(deploymentAPI, daemonsetAPI)
 
 	if err = new_controllers.NewNodeFeatureDiscoveryReconciler(client,
 		deploymentAPI,
 		daemonsetAPI,
 		configmapAPI,
 		jobAPI,
+		statusAPI,
 		scheme).SetupWithManager(mgr); err != nil {
 		setupLogger.Error(err, "unable to create controller", "controller", "NodeFeatureDiscovery")
 		os.Exit(1)