diff --git a/apis/config/v1alpha1/config_types.go b/apis/config/v1alpha1/config_types.go index 372e496c511..672358e1457 100644 --- a/apis/config/v1alpha1/config_types.go +++ b/apis/config/v1alpha1/config_types.go @@ -16,6 +16,7 @@ limitations under the License. package v1alpha1 import ( + status "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/open-policy-agent/gatekeeper/v3/pkg/wildcard" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -82,6 +83,7 @@ type ReadinessSpec struct { // ConfigStatus defines the observed state of Config. type ConfigStatus struct { // Important: Run "make" to regenerate code after modifying this file + ByPod []status.ConfigPodStatusStatus `json:"byPod,omitempty"` } type GVK struct { @@ -92,6 +94,8 @@ type GVK struct { // +kubebuilder:resource:scope=Namespaced // +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion // Config is the Schema for the configs API. type Config struct { diff --git a/apis/config/v1alpha1/zz_generated.deepcopy.go b/apis/config/v1alpha1/zz_generated.deepcopy.go index 75babe05f76..c60bc2cda4f 100644 --- a/apis/config/v1alpha1/zz_generated.deepcopy.go +++ b/apis/config/v1alpha1/zz_generated.deepcopy.go @@ -20,6 +20,7 @@ limitations under the License. package v1alpha1 import ( + "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/open-policy-agent/gatekeeper/v3/pkg/wildcard" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,7 +31,7 @@ func (in *Config) DeepCopyInto(out *Config) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. @@ -111,6 +112,13 @@ func (in *ConfigSpec) DeepCopy() *ConfigSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { *out = *in + if in.ByPod != nil { + in, out := &in.ByPod, &out.ByPod + *out = make([]v1beta1.ConfigPodStatusStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. diff --git a/apis/status/v1beta1/configpodstatus_types.go b/apis/status/v1beta1/configpodstatus_types.go new file mode 100644 index 00000000000..014fa69739e --- /dev/null +++ b/apis/status/v1beta1/configpodstatus_types.go @@ -0,0 +1,94 @@ +package v1beta1 + +import ( + "github.com/open-policy-agent/gatekeeper/v3/pkg/operations" + "github.com/open-policy-agent/gatekeeper/v3/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// ConfigPodStatusStatus defines the observed state of ConfigPodStatus. 
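+// Each Gatekeeper pod reports one ConfigPodStatus per Config resource,
+// recording which operations the pod is assigned, the Config generation it
+// last observed, and any errors encountered while syncing that Config.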
+
+// +kubebuilder:object:generate=true
+
+type ConfigPodStatusStatus struct {
+	ID                 string         `json:"id,omitempty"`
+	ConfigUID          types.UID      `json:"configUID,omitempty"`
+	Operations         []string       `json:"operations,omitempty"`
+	ObservedGeneration int64          `json:"observedGeneration,omitempty"`
+	Errors             []*ConfigError `json:"errors,omitempty"`
+}
+
+// +kubebuilder:object:generate=true
+
+type ConfigError struct {
+	Type    string `json:"type,omitempty"`
+	Message string `json:"message"`
+}
+
+// ConfigPodStatus is the Schema for the configpodstatuses API.
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Namespaced
+
+type ConfigPodStatus struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Status ConfigPodStatusStatus `json:"status,omitempty"`
+}
+
+// ConfigPodStatusList contains a list of ConfigPodStatus.
+
+// +kubebuilder:object:root=true
+type ConfigPodStatusList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ConfigPodStatus `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ConfigPodStatus{}, &ConfigPodStatusList{})
+}
+
+// NewConfigStatusForPod returns a config status object
+// that has been initialized with the bare minimum of fields to make it functional
+// with the config status controller.
+func NewConfigStatusForPod(pod *corev1.Pod, configNamespace string, configName string, scheme *runtime.Scheme) (*ConfigPodStatus, error) {
+	obj := &ConfigPodStatus{}
+	name, err := KeyForConfig(pod.Name, configNamespace, configName)
+	if err != nil {
+		return nil, err
+	}
+	obj.SetName(name)
+	obj.SetNamespace(util.GetNamespace())
+	obj.Status.ID = pod.Name
+	obj.Status.Operations = operations.AssignedStringList()
+	obj.SetLabels(map[string]string{
+		ConfigNameLabel: configName,
+		PodLabel:        pod.Name,
+	})
+
+	if err := controllerutil.SetOwnerReference(pod, obj, scheme); err != nil {
+		return nil, err
+	}
+
+	return obj, nil
+}
+
+// KeyForConfig returns a unique status object name given the Pod ID and
+// a config object.
+// The object name must satisfy RFC 1123 Label Names spec
+// (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/)
+// and Kubernetes validation rules for object names.
+//
+// It's possible that dash packing/unpacking would result in a name
+// that exceeds the maximum length allowed, but for Config resources,
+// the configName should always be "config", and namespace would be "gatekeeper-system",
+// so this validation will hold.
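+//
+// For example, pod "some-gk-pod" reporting on Config "a-config" in namespace
+// "a-gk-ns" packs to "some--gk--pod-a--gk--ns-a--config" (see
+// TestNewConfigStatusForPod).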
+func KeyForConfig(id string, configNamespace string, configName string) (string, error) { + return DashPacker(id, configNamespace, configName) +} diff --git a/apis/status/v1beta1/configpodstatus_types_test.go b/apis/status/v1beta1/configpodstatus_types_test.go new file mode 100644 index 00000000000..7e9c73a0e9a --- /dev/null +++ b/apis/status/v1beta1/configpodstatus_types_test.go @@ -0,0 +1,69 @@ +package v1beta1_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" + "github.com/open-policy-agent/gatekeeper/v3/pkg/fakes" + "github.com/open-policy-agent/gatekeeper/v3/pkg/operations" + "github.com/open-policy-agent/gatekeeper/v3/test/testutils" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func TestNewConfigStatusForPod(t *testing.T) { + const podName = "some-gk-pod" + const podNS = "a-gk-namespace" + const configName = "a-config" + const configNameSpace = "a-gk-ns" + + testutils.Setenv(t, "POD_NAMESPACE", podNS) + + scheme := runtime.NewScheme() + err := v1beta1.AddToScheme(scheme) + if err != nil { + t.Fatal(err) + } + + err = corev1.AddToScheme(scheme) + if err != nil { + t.Fatal(err) + } + + pod := fakes.Pod( + fakes.WithNamespace(podNS), + fakes.WithName(podName), + ) + + expectedStatus := &v1beta1.ConfigPodStatus{} + expectedStatus.SetName("some--gk--pod-a--gk--ns-a--config") + expectedStatus.SetNamespace(podNS) + expectedStatus.Status.ID = podName + expectedStatus.Status.Operations = operations.AssignedStringList() + expectedStatus.SetLabels(map[string]string{ + v1beta1.ConfigNameLabel: configName, + v1beta1.PodLabel: podName, + }) + + err = controllerutil.SetOwnerReference(pod, expectedStatus, scheme) + if err != nil { + t.Fatal(err) + } + + status, err := v1beta1.NewConfigStatusForPod(pod, configNameSpace, configName, scheme) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(expectedStatus, status); diff != "" { + t.Fatal(diff) + } + n, err := v1beta1.KeyForConfig(podName, configNameSpace, configName) + if err != nil { + t.Fatal(err) + } + if status.Name != n { + t.Fatal("got status.Name != n, want equal") + } +} diff --git a/apis/status/v1beta1/labels.go b/apis/status/v1beta1/labels.go index 0f0caca91ce..61f3a7f384f 100644 --- a/apis/status/v1beta1/labels.go +++ b/apis/status/v1beta1/labels.go @@ -2,6 +2,7 @@ package v1beta1 // Label keys used for internal gatekeeper operations. const ( + ConfigNameLabel = "internal.gatekeeper.sh/config-name" ExpansionTemplateNameLabel = "internal.gatekeeper.sh/expansiontemplate-name" ConstraintNameLabel = "internal.gatekeeper.sh/constraint-name" ConstraintKindLabel = "internal.gatekeeper.sh/constraint-kind" diff --git a/apis/status/v1beta1/zz_generated.deepcopy.go b/apis/status/v1beta1/zz_generated.deepcopy.go index c361b6cdd9a..fa401e6ced1 100644 --- a/apis/status/v1beta1/zz_generated.deepcopy.go +++ b/apis/status/v1beta1/zz_generated.deepcopy.go @@ -24,6 +24,110 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigError) DeepCopyInto(out *ConfigError) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigError. 
+func (in *ConfigError) DeepCopy() *ConfigError { + if in == nil { + return nil + } + out := new(ConfigError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigPodStatus) DeepCopyInto(out *ConfigPodStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPodStatus. +func (in *ConfigPodStatus) DeepCopy() *ConfigPodStatus { + if in == nil { + return nil + } + out := new(ConfigPodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigPodStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigPodStatusList) DeepCopyInto(out *ConfigPodStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigPodStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPodStatusList. +func (in *ConfigPodStatusList) DeepCopy() *ConfigPodStatusList { + if in == nil { + return nil + } + out := new(ConfigPodStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigPodStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigPodStatusStatus) DeepCopyInto(out *ConfigPodStatusStatus) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Errors != nil { + in, out := &in.Errors, &out.Errors + *out = make([]*ConfigError, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ConfigError) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigPodStatusStatus. +func (in *ConfigPodStatusStatus) DeepCopy() *ConfigPodStatusStatus { + if in == nil { + return nil + } + out := new(ConfigPodStatusStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConstraintPodStatus) DeepCopyInto(out *ConstraintPodStatus) { *out = *in diff --git a/cmd/build/helmify/kustomization.yaml b/cmd/build/helmify/kustomization.yaml index 1c1943f2874..45eee7b3db5 100644 --- a/cmd/build/helmify/kustomization.yaml +++ b/cmd/build/helmify/kustomization.yaml @@ -40,6 +40,12 @@ patchesJson6902: kind: CustomResourceDefinition name: expansiontemplatepodstatuses.status.gatekeeper.sh path: labels_patch.yaml + - target: + group: apiextensions.k8s.io + version: v1 + kind: CustomResourceDefinition + name: configpodstatuses.status.gatekeeper.sh + path: labels_patch.yaml - target: group: apiextensions.k8s.io version: v1 diff --git a/config/crd/bases/config.gatekeeper.sh_configs.yaml b/config/crd/bases/config.gatekeeper.sh_configs.yaml index ddd2f55394f..ae9ed3fd806 100644 --- a/config/crd/bases/config.gatekeeper.sh_configs.yaml +++ b/config/crd/bases/config.gatekeeper.sh_configs.yaml @@ -112,7 +112,41 @@ spec: type: object status: description: ConfigStatus defines the observed state of Config. + properties: + byPod: + items: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: array type: object type: object served: true storage: true + subresources: + status: {} diff --git a/config/crd/bases/status.gatekeeper.sh_configpodstatuses.yaml b/config/crd/bases/status.gatekeeper.sh_configpodstatuses.yaml new file mode 100644 index 00000000000..1afb74b2ece --- /dev/null +++ b/config/crd/bases/status.gatekeeper.sh_configpodstatuses.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: configpodstatuses.status.gatekeeper.sh +spec: + group: status.gatekeeper.sh + names: + kind: ConfigPodStatus + listKind: ConfigPodStatusList + plural: configpodstatuses + singular: configpodstatus + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: object + served: true + storage: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 4eb253c7672..568b63293b9 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,6 +8,7 @@ resources: - bases/status.gatekeeper.sh_constrainttemplatepodstatuses.yaml - bases/status.gatekeeper.sh_mutatorpodstatuses.yaml - bases/status.gatekeeper.sh_expansiontemplatepodstatuses.yaml +- bases/status.gatekeeper.sh_configpodstatuses.yaml - bases/mutations.gatekeeper.sh_assign.yaml - bases/mutations.gatekeeper.sh_assignimage.yaml - bases/mutations.gatekeeper.sh_assignmetadata.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index f3416ee2060..a6d56048491 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -56,6 +56,18 @@ rules: - patch - update - watch +- apiGroups: + - config.gatekeeper.sh + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - config.gatekeeper.sh resources: diff --git a/manifest_staging/charts/gatekeeper/crds/config-customresourcedefinition.yaml b/manifest_staging/charts/gatekeeper/crds/config-customresourcedefinition.yaml index 11a5d922789..8a5afdeb640 100644 --- a/manifest_staging/charts/gatekeeper/crds/config-customresourcedefinition.yaml +++ b/manifest_staging/charts/gatekeeper/crds/config-customresourcedefinition.yaml @@ -112,7 +112,41 @@ spec: type: object status: description: ConfigStatus defines the observed state of Config. + properties: + byPod: + items: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: array type: object type: object served: true storage: true + subresources: + status: {} diff --git a/manifest_staging/charts/gatekeeper/crds/configpodstatus-customresourcedefinition.yaml b/manifest_staging/charts/gatekeeper/crds/configpodstatus-customresourcedefinition.yaml new file mode 100644 index 00000000000..f351b718375 --- /dev/null +++ b/manifest_staging/charts/gatekeeper/crds/configpodstatus-customresourcedefinition.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + gatekeeper.sh/system: "yes" + name: configpodstatuses.status.gatekeeper.sh +spec: + group: status.gatekeeper.sh + names: + kind: ConfigPodStatus + listKind: ConfigPodStatusList + plural: configpodstatuses + singular: configpodstatus + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: object + served: true + storage: true diff --git a/manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml b/manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml index a6306b3a285..591d36dc566 100644 --- a/manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml +++ b/manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml @@ -63,6 +63,18 @@ rules: - patch - update - watch +- apiGroups: + - config.gatekeeper.sh + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - config.gatekeeper.sh resources: diff --git a/manifest_staging/deploy/gatekeeper.yaml b/manifest_staging/deploy/gatekeeper.yaml index fe375e4eb43..a45835556c0 100644 --- a/manifest_staging/deploy/gatekeeper.yaml +++ b/manifest_staging/deploy/gatekeeper.yaml @@ -2387,6 +2387,78 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + gatekeeper.sh/system: "yes" + name: configpodstatuses.status.gatekeeper.sh +spec: + group: status.gatekeeper.sh + names: + kind: ConfigPodStatus + listKind: ConfigPodStatusList + plural: configpodstatuses + singular: configpodstatus + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. 
Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.14.0 @@ -2498,10 +2570,44 @@ spec: type: object status: description: ConfigStatus defines the observed state of Config. + properties: + byPod: + items: + properties: + configUID: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + errors: + items: + properties: + message: + type: string + type: + type: string + required: + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: array type: object type: object served: true storage: true + subresources: + status: {} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -4720,6 +4826,18 @@ rules: - patch - update - watch +- apiGroups: + - config.gatekeeper.sh + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - config.gatekeeper.sh resources: diff --git a/pkg/controller/add_configstatus.go b/pkg/controller/add_configstatus.go new file mode 100644 index 00000000000..4dfded79e01 --- /dev/null +++ b/pkg/controller/add_configstatus.go @@ -0,0 +1,21 @@ +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "github.com/open-policy-agent/gatekeeper/v3/pkg/controller/configstatus" +) + +func init() { + Injectors = append(Injectors, &configstatus.Adder{}) +} diff --git a/pkg/controller/config/config_controller.go b/pkg/controller/config/config_controller.go index 4cfa78e27b6..afe0cd7adc7 100644 --- a/pkg/controller/config/config_controller.go +++ b/pkg/controller/config/config_controller.go @@ -20,15 +20,20 @@ import ( "fmt" configv1alpha1 "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + statusv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" cm "github.com/open-policy-agent/gatekeeper/v3/pkg/cachemanager" "github.com/open-policy-agent/gatekeeper/v3/pkg/cachemanager/aggregator" "github.com/open-policy-agent/gatekeeper/v3/pkg/controller/config/process" + "github.com/open-policy-agent/gatekeeper/v3/pkg/controller/configstatus" "github.com/open-policy-agent/gatekeeper/v3/pkg/keys" "github.com/open-policy-agent/gatekeeper/v3/pkg/readiness" + "github.com/open-policy-agent/gatekeeper/v3/pkg/util" "github.com/open-policy-agent/gatekeeper/v3/pkg/watch" - "k8s.io/apimachinery/pkg/api/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -51,12 +56,14 @@ type Adder struct { ControllerSwitch *watch.ControllerSwitch Tracker *readiness.Tracker CacheManager *cm.CacheManager + // GetPod returns an instance of the currently running Gatekeeper pod + GetPod func(context.Context) (*corev1.Pod, error) } // Add creates a new ConfigController and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. func (a *Adder) Add(mgr manager.Manager) error { - r, err := newReconciler(mgr, a.CacheManager, a.ControllerSwitch, a.Tracker) + r, err := newReconciler(mgr, a.CacheManager, a.ControllerSwitch, a.Tracker, a.GetPod) if err != nil { return err } @@ -76,8 +83,12 @@ func (a *Adder) InjectCacheManager(cm *cm.CacheManager) { a.CacheManager = cm } +func (a *Adder) InjectGetPod(getPod func(ctx context.Context) (*corev1.Pod, error)) { + a.GetPod = getPod +} + // newReconciler returns a new reconcile.Reconciler. 
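+// getPod returns the Gatekeeper pod that is running this reconciler; the
+// reconciler uses it to name, label, and set the owner reference on the
+// per-pod ConfigPodStatus objects it writes.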
-func newReconciler(mgr manager.Manager, cm *cm.CacheManager, cs *watch.ControllerSwitch, tracker *readiness.Tracker) (*ReconcileConfig, error) { +func newReconciler(mgr manager.Manager, cm *cm.CacheManager, cs *watch.ControllerSwitch, tracker *readiness.Tracker, getPod func(context.Context) (*corev1.Pod, error)) (*ReconcileConfig, error) { if cm == nil { return nil, fmt.Errorf("cacheManager must be non-nil") } @@ -90,6 +101,7 @@ func newReconciler(mgr manager.Manager, cm *cm.CacheManager, cs *watch.Controlle cs: cs, cacheManager: cm, tracker: tracker, + getPod: getPod, }, nil } @@ -107,6 +119,12 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } + err = c.Watch( + source.Kind(mgr.GetCache(), &statusv1beta1.ConfigPodStatus{}, handler.TypedEnqueueRequestsFromMapFunc(configstatus.PodStatusToConfigMapper(true)))) + if err != nil { + return err + } + return nil } @@ -123,6 +141,8 @@ type ReconcileConfig struct { cs *watch.ControllerSwitch tracker *readiness.Tracker + + getPod func(context.Context) (*corev1.Pod, error) } // +kubebuilder:rbac:groups=*,resources=*,verbs=get;list;watch @@ -154,7 +174,7 @@ func (r *ReconcileConfig) Reconcile(ctx context.Context, request reconcile.Reque err := r.reader.Get(ctx, request.NamespacedName, instance) if err != nil { // if config is not found, we should remove cached data - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { exists = false } else { // Error reading the object - requeue the request. @@ -167,7 +187,11 @@ func (r *ReconcileConfig) Reconcile(ctx context.Context, request reconcile.Reque // If the config is being deleted the user is saying they don't want to // sync anything gvksToSync := []schema.GroupVersionKind{} - if exists && instance.GetDeletionTimestamp().IsZero() { + + // K8s API conventions consider an object to be deleted when either the object no longer exists or when a deletion timestamp has been set. 
+ deleted := !exists || !instance.GetDeletionTimestamp().IsZero() + + if !deleted { for _, entry := range instance.Spec.Sync.SyncOnly { gvk := schema.GroupVersionKind{Group: entry.Group, Version: entry.Version, Kind: entry.Kind} gvksToSync = append(gvksToSync, gvk) @@ -191,9 +215,87 @@ func (r *ReconcileConfig) Reconcile(ctx context.Context, request reconcile.Reque if err := r.cacheManager.UpsertSource(ctx, configSourceKey, gvksToSync); err != nil { r.tracker.For(configGVK).TryCancelExpect(instance) - return reconcile.Result{Requeue: true}, fmt.Errorf("config-controller: error establishing watches for new syncOny: %w", err) + return reconcile.Result{Requeue: true}, r.updateOrCreatePodStatus(ctx, instance, err) } r.tracker.For(configGVK).Observe(instance) - return reconcile.Result{}, nil + + if deleted { + return reconcile.Result{}, r.deleteStatus(ctx, request.NamespacedName.Namespace, request.NamespacedName.Name) + } + return reconcile.Result{}, r.updateOrCreatePodStatus(ctx, instance, nil) +} + +func (r *ReconcileConfig) deleteStatus(ctx context.Context, cfgNamespace string, cfgName string) error { + status := &statusv1beta1.ConfigPodStatus{} + pod, err := r.getPod(ctx) + if err != nil { + return fmt.Errorf("getting reconciler pod: %w", err) + } + sName, err := statusv1beta1.KeyForConfig(pod.Name, cfgNamespace, cfgName) + if err != nil { + return fmt.Errorf("getting key for config: %w", err) + } + status.SetName(sName) + status.SetNamespace(util.GetNamespace()) + if err := r.writer.Delete(ctx, status); err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil +} + +func (r *ReconcileConfig) updateOrCreatePodStatus(ctx context.Context, cfg *configv1alpha1.Config, upsertErr error) error { + pod, err := r.getPod(ctx) + if err != nil { + return fmt.Errorf("getting reconciler pod: %w", err) + } + + // Check if it exists already + sNS := pod.Namespace + sName, err := statusv1beta1.KeyForConfig(pod.Name, cfg.GetNamespace(), cfg.GetName()) + if err != nil { + return fmt.Errorf("getting key for config: %w", err) + } + shouldCreate := true + status := &statusv1beta1.ConfigPodStatus{} + + err = r.reader.Get(ctx, types.NamespacedName{Namespace: sNS, Name: sName}, status) + switch { + case err == nil: + shouldCreate = false + case apierrors.IsNotFound(err): + if status, err = r.newConfigStatus(pod, cfg); err != nil { + return fmt.Errorf("creating new config status: %w", err) + } + default: + return fmt.Errorf("getting config status in name %s, namespace %s: %w", cfg.GetName(), cfg.GetNamespace(), err) + } + + setStatusError(status, upsertErr) + + status.Status.ObservedGeneration = cfg.GetGeneration() + + if shouldCreate { + return r.writer.Create(ctx, status) + } + return r.writer.Update(ctx, status) +} + +func (r *ReconcileConfig) newConfigStatus(pod *corev1.Pod, cfg *configv1alpha1.Config) (*statusv1beta1.ConfigPodStatus, error) { + status, err := statusv1beta1.NewConfigStatusForPod(pod, cfg.GetNamespace(), cfg.GetName(), r.scheme) + if err != nil { + return nil, fmt.Errorf("creating status for pod: %w", err) + } + status.Status.ConfigUID = cfg.GetUID() + + return status, nil +} + +func setStatusError(status *statusv1beta1.ConfigPodStatus, etErr error) { + if etErr == nil { + status.Status.Errors = nil + return + } + e := &statusv1beta1.ConfigError{Message: etErr.Error()} + status.Status.Errors = []*statusv1beta1.ConfigError{e} } diff --git a/pkg/controller/config/config_controller_test.go b/pkg/controller/config/config_controller_test.go index 6de6361ea00..817e34e10b0 100644 --- 
a/pkg/controller/config/config_controller_test.go +++ b/pkg/controller/config/config_controller_test.go @@ -40,7 +40,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" - corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -154,7 +153,12 @@ func TestReconcile(t *testing.T) { assert.NoError(t, cacheManager.Start(ctx)) }() - rec, err := newReconciler(mgr, cacheManager, cs, tracker) + pod := fakes.Pod( + fakes.WithNamespace("gatekeeper-system"), + fakes.WithName("no-pod"), + ) + + rec, err := newReconciler(mgr, cacheManager, cs, tracker, func(context.Context) (*v1.Pod, error) { return pod, nil }) require.NoError(t, err) // Wrap the Controller Reconcile function so it writes each request to a map when it is finished reconciling. @@ -337,8 +341,8 @@ func TestConfig_DeleteSyncResources(t *testing.T) { fakes.WithNamespace("default"), fakes.WithName("testpod"), ) - pod.Spec = corev1.PodSpec{ - Containers: []corev1.Container{ + pod.Spec = v1.PodSpec{ + Containers: []v1.Container{ { Name: "nginx", Image: "nginx", @@ -444,7 +448,12 @@ func setupController(ctx context.Context, mgr manager.Manager, wm *watch.Manager _ = cacheManager.Start(ctx) }() - rec, err := newReconciler(mgr, cacheManager, cs, tracker) + pod := fakes.Pod( + fakes.WithNamespace("gatekeeper-system"), + fakes.WithName("no-pod"), + ) + + rec, err := newReconciler(mgr, cacheManager, cs, tracker, func(context.Context) (*v1.Pod, error) { return pod, nil }) if err != nil { return nil, fmt.Errorf("creating reconciler: %w", err) } @@ -618,7 +627,12 @@ func TestConfig_Retries(t *testing.T) { assert.NoError(t, cacheManager.Start(ctx)) }() - rec, _ := newReconciler(mgr, cacheManager, cs, tracker) + pod := fakes.Pod( + fakes.WithNamespace("gatekeeper-system"), + fakes.WithName("no-pod"), + ) + + rec, _ := newReconciler(mgr, cacheManager, cs, tracker, func(context.Context) (*v1.Pod, error) { return pod, nil }) err = add(mgr, rec) if err != nil { t.Fatal(err) diff --git a/pkg/controller/configstatus/configstatus_controller.go b/pkg/controller/configstatus/configstatus_controller.go new file mode 100644 index 00000000000..b0bae57e02a --- /dev/null +++ b/pkg/controller/configstatus/configstatus_controller.go @@ -0,0 +1,199 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configstatus + +import ( + "context" + "fmt" + "sort" + + "github.com/go-logr/logr" + configv1alpha1 "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" + "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" + "github.com/open-policy-agent/gatekeeper/v3/pkg/operations" + "github.com/open-policy-agent/gatekeeper/v3/pkg/readiness" + "github.com/open-policy-agent/gatekeeper/v3/pkg/util" + "github.com/open-policy-agent/gatekeeper/v3/pkg/watch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller").WithValues(logging.Process, "config_status_controller") + +type Adder struct { + WatchManager *watch.Manager +} + +func (a *Adder) InjectControllerSwitch(_ *watch.ControllerSwitch) {} + +func (a *Adder) InjectTracker(_ *readiness.Tracker) {} + +// Add creates a new config Status Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func (a *Adder) Add(mgr manager.Manager) error { + if !operations.IsAssigned(operations.Status) { + return nil + } + r := newReconciler(mgr) + return add(mgr, r) +} + +// newReconciler returns a new reconcile.Reconciler. +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileConfigStatus{ + // Separate reader and writer because manager's default client bypasses the cache for unstructured resources. + writer: mgr.GetClient(), + statusClient: mgr.GetClient(), + reader: mgr.GetCache(), + + scheme: mgr.GetScheme(), + log: log, + } +} + +// PodStatusToConfigMapper correlates a ConfigPodStatus with its corresponding Config. +// `selfOnly` tells the mapper to only map statuses corresponding to the current pod. +func PodStatusToConfigMapper(selfOnly bool) handler.TypedMapFunc[*v1beta1.ConfigPodStatus] { + return func(_ context.Context, obj *v1beta1.ConfigPodStatus) []reconcile.Request { + labels := obj.GetLabels() + name, ok := labels[v1beta1.ConfigNameLabel] + if !ok { + log.Error(fmt.Errorf("config status resource with no mapping label: %s", obj.GetName()), "missing label while attempting to map a config status resource") + return nil + } + if selfOnly { + pod, ok := labels[v1beta1.PodLabel] + if !ok { + log.Error(fmt.Errorf("config status resource with no pod label: %s", obj.GetName()), "missing label while attempting to map a config status resource") + } + // Do not attempt to reconcile the resource when other pods have changed their status + if pod != util.GetPodName() { + return nil + } + } + + return []reconcile.Request{{NamespacedName: types.NamespacedName{ + Name: name, + Namespace: obj.Namespace, + }}} + } +} + +// Add creates a new config status Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
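+// It watches ConfigPodStatus objects written by every Gatekeeper pod (not just
+// this one) as well as Config objects, so the aggregated byPod status stays
+// current.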
+func add(mgr manager.Manager, r reconcile.Reconciler) error { + c, err := controller.New("config-status-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + err = c.Watch( + source.Kind(mgr.GetCache(), &v1beta1.ConfigPodStatus{}, handler.TypedEnqueueRequestsFromMapFunc(PodStatusToConfigMapper(false)))) + if err != nil { + return err + } + + err = c.Watch(source.Kind(mgr.GetCache(), &configv1alpha1.Config{}, &handler.TypedEnqueueRequestForObject[*configv1alpha1.Config]{})) + if err != nil { + return err + } + return nil +} + +var _ reconcile.Reconciler = &ReconcileConfigStatus{} + +// ReconcileConfigStatus provides the dependencies required to reconcile +// the status of a Config resource. +type ReconcileConfigStatus struct { + reader client.Reader + writer client.Writer + statusClient client.StatusClient + + scheme *runtime.Scheme + log logr.Logger +} + +// +kubebuilder:rbac:groups=config.gatekeeper.sh,resources=*,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=status.gatekeeper.sh,resources=*,verbs=get;list;watch;create;update;patch;delete + +// Reconcile reads that state of the cluster for a config object and makes changes based on the state read +// and what is in the constraint.Spec. +func (r *ReconcileConfigStatus) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + cfg := &configv1alpha1.Config{} + err := r.reader.Get(ctx, request.NamespacedName, cfg) + if err != nil { + // If the Config does not exist then we are done + if errors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + sObjs := &v1beta1.ConfigPodStatusList{} + if err := r.reader.List( + ctx, + sObjs, + client.MatchingLabels{v1beta1.ConfigNameLabel: request.Name}, + client.InNamespace(util.GetNamespace()), + ); err != nil { + return reconcile.Result{}, err + } + statusObjs := make(sortableStatuses, len(sObjs.Items)) + copy(statusObjs, sObjs.Items) + sort.Sort(statusObjs) + + var s []v1beta1.ConfigPodStatusStatus + + for i := range statusObjs { + // Don't report status if it's not for the correct object. 
This can happen + // if a watch gets interrupted, causing the constraint status to be deleted + // out from underneath it + if statusObjs[i].Status.ConfigUID != cfg.GetUID() { + continue + } + s = append(s, statusObjs[i].Status) + } + + cfg.Status.ByPod = s + + if err := r.statusClient.Status().Update(ctx, cfg); err != nil { + return reconcile.Result{Requeue: true}, nil + } + return reconcile.Result{}, nil +} + +type sortableStatuses []v1beta1.ConfigPodStatus + +func (s sortableStatuses) Len() int { + return len(s) +} + +func (s sortableStatuses) Less(i, j int) bool { + return s[i].Status.ID < s[j].Status.ID +} + +func (s sortableStatuses) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/pkg/controller/constraintstatus/constraintstatus_controller.go b/pkg/controller/constraintstatus/constraintstatus_controller.go index 16f293894d4..7f6714c85f2 100644 --- a/pkg/controller/constraintstatus/constraintstatus_controller.go +++ b/pkg/controller/constraintstatus/constraintstatus_controller.go @@ -25,6 +25,7 @@ import ( constraintclient "github.com/open-policy-agent/frameworks/constraint/pkg/client" "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" + "github.com/open-policy-agent/gatekeeper/v3/pkg/operations" "github.com/open-policy-agent/gatekeeper/v3/pkg/util" "github.com/open-policy-agent/gatekeeper/v3/pkg/watch" "k8s.io/apimachinery/pkg/api/errors" @@ -54,6 +55,9 @@ type Adder struct { // Add creates a new Constraint Status Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func (a *Adder) Add(mgr manager.Manager) error { + if !operations.IsAssigned(operations.Status) { + return nil + } r := newReconciler(mgr, a.ControllerSwitch) if a.IfWatching != nil { r.ifWatching = a.IfWatching diff --git a/pkg/controller/constrainttemplatestatus/constrainttemplatestatus_controller.go b/pkg/controller/constrainttemplatestatus/constrainttemplatestatus_controller.go index 1b1f15f4608..6947a2c3ad8 100644 --- a/pkg/controller/constrainttemplatestatus/constrainttemplatestatus_controller.go +++ b/pkg/controller/constrainttemplatestatus/constrainttemplatestatus_controller.go @@ -26,6 +26,7 @@ import ( constraintclient "github.com/open-policy-agent/frameworks/constraint/pkg/client" "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" + "github.com/open-policy-agent/gatekeeper/v3/pkg/operations" "github.com/open-policy-agent/gatekeeper/v3/pkg/util" "github.com/open-policy-agent/gatekeeper/v3/pkg/watch" "k8s.io/apimachinery/pkg/api/errors" @@ -52,6 +53,9 @@ type Adder struct { // Add creates a new Constraint Status Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
func (a *Adder) Add(mgr manager.Manager) error { + if !operations.IsAssigned(operations.Status) { + return nil + } r := newReconciler(mgr, a.ControllerSwitch) return add(mgr, r) } diff --git a/pkg/controller/expansion/expansion_controller.go b/pkg/controller/expansion/expansion_controller.go index 0aa3ceb89e2..3539508adf0 100644 --- a/pkg/controller/expansion/expansion_controller.go +++ b/pkg/controller/expansion/expansion_controller.go @@ -7,6 +7,7 @@ import ( "github.com/open-policy-agent/gatekeeper/v3/apis/expansion/unversioned" expansionv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/expansion/v1beta1" statusv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" + "github.com/open-policy-agent/gatekeeper/v3/pkg/controller/expansionstatus" "github.com/open-policy-agent/gatekeeper/v3/pkg/expansion" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" "github.com/open-policy-agent/gatekeeper/v3/pkg/metrics" @@ -110,9 +111,20 @@ func add(mgr manager.Manager, r *Reconciler) error { } // Watch for changes to ExpansionTemplates - return c.Watch( + err = c.Watch( source.Kind(mgr.GetCache(), &expansionv1beta1.ExpansionTemplate{}, &handler.TypedEnqueueRequestForObject[*expansionv1beta1.ExpansionTemplate]{})) + if err != nil { + return err + } + + // Watch for changes to ExpansionTemplateStatuses + err = c.Watch( + source.Kind(mgr.GetCache(), &statusv1beta1.ExpansionTemplatePodStatus{}, handler.TypedEnqueueRequestsFromMapFunc(expansionstatus.PodStatusToExpansionTemplateMapper(true)))) + if err != nil { + return err + } + return nil } func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { diff --git a/pkg/controller/expansionstatus/expansionstatus_controller.go b/pkg/controller/expansionstatus/expansionstatus_controller.go index 3f07fee22de..e512860f856 100644 --- a/pkg/controller/expansionstatus/expansionstatus_controller.go +++ b/pkg/controller/expansionstatus/expansionstatus_controller.go @@ -26,6 +26,7 @@ import ( "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/open-policy-agent/gatekeeper/v3/pkg/expansion" "github.com/open-policy-agent/gatekeeper/v3/pkg/logging" + "github.com/open-policy-agent/gatekeeper/v3/pkg/operations" "github.com/open-policy-agent/gatekeeper/v3/pkg/readiness" "github.com/open-policy-agent/gatekeeper/v3/pkg/util" "github.com/open-policy-agent/gatekeeper/v3/pkg/watch" @@ -58,6 +59,9 @@ func (a *Adder) Add(mgr manager.Manager) error { if !*expansion.ExpansionEnabled { return nil } + if !operations.IsAssigned(operations.Status) { + return nil + } r := newReconciler(mgr) return add(mgr, r) } diff --git a/pkg/readiness/ready_tracker_unit_test.go b/pkg/readiness/ready_tracker_unit_test.go index ba5d039b941..94a1f9b9da6 100644 --- a/pkg/readiness/ready_tracker_unit_test.go +++ b/pkg/readiness/ready_tracker_unit_test.go @@ -49,6 +49,17 @@ const ( tick = 1 * time.Second ) +type WrapFakeClientWithMutex struct { + listMutex sync.Mutex + fakeLister Lister +} + +func (w *WrapFakeClientWithMutex) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + w.listMutex.Lock() + defer w.listMutex.Unlock() + return w.fakeLister.List(ctx, list, opts...) +} + var ( testConstraintTemplate = templates.ConstraintTemplate{ ObjectMeta: v1.ObjectMeta{ @@ -171,7 +182,9 @@ func mustInitializeScheme(scheme *runtime.Scheme) *runtime.Scheme { // Verify that TryCancelTemplate functions the same as regular CancelTemplate if readinessRetries is set to 0. 
func Test_ReadyTracker_TryCancelTemplate_No_Retries(t *testing.T) { lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(convertedTemplate.DeepCopyObject()).Build() - rt := newTracker(lister, false, false, false, false, nil, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + + rt := newTracker(&wrapLister, false, false, false, false, nil, func() objData { return objData{retries: 0} }) @@ -211,7 +224,9 @@ func Test_ReadyTracker_TryCancelTemplate_No_Retries(t *testing.T) { // Verify that TryCancelTemplate must be called enough times to remove all retries before canceling a template. func Test_ReadyTracker_TryCancelTemplate_Retries(t *testing.T) { lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(convertedTemplate.DeepCopyObject()).Build() - rt := newTracker(lister, false, false, false, false, nil, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + + rt := newTracker(&wrapLister, false, false, false, false, nil, func() objData { return objData{retries: 2} }) @@ -264,6 +279,9 @@ func Test_Tracker_TryCancelData(t *testing.T) { lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects( &testSyncSet, fakes.UnstructuredFor(podGVK, "", "pod1-name"), ).Build() + + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + tcs := []struct { name string retries int @@ -277,7 +295,7 @@ func Test_Tracker_TryCancelData(t *testing.T) { objDataFn := func() objData { return objData{retries: tc.retries} } - rt := newTracker(lister, false, false, false, false, nil, objDataFn) + rt := newTracker(&wrapLister, false, false, false, false, nil, objDataFn) ctx, cancel := context.WithCancel(context.Background()) var runErr error @@ -346,9 +364,10 @@ func Test_ReadyTracker_TrackAssignMetadata(t *testing.T) { } return client.List(ctx, list, opts...) 
} - lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testAssignMetadata).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, true, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + + rt := newTracker(&wrapLister, true, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -417,7 +436,8 @@ func Test_ReadyTracker_TrackAssign(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testAssign).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, true, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, true, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -486,7 +506,8 @@ func Test_ReadyTracker_TrackModifySet(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testModifySet).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, true, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, true, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -556,7 +577,8 @@ func Test_ReadyTracker_TrackAssignImage(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testAssignImage).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, true, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, true, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -625,7 +647,8 @@ func Test_ReadyTracker_TrackExternalDataProvider(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testExternalDataProvider).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, true, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, true, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -694,7 +717,8 @@ func Test_ReadyTracker_TrackExpansionTemplates(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testExpansionTemplate).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, true, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, true, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -763,7 +787,8 @@ func Test_ReadyTracker_TrackConstraintTemplates(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(convertedTemplate.DeepCopyObject()).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, false, tc.failClose, retryNone, func() objData { + 
wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -866,7 +891,8 @@ func Test_ReadyTracker_TrackConfigAndSyncSets(t *testing.T) { return client.List(ctx, list, opts...) } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testSyncSet, &testConfig).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -954,7 +980,8 @@ func Test_ReadyTracker_TrackConstraint(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(getTestConstraint()).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -1030,7 +1057,8 @@ func Test_ReadyTracker_TrackData(t *testing.T) { } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(fakes.UnstructuredFor(podGVK, "", "pod1-name")).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, false, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, false, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) errChan := make(chan error) @@ -1091,21 +1119,17 @@ func Test_ReadyTracker_Run_GRP_Wait(t *testing.T) { } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - var m sync.Mutex funcs := &interceptor.Funcs{} funcs.List = func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error { if _, ok := list.(*v1beta1.ConstraintTemplateList); ok { return fmt.Errorf("Force Test ConstraintTemplateList Failure") } - - // Adding a mutex lock here avoids the race condition within fake client's List method - m.Lock() - defer m.Unlock() return client.List(ctx, list, opts...) 
} lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testExpansionTemplate, convertedTemplate.DeepCopyObject(), getTestConstraint(), &testSyncSet, fakes.UnstructuredFor(podGVK, "", "pod1-name")).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, true, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, true, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) @@ -1145,20 +1169,19 @@ func Test_ReadyTracker_Run_ConstraintTrackers_Wait(t *testing.T) { } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - var m sync.Mutex funcs := &interceptor.Funcs{} funcs.List = func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error { if v, ok := list.(*unstructured.UnstructuredList); ok && v.GroupVersionKind().Group == "constraints.gatekeeper.sh" { t.Log(v.GroupVersionKind()) return fmt.Errorf("Force Test constraint list Failure") } - m.Lock() - defer m.Unlock() + return client.List(ctx, list, opts...) } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testExpansionTemplate, convertedTemplate.DeepCopyObject(), getTestConstraint(), &testSyncSet, fakes.UnstructuredFor(podGVK, "", "pod1-name")).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, true, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, true, tc.failClose, retryNone, func() objData { return objData{retries: 0} }) @@ -1199,19 +1222,18 @@ func Test_ReadyTracker_Run_DataTrackers_Wait(t *testing.T) { } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - var m sync.Mutex funcs := &interceptor.Funcs{} funcs.List = func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error { if v, ok := list.(*unstructured.UnstructuredList); ok && v.GroupVersionKind().Kind == "PodList" { return fmt.Errorf("Force Test pod list Failure") } - m.Lock() - defer m.Unlock() + return client.List(ctx, list, opts...) } lister := fake.NewClientBuilder().WithScheme(mustInitializeScheme(runtime.NewScheme())).WithRuntimeObjects(&testExpansionTemplate, convertedTemplate.DeepCopyObject(), getTestConstraint(), &testSyncSet, fakes.UnstructuredFor(podGVK, "", "pod1-name")).WithInterceptorFuncs(*funcs).Build() - rt := newTracker(lister, false, false, true, tc.failClose, retryNone, func() objData { + wrapLister := WrapFakeClientWithMutex{fakeLister: lister} + rt := newTracker(&wrapLister, false, false, true, tc.failClose, retryNone, func() objData { return objData{retries: 0} })