Merge pull request #5853 from chaosi-zju/automated-cherry-pick-of-#5318-upstream-release-1.9

Automated cherry pick of #5318: fix expected patch operations may be missed when
karmada-bot authored Nov 21, 2024
2 parents c0e9eb7 + 3517093 commit ea15f5f
Showing 11 changed files with 251 additions and 504 deletions.
81 changes: 54 additions & 27 deletions pkg/controllers/status/common.go
@@ -20,12 +20,15 @@ import (
"context"
"reflect"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/event"
@@ -34,8 +37,8 @@ import (
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/events"
"github.com/karmada-io/karmada/pkg/resourceinterpreter"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/restmapper"
)

@@ -104,43 +107,67 @@ func updateResourceStatus(
dynamicClient dynamic.Interface,
restMapper meta.RESTMapper,
interpreter resourceinterpreter.ResourceInterpreter,
resource *unstructured.Unstructured,
eventRecorder record.EventRecorder,
objRef workv1alpha2.ObjectReference,
bindingStatus workv1alpha2.ResourceBindingStatus,
) error {
gvr, err := restmapper.GetGroupVersionResource(restMapper, schema.FromAPIVersionAndKind(resource.GetAPIVersion(), resource.GetKind()))
gvr, err := restmapper.GetGroupVersionResource(restMapper, schema.FromAPIVersionAndKind(objRef.APIVersion, objRef.Kind))
if err != nil {
klog.Errorf("Failed to get GVR from GVK(%s/%s), Error: %v", resource.GetAPIVersion(), resource.GetKind(), err)
klog.Errorf("Failed to get GVR from GVK(%s/%s), Error: %v", objRef.APIVersion, objRef.Kind, err)
return err
}

if !interpreter.HookEnabled(resource.GroupVersionKind(), configv1alpha1.InterpreterOperationAggregateStatus) {
gvk := schema.GroupVersionKind{Group: gvr.Group, Version: gvr.Version, Kind: objRef.Kind}
if !interpreter.HookEnabled(gvk, configv1alpha1.InterpreterOperationAggregateStatus) {
return nil
}
newObj, err := interpreter.AggregateStatus(resource, bindingStatus.AggregatedStatus)
if err != nil {
klog.Errorf("Failed to aggregate status for resource(%s/%s/%s, Error: %v", gvr, resource.GetNamespace(), resource.GetName(), err)
return err
}

oldStatus, _, _ := unstructured.NestedFieldNoCopy(resource.Object, "status")
newStatus, _, _ := unstructured.NestedFieldNoCopy(newObj.Object, "status")
if reflect.DeepEqual(oldStatus, newStatus) {
klog.V(3).Infof("Ignore update resource(%s/%s/%s) status as up to date.", gvr, resource.GetNamespace(), resource.GetName())
return nil
}
var resource *unstructured.Unstructured
if err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Fetch resource template from karmada-apiserver instead of informer cache, to avoid retry due to
// resource conflict which often happens, especially with a huge amount of resource templates and
// the informer cache doesn't sync quickly enough.
// For more details refer to https://github.com/karmada-io/karmada/issues/5285.
resource, err = dynamicClient.Resource(gvr).Namespace(objRef.Namespace).Get(context.TODO(), objRef.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
// It might happen when the resource template has been removed but the garbage collector hasn't removed
// the ResourceBinding which dependent on resource template.
// So, just return without retry(requeue) would save unnecessary loop.
return nil
}
klog.Errorf("Failed to fetch resource template(%s/%s/%s), Error: %v.", gvr, objRef.Namespace, objRef.Name, err)
return err
}

patchBytes, err := helper.GenReplaceFieldJSONPatch("/status", oldStatus, newStatus)
if err != nil {
klog.Errorf("Failed to gen patch bytes for resource(%s/%s/%s, Error: %v", gvr, resource.GetNamespace(), resource.GetName(), err)
return err
}
newObj, err := interpreter.AggregateStatus(resource, bindingStatus.AggregatedStatus)
if err != nil {
klog.Errorf("Failed to aggregate status for resource template(%s/%s/%s), Error: %v", gvr, resource.GetNamespace(), resource.GetName(), err)
return err
}

_, err = dynamicClient.Resource(gvr).Namespace(resource.GetNamespace()).
Patch(context.TODO(), resource.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status")
if err != nil {
klog.Error("Failed to update resource(%s/%s/%s), Error: %v", gvr, resource.GetNamespace(), resource.GetName(), err)
oldStatus, _, _ := unstructured.NestedFieldNoCopy(resource.Object, "status")
newStatus, _, _ := unstructured.NestedFieldNoCopy(newObj.Object, "status")
if reflect.DeepEqual(oldStatus, newStatus) {
klog.V(3).Infof("Ignore update resource(%s/%s/%s) status as up to date.", gvr, resource.GetNamespace(), resource.GetName())
return nil
}

if _, err = dynamicClient.Resource(gvr).Namespace(resource.GetNamespace()).UpdateStatus(context.TODO(), newObj, metav1.UpdateOptions{}); err != nil {
klog.Errorf("Failed to update resource(%s/%s/%s), Error: %v", gvr, resource.GetNamespace(), resource.GetName(), err)
return err
}

eventRecorder.Event(resource, corev1.EventTypeNormal, events.EventReasonAggregateStatusSucceed, "Update Resource with AggregatedStatus successfully.")
klog.V(3).Infof("Update resource(%s/%s/%s) status successfully.", gvr, resource.GetNamespace(), resource.GetName())

return nil
}); err != nil {
if resource != nil {
eventRecorder.Event(resource, corev1.EventTypeWarning, events.EventReasonAggregateStatusFailed, err.Error())
}
return err
}
klog.V(3).Infof("Update resource(%s/%s/%s) status successfully.", gvr, resource.GetNamespace(), resource.GetName())

return nil
}
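The heart of this cherry-pick is the flow above: the old code aggregated status against an informer-cached copy of the resource template and applied a JSON patch, so a concurrent writer could bump resourceVersion and silently invalidate the expected patch operations; the new code wraps the whole read-modify-write in retry.RetryOnConflict, re-fetching from the apiserver on every attempt and writing through the /status subresource. A minimal, self-contained sketch of that pattern follows; the function name and the mutateStatus callback are illustrative, not part of this PR.

package statussketch

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/util/retry"
)

// updateStatusWithRetry re-fetches the object from the apiserver on every
// attempt (never from an informer cache) and writes the mutated status back
// via the /status subresource; a conflict simply triggers another attempt
// against the latest resourceVersion.
func updateStatusWithRetry(
	client dynamic.Interface,
	gvr schema.GroupVersionResource,
	namespace, name string,
	mutateStatus func(*unstructured.Unstructured) error,
) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		obj, err := client.Resource(gvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
				return nil // template already deleted; retrying cannot help
			}
			return err
		}
		if err := mutateStatus(obj); err != nil {
			return err
		}
		_, err = client.Resource(gvr).Namespace(namespace).UpdateStatus(context.TODO(), obj, metav1.UpdateOptions{})
		return err
	})
}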
17 changes: 2 additions & 15 deletions pkg/controllers/status/crb_status_controller.go
@@ -109,27 +109,14 @@ func (c *CRBStatusController) SetupWithManager(mgr controllerruntime.Manager) er
}

func (c *CRBStatusController) syncBindingStatus(binding *workv1alpha2.ClusterResourceBinding) error {
resource, err := helper.FetchResourceTemplate(c.DynamicClient, c.InformerManager, c.RESTMapper, binding.Spec.Resource)
if err != nil {
if apierrors.IsNotFound(err) {
// It might happen when the resource template has been removed but the garbage collector hasn't removed
// the ResourceBinding which dependent on resource template.
// So, just return without retry(requeue) would save unnecessary loop.
return nil
}
klog.Errorf("Failed to fetch workload for clusterResourceBinding(%s). Error: %v",
binding.GetName(), err)
return err
}

err = helper.AggregateClusterResourceBindingWorkStatus(c.Client, binding, resource, c.EventRecorder)
err := helper.AggregateClusterResourceBindingWorkStatus(c.Client, binding, c.EventRecorder)
if err != nil {
klog.Errorf("Failed to aggregate workStatues to clusterResourceBinding(%s), Error: %v",
binding.Name, err)
return err
}

err = updateResourceStatus(c.DynamicClient, c.RESTMapper, c.ResourceInterpreter, resource, binding.Status)
err = updateResourceStatus(c.DynamicClient, c.RESTMapper, c.ResourceInterpreter, c.EventRecorder, binding.Spec.Resource, binding.Status)
if err != nil {
return err
}
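With the fetch folded into updateResourceStatus (previous file), syncBindingStatus no longer needs the informer-backed helper.FetchResourceTemplate call or its NotFound short-circuit: the controller now hands updateResourceStatus a lightweight binding.Spec.Resource reference plus the event recorder, and work-status aggregation runs before any template lookup.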
18 changes: 10 additions & 8 deletions pkg/controllers/status/crb_status_controller_test.go
@@ -34,6 +34,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"

workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native"
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
)
@@ -98,8 +99,11 @@ func TestCRBStatusController_Reconcile(t *testing.T) {
name: "failed in syncBindingStatus",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
Name: "binding",
Namespace: "default",
// finalizers field is required when deletionTimestamp is defined, otherwise will encounter the
// error: `refusing to create obj binding with metadata.deletionTimestamp but no finalizers`.
Finalizers: []string{"test"},
DeletionTimestamp: &preTime,
},
Spec: workv1alpha2.ResourceBindingSpec{
@@ -119,6 +123,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateCRBStatusController()
c.ResourceInterpreter = FakeResourceInterpreter{DefaultInterpreter: native.NewDefaultInterpreter()}

// Prepare req
req := controllerruntime.Request{
@@ -130,9 +135,7 @@

// Prepare binding and create it in client
if tt.binding != nil {
if err := c.Client.Create(context.Background(), tt.binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
c.Client = fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(tt.binding).WithStatusSubresource(tt.binding).Build()
}

res, err := c.Reconcile(context.Background(), req)
@@ -192,6 +195,7 @@ func TestCRBStatusController_syncBindingStatus(t *testing.T) {
c := generateCRBStatusController()
c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}})
c.ResourceInterpreter = FakeResourceInterpreter{DefaultInterpreter: native.NewDefaultInterpreter()}

binding := &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
@@ -204,9 +208,7 @@
}

if tt.resourceExistInClient {
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
c.Client = fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(binding).WithStatusSubresource(binding).Build()
}

err := c.syncBindingStatus(binding)
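The test changes in this file swap an in-test Create call for a fake client that is seeded with the binding and has its status subresource registered up front, which is what lets the controller's status writes behave like a real apiserver. A sketch of that setup, assuming controller-runtime v0.15+ fake-client semantics (the helper name is illustrative); note the placeholder finalizer, without which the fake client rejects an object carrying a deletionTimestamp:

package statuscontrollertest

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util/gclient"
)

// newSeededClient builds a fake client pre-loaded with a terminating binding.
func newSeededClient() client.Client {
	preTime := metav1.NewTime(time.Now())
	binding := &workv1alpha2.ClusterResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "binding",
			// A deletionTimestamp without finalizers is refused by the fake
			// client, hence the placeholder entry.
			Finalizers:        []string{"test"},
			DeletionTimestamp: &preTime,
		},
	}
	return fake.NewClientBuilder().
		WithScheme(gclient.NewSchema()).
		WithObjects(binding).
		WithStatusSubresource(binding). // enable writes to /status
		Build()
}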
20 changes: 4 additions & 16 deletions pkg/controllers/status/rb_status_controller.go
@@ -111,29 +111,17 @@ func (c *RBStatusController) SetupWithManager(mgr controllerruntime.Manager) err
}

func (c *RBStatusController) syncBindingStatus(binding *workv1alpha2.ResourceBinding) error {
resourceTemplate, err := helper.FetchResourceTemplate(c.DynamicClient, c.InformerManager, c.RESTMapper, binding.Spec.Resource)
err := helper.AggregateResourceBindingWorkStatus(c.Client, binding, c.EventRecorder)
if err != nil {
if apierrors.IsNotFound(err) {
// It might happen when the resource template has been removed but the garbage collector hasn't removed
// the ResourceBinding which dependent on resource template.
// So, just return without retry(requeue) would save unnecessary loop.
return nil
}
klog.Errorf("Failed to fetch workload for resourceBinding(%s/%s). Error: %v.",
binding.GetNamespace(), binding.GetName(), err)
return err
}

err = helper.AggregateResourceBindingWorkStatus(c.Client, binding, resourceTemplate, c.EventRecorder)
if err != nil {
klog.Errorf("Failed to aggregate workStatues to resourceBinding(%s/%s), Error: %v",
klog.Errorf("Failed to aggregate workStatus to resourceBinding(%s/%s), Error: %v",
binding.Namespace, binding.Name, err)
return err
}

err = updateResourceStatus(c.DynamicClient, c.RESTMapper, c.ResourceInterpreter, resourceTemplate, binding.Status)
err = updateResourceStatus(c.DynamicClient, c.RESTMapper, c.ResourceInterpreter, c.EventRecorder, binding.Spec.Resource, binding.Status)
if err != nil {
return err
}

return nil
}
29 changes: 21 additions & 8 deletions pkg/controllers/status/rb_status_controller_test.go
@@ -34,6 +34,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"

workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/resourceinterpreter"
"github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native"
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
)
@@ -98,8 +100,11 @@ func TestRBStatusController_Reconcile(t *testing.T) {
name: "failed in syncBindingStatus",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "binding",
Namespace: "default",
Name: "binding",
Namespace: "default",
// finalizers field is required when deletionTimestamp is defined, otherwise will encounter the
// error: `refusing to create obj binding with metadata.deletionTimestamp but no finalizers`.
Finalizers: []string{"test"},
DeletionTimestamp: &preTime,
},
Spec: workv1alpha2.ResourceBindingSpec{
@@ -119,6 +124,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := generateRBStatusController()
c.ResourceInterpreter = FakeResourceInterpreter{DefaultInterpreter: native.NewDefaultInterpreter()}

// Prepare req
req := controllerruntime.Request{
@@ -130,9 +136,7 @@

// Prepare binding and create it in client
if tt.binding != nil {
if err := c.Client.Create(context.Background(), tt.binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
c.Client = fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(tt.binding).WithStatusSubresource(tt.binding).Build()
}

res, err := c.Reconcile(context.Background(), req)
@@ -192,6 +196,7 @@ func TestRBStatusController_syncBindingStatus(t *testing.T) {
c := generateRBStatusController()
c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}})
c.ResourceInterpreter = FakeResourceInterpreter{DefaultInterpreter: native.NewDefaultInterpreter()}

binding := &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
@@ -204,9 +209,7 @@
}

if tt.resourceExistInClient {
if err := c.Client.Create(context.Background(), binding); err != nil {
t.Fatalf("Failed to create binding: %v", err)
}
c.Client = fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(binding).WithStatusSubresource(binding).Build()
}

err := c.syncBindingStatus(binding)
@@ -219,3 +222,13 @@ func TestRBStatusController_syncBindingStatus(t *testing.T) {
})
}
}

var _ resourceinterpreter.ResourceInterpreter = &FakeResourceInterpreter{}

type FakeResourceInterpreter struct {
*native.DefaultInterpreter
}

func (f FakeResourceInterpreter) Start(_ context.Context) (err error) {
return nil
}
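Design note on the test fake above: embedding *native.DefaultInterpreter means the tests exercise the real built-in status-aggregation logic, while the no-op Start appears to be all that is needed to satisfy the resourceinterpreter.ResourceInterpreter interface without starting whatever background machinery a production interpreter would require; every other interface method is promoted from the embedded default.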
4 changes: 3 additions & 1 deletion pkg/controllers/status/work_status_controller.go
@@ -407,7 +407,9 @@ func (c *WorkStatusController) reflectStatus(work *workv1alpha1.Work, clusterObj
}

func (c *WorkStatusController) buildStatusIdentifier(work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) (*workv1alpha1.ResourceIdentifier, error) {
ordinal, err := helper.GetManifestIndex(work.Spec.Workload.Manifests, clusterObj)
manifestRef := helper.ManifestReference{APIVersion: clusterObj.GetAPIVersion(), Kind: clusterObj.GetKind(),
Namespace: clusterObj.GetNamespace(), Name: clusterObj.GetName()}
ordinal, err := helper.GetManifestIndex(work.Spec.Workload.Manifests, &manifestRef)
if err != nil {
return nil, err
}
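In buildStatusIdentifier, GetManifestIndex now receives a slim identity reference instead of the full cluster object, so the manifest lookup depends only on apiVersion/kind/namespace/name and cannot be thrown off by fields that drift after delivery to a member cluster. A sketch of identity-based lookup; ManifestRef and indexOfManifest are illustrative stand-ins for karmada's helper.ManifestReference and helper.GetManifestIndex, whose exact signatures are not shown in this diff:

package manifestsketch

import (
	"encoding/json"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// ManifestRef carries only the identity fields used for matching.
type ManifestRef struct {
	APIVersion, Kind, Namespace, Name string
}

// indexOfManifest returns the position of the raw manifest whose identity
// matches ref, or -1 if none does.
func indexOfManifest(manifests [][]byte, ref ManifestRef) int {
	for i, raw := range manifests {
		obj := &unstructured.Unstructured{}
		if err := json.Unmarshal(raw, obj); err != nil {
			continue // skip manifests that do not parse
		}
		if obj.GetAPIVersion() == ref.APIVersion &&
			obj.GetKind() == ref.Kind &&
			obj.GetNamespace() == ref.Namespace &&
			obj.GetName() == ref.Name {
			return i
		}
	}
	return -1
}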