-
Notifications
You must be signed in to change notification settings - Fork 994
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Ensure podAnnotations are removed from pods if reset in the config #2826
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1037,10 +1037,47 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | |
syncReason = append(syncReason, specReason...) | ||
} | ||
|
||
listOptions := metav1.ListOptions{ | ||
LabelSelector: labels.Set(c.connectionPoolerLabels(role, true).MatchLabels).String(), | ||
} | ||
pods, err = c.listPoolerPods(listOptions) | ||
newPodAnnotations := c.annotationsSet(c.generatePodAnnotations(&c.Spec)) | ||
if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations); changed { | ||
specSync = true | ||
syncReason = append(syncReason, []string{"new connection pooler's pod template annotations do not match the current ones: " + reason}...) | ||
|
||
if strings.Contains(reason, "Removed") { | ||
annotationToRemove := `{"metadata":{"annotations":{` | ||
annotationToRemoveTemplate := `{"spec":{"template":{"metadata":{"annotations":{` | ||
for anno := range deployment.Spec.Template.Annotations { | ||
if _, ok := newPodAnnotations[anno]; !ok { | ||
// template annotation was removed | ||
for _, ignore := range c.OpConfig.IgnoredAnnotations { | ||
if anno == ignore { | ||
continue | ||
} | ||
} | ||
annotationToRemove += fmt.Sprintf(`"%s":null,`, anno) | ||
annotationToRemoveTemplate += fmt.Sprintf(`"%s":null,`, anno) | ||
} | ||
} | ||
annotationToRemove = strings.TrimSuffix(annotationToRemove, ",") + `}}}` | ||
annotationToRemoveTemplate = strings.TrimSuffix(annotationToRemoveTemplate, ",") + `}}}}}` | ||
deployment, err = c.KubeClient.Deployments(c.Namespace).Patch(context.TODO(), | ||
deployment.Name, types.StrategicMergePatchType, []byte(annotationToRemoveTemplate), metav1.PatchOptions{}, "") | ||
if err != nil { | ||
c.logger.Errorf("failed to remove annotations from %s connection pooler's pod template: %v", role, err) | ||
return nil, err | ||
} | ||
for _, pod := range pods { | ||
_, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, | ||
types.StrategicMergePatchType, []byte(annotationToRemove), metav1.PatchOptions{}) | ||
if err != nil { | ||
c.logger.Errorf("failed to remove annotations from pod %s: %v", pod.Name, err) | ||
return nil, err | ||
} | ||
} | ||
} | ||
deployment.Spec.Template.Annotations = newPodAnnotations | ||
} | ||
|
||
|
@@ -1060,7 +1097,6 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | |
if err != nil { | ||
return syncReason, err | ||
} | ||
c.ConnectionPooler[role].Deployment = deployment | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. So we now update the internal deployment reference only after every sync step succeeds, right? We should think through the consequences this might have. |
||
} | ||
|
||
newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(nil)) // including the downscaling annotations | ||
|
@@ -1069,15 +1105,10 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | |
if err != nil { | ||
return nil, err | ||
} | ||
c.ConnectionPooler[role].Deployment = deployment | ||
} | ||
} | ||
|
||
// check if pooler pods must be replaced due to secret update | ||
listOptions := metav1.ListOptions{ | ||
LabelSelector: labels.Set(c.connectionPoolerLabels(role, true).MatchLabels).String(), | ||
} | ||
pods, err = c.listPoolerPods(listOptions) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. ToDo for me: check whether we can use the version of the pods we now fetch before changing annotations, or whether we need to fetch them again for the following steps. |
||
if err != nil { | ||
return nil, err | ||
} | ||
|
@@ -1098,18 +1129,22 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | |
if err != nil { | ||
return nil, fmt.Errorf("could not delete pooler pod: %v", err) | ||
} | ||
} else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations); changed { | ||
patchData, err := metaAnnotationsPatch(deployment.Spec.Template.Annotations) | ||
if err != nil { | ||
return nil, fmt.Errorf("could not form patch for pooler's pod annotations: %v", err) | ||
} | ||
_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||
if err != nil { | ||
return nil, fmt.Errorf("could not patch annotations for pooler's pod %q: %v", pod.Name, err) | ||
} else { | ||
if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations); changed { | ||
patchData, err := metaAnnotationsPatch(deployment.Spec.Template.Annotations) | ||
if err != nil { | ||
return nil, fmt.Errorf("could not form patch for pooler's pod annotations: %v", err) | ||
} | ||
_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||
if err != nil { | ||
return nil, fmt.Errorf("could not patch annotations for pooler's pod %q: %v", pod.Name, err) | ||
} | ||
} | ||
} | ||
} | ||
|
||
c.ConnectionPooler[role].Deployment = deployment | ||
|
||
if service, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}); err == nil { | ||
c.ConnectionPooler[role].Service = service | ||
desiredSvc := c.generateConnectionPoolerService(c.ConnectionPooler[role]) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -573,6 +573,33 @@ func (c *Cluster) syncStatefulSet() error { | |
} | ||
} | ||
} | ||
annotationToRemove := "" | ||
for anno := range c.Statefulset.Spec.Template.Annotations { | ||
if _, ok := desiredSts.Spec.Template.Annotations[anno]; !ok { | ||
// template annotation was removed | ||
for _, ignore := range c.OpConfig.IgnoredAnnotations { | ||
if anno == ignore { | ||
continue | ||
} | ||
} | ||
if annotationToRemove != "" { | ||
annotationToRemove = `{"metadata":{"annotations":{` | ||
} | ||
annotationToRemove += fmt.Sprintf(`"%s":null,`, anno) | ||
// annotationToRemove := []byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":null}}}`, anno)) | ||
} | ||
} | ||
if annotationToRemove != "" { | ||
annotationToRemove = strings.TrimSuffix(annotationToRemove, ",") + `}}}` | ||
for _, pod := range pods { | ||
_, err = c.KubeClient.Pods(c.Namespace).Patch(context.Background(), pod.Name, | ||
types.StrategicMergePatchType, []byte(annotationToRemove), metav1.PatchOptions{}) | ||
if err != nil { | ||
c.logger.Errorf("failed to remove annotations from pod %s: %v", pod.Name, err) | ||
return err | ||
} | ||
} | ||
} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Sorry, another thinking out loud comment: We already introduced patch code for annotations. Could it not be reused? |
||
} | ||
if !cmp.match { | ||
if cmp.rollingUpdate { | ||
|
@@ -1594,6 +1621,27 @@ func (c *Cluster) syncLogicalBackupJob() error { | |
if reason != "" { | ||
c.logger.Infof("reason: %s", reason) | ||
} | ||
if strings.Contains(reason, "annotations do not match") { | ||
annotationToRemoveTemplate := `{"spec":{"jobTemplate":{"spec":{"template":{"metadata":{"annotations":{` | ||
for anno := range job.Spec.JobTemplate.Spec.Template.Annotations { | ||
if _, ok := desiredJob.Spec.JobTemplate.Spec.Template.Annotations[anno]; !ok { | ||
// template annotation was removed | ||
for _, ignore := range c.OpConfig.IgnoredAnnotations { | ||
if anno == ignore { | ||
continue | ||
} | ||
} | ||
annotationToRemoveTemplate += fmt.Sprintf(`"%s":null,`, anno) | ||
} | ||
} | ||
annotationToRemoveTemplate = strings.TrimSuffix(annotationToRemoveTemplate, ",") + `}}}}}}}` | ||
job, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), | ||
jobName, types.StrategicMergePatchType, []byte(annotationToRemoveTemplate), metav1.PatchOptions{}, "") | ||
if err != nil { | ||
c.logger.Errorf("failed to remove annotations from the logical backup job %q pod template: %v", jobName, err) | ||
return err | ||
} | ||
} | ||
if err = c.patchLogicalBackupJob(desiredJob); err != nil { | ||
return fmt.Errorf("could not update logical backup job to match desired state: %v", err) | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can we not use JSON marshalling here? This concatenation of `{{{` + `}}}` looks a bit "ugly".