@@ -556,7 +556,7 @@ def compare_config():

        pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
        del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
-
+
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)

@@ -573,7 +573,7 @@ def compare_config():

        self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query % ("database", slot_to_change))[0], "bar",
            "The replication slot cannot be updated", 10, 5)
-
+
        # make sure slot from Patroni didn't get deleted
        self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query % ("slot_name", patroni_slot))), 1,
            "The replication slot from Patroni gets deleted", 10, 5)
@@ -929,7 +929,7 @@ def test_ignored_annotations(self):
                },
            }
        }
-
+
        old_sts_creation_timestamp = sts.metadata.creation_timestamp
        k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch)
        old_svc_creation_timestamp = svc.metadata.creation_timestamp
@@ -1254,7 +1254,7 @@ def test_persistent_volume_claim_retention_policy(self):
        }
        k8s.update_config(patch_scaled_policy_retain)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
        # decrease the number of instances
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
@@ -1622,7 +1622,7 @@ def test_password_rotation(self):
            },
        }
        k8s.api.core_v1.patch_namespaced_secret(
-            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
+            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
            namespace="default",
            body=secret_fake_rotation)

@@ -1638,7 +1638,7 @@ def test_password_rotation(self):
            "data": {
                "enable_password_rotation": "true",
                "password_rotation_interval": "30",
-                "password_rotation_user_retention": "30",  # should be set to 60
+                "password_rotation_user_retention": "30",  # should be set to 60
            },
        }
        k8s.update_config(enable_password_rotation)
@@ -1691,7 +1691,7 @@ def test_password_rotation(self):
            "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username))

        # disable password rotation for all other users (foo_user)
-        # and pick smaller intervals to see if the third fake rotation user is dropped
+        # and pick smaller intervals to see if the third fake rotation user is dropped
        enable_password_rotation = {
            "data": {
                "enable_password_rotation": "false",
@@ -2158,7 +2158,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci

        # if nodes are different we can quit here
        if master_nodes[0] not in replica_nodes:
-            return True
+            return True

        # enable pod anti affintiy in config map which should trigger movement of replica
        patch_enable_antiaffinity = {
@@ -2182,7 +2182,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
            }
            k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity")
            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_labels)
            k8s.wait_for_running_pods(cluster_labels, 2)

@@ -2193,7 +2193,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
            # if nodes are different we can quit here
            for target_node in target_nodes:
                if (target_node not in master_nodes or target_node not in replica_nodes) and master_nodes[0] in replica_nodes:
-                    print('Pods run on the same node')
+                    print('Pods run on the same node')
                    return False

        except timeout_decorator.TimeoutError:
@@ -2272,5 +2272,74 @@ def query_database_with_user(self, pod_name, db_name, query, user_name):

        return result_set

+    def test_topology_spread_constraints(self):
+        '''
+        Enable topologySpreadConstraints for pods
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+
+        # Verify we are in a good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        patch_node_label = {
+            "metadata": {
+                "labels": {
+                    "topology.kubernetes.io/zone": "zalando"
+                }
+            }
+        }
+
+        nodes = k8s.api.core_v1.list_node()
+        for node in nodes.items:
+            k8s.api.core_v1.patch_node(node.metadata.name, patch_node_label)
+
+        podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
+        k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+
+        patch_cordon_node = {
+            "spec": {
+                "unschedulable": True
+            }
+        }
+
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes, [])
+        self.assertNotEqual(replica_nodes, [])
+
+        # Cordon the replica's node
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_cordon_node)
+        # Delete the replica pod so it gets re-scheduled to the master node
+        replicas_pod = k8s.get_cluster_replica_pod()
+        k8s.api.core_v1.delete_namespaced_pod(replicas_pod.metadata.name, 'default')
+        # Wait for the replica pod to be re-scheduled to the master node
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+        # Assert master pod and replica pod are on the same node
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertEqual(master_nodes[0], replica_nodes[0])
+
+        patch_uncordon_node = {
+            "spec": {
+                "unschedulable": False
+            }
+        }
+
+        # Uncordon the replica's node
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_uncordon_node)
+
+        patch_enable_topology_spread_constraints = {
+            "data": {
+                "enable_postgres_topology_spread_constraints": "true"
+            }
+        }
+
+        k8s.update_config(patch_enable_topology_spread_constraints, "enable topologySpreadConstraints")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        # Assert master pod and replica pod are spread across two different nodes
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes[0], replica_nodes[0])
+

if __name__ == '__main__':
    unittest.main()
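For context on what the new toggle is expected to do: the test above only checks resulting pod placement, not the rendered manifest. The sketch below is not part of this PR; it shows one way the generated StatefulSet could be inspected for topology spread constraints with the official Kubernetes Python client. The concrete constraint fields mentioned in the comments (maxSkew, topologyKey, whenUnsatisfiable, labelSelector on cluster-name) are assumptions about what the operator might emit, not taken from this diff.

# Illustrative sketch, not part of the PR: inspect the StatefulSet the operator
# generates once enable_postgres_topology_spread_constraints is turned on.
# The constraint values named in the comments below are hypothetical.
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() when run in-cluster
apps_v1 = client.AppsV1Api()

sts = apps_v1.read_namespaced_stateful_set("acid-minimal-cluster", "default")
constraints = sts.spec.template.spec.topology_spread_constraints or []

for tsc in constraints:
    # e.g. max_skew=1, topology_key="topology.kubernetes.io/zone",
    # when_unsatisfiable="DoNotSchedule", label_selector on cluster-name
    print(tsc.max_skew, tsc.topology_key, tsc.when_unsatisfiable)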