@@ -559,7 +559,7 @@ def compare_config():
 
         pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
         del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
-
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)
 
@@ -576,7 +576,7 @@ def compare_config():
 
         self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query % ("database", slot_to_change))[0], "bar",
             "The replication slot cannot be updated", 10, 5)
-
+
         # make sure slot from Patroni didn't get deleted
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query % ("slot_name", patroni_slot))), 1,
             "The replication slot from Patroni gets deleted", 10, 5)
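For context on the two assertions above: get_slot_query is defined earlier in the test and does not appear in this hunk. A minimal sketch of the kind of check it enables, assuming it merely formats a column name and a slot name into a query against PostgreSQL's pg_replication_slots view (the exact query text is an assumption, not taken from the diff):

    # illustrative only: read one column of a named replication slot;
    # "database" and "slot_name" are real columns of pg_replication_slots,
    # query_database() is the helper the assertions above already use
    get_slot_query = "SELECT %s FROM pg_replication_slots WHERE slot_name = '%s'"
    rows = self.query_database(leader.metadata.name, "postgres",
                               get_slot_query % ("database", slot_to_change))
    self.assertEqual(rows[0], "bar")  # the patched slot should now point at database "bar"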
@@ -932,7 +932,7 @@ def test_ignored_annotations(self):
                 },
             }
         }
-
+
         old_sts_creation_timestamp = sts.metadata.creation_timestamp
         k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch)
         old_svc_creation_timestamp = svc.metadata.creation_timestamp
@@ -1369,7 +1369,7 @@ def test_persistent_volume_claim_retention_policy(self):
         }
         k8s.update_config(patch_scaled_policy_retain)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
         # decrease the number of instances
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
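The retention-policy hunk above only shows the config being applied and the subsequent scale-down; the bodies of patch_scaled_policy_retain and pg_patch_scale_down_instances live outside this hunk and are not reproduced here. The Kubernetes feature being exercised is the StatefulSet persistentVolumeClaimRetentionPolicy field; a hedged sketch of inspecting what the operator rendered, reusing the same AppsV1Api handle the tests already hold (attribute availability depends on the cluster and client version):

    # sketch, not part of the test suite: read back the PVC retention policy
    # from the cluster's StatefulSet; requires a cluster/client recent enough
    # to expose spec.persistentVolumeClaimRetentionPolicy
    sts = k8s.api.apps_v1.read_namespaced_stateful_set("acid-minimal-cluster", "default")
    policy = sts.spec.persistent_volume_claim_retention_policy
    if policy is not None:
        print(policy.when_scaled, policy.when_deleted)  # e.g. "Retain" / "Delete"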
@@ -1646,7 +1646,6 @@ def test_node_readiness_label(self):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(master_nodes)
 
-
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_overwrite_pooler_deployment(self):
         pooler_name = 'acid-minimal-cluster-pooler'
@@ -1799,7 +1798,7 @@ def test_password_rotation(self):
             },
         }
         k8s.api.core_v1.patch_namespaced_secret(
-            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
+            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
             namespace="default",
             body=secret_fake_rotation)
 
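The secret patched above, secret_fake_rotation, simulates a prior rotation for foo_user; its body is defined just before this hunk. Secret fields are stored base64-encoded, so reading the patched values back looks roughly like the sketch below (the decoding is standard Kubernetes client usage, only the surrounding variable names are illustrative):

    # illustrative read-back of the patched secret; k8s.api.core_v1 is the same
    # CoreV1Api handle the test already uses
    import base64

    secret = k8s.api.core_v1.read_namespaced_secret(
        "foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do", "default")
    username = base64.b64decode(secret.data["username"]).decode("utf-8")
    password = base64.b64decode(secret.data["password"]).decode("utf-8")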
@@ -1816,7 +1815,7 @@ def test_password_rotation(self):
                 "enable_password_rotation": "true",
                 "inherited_annotations": "environment",
                 "password_rotation_interval": "30",
-                "password_rotation_user_retention": "30",  # should be set to 60
+                "password_rotation_user_retention": "30",  # should be set to 60
             },
         }
         k8s.update_config(enable_password_rotation)
@@ -1885,7 +1884,7 @@ def test_password_rotation(self):
         self.assertTrue("environment" in db_user_secret.metadata.annotations, "Added annotation was not propagated to secret")
 
         # disable password rotation for all other users (foo_user)
-        # and pick smaller intervals to see if the third fake rotation user is dropped
+        # and pick smaller intervals to see if the third fake rotation user is dropped
         enable_password_rotation = {
             "data": {
                 "enable_password_rotation": "false",
@@ -2385,6 +2384,56 @@ def test_taint_based_eviction(self):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(master_nodes)
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_topology_spread_constraints(self):
+        '''
+        Enable topologySpreadConstraints for pods
+        '''
+        k8s = self.k8s
+        cluster_labels = "application=spilo,cluster-name=acid-minimal-cluster"
+
+        # verify we are in a good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes, [])
+        self.assertNotEqual(replica_nodes, [])
+
+        # patch a zone label onto the nodes for topologySpreadConstraints
+        patch_node_label = {
+            "metadata": {
+                "labels": {
+                    "topology.kubernetes.io/zone": "zalando"
+                }
+            }
+        }
+        k8s.api.core_v1.patch_node(master_nodes[0], patch_node_label)
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_node_label)
+
+        # scale out the Postgres pods
+        k8s.api.custom_objects_api.patch_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
+            {"spec": {"numberOfInstances": 6}})
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_labels), 6, "Postgresql StatefulSet not scaled to 6 pods")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 6, "Not all 6 pods are running")
+
+        worker_node_1 = 0
+        worker_node_2 = 0
+        pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_labels)
+        for pod in pods.items:
+            if pod.spec.node_name == 'postgres-operator-e2e-tests-worker':
+                worker_node_1 += 1
+            elif pod.spec.node_name == 'postgres-operator-e2e-tests-worker2':
+                worker_node_2 += 1
+
+        self.assertEqual(worker_node_1, worker_node_2)
+        self.assertEqual(worker_node_1, 3)
+        self.assertEqual(worker_node_2, 3)
+
+        # scale the Postgres pods back to the previous number of replicas
+        k8s.api.custom_objects_api.patch_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
+            {"spec": {"numberOfInstances": 2}})
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_cluster_deletion(self):
         '''
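The new test above labels both worker nodes with the same topology.kubernetes.io/zone value and then expects the six pods to be spread 3/3 across them. The pod-level field this behaviour relies on is the standard Kubernetes topologySpreadConstraints; a sketch of checking what ends up in the StatefulSet's pod template (field names follow the upstream Kubernetes Python client, and the values shown in the comment are examples, not taken from the diff):

    # sketch, not part of the diff: print the spread constraints rendered into
    # the generated pod template
    from kubernetes import client, config

    config.load_kube_config()
    apps = client.AppsV1Api()
    sts = apps.read_namespaced_stateful_set("acid-minimal-cluster", "default")
    for c in sts.spec.template.spec.topology_spread_constraints or []:
        # e.g. topology_key="topology.kubernetes.io/zone", max_skew=1,
        # when_unsatisfiable="DoNotSchedule"
        print(c.topology_key, c.max_skew, c.when_unsatisfiable)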
@@ -2460,7 +2509,7 @@ def test_zz_cluster_deletion(self):
             self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted")
             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted")
             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 8, "Secrets were deleted although disabled in config")
-            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config")
+            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 6, "PVCs were deleted although disabled in config")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -2502,7 +2551,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
 
             # if nodes are different we can quit here
            if master_nodes[0] not in replica_nodes:
-                return True
+                return True
 
             # enable pod anti affinity in config map which should trigger movement of replica
             patch_enable_antiaffinity = {
@@ -2526,7 +2575,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
             }
             k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity")
             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_labels)
             k8s.wait_for_running_pods(cluster_labels, 2)
 
@@ -2537,7 +2586,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
             # if nodes are different we can quit here
             for target_node in target_nodes:
                 if (target_node not in master_nodes or target_node not in replica_nodes) and master_nodes[0] in replica_nodes:
-                    print('Pods run on the same node')
+                    print('Pods run on the same node')
                     return False
 
         except timeout_decorator.TimeoutError: