
Commit e398cf8

Rafia Sabih authored
Avoid syncing when possible (#1274)
Avoid extra syncing when there are no changes in the pooler requirements. Add pooler-specific labels to pooler secrets. Add a test case to check pooler secret creation and deletion.

Co-authored-by: Rafia Sabih <rafia.sabih@zalando.de>
1 parent ff46bb0 commit e398cf8
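
The core idea from the commit message, reduced to a standalone sketch: skip the pooler sync when neither the pooler spec nor the enable flags changed and the pooler already exists and is still wanted. This is not the operator's code (the real check lives in syncConnectionPooler in the connection_pooler.go hunk below); the function and parameter names here are invented for illustration.

```go
package main

import "fmt"

// shouldSkipPoolerSync is a hypothetical condensation of the early return
// added in this commit: skip only when nothing changed AND the pooler
// already exists and is still requested by the new manifest.
func shouldSkipPoolerSync(specChanged, flagsChanged, poolerExists, poolerWanted bool) bool {
	return !specChanged && !flagsChanged && poolerExists && poolerWanted
}

func main() {
	// Nothing changed and the pooler is present and still wanted: skip the sync.
	fmt.Println(shouldSkipPoolerSync(false, false, true, true)) // true

	// Pooler requested but not created yet: sync must run even without a spec diff.
	fmt.Println(shouldSkipPoolerSync(false, false, false, true)) // false
}
```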

File tree

4 files changed (+55 -8 lines):

e2e/README.md
e2e/tests/test_e2e.py
pkg/cluster/connection_pooler.go
pkg/cluster/k8sres.go

e2e/README.md

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ To run the end 2 end test and keep the kind state execute:
 NOCLEANUP=True ./run.sh main
 ```
 
-## Run indidual test
+## Run individual test
 
 After having executed a normal E2E run with `NOCLEANUP=True` Kind still continues to run, allowing you subsequent test runs.
 
e2e/tests/test_e2e.py

Lines changed: 10 additions & 2 deletions
@@ -160,7 +160,7 @@ def test_overwrite_pooler_deployment(self):
         self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
         self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
         self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
-                             "Initial broken deplyment not rolled out")
+                             "Initial broken deployment not rolled out")
 
         self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -221,6 +221,8 @@ def test_enable_disable_connection_pooler(self):
         self.eventuallyEqual(lambda: k8s.count_services_with_label(
             'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
             2, "No pooler service found")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "Pooler secret not created")
 
         # Turn off only master connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -246,6 +248,8 @@ def test_enable_disable_connection_pooler(self):
         self.eventuallyEqual(lambda: k8s.count_services_with_label(
             'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
             1, "No pooler service found")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "Secret not created")
 
         # Turn off only replica connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -268,6 +272,8 @@ def test_enable_disable_connection_pooler(self):
             0, "Pooler replica pods not deleted")
         self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
             1, "No pooler service found")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "Secret not created")
 
         # scale up connection pooler deployment
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -301,6 +307,8 @@ def test_enable_disable_connection_pooler(self):
             0, "Pooler pods not scaled down")
         self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
             0, "Pooler service not removed")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'),
+                             4, "Secrets not deleted")
 
         # Verify that all the databases have pooler schema installed.
         # Do this via psql, since otherwise we need to deal with
@@ -1034,7 +1042,7 @@ def test_node_affinity(self):
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
-
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zzzz_cluster_deletion(self):
         '''
pkg/cluster/connection_pooler.go

Lines changed: 36 additions & 4 deletions
@@ -539,13 +539,13 @@ func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, depl
 // Test if two connection pooler configuration needs to be synced. For simplicity
 // compare not the actual K8S objects, but the configuration itself and request
 // sync if there is any difference.
-func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
+func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, logger *logrus.Entry) (sync bool, reasons []string) {
     reasons = []string{}
     sync = false
 
     changelog, err := diff.Diff(oldSpec, newSpec)
     if err != nil {
-        //c.logger.Infof("Cannot get diff, do not do anything, %+v", err)
+        logger.Infof("cannot get diff, do not do anything, %+v", err)
         return false, reasons
     }
 
@@ -681,13 +681,45 @@ func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql)
 }
 
 func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) {
-    logPoolerEssentials(c.logger, oldSpec, newSpec)
 
     var reason SyncReason
     var err error
     var newNeedConnectionPooler, oldNeedConnectionPooler bool
     oldNeedConnectionPooler = false
 
+    if oldSpec == nil {
+        oldSpec = &acidv1.Postgresql{
+            Spec: acidv1.PostgresSpec{
+                ConnectionPooler: &acidv1.ConnectionPooler{},
+            },
+        }
+    }
+
+    needSync, _ := needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger)
+    masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler)
+    if err != nil {
+        c.logger.Error("Error in getting diff of master connection pooler changes")
+    }
+    replicaChanges, err := diff.Diff(oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler)
+    if err != nil {
+        c.logger.Error("Error in getting diff of replica connection pooler changes")
+    }
+
+    // skip pooler sync only
+    // 1. if there is no diff in spec, AND
+    // 2. if connection pooler is already there and is also required as per newSpec
+    //
+    // Handling the case when connectionPooler is not there but it is required
+    // as per spec, hence do not skip syncing in that case, even though there
+    // is no diff in specs
+    if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) &&
+        (c.ConnectionPooler != nil && *newSpec.Spec.EnableConnectionPooler) {
+        c.logger.Debugln("syncing pooler is not required")
+        return nil, nil
+    }
+
+    logPoolerEssentials(c.logger, oldSpec, newSpec)
+
     // Check and perform the sync requirements for each of the roles.
     for _, role := range [2]PostgresRole{Master, Replica} {
 
@@ -841,7 +873,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
     var specReason []string
 
     if oldSpec != nil {
-        specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
+        specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger)
     }
 
     defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment)
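
The skip decision above hinges on diff.Diff returning an empty changelog when two values are equal; the existing calls in this file suggest the r3labs diff library. Below is a hedged, standalone illustration of that mechanic with a stand-in struct rather than the real acidv1.ConnectionPooler; the import path may differ by library version (e.g. a /v2 or /v3 suffix).

```go
package main

import (
	"fmt"

	"github.com/r3labs/diff"
)

// poolerSpec is a stand-in for acidv1.ConnectionPooler, only to show the mechanics.
type poolerSpec struct {
	NumberOfInstances *int32
	Mode              string
}

func main() {
	two := int32(2)
	oldSpec := poolerSpec{NumberOfInstances: &two, Mode: "transaction"}
	newSpec := poolerSpec{NumberOfInstances: &two, Mode: "transaction"}

	// Equal specs produce an empty changelog, which is what allows the
	// early return in syncConnectionPooler.
	changelog, err := diff.Diff(oldSpec, newSpec)
	if err != nil {
		fmt.Println("cannot get diff:", err)
		return
	}
	fmt.Println("changes detected:", len(changelog)) // 0

	// A real change shows up as an entry describing the path and values.
	newSpec.Mode = "session"
	changelog, _ = diff.Diff(oldSpec, newSpec)
	for _, c := range changelog {
		fmt.Printf("%s %v: %v -> %v\n", c.Type, c.Path, c.From, c.To)
	}
}
```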

pkg/cluster/k8sres.go

Lines changed: 8 additions & 1 deletion
@@ -1561,11 +1561,17 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
     }
 
     username := pgUser.Name
+    lbls := c.labelsSet(true)
+
+    if username == constants.ConnectionPoolerUserName {
+        lbls = c.connectionPoolerLabels("", false).MatchLabels
+    }
+
     secret := v1.Secret{
         ObjectMeta: metav1.ObjectMeta{
             Name:        c.credentialSecretName(username),
             Namespace:   namespace,
-            Labels:      c.labelsSet(true),
+            Labels:      lbls,
             Annotations: c.annotationsSet(nil),
         },
         Type: v1.SecretTypeOpaque,
@@ -1574,6 +1580,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
             "password": []byte(pgUser.Password),
         },
     }
+
     return &secret
 }
 
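
With the pooler MatchLabels on the secret, the selector used by the new e2e assertions can actually find it, while regular cluster secrets (application=spilo) stay out of that count. A minimal sketch with apimachinery's labels package, using illustrative label values; the real values come from the operator's connectionPoolerLabels and label configuration.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Illustrative labels a pooler secret would carry after this change.
	poolerSecretLabels := labels.Set{
		"application":  "db-connection-pooler",
		"cluster-name": "acid-minimal-cluster",
	}
	// Illustrative labels on a regular cluster secret.
	clusterSecretLabels := labels.Set{
		"application":  "spilo",
		"cluster-name": "acid-minimal-cluster",
	}

	// The selector the e2e test passes to count_secrets_with_label.
	selector, err := labels.Parse("application=db-connection-pooler,cluster-name=acid-minimal-cluster")
	if err != nil {
		panic(err)
	}

	fmt.Println("pooler secret matches:", selector.Matches(poolerSecretLabels))   // true
	fmt.Println("cluster secret matches:", selector.Matches(clusterSecretLabels)) // false
}
```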
