Skip to content

Commit c4ae116

Browse files
authored
Fix connection pooler deployment selectors (zalando#1213)
Stick with the existing pooler deployment selector labels to stay compatible with existing deployments. Make the use of additional labels explicit and avoid them where they are not needed: the Deployment selector and Service selector now do not use extra labels, while the pod spec does.
1 parent 580883b commit c4ae116

File tree

6 files changed

+92
-20
lines changed

6 files changed

+92
-20
lines changed

e2e/scripts/watch_objects.sh

+2-1
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,8 @@ echo 'Statefulsets'
1616
kubectl get statefulsets --all-namespaces
1717
echo
1818
echo 'Deployments'
19-
kubectl get deployments --all-namespaces -l application=db-connection-pooler -l name=postgres-operator
19+
kubectl get deployments --all-namespaces -l application=db-connection-pooler
20+
kubectl get deployments --all-namespaces -l application=postgres-operator
2021
echo
2122
echo
2223
echo 'Step from operator deployment'

e2e/tests/test_e2e.py

+34
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,40 @@ def setUpClass(cls):
152152
print('Operator log: {}'.format(k8s.get_operator_log()))
153153
raise
154154

155+
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
156+
def test_overwrite_pooler_deployment(self):
157+
self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
158+
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
159+
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
160+
"Initial broken deployment not rolled out")
161+
162+
self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
163+
'acid.zalan.do', 'v1', 'default',
164+
'postgresqls', 'acid-minimal-cluster',
165+
{
166+
'spec': {
167+
'enableConnectionPooler': True
168+
}
169+
})
170+
171+
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
172+
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2,
173+
"Operator did not succeed in overwriting labels")
174+
175+
self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
176+
'acid.zalan.do', 'v1', 'default',
177+
'postgresqls', 'acid-minimal-cluster',
178+
{
179+
'spec': {
180+
'enableConnectionPooler': False
181+
}
182+
})
183+
184+
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
185+
self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
186+
0, "Pooler pods not scaled down")
187+
188+
155189
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
156190
def test_enable_disable_connection_pooler(self):
157191
'''
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
# will not run but is good enough for tests to fail
2+
apiVersion: apps/v1
3+
kind: Deployment
4+
metadata:
5+
name: acid-minimal-cluster-pooler
6+
labels:
7+
application: db-connection-pooler
8+
connection-pooler: acid-minimal-cluster-pooler
9+
spec:
10+
replicas: 1
11+
selector:
12+
matchLabels:
13+
application: db-connection-pooler
14+
connection-pooler: acid-minimal-cluster-pooler
15+
cluster-name: acid-minimal-cluster
16+
template:
17+
metadata:
18+
labels:
19+
application: db-connection-pooler
20+
connection-pooler: acid-minimal-cluster-pooler
21+
cluster-name: acid-minimal-cluster
22+
spec:
23+
serviceAccountName: postgres-operator
24+
containers:
25+
- name: postgres-operator
26+
image: registry.opensource.zalan.do/acid/pgbouncer:master-11
27+
imagePullPolicy: IfNotPresent
28+
resources:
29+
requests:
30+
cpu: 100m
31+
memory: 250Mi
32+
limits:
33+
cpu: 500m
34+
memory: 500Mi
35+
env: []

manifests/postgres-operator.yaml

+2
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@ apiVersion: apps/v1
22
kind: Deployment
33
metadata:
44
name: postgres-operator
5+
labels:
6+
application: postgres-operator
57
spec:
68
replicas: 1
79
strategy:

pkg/cluster/connection_pooler.go

+16-16
Original file line numberDiff line numberDiff line change
@@ -78,22 +78,22 @@ func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
7878
// have e.g. different `application` label, so that recreatePod operation will
7979
// not interfere with it (it lists all the pods via labels, and if there would
8080
// be no difference, it will recreate also pooler pods).
81-
func (c *Cluster) connectionPoolerLabelsSelector(role PostgresRole) *metav1.LabelSelector {
82-
connectionPoolerLabels := labels.Set(map[string]string{})
81+
func (c *Cluster) connectionPoolerLabels(role PostgresRole, addExtraLabels bool) *metav1.LabelSelector {
82+
poolerLabels := c.labelsSet(addExtraLabels)
8383

84-
extraLabels := labels.Set(map[string]string{
85-
"connection-pooler": c.connectionPoolerName(role),
86-
"application": "db-connection-pooler",
87-
"spilo-role": string(role),
88-
"cluster-name": c.Name,
89-
"Namespace": c.Namespace,
90-
})
84+
// TODO should be config values
85+
poolerLabels["application"] = "db-connection-pooler"
86+
poolerLabels["connection-pooler"] = c.connectionPoolerName(role)
9187

92-
connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false))
93-
connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels)
88+
if addExtraLabels {
89+
extraLabels := map[string]string{}
90+
extraLabels["spilo-role"] = string(role)
91+
92+
poolerLabels = labels.Merge(poolerLabels, extraLabels)
93+
}
9494

9595
return &metav1.LabelSelector{
96-
MatchLabels: connectionPoolerLabels,
96+
MatchLabels: poolerLabels,
9797
MatchExpressions: nil,
9898
}
9999
}
@@ -284,7 +284,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
284284

285285
podTemplate := &v1.PodTemplateSpec{
286286
ObjectMeta: metav1.ObjectMeta{
287-
Labels: c.connectionPoolerLabelsSelector(role).MatchLabels,
287+
Labels: c.connectionPoolerLabels(role, true).MatchLabels,
288288
Namespace: c.Namespace,
289289
Annotations: c.generatePodAnnotations(spec),
290290
},
@@ -338,7 +338,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
338338
ObjectMeta: metav1.ObjectMeta{
339339
Name: connectionPooler.Name,
340340
Namespace: connectionPooler.Namespace,
341-
Labels: c.connectionPoolerLabelsSelector(connectionPooler.Role).MatchLabels,
341+
Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels,
342342
Annotations: map[string]string{},
343343
// make StatefulSet object its owner to represent the dependency.
344344
// By itself StatefulSet is being deleted with "Orphaned"
@@ -350,7 +350,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
350350
},
351351
Spec: appsv1.DeploymentSpec{
352352
Replicas: numberOfInstances,
353-
Selector: c.connectionPoolerLabelsSelector(connectionPooler.Role),
353+
Selector: c.connectionPoolerLabels(connectionPooler.Role, false),
354354
Template: *podTemplate,
355355
},
356356
}
@@ -389,7 +389,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
389389
ObjectMeta: metav1.ObjectMeta{
390390
Name: connectionPooler.Name,
391391
Namespace: connectionPooler.Namespace,
392-
Labels: c.connectionPoolerLabelsSelector(connectionPooler.Role).MatchLabels,
392+
Labels: c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels,
393393
Annotations: map[string]string{},
394394
// make StatefulSet object its owner to represent the dependency.
395395
// By itself StatefulSet is being deleted with "Orphaned"

pkg/cluster/connection_pooler_test.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -838,17 +838,17 @@ func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresR
838838
func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
839839
poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"]
840840

841-
if poolerLabels != cluster.connectionPoolerLabelsSelector(role).MatchLabels["connection-pooler"] {
841+
if poolerLabels != cluster.connectionPoolerLabels(role, true).MatchLabels["connection-pooler"] {
842842
return fmt.Errorf("Pod labels do not match, got %+v, expected %+v",
843-
podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(role).MatchLabels)
843+
podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabels(role, true).MatchLabels)
844844
}
845845

846846
return nil
847847
}
848848

849849
func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
850850
labels := deployment.Spec.Selector.MatchLabels
851-
expected := cluster.connectionPoolerLabelsSelector(Master).MatchLabels
851+
expected := cluster.connectionPoolerLabels(Master, true).MatchLabels
852852

853853
if labels["connection-pooler"] != expected["connection-pooler"] {
854854
return fmt.Errorf("Labels are incorrect, got %+v, expected %+v",

0 commit comments

Comments
 (0)