Skip to content

Commit 3d87e0a

Browse files
committed
feat: add nodeselector annotation to limit LB pool members
1 parent 9cb6142 commit 3d87e0a

File tree

6 files changed

+291
-58
lines changed

6 files changed

+291
-58
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -429,6 +429,7 @@ Changes to the following annotations causes pools to be recreated and cause an e
429429
- `k8s.cloudscale.ch/loadbalancer-pool-algorithm`
430430
- `k8s.cloudscale.ch/loadbalancer-pool-protocol`
431431
- `k8s.cloudscale.ch/loadbalancer-listener-allowed-subnets`
432+
- `k8s.cloudscale.ch/loadbalancer-node-selector`
432433

433434
Additionally, changes to `spec.externalTrafficPolicy` have the same effect.
434435

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
# Deploys the docker.io/nginxdemos/hello:plain-text container and creates a
2+
# loadbalancer service with a node-selector annotation for it:
3+
#
4+
# export KUBECONFIG=path/to/kubeconfig
5+
# kubectl apply -f nginx-hello.yml
6+
#
7+
# Wait for `kubectl describe service hello` to show "Loadbalancer Ensured",
8+
# then use the IP address found under "LoadBalancer Ingress" to connect to the
9+
# service.
10+
#
11+
# You can also use the following shortcut:
12+
#
13+
# curl http://$(kubectl get service hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
14+
#
15+
# If you follow the nginx log, you will see that nginx sees a cluster internal
16+
# IP address as source of requests:
17+
#
18+
# kubectl logs -l "app=hello"
19+
#
20+
---
21+
apiVersion: apps/v1
22+
kind: Deployment
23+
metadata:
24+
name: hello
25+
spec:
26+
replicas: 2
27+
selector:
28+
matchLabels:
29+
app: hello
30+
template:
31+
metadata:
32+
labels:
33+
app: hello
34+
spec:
35+
containers:
36+
- name: hello
37+
image: docker.io/nginxdemos/hello:plain-text
38+
nodeSelector:
39+
kubernetes.io/hostname: k8test-worker-2
40+
---
41+
apiVersion: v1
42+
kind: Service
43+
metadata:
44+
labels:
45+
app: hello
46+
annotations:
47+
k8s.cloudscale.ch/loadbalancer-node-selector: "kubernetes.io/hostname=k8test-worker-2"
48+
name: hello
49+
spec:
50+
ports:
51+
- port: 80
52+
protocol: TCP
53+
targetPort: 80
54+
name: http
55+
selector:
56+
app: hello
57+
type: LoadBalancer

pkg/cloudscale_ccm/loadbalancer.go

Lines changed: 48 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import (
1111
"github.com/cloudscale-ch/cloudscale-go-sdk/v6"
1212
v1 "k8s.io/api/core/v1"
1313
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14+
"k8s.io/apimachinery/pkg/labels"
1415
"k8s.io/client-go/kubernetes"
1516
"k8s.io/klog/v2"
1617
"k8s.io/utils/ptr"
@@ -208,7 +209,7 @@ const (
208209
// connections timing out while the monitor is updated.
209210
LoadBalancerHealthMonitorTimeoutS = "k8s.cloudscale.ch/loadbalancer-health-monitor-timeout-s"
210211

211-
// LoadBalancerHealthMonitorDownThreshold is the number of the checks that
212+
// LoadBalancerHealthMonitorUpThreshold is the number of the checks that
212213
// need to succeed before a pool member is considered up. Defaults to 2.
213214
LoadBalancerHealthMonitorUpThreshold = "k8s.cloudscale.ch/loadbalancer-health-monitor-up-threshold"
214215

@@ -278,7 +279,7 @@ const (
278279
// Changing this annotation on an established service is considered safe.
279280
LoadBalancerListenerTimeoutMemberDataMS = "k8s.cloudscale.ch/loadbalancer-timeout-member-data-ms"
280281

281-
// LoadBalancerSubnetLimit is a JSON list of subnet UUIDs that the
282+
// LoadBalancerListenerAllowedSubnets is a JSON list of subnet UUIDs that the
282283
// loadbalancer should use. By default, all subnets of a node are used:
283284
//
284285
// * `[]` means that anyone is allowed to connect (default).
@@ -291,6 +292,10 @@ const (
291292
// This is an advanced feature, useful if you have nodes that are in
292293
// multiple private subnets.
293294
LoadBalancerListenerAllowedSubnets = "k8s.cloudscale.ch/loadbalancer-listener-allowed-subnets"
295+
296+
// LoadBalancerNodeSelector can be set to restrict which nodes are added to the LB pool.
297+
// It accepts a standard Kubernetes label selector string.
298+
LoadBalancerNodeSelector = "k8s.cloudscale.ch/loadbalancer-node-selector"
294299
)
295300

296301
type loadbalancer struct {
@@ -387,6 +392,11 @@ func (l *loadbalancer) EnsureLoadBalancer(
387392
return nil, err
388393
}
389394

395+
nodes, err := filterNodesBySelector(serviceInfo, nodes)
396+
if err != nil {
397+
return nil, err
398+
}
399+
390400
// Refuse to do anything if there are no nodes
391401
if len(nodes) == 0 {
392402
return nil, errors.New(
@@ -396,7 +406,7 @@ func (l *loadbalancer) EnsureLoadBalancer(
396406
}
397407

398408
// Reconcile
399-
err := reconcileLbState(ctx, l.lbs.client, func() (*lbState, error) {
409+
err = reconcileLbState(ctx, l.lbs.client, func() (*lbState, error) {
400410
// Get the desired state from Kubernetes
401411
servers, err := l.srv.mapNodes(ctx, nodes).All()
402412
if err != nil {
@@ -442,6 +452,28 @@ func (l *loadbalancer) EnsureLoadBalancer(
442452
return result, nil
443453
}
444454

455+
func filterNodesBySelector(
456+
serviceInfo *serviceInfo,
457+
nodes []*v1.Node,
458+
) ([]*v1.Node, error) {
459+
selector := labels.Everything()
460+
if v := serviceInfo.annotation(LoadBalancerNodeSelector); v != "" {
461+
var err error
462+
selector, err = labels.Parse(v)
463+
if err != nil {
464+
return nil, fmt.Errorf("unable to parse selector: %w", err)
465+
}
466+
}
467+
selectedNodes := make([]*v1.Node, 0, len(nodes))
468+
for _, node := range nodes {
469+
if selector.Matches(labels.Set(node.Labels)) {
470+
selectedNodes = append(selectedNodes, node)
471+
}
472+
}
473+
474+
return selectedNodes, nil
475+
}
476+
445477
// UpdateLoadBalancer updates hosts under the specified load balancer.
446478
// Implementations must treat the *v1.Service and *v1.Node
447479
// parameters as read-only and not modify them.
@@ -461,6 +493,19 @@ func (l *loadbalancer) UpdateLoadBalancer(
461493
return err
462494
}
463495

496+
nodes, err := filterNodesBySelector(serviceInfo, nodes)
497+
if err != nil {
498+
return err
499+
}
500+
501+
// Refuse to do anything if there are no nodes
502+
if len(nodes) == 0 {
503+
return errors.New(
504+
"no valid nodes for service found, please verify there is " +
505+
"at least one that allows load balancers",
506+
)
507+
}
508+
464509
// Reconcile
465510
return reconcileLbState(ctx, l.lbs.client, func() (*lbState, error) {
466511
// Get the desired state from Kubernetes

pkg/cloudscale_ccm/service_info.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,8 @@ func (s serviceInfo) annotation(key string) string {
118118
return s.annotationOrDefault(key, "50000")
119119
case LoadBalancerListenerAllowedSubnets:
120120
return s.annotationOrDefault(key, "[]")
121+
case LoadBalancerNodeSelector:
122+
return s.annotationOrDefault(key, "")
121123
default:
122124
return s.annotationOrElse(key, func() string {
123125
klog.Warning("unknown annotation:", key)

0 commit comments

Comments (0)