Commit 1710fa6
test(controller): add integration test for reconciliation happy path (#123)
### Context

During [migration of the Service](#105) resource from Golang to a YAML manifest, I realized the controller's `Reconcile()` needed increased test coverage to ensure the migration introduced no externally visible behavior changes.

### Changes

- Tests Service, ServiceAccount, NetworkPolicy, and Deployment coordination and port alignment
- Validates NetworkPolicy, Service, and Deployment creation
- Verifies owner references across all managed resources

---------

Signed-off-by: Matthew F Leader <mleader@redhat.com>
1 parent 5ce3635 · commit 1710fa6

File tree

2 files changed: +188, -4 lines changed

controllers/llamastackdistribution_controller_test.go

Lines changed: 71 additions & 3 deletions
```diff
@@ -9,9 +9,11 @@ import (
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 )
@@ -108,7 +110,7 @@ func TestStorageConfiguration(t *testing.T) {
 	})
 
 	// act: reconcile the instance
-	ReconcileDistribution(t, instance)
+	ReconcileDistribution(t, instance, false)
 
 	// assert
 	deployment := &appsv1.Deployment{}
@@ -179,7 +181,7 @@ server:
 	require.NoError(t, k8sClient.Create(context.Background(), instance))
 
 	// Reconcile to create initial deployment
-	ReconcileDistribution(t, instance)
+	ReconcileDistribution(t, instance, false)
 
 	// Get the initial deployment and check for ConfigMap hash annotation
 	deployment := &appsv1.Deployment{}
@@ -218,7 +220,7 @@ server:
 	time.Sleep(2 * time.Second)
 
 	// Trigger reconciliation (in real scenarios this would be triggered by the watch)
-	ReconcileDistribution(t, instance)
+	ReconcileDistribution(t, instance, false)
 
 	// Verify the deployment was updated with a new hash
 	waitForResourceWithKeyAndCondition(
@@ -244,3 +246,69 @@ server:
 	// Note: In test environment, field indexer might not be set up properly,
 	// so we skip the isConfigMapReferenced checks which rely on field indexing
 }
+
+func TestReconcile(t *testing.T) {
+	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
+
+	// --- arrange ---
+	instanceName := "llamastackdistribution-sample"
+	instancePort := llamav1alpha1.DefaultServerPort
+	expectedSelector := map[string]string{
+		llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue,
+		"app.kubernetes.io/instance":  instanceName,
+	}
+	expectedPort := corev1.ServicePort{
+		Name:       llamav1alpha1.DefaultServicePortName,
+		Port:       instancePort,
+		TargetPort: intstr.FromInt(int(instancePort)),
+		Protocol:   corev1.ProtocolTCP,
+	}
+	operatorNamespaceName := "test-operator-namespace"
+
+	// set operator namespace to avoid service account file dependency
+	t.Setenv("OPERATOR_NAMESPACE", operatorNamespaceName)
+
+	namespace := createTestNamespace(t, operatorNamespaceName)
+	instance := NewDistributionBuilder().
+		WithName(instanceName).
+		WithNamespace(namespace.Name).
+		WithDistribution("starter").
+		WithPort(instancePort).
+		Build()
+	require.NoError(t, k8sClient.Create(context.Background(), instance))
+
+	// --- act ---
+	ReconcileDistribution(t, instance, true)
+
+	service := &corev1.Service{}
+	waitForResource(t, k8sClient, instance.Namespace, instance.Name+"-service", service)
+	deployment := &appsv1.Deployment{}
+	waitForResource(t, k8sClient, instance.Namespace, instance.Name, deployment)
+	networkpolicy := &networkingv1.NetworkPolicy{}
+	waitForResource(t, k8sClient, instance.Namespace, instance.Name+"-network-policy",
+		networkpolicy)
+	serviceAccount := &corev1.ServiceAccount{}
+	waitForResource(t, k8sClient, instance.Namespace, instance.Name+"-sa",
+		serviceAccount)
+
+	// --- assert ---
+	// Service behaviors
+	AssertServicePortMatches(t, service, expectedPort)
+	AssertServiceAndDeploymentPortsAlign(t, service, deployment)
+	AssertServiceSelectorMatches(t, service, expectedSelector)
+	AssertServiceAndDeploymentSelectorsAlign(t, service, deployment)
+
+	// ServiceAccount behaviors
+	AssertServiceAccountDeploymentAlign(t, deployment, serviceAccount)
+
+	// NetworkPolicy behaviors
+	AssertNetworkPolicyTargetsDeploymentPods(t, networkpolicy, deployment)
+	AssertNetworkPolicyAllowsDeploymentPort(t, networkpolicy, deployment, operatorNamespaceName)
+	AssertNetworkPolicyIsIngressOnly(t, networkpolicy)
+
+	// Resource ownership behaviors
+	AssertResourceOwnedByInstance(t, service, instance)
+	AssertResourceOwnedByInstance(t, deployment, instance)
+	AssertResourceOwnedByInstance(t, networkpolicy, instance)
+	AssertResourceOwnedByInstance(t, serviceAccount, instance)
+}
```
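Side note: `waitForResource`, `createTestNamespace`, and the `NewDistributionBuilder` fixture used above are pre-existing helpers in this test suite and are not part of this diff. For orientation, here is a minimal sketch of what a polling helper in the spirit of `waitForResource` could look like, reusing the test package's existing imports (`context`, `time`, `require`, `types`, and controller-runtime's `client`); this is a hypothetical illustration, not the repository's actual implementation:

```go
// Hypothetical sketch only: a waitForResource-style helper that polls the API
// server until the named object exists. The real helper in this repository
// may differ in signature, timeout, and error reporting.
func waitForResource(t *testing.T, c client.Client, namespace, name string, obj client.Object) {
	t.Helper()
	require.Eventually(t, func() bool {
		// Keep fetching until the controller has created the resource.
		return c.Get(context.Background(),
			types.NamespacedName{Namespace: namespace, Name: name}, obj) == nil
	}, 10*time.Second, 250*time.Millisecond,
		"resource %s/%s was never created", namespace, name)
}
```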

controllers/testing_support_test.go

Lines changed: 117 additions & 1 deletion
```diff
@@ -3,6 +3,7 @@ package controllers_test
 import (
 	"context"
 	"fmt"
+	"slices"
 	"testing"
 	"time"
 
@@ -272,10 +273,125 @@ func AssertPVCHasSize(t *testing.T, pvc *corev1.PersistentVolumeClaim, expectedS
 		"PVC should request %s storage, got %s", expectedSize, storageRequest.String())
 }
 
-func ReconcileDistribution(t *testing.T, instance *llamav1alpha1.LlamaStackDistribution) {
+// AssertServicePortMatches verifies that a service has the expected port configuration.
+func AssertServicePortMatches(t *testing.T, service *corev1.Service, expectedPort corev1.ServicePort) {
+	t.Helper()
+	require.Len(t, service.Spec.Ports, 1, "Service should have exactly one port")
+	require.Equal(t, expectedPort, service.Spec.Ports[0], "Service port should match expected")
+}
+
+// AssertServiceAndDeploymentPortsAlign verifies that service target port matches deployment container port.
+func AssertServiceAndDeploymentPortsAlign(t *testing.T, service *corev1.Service, deployment *appsv1.Deployment) {
+	t.Helper()
+	require.Len(t, service.Spec.Ports, 1, "Service should have exactly one port")
+	require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Deployment should have exactly one container")
+	require.Len(t, deployment.Spec.Template.Spec.Containers[0].Ports, 1, "Container should have exactly one port")
+
+	serviceTargetPort := service.Spec.Ports[0].TargetPort.IntVal
+	containerPort := deployment.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort
+	require.Equal(t, serviceTargetPort, containerPort, "Service target port should match deployment container port")
+}
+
+// AssertServiceSelectorMatches verifies that a service has the expected selector.
+func AssertServiceSelectorMatches(t *testing.T, service *corev1.Service, expectedSelector map[string]string) {
+	t.Helper()
+	require.Equal(t, expectedSelector, service.Spec.Selector, "Service selector should match expected")
+}
+
+// AssertServiceAndDeploymentSelectorsAlign verifies that service selector matches deployment pod labels.
+func AssertServiceAndDeploymentSelectorsAlign(t *testing.T, service *corev1.Service, deployment *appsv1.Deployment) {
+	t.Helper()
+	require.Equal(t, service.Spec.Selector, deployment.Spec.Template.Labels, "Service selector should match deployment pod labels")
+}
+
+// AssertNetworkPolicyTargetsDeploymentPods verifies that network policy targets the same pods as deployment.
+func AssertNetworkPolicyTargetsDeploymentPods(t *testing.T, networkPolicy *networkingv1.NetworkPolicy, deployment *appsv1.Deployment) {
+	t.Helper()
+	require.Equal(t, deployment.Spec.Template.Labels, networkPolicy.Spec.PodSelector.MatchLabels,
+		"NetworkPolicy should target same pods as deployment")
+}
+
+// hasMatchingIngressRule is a generic helper function that checks if a network policy
+// contains at least one ingress rule that meets two specific criteria:
+// 1. The rule allows traffic on the specified 'port'.
+// 2. The rule's source (the 'From' field) matches a custom condition defined by the 'peerPredicate'.
+func hasMatchingIngressRule(
+	t *testing.T,
+	policy *networkingv1.NetworkPolicy,
+	port int32,
+	peerPredicate func(peer networkingv1.NetworkPolicyPeer) bool,
+) bool {
+	t.Helper()
+	for _, rule := range policy.Spec.Ingress {
+		// First, check if this rule's source (any of its 'From' peers) matches our criteria.
+		// If not, move on to the next one.
+		if !slices.ContainsFunc(rule.From, peerPredicate) {
+			continue
+		}
+
+		// Check if this same rule also allows traffic on the required port.
+		// Both conditions must be met by a single rule for the policy to be
+		// considered valid.
+		portMatches := slices.ContainsFunc(rule.Ports, func(p networkingv1.NetworkPolicyPort) bool {
+			return p.Port != nil && p.Port.IntVal == port
+		})
+
+		if portMatches {
+			// This rule meets both the source and port requirements.
+			return true
+		}
+	}
+
+	// Returning false signifies that no single rule in the entire policy satisfied
+	// both the source (predicate) and port conditions for this specific check.
+	return false
+}
+
+// AssertNetworkPolicyAllowsDeploymentPort verifies that the network policy
+// allows traffic from both intra-stack components and the operator.
+func AssertNetworkPolicyAllowsDeploymentPort(t *testing.T, networkPolicy *networkingv1.NetworkPolicy, deployment *appsv1.Deployment, operatorNamespace string) {
+	t.Helper()
+	require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Deployment should have exactly one container")
+	require.Len(t, deployment.Spec.Template.Spec.Containers[0].Ports, 1, "Container should have exactly one port")
+	containerPort := deployment.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort
+
+	// Behavior 1: Verify a rule exists for intra-stack communication.
+	intraStackPredicate := func(peer networkingv1.NetworkPolicyPeer) bool {
+		return peer.PodSelector != nil && peer.PodSelector.MatchLabels["app.kubernetes.io/part-of"] == "llama-stack"
+	}
+	require.True(t,
+		hasMatchingIngressRule(t, networkPolicy, containerPort, intraStackPredicate),
+		"NetworkPolicy is missing a rule to allow traffic from other Llama Stack components on port %d", containerPort)
+
+	// Behavior 2: Verify a rule for operator communication exists.
+	// This allows the operator to communicate with the server pods it manages
+	// from its separate namespace for tasks like health checks.
+	operatorPredicate := func(peer networkingv1.NetworkPolicyPeer) bool {
+		return peer.NamespaceSelector != nil && peer.NamespaceSelector.MatchLabels["kubernetes.io/metadata.name"] == operatorNamespace
+	}
+	require.True(t,
+		hasMatchingIngressRule(t, networkPolicy, containerPort, operatorPredicate),
+		"NetworkPolicy is missing a rule to allow traffic from the operator in namespace '%s' on port %d", operatorNamespace, containerPort)
+}
+
+// AssertNetworkPolicyIsIngressOnly verifies that network policy is configured for ingress-only traffic.
+func AssertNetworkPolicyIsIngressOnly(t *testing.T, networkPolicy *networkingv1.NetworkPolicy) {
+	t.Helper()
+	expectedPolicyTypes := []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}
+	require.Equal(t, expectedPolicyTypes, networkPolicy.Spec.PolicyTypes, "NetworkPolicy should be ingress-only")
+}
+
+func AssertServiceAccountDeploymentAlign(t *testing.T, deployment *appsv1.Deployment, serviceAccount *corev1.ServiceAccount) {
+	t.Helper()
+	require.Equal(t, serviceAccount.Name, deployment.Spec.Template.Spec.ServiceAccountName,
+		"Deployment should use the created ServiceAccount for pod permissions")
+}
+
+func ReconcileDistribution(t *testing.T, instance *llamav1alpha1.LlamaStackDistribution, enableNetworkPolicy bool) {
 	t.Helper()
 	// Create reconciler and run reconciliation
 	reconciler := createTestReconciler()
+	reconciler.EnableNetworkPolicy = enableNetworkPolicy
 	_, err := reconciler.Reconcile(context.Background(), ctrl.Request{
 		NamespacedName: types.NamespacedName{
 			Name: instance.Name,
```
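Similarly, `AssertResourceOwnedByInstance`, called at the end of `TestReconcile`, predates this commit and is not shown in the diff. A minimal sketch of such an owner-reference assertion, again assuming the test package's existing imports (`slices`, `require`, `metav1`, `llamav1alpha1`), could look like the following; treat it as illustrative rather than the repository's actual helper:

```go
// Hypothetical sketch only: checks that the object carries an owner reference
// back to the LlamaStackDistribution instance the controller reconciled.
func AssertResourceOwnedByInstance(t *testing.T, obj metav1.Object, instance *llamav1alpha1.LlamaStackDistribution) {
	t.Helper()
	owned := slices.ContainsFunc(obj.GetOwnerReferences(), func(ref metav1.OwnerReference) bool {
		// controllerutil.SetControllerReference would populate Name and UID
		// from the owning instance.
		return ref.Name == instance.Name && ref.UID == instance.UID
	})
	require.True(t, owned, "%s should be owned by LlamaStackDistribution %s",
		obj.GetName(), instance.Name)
}
```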
