From 8504f8cfb4fa17e8b8e319ebd3d491b6c0f68fe4 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 14 Nov 2025 01:31:37 +0800 Subject: [PATCH 1/2] Added status back-reporting controller Signed-off-by: michaelawyu --- .../statusbackreporter/controller.go | 309 +++++++++ .../controller_integration_test.go | 448 +++++++++++++ .../statusbackreporter/controller_test.go | 609 ++++++++++++++++++ .../statusbackreporter/suite_test.go | 142 ++++ 4 files changed, 1508 insertions(+) create mode 100644 pkg/controllers/statusbackreporter/controller.go create mode 100644 pkg/controllers/statusbackreporter/controller_integration_test.go create mode 100644 pkg/controllers/statusbackreporter/controller_test.go create mode 100644 pkg/controllers/statusbackreporter/suite_test.go diff --git a/pkg/controllers/statusbackreporter/controller.go b/pkg/controllers/statusbackreporter/controller.go new file mode 100644 index 000000000..524408c3f --- /dev/null +++ b/pkg/controllers/statusbackreporter/controller.go @@ -0,0 +1,309 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package statusbackreporter + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + errorsutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" + parallelizerutil "github.com/kubefleet-dev/kubefleet/pkg/utils/parallelizer" +) + +// Reconciler reconciles a Work object (specifically its status) to back-report +// statuses to their corresponding original resources in the hub cluster. +type Reconciler struct { + hubClient client.Client + hubDynamicClient dynamic.Interface + + parallelizer parallelizerutil.Parallelizer +} + +// NewReconciler creates a new Reconciler. +func NewReconciler(hubClient client.Client, hubDynamicClient dynamic.Interface, parallelizer parallelizerutil.Parallelizer) *Reconciler { + if parallelizer == nil { + klog.V(2).InfoS("parallelizer is not set; using the default parallelizer with a worker count of 1") + parallelizer = parallelizerutil.NewParallelizer(1) + } + + return &Reconciler{ + hubClient: hubClient, + hubDynamicClient: hubDynamicClient, + parallelizer: parallelizer, + } +} + +// Reconcile reconciles the Work object to back-report statuses to their corresponding +// original resources in the hub cluster. 
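+//
+// Back-reporting is performed only when the Work object carries a report-back strategy of the
+// Mirror type with the OriginalResource destination; otherwise the reconciliation is a no-op.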
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + workRef := klog.KRef(req.Namespace, req.Name) + startTime := time.Now() + klog.V(2).InfoS("Reconciliation loop starts", "controller", "statusBackReporter", "work", workRef) + defer func() { + latency := time.Since(startTime).Milliseconds() + klog.V(2).InfoS("Reconciliation loop ends", "controller", "statusBackReporter", "work", workRef, "latency", latency) + }() + + work := &placementv1beta1.Work{} + if err := r.hubClient.Get(ctx, req.NamespacedName, work); err != nil { + klog.ErrorS(err, "Failed to retrieve Work object", "work", workRef) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // TO-DO (chenyu1): verify status back-reporting strategy from the source (CRP or RP objects), + // rather than using the data copied to the Work object. + reportBackStrategy := work.Spec.ReportBackStrategy + switch { + case reportBackStrategy == nil: + klog.V(2).InfoS("Skip status back-reporting; the strategy has not been set", "work", workRef) + return ctrl.Result{}, nil + case reportBackStrategy.Type != placementv1beta1.ReportBackStrategyTypeMirror: + klog.V(2).InfoS("Skip status back-reporting; it has been disabled in the strategy", "work", workRef) + return ctrl.Result{}, nil + case reportBackStrategy.Destination == nil: + // This in theory should never occur; CEL based validation should have rejected such strategies. + klog.V(2).InfoS("Skip status back-reporting; destination has not been set in the strategy", "work", workRef) + return ctrl.Result{}, nil + case *reportBackStrategy.Destination != placementv1beta1.ReportBackDestinationOriginalResource: + klog.V(2).InfoS("Skip status back-reporting; destination has been set to the Work API", "work", workRef) + return ctrl.Result{}, nil + } + + // Peform a sanity check; make sure that mirroring back to original resources can be done, i.e., + // the scheduling policy is set to the PickFixed type with exactly one target cluster, or the PickN + // type with the number of clusters set to 1. + // + // TO-DO (chenyu1): at this moment the controller will attempt to locate the CRP/RP object that is + // associated with a Work object by parsing its name; KubeFleet does have a label (`kubernetes-fleet.io/parent-CRP`) + // that tracks the name of the placement object associated with a Work object, however, for RP objects + // the name does not include the owner namespace, which makes it less useful. Once the label situation + // improves, the statuses back-reporter should read the label instead for CRP/RP tracking here. + placementObj, err := r.validatePlacementObjectForOriginalResourceStatusBackReporting(ctx, work) + if err != nil { + klog.ErrorS(err, "Failed to validate the placement object associated with the Work object for back-reporting statuses to original resources", "work", workRef) + return ctrl.Result{}, err + } + + // Prepare a map for quick lookup of whether a resource is enveloped. + isResEnvelopedByIdStr := prepareIsResEnvelopedMap(placementObj) + + // Back-report statuses to original resources. + + // Prepare a child context. + // Cancel the child context anyway to avoid leaks. 
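+	//
+	// The manifest conditions are processed in parallel; per-manifest errors are collected in the
+	// errs slice and aggregated into a single error once all workers have returned.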
+ childCtx, cancel := context.WithCancel(ctx) + defer cancel() + errs := make([]error, len(work.Status.ManifestConditions)) + doWork := func(pieces int) { + manifestCond := &work.Status.ManifestConditions[pieces] + resIdentifier := manifestCond.Identifier + + applyCond := meta.FindStatusCondition(work.Status.Conditions, placementv1beta1.WorkConditionTypeApplied) + if applyCond == nil || applyCond.ObservedGeneration != work.Generation || applyCond.Status != metav1.ConditionTrue { + // The resource has not been successfully applied yet. Skip back-reporting. + klog.V(2).InfoS("Skip status back-reporting for the resource; the resource has not been successfully applied yet", "work", workRef, "resourceIdentifier", resIdentifier) + return + } + + // Skip the resource if there is no back-reported status. + if manifestCond.BackReportedStatus == nil || len(manifestCond.BackReportedStatus.ObservedStatus.Raw) == 0 { + klog.V(2).InfoS("Skip status back-reporting for the resource; there is no back-reported status", "work", workRef, "resourceIdentifier", resIdentifier) + return + } + + // Skip the resource if it is enveloped. + idStr := formatWorkResourceIdentifier(&resIdentifier) + isEnveloped, ok := isResEnvelopedByIdStr[idStr] + if !ok { + // The resource is not found in the list of selected resources as reported by the status of the placement object. + // + // This is not considered as an error as the resource might be absent due to consistency reasons (i.e., it has + // just been de-selected); the status back-reporter will skip the resource for now. + klog.V(2).InfoS("Skip status back-reporting for the resource; the resource is not found in the list of selected resources in the placement object", "work", workRef, "resourceIdentifier", resIdentifier) + return + } + if isEnveloped { + // The resource is enveloped; skip back-reporting. + klog.V(2).InfoS("Skip status back-reporting for the resource; the resource is enveloped", "work", workRef, "resourceIdentifier", resIdentifier) + return + } + + // Note that applied resources should always have a valid identifier set; for simplicity reasons + // here the back-reporter will no longer perform any validation. + gvr := schema.GroupVersionResource{ + Group: resIdentifier.Group, + Version: resIdentifier.Version, + Resource: resIdentifier.Resource, + } + nsName := resIdentifier.Namespace + resName := resIdentifier.Name + unstructured, err := r.hubDynamicClient.Resource(gvr).Namespace(nsName).Get(ctx, resName, metav1.GetOptions{}) + if err != nil { + wrappedErr := fmt.Errorf("failed to retrieve the target resource for status back-reporting: %w", err) + klog.ErrorS(err, "Failed to retrieve the target resource for status back-reporting", "work", workRef, "resourceIdentifier", resIdentifier) + errs[pieces] = wrappedErr + return + } + + // Set the back-reported status to the target resource. + statusWrapper := make(map[string]interface{}) + if err := json.Unmarshal(manifestCond.BackReportedStatus.ObservedStatus.Raw, &statusWrapper); err != nil { + wrappedErr := fmt.Errorf("failed to unmarshal back-reported status: %w", err) + klog.ErrorS(err, "Failed to unmarshal back-reported status", "work", workRef, "resourceIdentifier", resIdentifier) + errs[pieces] = wrappedErr + return + } + + // Note that if the applied resource has a status sub-resource, it is usually safe for us to assume that + // the original resource should also have a status sub-resource of the same format. 
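+		//
+		// The back-reported payload is a wrapper object; only its "status" field is copied to the
+		// original resource, and any other fields in the wrapper are ignored here.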
+		unstructured.Object["status"] = statusWrapper["status"]
+		_, err = r.hubDynamicClient.Resource(gvr).Namespace(nsName).UpdateStatus(ctx, unstructured, metav1.UpdateOptions{})
+		if err != nil {
+			// TO-DO (chenyu1): check for cases where the API definition is inconsistent between the member cluster
+			// side and the hub cluster side, and single out the errors as user errors instead.
+			wrappedErr := fmt.Errorf("failed to update status to the target resource: %w", err)
+			klog.ErrorS(err, "Failed to update status to the target resource", "work", workRef, "resourceIdentifier", resIdentifier)
+			errs[pieces] = wrappedErr
+			return
+		}
+	}
+	r.parallelizer.ParallelizeUntil(childCtx, len(work.Status.ManifestConditions), doWork, "backReportStatusToOriginalResources")
+	return ctrl.Result{}, errorsutil.NewAggregate(errs)
+}
+
+// validatePlacementObjectForOriginalResourceStatusBackReporting validates whether
+// the placement object associated with the given Work object is eligible for back-reporting
+// statuses to original resources.
+func (r *Reconciler) validatePlacementObjectForOriginalResourceStatusBackReporting(
+	ctx context.Context, work *placementv1beta1.Work) (placementv1beta1.PlacementObj, error) {
+	// Read the `kubernetes-fleet.io/parent-CRP` label to retrieve the CRP/RP name.
+	parentPlacementName, ok := work.Labels[placementv1beta1.PlacementTrackingLabel]
+	if !ok || len(parentPlacementName) == 0 {
+		// Normally this should never occur.
+		wrappedErr := fmt.Errorf("the placement tracking label is absent or invalid (label value: %s)", parentPlacementName)
+		return nil, controller.NewUnexpectedBehaviorError(wrappedErr)
+	}
+
+	// Parse the name of the Work object to retrieve the namespace of the placement object (if applicable).
+	//
+	// For CRP objects, the Work object has its name formatted as [CRP-NAME]-work, or [CRP-NAME]-[WORK-SUBINDEX].
+	// For RP objects, the Work object has its name formatted as [RP-NAMESPACE].[RP-NAME]-work, or [RP-NAMESPACE].[RP-NAME]-[WORK-SUBINDEX].
+	//
+	// Also note that Kubernetes does not allow dots in namespace names; dots are, however, allowed
+	// in the names of placement objects (custom resources), even though such usage is often frowned
+	// upon by the system.
+	var parentPlacementNamespace string
+	beforeSeg, afterSeg, found := strings.Cut(work.Name, ".")
+	if found && strings.HasPrefix(afterSeg, parentPlacementName+"-") {
+		// There is (at least) one dot in the name and the dot is not from the placement object's name.
+		parentPlacementNamespace = beforeSeg
+	} else {
+		// There are no dots in the name, or the dot is from the placement object's name.
+		parentPlacementNamespace = ""
+	}
+
+	var placementObj placementv1beta1.PlacementObj
+	if len(parentPlacementNamespace) == 0 {
+		// Retrieve the CRP object.
+		placementObj = &placementv1beta1.ClusterResourcePlacement{}
+		if err := r.hubClient.Get(ctx, client.ObjectKey{Name: parentPlacementName}, placementObj); err != nil {
+			wrappedErr := fmt.Errorf("failed to retrieve CRP object: %w", err)
+			return nil, controller.NewAPIServerError(true, wrappedErr)
+		}
+	} else {
+		// Retrieve the RP object.
+		placementObj = &placementv1beta1.ResourcePlacement{}
+		if err := r.hubClient.Get(ctx, client.ObjectKey{Namespace: parentPlacementNamespace, Name: parentPlacementName}, placementObj); err != nil {
+			wrappedErr := fmt.Errorf("failed to retrieve RP object: %w", err)
+			return nil, controller.NewAPIServerError(true, wrappedErr)
+		}
+	}
+
+	// Validate the scheduling policy of the placement object.
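+	// Only the PickFixed type with exactly one target cluster, or the PickN type with the number
+	// of clusters set to 1, is accepted; mirroring statuses back to a single original resource
+	// requires that the resources be placed on exactly one cluster.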
+	schedulingPolicy := placementObj.GetPlacementSpec().Policy
+	switch {
+	case schedulingPolicy == nil:
+		// The system uses a default scheduling policy of the PickAll placement type. Reject status back-reporting.
+		wrappedErr := fmt.Errorf("no scheduling policy specified (the PickAll type is in use); cannot back-report status to original resources")
+		return nil, controller.NewUserError(wrappedErr)
+	case schedulingPolicy.PlacementType == placementv1beta1.PickAllPlacementType:
+		wrappedErr := fmt.Errorf("the scheduling policy in use is of the PickAll type; cannot back-report status to original resources")
+		return nil, controller.NewUserError(wrappedErr)
+	case schedulingPolicy.PlacementType == placementv1beta1.PickFixedPlacementType && len(schedulingPolicy.ClusterNames) != 1:
+		wrappedErr := fmt.Errorf("the scheduling policy in use is of the PickFixed type, but it has more than one target cluster (%d clusters); cannot back-report status to original resources", len(schedulingPolicy.ClusterNames))
+		return nil, controller.NewUserError(wrappedErr)
+	case schedulingPolicy.PlacementType == placementv1beta1.PickNPlacementType && schedulingPolicy.NumberOfClusters == nil:
+		// Normally this should never occur.
+		wrappedErr := fmt.Errorf("the scheduling policy in use is of the PickN type, but no number of target clusters is specified; cannot back-report status to original resources")
+		return nil, controller.NewUserError(wrappedErr)
+	case schedulingPolicy.PlacementType == placementv1beta1.PickNPlacementType && *schedulingPolicy.NumberOfClusters != 1:
+		wrappedErr := fmt.Errorf("the scheduling policy in use is of the PickN type, but the number of target clusters is not set to 1; cannot back-report status to original resources")
+		return nil, controller.NewUserError(wrappedErr)
+	}
+
+	// The scheduling policy is valid for back-reporting statuses to original resources.
+	return placementObj, nil
+}
+
+// formatResourceIdentifier formats a ResourceIdentifier object to a string for keying purposes.
+//
+// The format in use is `[API-GROUP]/[API-VERSION]/[API-KIND]/[NAMESPACE]/[NAME]`, e.g., `/v1/Namespace//work`.
+func formatResourceIdentifier(resourceIdentifier *placementv1beta1.ResourceIdentifier) string {
+	return fmt.Sprintf("%s/%s/%s/%s/%s", resourceIdentifier.Group, resourceIdentifier.Version, resourceIdentifier.Kind, resourceIdentifier.Namespace, resourceIdentifier.Name)
+}
+
+// formatWorkResourceIdentifier formats a WorkResourceIdentifier object to a string for keying purposes.
+//
+// The format in use is `[API-GROUP]/[API-VERSION]/[API-KIND]/[NAMESPACE]/[NAME]`, e.g., `/v1/Namespace//work`.
+func formatWorkResourceIdentifier(workResourceIdentifier *placementv1beta1.WorkResourceIdentifier) string {
+	return fmt.Sprintf("%s/%s/%s/%s/%s", workResourceIdentifier.Group, workResourceIdentifier.Version, workResourceIdentifier.Kind, workResourceIdentifier.Namespace, workResourceIdentifier.Name)
+}
+
+// prepareIsResEnvelopedMap prepares a map for quick lookup of whether a resource is enveloped.
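+//
+// Keys are produced by formatResourceIdentifier; a value of true indicates that the resource was
+// propagated via an envelope, in which case the status back-reporter skips it.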
+func prepareIsResEnvelopedMap(placementObj placementv1beta1.PlacementObj) map[string]bool { + isResEnvelopedByIdStr := make(map[string]bool) + + selectedResources := placementObj.GetPlacementStatus().SelectedResources + for idx := range selectedResources { + selectedRes := selectedResources[idx] + idStr := formatResourceIdentifier(&selectedRes) + isResEnvelopedByIdStr[idStr] = selectedRes.Envelope != nil + } + + return isResEnvelopedByIdStr +} + +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("status-back-reporter"). + Watches(&placementv1beta1.Work{}, &handler.EnqueueRequestForObject{}). + Complete(r) +} diff --git a/pkg/controllers/statusbackreporter/controller_integration_test.go b/pkg/controllers/statusbackreporter/controller_integration_test.go new file mode 100644 index 000000000..963051777 --- /dev/null +++ b/pkg/controllers/statusbackreporter/controller_integration_test.go @@ -0,0 +1,448 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package statusbackreporter + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" +) + +const ( + // The linter in use mistakenly recognizes some of the names as potential hardcoded credentials; + // as a result, gosec linter warnings are suppressed for these variables. + crpWorkNameTemplate = "%s-work-%s" //nolint:gosec + nsNameTemplate = "ns-%s" + crpNameTemplate = "crp-%s" + + deployName = "app" + + workOrManifestAppliedReason = "MarkedAsApplied" + workOrManifestAppliedMessage = "the object is marked as applied" + deployAvailableReason = "MarkedAsAvailable" + deployAvailableMessage = "the object is marked as available" +) + +const ( + eventuallyDuration = time.Second * 10 + eventuallyInterval = time.Second * 1 +) + +var ( + nsTemplate = corev1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: nsName, + }, + } +) + +// createWorkObject creates a new Work object with the given name, manifests, and apply strategy. 
+func createWorkObject(workName, memberClusterReservedNSName, placementObjName string, reportBackStrategy *placementv1beta1.ReportBackStrategy, rawManifestJSON ...[]byte) { + manifests := make([]placementv1beta1.Manifest, len(rawManifestJSON)) + for idx := range rawManifestJSON { + manifests[idx] = placementv1beta1.Manifest{ + RawExtension: runtime.RawExtension{ + Raw: rawManifestJSON[idx], + }, + } + } + + work := &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberClusterReservedNSName, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: placementObjName, + }, + }, + Spec: placementv1beta1.WorkSpec{ + Workload: placementv1beta1.WorkloadTemplate{ + Manifests: manifests, + }, + ReportBackStrategy: reportBackStrategy, + }, + } + Expect(hubClient.Create(ctx, work)).To(Succeed()) +} + +func marshalK8sObjJSON(obj runtime.Object) []byte { + unstructuredObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + Expect(err).To(BeNil(), "Failed to convert the object to an unstructured object") + unstructuredObj := &unstructured.Unstructured{Object: unstructuredObjMap} + json, err := unstructuredObj.MarshalJSON() + Expect(err).To(BeNil(), "Failed to marshal the unstructured object to JSON") + return json +} + +func prepareStatusWrapperData(obj runtime.Object) ([]byte, error) { + unstructuredObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, fmt.Errorf("failed to convert to unstructured object: %w", err) + } + unstructuredObj := &unstructured.Unstructured{Object: unstructuredObjMap} + statusBackReportingWrapper := make(map[string]interface{}) + statusBackReportingWrapper["apiVersion"] = unstructuredObj.GetAPIVersion() + statusBackReportingWrapper["kind"] = unstructuredObj.GetKind() + statusBackReportingWrapper["status"] = unstructuredObj.Object["status"] + statusBackReportingWrapperData, err := json.Marshal(statusBackReportingWrapper) + if err != nil { + return nil, fmt.Errorf("failed to marshal status back-reporting wrapper data: %w", err) + } + return statusBackReportingWrapperData, nil +} + +func workObjectRemovedActual(workName string) func() error { + // Wait for the removal of the Work object. + return func() error { + work := &placementv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); !errors.IsNotFound(err) && err != nil { + return fmt.Errorf("work object still exists or an unexpected error occurred: %w", err) + } + if controllerutil.ContainsFinalizer(work, placementv1beta1.WorkFinalizer) { + // The Work object is being deleted, but the finalizer is still present. + return fmt.Errorf("work object is being deleted, but the finalizer is still present") + } + return nil + } +} + +func ensureWorkObjectDeletion(workName string) { + // Retrieve the Work object. 
+ work := &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName, + }, + } + Expect(hubClient.Delete(ctx, work)).To(Succeed(), "Failed to delete the Work object") + + workObjRemovedActual := workObjectRemovedActual(workName) + Eventually(workObjRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work object") +} + +var _ = Describe("back-reporting status", func() { + Context("back-report status for deployments (CRP)", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, utils.RandStr()) + workName := fmt.Sprintf(crpWorkNameTemplate, crpName, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var ns *corev1.Namespace + var deploy *appsv1.Deployment + var now metav1.Time + + BeforeAll(func() { + now = metav1.Now().Rfc3339Copy() + + // Create the namespace. + ns = nsTemplate.DeepCopy() + nsJSON := marshalK8sObjJSON(ns) + ns.Name = nsName + Expect(hubClient.Create(ctx, ns)).To(Succeed()) + + // Create the deployment. + deploy = &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deployName, + Namespace: nsName, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + }, + }, + } + deployJSON := marshalK8sObjJSON(deploy) + Expect(hubClient.Create(ctx, deploy)).To(Succeed()) + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: nsName, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + cluster1, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + ReportBackStrategy: &placementv1beta1.ReportBackStrategy{ + Type: placementv1beta1.ReportBackStrategyTypeMirror, + Destination: ptr.To(placementv1beta1.ReportBackDestinationOriginalResource), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed()) + + // Create the Work object. 
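+			//
+			// The Work object carries the same Mirror/OriginalResource report-back strategy as the
+			// CRP above, which is what the status back-reporter checks before mirroring any statuses.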
+ reportBackStrategy := &placementv1beta1.ReportBackStrategy{ + Type: placementv1beta1.ReportBackStrategyTypeMirror, + Destination: ptr.To(placementv1beta1.ReportBackDestinationOriginalResource), + } + createWorkObject(workName, memberReservedNSName, crpName, reportBackStrategy, nsJSON, deployJSON) + }) + + It("can update CRP status", func() { + Eventually(func() error { + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: crpName}, crp); err != nil { + return fmt.Errorf("failed to retrieve CRP object: %w", err) + } + + crp.Status = placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: nsName, + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: deployName, + Namespace: nsName, + }, + }, + } + if err := hubClient.Status().Update(ctx, crp); err != nil { + return fmt.Errorf("failed to update CRP status: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).To(Succeed(), "Failed to update CRP status") + }) + + It("can update work status", func() { + Eventually(func() error { + work := &placementv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Namespace: memberReservedNSName, Name: workName}, work); err != nil { + return fmt.Errorf("failed to retrieve work object: %w", err) + } + + deployWithStatus := deploy.DeepCopy() + deployWithStatus.Status = appsv1.DeploymentStatus{ + ObservedGeneration: deploy.Generation, + Replicas: 1, + UpdatedReplicas: 1, + AvailableReplicas: 1, + ReadyReplicas: 1, + UnavailableReplicas: 0, + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + LastUpdateTime: now, + LastTransitionTime: now, + Reason: deployAvailableReason, + Message: deployAvailableMessage, + }, + }, + } + + statusBackReportingWrapperData, err := prepareStatusWrapperData(deployWithStatus) + if err != nil { + return fmt.Errorf("failed to prepare status wrapper data: %w", err) + } + + work.Status = placementv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workOrManifestAppliedReason, + Message: workOrManifestAppliedMessage, + ObservedGeneration: 1, + LastTransitionTime: now, + }, + }, + ManifestConditions: []placementv1beta1.ManifestCondition{ + { + Identifier: placementv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Namespace: "", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workOrManifestAppliedReason, + Message: workOrManifestAppliedMessage, + ObservedGeneration: 1, + LastTransitionTime: now, + }, + }, + }, + { + Identifier: placementv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "apps", + Version: "v1", + Kind: "Deployment", + Resource: "deployments", + Namespace: nsName, + Name: deployName, + }, + Conditions: []metav1.Condition{ + { + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: workOrManifestAppliedReason, + Message: workOrManifestAppliedMessage, + ObservedGeneration: 1, + LastTransitionTime: now, + }, + }, + BackReportedStatus: &placementv1beta1.BackReportedStatus{ + ObservedStatus: runtime.RawExtension{ + Raw: statusBackReportingWrapperData, + }, + ObservationTime: now, + }, + }, + }, + } + if 
err := hubClient.Status().Update(ctx, work); err != nil { + return fmt.Errorf("failed to update Work object status: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update Work object status") + }) + + It("should back-report status to original resource", func() { + wantDeployStatus := appsv1.DeploymentStatus{ + ObservedGeneration: deploy.Generation, + Replicas: 1, + UpdatedReplicas: 1, + AvailableReplicas: 1, + ReadyReplicas: 1, + UnavailableReplicas: 0, + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + LastUpdateTime: now, + LastTransitionTime: now, + Reason: deployAvailableReason, + Message: deployAvailableMessage, + }, + }, + } + + Eventually(func() error { + deploy := &appsv1.Deployment{} + if err := hubClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, deploy); err != nil { + return fmt.Errorf("failed to retrieve Deployment object: %w", err) + } + + if diff := cmp.Diff(deploy.Status, wantDeployStatus); diff != "" { + return fmt.Errorf("deploy status diff (-got, +want):\n%s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to back-report status") + }) + + AfterAll(func() { + // Delete the Work object. + ensureWorkObjectDeletion(workName) + + // Delete the Deployment object. + Eventually(func() error { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nsName, + Name: deployName, + }, + } + if err := hubClient.Delete(ctx, deploy); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete Deployment object: %w", err) + } + if err := hubClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: nsName}, deploy); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("Deployment object still exists or an unexpected error occurred: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove Deployment object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) +}) diff --git a/pkg/controllers/statusbackreporter/controller_test.go b/pkg/controllers/statusbackreporter/controller_test.go new file mode 100644 index 000000000..0e510ff11 --- /dev/null +++ b/pkg/controllers/statusbackreporter/controller_test.go @@ -0,0 +1,609 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package statusbackreporter + +import ( + "context" + "log" + "os" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" +) + +const ( + // The linter in use mistakenly recognizes some of the names as potential hardcoded credentials; + // as a result, gosec linter warnings are suppressed for these variables. + crpName1 = "crp-1" + rpName1 = "rp-1" + rpName2 = "test.app" + rpName3 = "app" + nsName = "work" + clusterResEnvelopeName = "cluster-res-envelope-1" + resEnvelopeName = "res-envelope-1" + cluster1 = "cluster-1" + cluster2 = "cluster-2" + + crpWorkName1 = "crp-1-work" + rpWorkName1 = "work.test.app-work" //nolint:gosec + rpWorkName2 = "work.app-work" //nolint:gosec +) + +func TestMain(m *testing.M) { + // Set up the scheme. + if err := clientgoscheme.AddToScheme(scheme.Scheme); err != nil { + log.Fatalf("failed to add default set of APIs to the runtime scheme: %v", err) + } + if err := placementv1beta1.AddToScheme(scheme.Scheme); err != nil { + log.Fatalf("failed to add custom APIs (placement/v1beta1) to the runtime scheme: %v", err) + } + + os.Exit(m.Run()) +} + +// TestFormatResourceIdentifier tests the formatResourceIdentifier function. +func TestFormatResourceIdentifier(t *testing.T) { + testCases := []struct { + name string + resourceIdentifier *placementv1beta1.ResourceIdentifier + wantIdStr string + }{ + { + name: "cluster-scoped object (core API group)", + resourceIdentifier: &placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: nsName, + }, + wantIdStr: "/v1/Namespace//work", + }, + { + name: "cluster-scoped object (non-core API group)", + resourceIdentifier: &placementv1beta1.ResourceIdentifier{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Namespace: "", + Name: "admin", + }, + wantIdStr: "rbac.authorization.k8s.io/v1/ClusterRole//admin", + }, + { + name: "namespace-scoped object (core API group)", + resourceIdentifier: &placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Pod", + Namespace: "default", + Name: "nginx-pod", + }, + wantIdStr: "/v1/Pod/default/nginx-pod", + }, + { + name: "namespace-scoped object (non-core API group)", + resourceIdentifier: &placementv1beta1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Namespace: "default", + Name: "nginx", + }, + wantIdStr: "apps/v1/Deployment/default/nginx", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + idStr := formatResourceIdentifier(tc.resourceIdentifier) + if !cmp.Equal(idStr, tc.wantIdStr) { + t.Errorf("formatResourceIdentifier() = %v, want %v", idStr, tc.wantIdStr) + } + }) + } +} + +// TestFormatWorkResourceIdentifier tests the formatWorkResourceIdentifier function. 
+func TestFormatWorkResourceIdentifier(t *testing.T) { + testCases := []struct { + name string + workResourceIdentifier *placementv1beta1.WorkResourceIdentifier + wantIdStr string + }{ + { + name: "cluster-scoped object (core API group)", + workResourceIdentifier: &placementv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Namespace: "", + Name: "work", + }, + wantIdStr: "/v1/Namespace//work", + }, + { + name: "cluster-scoped object (non-core API group)", + workResourceIdentifier: &placementv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Resource: "clusterroles", + Namespace: "", + Name: "admin", + }, + wantIdStr: "rbac.authorization.k8s.io/v1/ClusterRole//admin", + }, + { + name: "namespace-scoped object (core API group)", + workResourceIdentifier: &placementv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + Group: "", + Version: "v1", + Kind: "Pod", + Resource: "pods", + Namespace: "default", + Name: "nginx-pod", + }, + wantIdStr: "/v1/Pod/default/nginx-pod", + }, + { + name: "namespace-scoped object (non-core API group)", + workResourceIdentifier: &placementv1beta1.WorkResourceIdentifier{ + Ordinal: 3, + Group: "apps", + Version: "v1", + Kind: "Deployment", + Resource: "deployments", + Namespace: "default", + Name: "nginx", + }, + wantIdStr: "apps/v1/Deployment/default/nginx", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + idStr := formatWorkResourceIdentifier(tc.workResourceIdentifier) + if !cmp.Equal(idStr, tc.wantIdStr) { + t.Errorf("formatWorkResourceIdentifier() = %v, want %v", idStr, tc.wantIdStr) + } + }) + } +} + +// TestPrepareIsResEnvelopedMap tests the prepareIsResEnvelopedMap function. 
+func TestPrepareIsResEnvelopedMap(t *testing.T) { + testCases := []struct { + name string + placementObj placementv1beta1.PlacementObj + wantIsResEnvelopedMap map[string]bool + }{ + { + name: "CRP object with regular and enveloped objects", + placementObj: &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: nsName, + }, + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Namespace: "", + Name: "admin", + }, + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRoleBinding", + Name: "admin-users", + Envelope: &placementv1beta1.EnvelopeIdentifier{ + Name: clusterResEnvelopeName, + Type: placementv1beta1.ClusterResourceEnvelopeType, + }, + }, + }, + }, + }, + wantIsResEnvelopedMap: map[string]bool{ + "/v1/Namespace//work": false, + "rbac.authorization.k8s.io/v1/ClusterRole//admin": false, + "rbac.authorization.k8s.io/v1/ClusterRoleBinding//admin-users": true, + }, + }, + { + name: "RP object with regular and enveloped objects", + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nsName, + Name: rpName1, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Pod", + Namespace: "default", + Name: "nginx-pod", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Namespace: "default", + Name: "nginx", + }, + { + Group: "apps", + Version: "v1", + Kind: "ResourceQuota", + Namespace: "default", + Name: "all", + Envelope: &placementv1beta1.EnvelopeIdentifier{ + Name: resEnvelopeName, + Type: placementv1beta1.ResourceEnvelopeType, + }, + }, + }, + }, + }, + wantIsResEnvelopedMap: map[string]bool{ + "/v1/Pod/default/nginx-pod": false, + "apps/v1/Deployment/default/nginx": false, + "apps/v1/ResourceQuota/default/all": true, + }, + }, + { + name: "empty map", + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nsName, + Name: rpName1, + }, + Status: placementv1beta1.PlacementStatus{ + SelectedResources: []placementv1beta1.ResourceIdentifier{}, + }, + }, + wantIsResEnvelopedMap: map[string]bool{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + isResEnvelopedMap := prepareIsResEnvelopedMap(tc.placementObj) + if diff := cmp.Diff(isResEnvelopedMap, tc.wantIsResEnvelopedMap); diff != "" { + t.Errorf("prepareIsResEnvelopedMap() isResEnvelopedMaps mismatch (-got, +want):\n%s", diff) + } + }) + } +} + +// TestValidatePlacementObjectForOriginalResourceStatusBackReporting tests the validatePlacementObjectForOriginalResourceStatusBackReporting function. +func TestValidatePlacementObjectForOriginalResourceStatusBackReporting(t *testing.T) { + testCases := []struct { + name string + placementObj placementv1beta1.PlacementObj + work *placementv1beta1.Work + wantErred bool + wantErrStrSubString string + // The method returns the placement object as it is; for simplicity reasons the test spec here + // will no longer check the returned placement object here. 
+ }{ + { + name: "no placement tracking label", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpWorkName1, + }, + }, + wantErred: true, + wantErrStrSubString: "the placement tracking label is absent or invalid", + }, + { + name: "empty placement tracking label", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpWorkName1, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: "", + }, + }, + }, + wantErred: true, + wantErrStrSubString: "the placement tracking label is absent or invalid", + }, + { + name: "work associated with rp (rp has a name with dots), invalid scheduling policy (nil)", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName1, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName2, + }, + }, + }, + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName2, + Namespace: nsName, + }, + Spec: placementv1beta1.PlacementSpec{}, + }, + wantErred: true, + wantErrStrSubString: "no scheduling policy specified (the PickAll type is in use)", + }, + { + name: "work associated with rp (rp does not have dots in its name), invalid scheduling policy (nil)", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName2, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName3, + }, + }, + }, + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: nsName, + }, + Spec: placementv1beta1.PlacementSpec{}, + }, + wantErred: true, + wantErrStrSubString: "no scheduling policy specified (the PickAll type is in use)", + }, + { + name: "work associated with crp, invalid scheduling policy (nil)", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpWorkName1, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: crpName1, + }, + }, + }, + placementObj: &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{}, + }, + wantErred: true, + wantErrStrSubString: "no scheduling policy specified (the PickAll type is in use)", + }, + { + name: "work associated with rp (rp does not have dots in its name), rp not found", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName2, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName3, + }, + }, + }, + wantErred: true, + wantErrStrSubString: "failed to retrieve RP object", + }, + { + name: "work associated with crp, crp not found", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpWorkName1, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: crpName1, + }, + }, + }, + wantErred: true, + wantErrStrSubString: "failed to retrieve CRP object", + }, + { + name: "work associated with rp (rp does not have dots in its name), with PickAll scheduling policy", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName2, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName3, + }, + }, + }, + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: nsName, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + }, + }, + wantErred: true, + wantErrStrSubString: "the scheduling policy in 
use is of the PickAll type", + }, + { + name: "work associated with rp (rp does not have dots in its name), with PickFixed placement type and more than 1 selected clusters", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName2, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName3, + }, + }, + }, + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: nsName, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + cluster1, + cluster2, + }, + }, + }, + }, + wantErred: true, + wantErrStrSubString: "the scheduling policy in use is of the PickFixed type, but it has more than one target cluster", + }, + { + name: "work associated with rp (rp does not have dots in its name), with PickN placement type and more than 1 clusters to select", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName2, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName3, + }, + }, + }, + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: nsName, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + }, + }, + wantErred: true, + wantErrStrSubString: "the scheduling policy in use is of the PickN type, but the number of target clusters is not set to 1", + }, + { + // Normally this will never occur. + name: "work associated with rp (rp does not have dots in its name), with PickN placement type and no number of target clusters", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpWorkName2, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: rpName3, + }, + }, + }, + placementObj: &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName3, + Namespace: nsName, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + }, + }, + }, + wantErred: true, + wantErrStrSubString: "the scheduling policy in use is of the PickN type, but no number of target clusters is specified", + }, + { + name: "work associated with crp, with PickFixed placement type and one selected cluster", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpWorkName1, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: crpName1, + }, + }, + }, + placementObj: &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + cluster1, + }, + }, + }, + }, + }, + { + name: "work associated with crp, with PickN placement type and 1 target cluster to select", + work: &placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpWorkName1, + Labels: map[string]string{ + placementv1beta1.PlacementTrackingLabel: crpName1, + }, + }, + }, + placementObj: &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName1, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: 
placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + fakeClientBuilder := fake.NewClientBuilder().WithScheme(scheme.Scheme) + if tc.placementObj != nil { + fakeClientBuilder.WithObjects(tc.placementObj) + } + fakeClient := fakeClientBuilder.Build() + + r := NewReconciler(fakeClient, nil, nil) + + _, err := r.validatePlacementObjectForOriginalResourceStatusBackReporting(ctx, tc.work) + if tc.wantErred { + if err == nil { + t.Fatalf("validatePlacementObjectForOriginalResourceStatusBackReporting() = nil, want erred") + return + } + if !strings.Contains(err.Error(), tc.wantErrStrSubString) { + t.Fatalf("validatePlacementObjectForOriginalResourceStatusBackReporting() = %v, want to have prefix %s", err, tc.wantErrStrSubString) + return + } + } + }) + } +} diff --git a/pkg/controllers/statusbackreporter/suite_test.go b/pkg/controllers/statusbackreporter/suite_test.go new file mode 100644 index 000000000..a03649147 --- /dev/null +++ b/pkg/controllers/statusbackreporter/suite_test.go @@ -0,0 +1,142 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package statusbackreporter + +import ( + "context" + "flag" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "github.com/kubefleet-dev/kubefleet/pkg/utils/parallelizer" +) + +const ( + defaultWorkerCount = 4 +) + +const ( + memberReservedNSName = "fleet-member-experimental" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+var ( + hubCfg *rest.Config + hubEnv *envtest.Environment + hubClient client.Client + hubMgr manager.Manager + statusBackReporter *Reconciler + + ctx context.Context + cancel context.CancelFunc +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Work Applier Integration Test Suite") +} + +var _ = BeforeSuite(func() { + ctx, cancel = context.WithCancel(context.TODO()) + + By("Setup klog") + fs := flag.NewFlagSet("klog", flag.ContinueOnError) + klog.InitFlags(fs) + Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed()) + + klog.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environments") + hubEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("../../../", "config", "crd", "bases"), + filepath.Join("../../../", "test", "manifests"), + }, + } + + var err error + hubCfg, err = hubEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(hubCfg).ToNot(BeNil()) + + // The schemes have been set up in the TestMain method. + + By("Building the K8s clients") + hubClient, err = client.New(hubCfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(hubClient).ToNot(BeNil()) + + hubDynamicClient, err := dynamic.NewForConfig(hubCfg) + Expect(err).ToNot(HaveOccurred()) + Expect(hubDynamicClient).ToNot(BeNil()) + + // Create the reserved namespace for KubeFleet member cluster. + memberReservedNS := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: memberReservedNSName, + }, + } + Expect(hubClient.Create(ctx, &memberReservedNS)).To(Succeed()) + + By("Setting up the controller and the controller manager for member cluster 1") + hubMgr, err = ctrl.NewManager(hubCfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: server.Options{ + BindAddress: "0", + }, + Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), + }) + Expect(err).ToNot(HaveOccurred()) + + statusBackReporter = NewReconciler( + hubClient, + hubDynamicClient, + parallelizer.NewParallelizer(defaultWorkerCount), + ) + Expect(statusBackReporter.SetupWithManager(hubMgr)).To(Succeed()) + + go func() { + defer GinkgoRecover() + Expect(hubMgr.Start(ctx)).To(Succeed()) + }() +}) + +var _ = AfterSuite(func() { + defer klog.Flush() + + cancel() + By("Tearing down the test environment") + Expect(hubEnv.Stop()).To(Succeed()) +}) From eb286f637c53d306075161dd2a52e4ac3273b381 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 14 Nov 2025 01:39:10 +0800 Subject: [PATCH 2/2] Minor fixes Signed-off-by: michaelawyu --- pkg/controllers/statusbackreporter/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/statusbackreporter/controller.go b/pkg/controllers/statusbackreporter/controller.go index 524408c3f..05a3c8675 100644 --- a/pkg/controllers/statusbackreporter/controller.go +++ b/pkg/controllers/statusbackreporter/controller.go @@ -97,7 +97,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, nil } - // Peform a sanity check; make sure that mirroring back to original resources can be done, i.e., + // Perform a sanity check; make sure that mirroring back to original resources can be done, i.e., // the scheduling policy is set to the PickFixed type with exactly one target cluster, or the PickN // type with the number of clusters set to 1. //
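
Note for reviewers: the sketch below shows one way the new reconciler could be wired into a controller manager outside of the test suite. It simply mirrors the setup in suite_test.go; the package and function names, the manager, and the client variables are hypothetical placeholders rather than part of this patch.

// Illustrative wiring sketch, not part of this patch; it mirrors the setup in suite_test.go.
// The manager and the hub clients are assumed to have been created elsewhere.
package hubagent // hypothetical package name

import (
	"os"

	"k8s.io/client-go/dynamic"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubefleet-dev/kubefleet/pkg/controllers/statusbackreporter"
	"github.com/kubefleet-dev/kubefleet/pkg/utils/parallelizer"
)

// setupStatusBackReporter registers the status back-reporter with the given manager.
func setupStatusBackReporter(mgr ctrl.Manager, hubClient client.Client, hubDynamicClient dynamic.Interface) {
	reporter := statusbackreporter.NewReconciler(
		hubClient,                       // controller-runtime client for the hub cluster
		hubDynamicClient,                // dynamic client used to update original resource statuses
		parallelizer.NewParallelizer(4), // worker count; passing nil falls back to a single worker
	)
	if err := reporter.SetupWithManager(mgr); err != nil {
		klog.ErrorS(err, "Failed to set up the status back-reporter")
		os.Exit(1)
	}
}

The snippet only illustrates the constructor and SetupWithManager contract introduced by this patch; where the controller is actually registered is outside its scope.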