From da972e72a30a3b789f7870fd3e643838993993f3 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Mon, 21 Apr 2025 13:31:08 -0700 Subject: [PATCH 1/3] skip creation of empty work object Signed-off-by: Britania Rodriguez Reyes --- pkg/controllers/workgenerator/controller.go | 10 ++--- .../controller_integration_test.go | 37 +++++++++++++++++++ 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 24b6faf36..eaf7cb44d 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -520,13 +520,11 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be } if len(simpleManifests) == 0 { klog.V(2).InfoS("the snapshot contains no resource to apply either because of override or enveloped resources", "snapshot", klog.KObj(snapshot)) + } else { + work := generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + activeWork[work.Name] = work + newWork = append(newWork, work) } - // generate a work object for the manifests even if there is nothing to place - // to allow CRP to collect the status of the placement - // TODO (RZ): revisit to see if we need this hack - work := generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) - activeWork[work.Name] = work - newWork = append(newWork, work) // issue all the create/update requests for the corresponding works for each snapshot in parallel for ni := range newWork { diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 0a035fd3a..9de2ed65b 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -162,6 +162,39 @@ var _ = Describe("Test Work Generator Controller", func() { }, timeout, interval).Should(BeTrue(), "Get() member cluster, want not found") }) + It("Should not create work when no resources are selected", func() { + // create master resource snapshot with 0 resources + masterSnapshot := generateResourceSnapshot(1, 1, 0, [][]byte{}) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + // create a scheduled binding + spec := placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateScheduled, + ResourceSnapshotName: masterSnapshot.Name, + TargetCluster: memberClusterName, + } + createClusterResourceBinding(&binding, spec) + // check the work is not created since the binding state is not bound + work := placementv1beta1.Work{} + Consistently(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName}, &work) + return errors.IsNotFound(err) + }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster until all resources are created") + // binding should not have any finalizers + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + Expect(len(binding.Finalizers)).Should(Equal(0)) + // flip the binding state to bound + binding.Spec.State = placementv1beta1.BindingStateBound + Expect(k8sClient.Update(ctx, binding)).Should(Succeed()) + updateRolloutStartedGeneration(&binding) + // check the work 
is not created since no resources are selected + Consistently(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName}, &work) + return errors.IsNotFound(err) + }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster until all resources are created") + // check the binding status is available + verifyBindStatusAvail(binding, false, false) + }) + It("Should not create work for the binding with state scheduled", func() { // create master resource snapshot with 2 number of resources masterSnapshot := generateResourceSnapshot(1, 1, 0, [][]byte{ @@ -4143,6 +4176,10 @@ func generateResourceSnapshot(resourceIndex, numberResource, subIndex int, rawCo clusterResourceSnapshot.Annotations[placementv1beta1.SubindexOfResourceSnapshotAnnotation] = strconv.Itoa(subIndex) } clusterResourceSnapshot.Name = snapshotName + if len(rawContents) == 0 { + clusterResourceSnapshot.Spec.SelectedResources = []placementv1beta1.ResourceContent{} + return clusterResourceSnapshot + } for _, rawContent := range rawContents { clusterResourceSnapshot.Spec.SelectedResources = append(clusterResourceSnapshot.Spec.SelectedResources, placementv1beta1.ResourceContent{ RawExtension: runtime.RawExtension{Raw: rawContent}, From 4071855283fc31eb5fb699c93e231df847251f3e Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Tue, 22 Apr 2025 16:07:53 -0700 Subject: [PATCH 2/3] add e2e Signed-off-by: Britania Rodriguez Reyes --- .../e2e/placement_selecting_resources_test.go | 133 ++++++++++++------ 1 file changed, 93 insertions(+), 40 deletions(-) diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 8f7aca0b4..2cb88220f 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -46,55 +46,108 @@ var ( // Note that this container will run in parallel with other containers. var _ = Describe("creating CRP and selecting resources by name", Ordered, func() { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + Context("creating CRP and selecting resources that exist by name", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + BeforeAll(func() { + By("creating work resources") + createWorkResources() + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + }, + } + By(fmt.Sprintf("creating placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + }) - BeforeAll(func() { - By("creating work resources") - createWorkResources() + AfterAll(func() { + By(fmt.Sprintf("garbage all things related to placement %s", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) - // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. 
- Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - }, - } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) - }) + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) - AfterAll(func() { - By(fmt.Sprintf("garbage all things related to placement %s", crpName)) - ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) - }) + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) - It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0") - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) - }) + It("can delete the CRP", func() { + // Delete the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + } + Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP %s", crpName) + }) - It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) - It("can delete the CRP", func() { - // Delete the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - }, - } - Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP %s", crpName) + It("should remove controller finalizers from CRP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) + }) }) - It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) + Context("creating CRP and selecting resources that do not exist by name", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + BeforeAll(func() { + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: "doesNotExist", + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + }) - It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) - Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) + AfterAll(func() { + By(fmt.Sprintf("garbage all things related to placement %s", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(nil, allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("can delete the CRP", func() { + // Delete the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + } + Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP %s", crpName) + }) + + It("should remove controller finalizers from CRP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP %s", crpName) + }) }) }) From 044729641082c0b254a1960ab60eaf46263665b7 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Tue, 29 Apr 2025 13:46:47 -0500 Subject: [PATCH 3/3] refactor, update tests and UX for crp status keep available/applied condition true and update CRP status message/reason Signed-off-by: Britania Rodriguez Reyes --- .../controller_integration_test.go | 4 +- .../placement_status.go | 12 +- .../placement_status_test.go | 382 ++++++++++++++++-- pkg/controllers/workgenerator/controller.go | 109 +++-- .../controller_integration_test.go | 287 ++++++++++++- .../workgenerator/controller_test.go | 226 ++++++++++- pkg/utils/condition/condition.go | 3 + test/e2e/actuals_test.go | 112 ++++- test/e2e/placement_cro_test.go | 49 ++- .../e2e/placement_selecting_resources_test.go | 5 +- 10 files changed, 1101 insertions(+), 88 deletions(-) diff --git a/pkg/controllers/clusterresourceplacement/controller_integration_test.go b/pkg/controllers/clusterresourceplacement/controller_integration_test.go index 7aabc6dba..859b40937 100644 --- a/pkg/controllers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/controllers/clusterresourceplacement/controller_integration_test.go @@ -249,7 +249,7 @@ func updateClusterResourceBindingWithAvailable(binding *placementv1beta1.Cluster cond = metav1.Condition{ Status: metav1.ConditionTrue, Type: string(placementv1beta1.ResourceBindingAvailable), - Reason: condition.AvailableReason, + Reason: condition.NoResourcesSelectedReason, ObservedGeneration: binding.Generation, } meta.SetStatusCondition(&binding.Status.Conditions, cond) @@ -1338,7 +1338,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { wantCondition = metav1.Condition{ Status: metav1.ConditionTrue, Type: 
string(placementv1beta1.ClusterResourcePlacementAvailableConditionType), - Reason: condition.AvailableReason, + Reason: condition.NoResourcesSelectedReason, } meta.SetStatusCondition(&wantCRP.Status.Conditions, wantCondition) wantCondition.Type = string(placementv1beta1.ResourceBindingAvailable) diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index f95e4a2d1..5ad201f9f 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -237,7 +237,8 @@ func setCRPConditions( default: // All the conditions of the given type are True. cond := i.TrueClusterResourcePlacementCondition(crp.Generation, rpsSetCondTypeCounter[i][condition.TrueConditionStatus]) - if i == condition.OverriddenCondition { + switch i { + case condition.OverriddenCondition: hasOverride := false for _, status := range allRPS { if len(status.ApplicableResourceOverrides) > 0 || len(status.ApplicableClusterResourceOverrides) > 0 { @@ -249,6 +250,15 @@ func setCRPConditions( cond.Reason = condition.OverrideNotSpecifiedReason cond.Message = "No override rules are configured for the selected resources" } + case condition.AppliedCondition: + if len(crp.Status.SelectedResources) == 0 { + cond.Message = "Previous resources are deleted. No new resources are selected for the placement" + } + case condition.AvailableCondition: + if len(crp.Status.SelectedResources) == 0 { + cond.Reason = condition.NoResourcesSelectedReason + cond.Message = "No new resources are selected for the placement. Please update crp to select new resources" + } } crp.SetConditions(cond) } diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index 189699cd2..bc3f694ef 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -99,6 +99,7 @@ func TestSetPlacementStatus(t *testing.T) { name string crpStatus fleetv1beta1.ClusterResourcePlacementStatus policy *fleetv1beta1.PlacementPolicy + selectResources []fleetv1beta1.ResourceIdentifier strategy fleetv1beta1.RolloutStrategy latestPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot @@ -136,7 +137,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: false, + selectResources: selectedResources, + want: false, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -195,7 +197,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: false, + selectResources: selectedResources, + want: false, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -254,7 +257,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: false, + selectResources: selectedResources, + want: false, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -313,7 +317,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: false, + selectResources: selectedResources, + want: false, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -393,7 +398,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - 
want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -527,7 +533,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: false, + selectResources: selectedResources, + want: false, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -603,7 +610,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -745,7 +753,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: false, + selectResources: selectedResources, + want: false, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -878,7 +887,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -1147,7 +1157,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -1557,7 +1568,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -1835,7 +1847,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -2065,7 +2078,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -2376,7 +2390,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -2671,7 +2686,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -2770,7 +2786,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -2877,7 +2894,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -3081,7 +3099,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: 
&fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -3331,7 +3350,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -3650,7 +3670,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -3962,7 +3983,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -4262,7 +4284,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -4494,7 +4517,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -4787,7 +4811,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - want: true, + selectResources: selectedResources, + want: true, crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", @@ -4969,6 +4994,313 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, + { + name: "the placement applied/available true because no resources were selected", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(2), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-4", + Reason: "failed", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: 
[]fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-1", + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: 1, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-2", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-2", + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + selectResources: []fleetv1beta1.ResourceIdentifier{}, + want: true, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: []fleetv1beta1.ResourceIdentifier{}, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAvailableConditionType), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + { + ClusterName: "member-2", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: 
metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, } for _, tc := range tests { @@ -5008,7 +5340,7 @@ func TestSetPlacementStatus(t *testing.T) { Recorder: record.NewFakeRecorder(10), } crp.Generation = crpGeneration - got, err := r.setPlacementStatus(context.Background(), crp, selectedResources, tc.latestPolicySnapshot, tc.latestResourceSnapshot) + got, err := r.setPlacementStatus(context.Background(), crp, tc.selectResources, tc.latestPolicySnapshot, tc.latestResourceSnapshot) if gotErr, wantErr := err != nil, tc.wantErr != nil; gotErr != wantErr || !errors.Is(err, tc.wantErr) { t.Fatalf("setPlacementStatus() got error %v, want error %v", err, tc.wantErr) } diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index eaf7cb44d..df8f4d1e1 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -155,11 +155,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req controllerruntime.Reques workUpdated := false overrideSucceeded := false + resourcesSelected := true // list all the corresponding works works, syncErr := r.listAllWorksAssociated(ctx, &resourceBinding) if syncErr == nil { // generate and apply the workUpdated works if we have all the works - overrideSucceeded, workUpdated, syncErr = r.syncAllWork(ctx, &resourceBinding, works, &cluster) + overrideSucceeded, workUpdated, resourcesSelected, syncErr = r.syncAllWork(ctx, &resourceBinding, works, &cluster) } // Reset the conditions and failed/drifted/diffed placements. for i := condition.OverriddenCondition; i < condition.TotalCondition; i++ { @@ -223,7 +224,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req controllerruntime.Reques case !workUpdated: // The Work object itself is unchanged; refresh the cluster resource binding status // based on the status information reported on the Work object(s). - setBindingStatus(works, &resourceBinding) + setBindingStatus(resourcesSelected, works, &resourceBinding) case resourceBinding.Spec.ApplyStrategy == nil || resourceBinding.Spec.ApplyStrategy.Type != fleetv1beta1.ApplyStrategyTypeReportDiff: // The Work object itself has changed; set a False Applied condition which signals // that resources are in the process of being applied. @@ -409,8 +410,9 @@ func (r *Reconciler) listAllWorksAssociated(ctx context.Context, resourceBinding // it returns // 1: if we apply the overrides successfully // 2: if we actually made any changes on the hub cluster -func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1beta1.ClusterResourceBinding, existingWorks map[string]*fleetv1beta1.Work, cluster *clusterv1beta1.MemberCluster) (bool, bool, error) { +func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1beta1.ClusterResourceBinding, existingWorks map[string]*fleetv1beta1.Work, cluster *clusterv1beta1.MemberCluster) (bool, bool, bool, error) { updateAny := atomic.NewBool(false) + resourcesSelected := atomic.NewBool(true) resourceBindingRef := klog.KObj(resourceBinding) // Refresh the apply strategy for all existing works. 
@@ -435,17 +437,17 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be }) } if updateErr := errs.Wait(); updateErr != nil { - return false, false, updateErr + return false, false, false, updateErr } // the hash256 function can handle empty list https://go.dev/play/p/_4HW17fooXM resourceOverrideSnapshotHash, err := resource.HashOf(resourceBinding.Spec.ResourceOverrideSnapshots) if err != nil { - return false, false, controller.NewUnexpectedBehaviorError(err) + return false, false, false, controller.NewUnexpectedBehaviorError(err) } clusterResourceOverrideSnapshotHash, err := resource.HashOf(resourceBinding.Spec.ClusterResourceOverrideSnapshots) if err != nil { - return false, false, controller.NewUnexpectedBehaviorError(err) + return false, false, false, controller.NewUnexpectedBehaviorError(err) } // TODO: check all work synced first before fetching the snapshots after we put ParentResourceOverrideSnapshotHashAnnotation and ParentClusterResourceOverrideSnapshotHashAnnotation in all the work objects @@ -456,22 +458,22 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be // the resourceIndex is deleted but the works might still be up to date with the binding. if areAllWorkSynced(existingWorks, resourceBinding, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) { klog.V(2).InfoS("All the works are synced with the resourceBinding even if the resource snapshot index is removed", "resourceBinding", resourceBindingRef) - return true, updateAny.Load(), nil + return true, updateAny.Load(), resourcesSelected.Load(), nil } - return false, false, controller.NewUserError(err) + return false, false, false, controller.NewUserError(err) } // TODO(RZ): handle errResourceNotFullyCreated error so we don't need to wait for all the snapshots to be created - return false, false, err + return false, false, false, err } croMap, err := r.fetchClusterResourceOverrideSnapshots(ctx, resourceBinding) if err != nil { - return false, false, err + return false, false, false, err } roMap, err := r.fetchResourceOverrideSnapshots(ctx, resourceBinding) if err != nil { - return false, false, err + return false, false, false, err } // issue all the create/update requests for the corresponding works for each snapshot in parallel @@ -484,7 +486,7 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be workNamePrefix, err := getWorkNamePrefixFromSnapshotName(snapshot) if err != nil { klog.ErrorS(err, "Encountered a mal-formatted resource snapshot", "resourceSnapshot", klog.KObj(snapshot)) - return false, false, err + return false, false, false, err } var simpleManifests []fleetv1beta1.Manifest for j := range snapshot.Spec.SelectedResources { @@ -492,7 +494,7 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be // TODO: override the content of the wrapped resource instead of the envelope itself resourceDeleted, overrideErr := r.applyOverrides(selectedResource, cluster, croMap, roMap) if overrideErr != nil { - return false, false, overrideErr + return false, false, false, overrideErr } if resourceDeleted { klog.V(2).InfoS("The resource is deleted by the override rules", "snapshot", klog.KObj(snapshot), "selectedResource", snapshot.Spec.SelectedResources[j]) @@ -503,14 +505,14 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be var uResource unstructured.Unstructured if unMarshallErr := uResource.UnmarshalJSON(selectedResource.Raw); unMarshallErr != nil { 
klog.ErrorS(unMarshallErr, "work has invalid content", "snapshot", klog.KObj(snapshot), "selectedResource", selectedResource.Raw) - return true, false, controller.NewUnexpectedBehaviorError(unMarshallErr) + return true, false, false, controller.NewUnexpectedBehaviorError(unMarshallErr) } if uResource.GetObjectKind().GroupVersionKind() == utils.ConfigMapGVK && len(uResource.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) != 0 { // get a work object for the enveloped configMap work, err := r.getConfigMapEnvelopWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &uResource, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) if err != nil { - return true, false, err + return true, false, false, err } activeWork[work.Name] = work newWork = append(newWork, work) @@ -518,12 +520,13 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(*selectedResource)) } } - if len(simpleManifests) == 0 { - klog.V(2).InfoS("the snapshot contains no resource to apply either because of override or enveloped resources", "snapshot", klog.KObj(snapshot)) - } else { + if workObjectCreationNeeded(snapshot, simpleManifests, newWork) { + // Create a work object if a resource is selected or if an enveloped configMap is selected work := generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) activeWork[work.Name] = work newWork = append(newWork, work) + } else if snapshot.Labels[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) { + resourcesSelected.Store(false) } // issue all the create/update requests for the corresponding works for each snapshot in parallel @@ -549,11 +552,8 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be continue } errs.Go(func() error { - if err := r.Client.Delete(ctx, work); err != nil { - if !apierrors.IsNotFound(err) { - klog.ErrorS(err, "Failed to delete the no longer needed work", "work", klog.KObj(work)) - return controller.NewAPIServerError(false, err) - } + if err := r.deleteUnnecessaryWork(ctx, work); err != nil { + return err } klog.V(2).InfoS("Deleted the work that is not associated with any resource snapshot", "work", klog.KObj(work)) updateAny.Store(true) @@ -563,10 +563,10 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be // wait for all the create/update/delete requests to finish if updateErr := errs.Wait(); updateErr != nil { - return true, false, updateErr + return true, false, false, updateErr } klog.V(2).InfoS("Successfully synced all the work associated with the resourceBinding", "updateAny", updateAny.Load(), "resourceBinding", resourceBindingRef) - return true, updateAny.Load(), nil + return true, updateAny.Load(), resourcesSelected.Load(), nil } // syncApplyStrategy syncs the apply strategy specified on a ClusterResourceBinding object @@ -591,6 +591,29 @@ func (r *Reconciler) syncApplyStrategy( return true, nil } +// deleteUnnecessaryWork deletes the work objects that are not associated with any resource snapshot. 
+func (r *Reconciler) deleteUnnecessaryWork(ctx context.Context, work *fleetv1beta1.Work) error { + if err := r.Client.Delete(ctx, work); err != nil { + if !apierrors.IsNotFound(err) { + klog.ErrorS(err, "Failed to delete the no longer needed work", "work", klog.KObj(work)) + return controller.NewAPIServerError(false, err) + } + } + return nil +} + +// workObjectCreationNeeded checks if we need to create a work object for the resource snapshot. +func workObjectCreationNeeded(snapshot *fleetv1beta1.ClusterResourceSnapshot, simpleManifests []fleetv1beta1.Manifest, newWork []*fleetv1beta1.Work) bool { + switch { + case len(simpleManifests) == 0 && len(newWork) == 0: + klog.V(2).InfoS("No work to create for this snapshot", "snapshot", klog.KObj(snapshot)) + return false + case len(simpleManifests) == 0: + klog.V(2).InfoS("The snapshot contains no resource to apply due to overrides or enveloped resources", "snapshot", klog.KObj(snapshot)) + } + return true +} + // areAllWorkSynced checks if all the works are synced with the resource binding. func areAllWorkSynced(existingWorks map[string]*fleetv1beta1.Work, resourceBinding *fleetv1beta1.ClusterResourceBinding, _, _ string) bool { // TODO: check resourceOverrideSnapshotHash and clusterResourceOverrideSnapshotHash after all the work has the ParentResourceOverrideSnapshotHashAnnotation and ParentClusterResourceOverrideSnapshotHashAnnotation @@ -843,7 +866,7 @@ const ( ) // setBindingStatus sets the binding status based on the works associated with the binding. -func setBindingStatus(works map[string]*fleetv1beta1.Work, resourceBinding *fleetv1beta1.ClusterResourceBinding) { +func setBindingStatus(resourcesSelected bool, works map[string]*fleetv1beta1.Work, resourceBinding *fleetv1beta1.ClusterResourceBinding) { bindingRef := klog.KObj(resourceBinding) // Note (chenyu1): the work generator will refresh the status of a ClusterResourceBinding using @@ -869,10 +892,10 @@ func setBindingStatus(works map[string]*fleetv1beta1.Work, resourceBinding *flee } else { // Set the Applied and Available condition if (and only if) a ClientSideApply or ServerSideApply // apply strategy is currently being used. - appliedSummarizedStatus = setAllWorkAppliedCondition(works, resourceBinding) + appliedSummarizedStatus = setAllWorkAppliedCondition(resourcesSelected, works, resourceBinding) // Note that Fleet will only set the Available condition if the apply op itself is successful, i.e., // the Applied condition is True. - availabilitySummarizedStatus = setAllWorkAvailableCondition(works, resourceBinding) + availabilitySummarizedStatus = setAllWorkAvailableCondition(resourcesSelected, works, resourceBinding) } resourceBinding.Status.FailedPlacements = nil @@ -1005,7 +1028,7 @@ func setBindingStatus(works map[string]*fleetv1beta1.Work, resourceBinding *flee // // The Applied condition of a ClusterResourceBinding object is set to True if and only if all the // related Work objects have their Applied condition set to True. -func setAllWorkAppliedCondition(works map[string]*fleetv1beta1.Work, binding *fleetv1beta1.ClusterResourceBinding) workConditionSummarizedStatus { +func setAllWorkAppliedCondition(resourcesSelected bool, works map[string]*fleetv1beta1.Work, binding *fleetv1beta1.ClusterResourceBinding) workConditionSummarizedStatus { // Fleet here makes a clear distinction between incomplete, failed, and successful apply operations. // This is to ensure that stale apply information (esp. 
those set before // an apply strategy change) will not leak into the current apply operations. @@ -1036,7 +1059,7 @@ func setAllWorkAppliedCondition(works map[string]*fleetv1beta1.Work, binding *fl } switch { - case !areAllWorksApplyOpsCompleted: + case !areAllWorksApplyOpsCompleted && resourcesSelected: // Not all Work objects have completed the apply op. klog.V(2).InfoS("Some works are not yet completed the apply op", "binding", klog.KObj(binding), "firstWorkWithIncompleteApplyOp", klog.KObj(firstWorkWithIncompleteApplyOp)) binding.SetConditions(metav1.Condition{ @@ -1047,7 +1070,7 @@ func setAllWorkAppliedCondition(works map[string]*fleetv1beta1.Work, binding *fl ObservedGeneration: binding.GetGeneration(), }) return workConditionSummarizedStatusIncomplete - case !areAllWorksApplyOpsSuccessful: + case !areAllWorksApplyOpsSuccessful && resourcesSelected: // All Work objects have completed the apply op, but at least one of them has failed. klog.V(2).InfoS("Some works have failed to apply", "binding", klog.KObj(binding), "firstWorkWithFailedApplyOp", klog.KObj(firstWorkWithFailedApplyOp)) binding.SetConditions(metav1.Condition{ @@ -1059,6 +1082,17 @@ func setAllWorkAppliedCondition(works map[string]*fleetv1beta1.Work, binding *fl }) return workConditionSummarizedStatusFalse default: + if !resourcesSelected { + klog.V(2).InfoS("No resources are selected for the binding", "binding", klog.KObj(binding)) + binding.SetConditions(metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.AllWorkAppliedReason, + Message: "Work has been deleted. No new work objects need to be applied.", + ObservedGeneration: binding.GetGeneration(), + }) + return workConditionSummarizedStatusTrue + } // All Work objects have completed the apply op successfully. klog.V(2).InfoS("All works associated with the binding are applied", "binding", klog.KObj(binding)) binding.SetConditions(metav1.Condition{ @@ -1149,7 +1183,7 @@ func setAllWorkDiffReportedCondition(works map[string]*fleetv1beta1.Work, bindin // // The Available condition of a ClusterResourceBinding object is set to True if and only if all the // related Work objects have their Available condition set to True. -func setAllWorkAvailableCondition(works map[string]*fleetv1beta1.Work, binding *fleetv1beta1.ClusterResourceBinding) workConditionSummarizedStatus { +func setAllWorkAvailableCondition(resourcesSelected bool, works map[string]*fleetv1beta1.Work, binding *fleetv1beta1.ClusterResourceBinding) workConditionSummarizedStatus { // If the Applied condition has been set to False, skip setting the Available condition. appliedCond := meta.FindStatusCondition(binding.Status.Conditions, string(fleetv1beta1.ResourceBindingApplied)) if !condition.IsConditionStatusTrue(appliedCond, binding.GetGeneration()) { @@ -1246,6 +1280,17 @@ func setAllWorkAvailableCondition(works map[string]*fleetv1beta1.Work, binding * }) return workConditionSummarizedStatusTrue default: + if !resourcesSelected { + klog.V(2).InfoS("No resources are selected for the binding", "binding", klog.KObj(binding)) + meta.SetStatusCondition(&binding.Status.Conditions, metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NoResourcesSelectedReason, + Message: "No resources selected.", + ObservedGeneration: binding.GetGeneration(), + }) + return workConditionSummarizedStatusTrue + } // All Work objects have completed the availability check successfully. 
klog.V(2).InfoS("All works associated with the binding are available", "binding", klog.KObj(binding)) meta.SetStatusCondition(&binding.Status.Conditions, metav1.Condition{ diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 9de2ed65b..46481a3d5 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -163,36 +163,36 @@ var _ = Describe("Test Work Generator Controller", func() { }) It("Should not create work when no resources are selected", func() { - // create master resource snapshot with 0 resources + // Create master resource snapshot with 0 resources. masterSnapshot := generateResourceSnapshot(1, 1, 0, [][]byte{}) Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) - // create a scheduled binding + // Create a scheduled binding. spec := placementv1beta1.ResourceBindingSpec{ State: placementv1beta1.BindingStateScheduled, ResourceSnapshotName: masterSnapshot.Name, TargetCluster: memberClusterName, } createClusterResourceBinding(&binding, spec) - // check the work is not created since the binding state is not bound + // Check the work is not created since the binding state is not bound. work := placementv1beta1.Work{} Consistently(func() bool { err := k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName}, &work) return errors.IsNotFound(err) - }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster until all resources are created") - // binding should not have any finalizers + }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster") + // Binding should not have any finalizers. Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) Expect(len(binding.Finalizers)).Should(Equal(0)) - // flip the binding state to bound + // Flip the binding state to bound. binding.Spec.State = placementv1beta1.BindingStateBound Expect(k8sClient.Update(ctx, binding)).Should(Succeed()) updateRolloutStartedGeneration(&binding) - // check the work is not created since no resources are selected + // Check the work is not created since no resources are selected. Consistently(func() bool { err := k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName}, &work) return errors.IsNotFound(err) - }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster until all resources are created") - // check the binding status is available - verifyBindStatusAvail(binding, false, false) + }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster") + // Check the binding status is not applied. 
+ verifyBindStatusNotAppliedWithNoResourcesSelected(binding) }) It("Should not create work for the binding with state scheduled", func() { @@ -1294,6 +1294,225 @@ var _ = Describe("Test Work Generator Controller", func() { }) }) + Context("Test Bound ClusterResourceBinding with multiple resource snapshots with no selected resources at first", func() { + var masterSnapshot, secondSnapshot *placementv1beta1.ClusterResourceSnapshot + var binding *placementv1beta1.ClusterResourceBinding + + BeforeEach(func() { + masterSnapshot = generateResourceSnapshot(2, 2, 0, [][]byte{}) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + By(fmt.Sprintf("Master resource snapshot %s created", masterSnapshot.Name)) + // Create a bound binding. + spec := placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateBound, + ResourceSnapshotName: masterSnapshot.Name, + TargetCluster: memberClusterName, + } + createClusterResourceBinding(&binding, spec) + }) + + AfterEach(func() { + // Delete the master resource snapshot. + Expect(k8sClient.Delete(ctx, masterSnapshot)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + By(fmt.Sprintf("Master resource snapshot %s deleted", masterSnapshot.Name)) + // Delete the second resource snapshot. + Expect(k8sClient.Delete(ctx, secondSnapshot)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + By(fmt.Sprintf("Secondary resource snapshot %s deleted", secondSnapshot.Name)) + // Delete the binding. + Expect(k8sClient.Delete(ctx, binding)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + By(fmt.Sprintf("Resource binding %s deleted", binding.Name)) + }) + + It("Should create necessary work in the target namespace after a resource snapshot with selected envelope object is created", func() { + // Now create the second resource snapshot. + secondSnapshot = generateResourceSnapshot(2, 2, 1, [][]byte{ + testEnvelopConfigMap, testNameSpace, + }) + Expect(k8sClient.Create(ctx, secondSnapshot)).Should(Succeed()) + By(fmt.Sprintf("Secondary resource snapshot %s created", secondSnapshot.Name)) + // Update master resource snapshot labels. + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: masterSnapshot.Name}, masterSnapshot)).Should(Succeed()) + masterSnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = "false" + Expect(k8sClient.Update(ctx, masterSnapshot)).Should(Succeed()) + // Update binding. + updateRolloutStartedGeneration(&binding) + By(fmt.Sprintf("resource binding %s updated", binding.Name)) + // Check the work for the master resource snapshot is not created. + work := placementv1beta1.Work{} + Consistently(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName}, &work) + return errors.IsNotFound(err) + }, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster") + By("Work for first resource snapshot is not created in hub cluster") + // Check the work for the secondary resource snapshot is created, it's name is crp-subindex. 
+ secondWork := placementv1beta1.Work{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), Namespace: memberClusterNamespaceName}, &secondWork) + }, timeout, interval).Should(Succeed(), "Failed to get the expected work in hub cluster") + By(fmt.Sprintf("Work %s for second resource snapshot is created in %s", secondWork.Name, secondWork.Namespace)) + // Inspect the work that is created. + wantWork := placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), + Namespace: memberClusterNamespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: placementv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: ptr.To(true), + }, + }, + Labels: map[string]string{ + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.ParentResourceSnapshotIndexLabel: "2", + placementv1beta1.ParentBindingLabel: binding.Name, + }, + Annotations: map[string]string{ + placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, + placementv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: emptyHash, + placementv1beta1.ParentResourceOverrideSnapshotHashAnnotation: emptyHash, + }, + }, + Spec: placementv1beta1.WorkSpec{ + Workload: placementv1beta1.WorkloadTemplate{ + Manifests: []placementv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testNameSpace}}, + }, + }, + }, + } + diff := cmp.Diff(wantWork, secondWork, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", secondWork.Name, diff)) + // Inspect the envelope work. + var workList placementv1beta1.WorkList + fetchEnvelopedWork(&workList, binding) + envWork := workList.Items[0] + By(fmt.Sprintf("Enveloped work %s is created in %s", envWork.Name, envWork.Namespace)) + wantWork = placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: envWork.Name, + Namespace: memberClusterNamespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: placementv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: ptr.To(true), + }, + }, + Labels: map[string]string{ + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.ParentBindingLabel: binding.Name, + placementv1beta1.ParentResourceSnapshotIndexLabel: "2", + placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ConfigMapEnvelopeType), + placementv1beta1.EnvelopeNameLabel: "envelop-configmap", + placementv1beta1.EnvelopeNamespaceLabel: "app", + }, + Annotations: map[string]string{ + placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, + placementv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: emptyHash, + placementv1beta1.ParentResourceOverrideSnapshotHashAnnotation: emptyHash, + }, + }, + Spec: placementv1beta1.WorkSpec{ + Workload: placementv1beta1.WorkloadTemplate{ + Manifests: []placementv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testEnvelopeResourceQuota}}, + }, + }, + }, + } + diff = cmp.Diff(wantWork, envWork, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("enveloped work(%s) mismatch (-want +got):\n%s", envWork.Name, diff)) + // Check the binding status that it should be marked as applied false. 
+			verifyBindingStatusSyncedNotApplied(binding, false, true)
+			markWorkApplied(&secondWork)
+			markWorkApplied(&envWork)
+			// Check the binding status that it should be marked as applied true eventually.
+			verifyBindStatusAppliedNotAvailable(binding, false)
+			markWorkAvailable(&secondWork)
+			markWorkAvailable(&envWork)
+			// Check the binding status that it should be marked as available true eventually.
+			verifyBindStatusAvail(binding, false, false)
+		})
+
+		It("Should create necessary work in the target namespace after a resource snapshot with selected resource is created", func() {
+			// Now create the second resource snapshot.
+			secondSnapshot = generateResourceSnapshot(2, 2, 1, [][]byte{
+				testResourceCRD, testNameSpace,
+			})
+			Expect(k8sClient.Create(ctx, secondSnapshot)).Should(Succeed())
+			By(fmt.Sprintf("Secondary resource snapshot %s created", secondSnapshot.Name))
+			// Update master resource snapshot labels.
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: masterSnapshot.Name}, masterSnapshot)).Should(Succeed())
+			masterSnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = "false"
+			Expect(k8sClient.Update(ctx, masterSnapshot)).Should(Succeed())
+			// Update binding.
+			updateRolloutStartedGeneration(&binding)
+			By(fmt.Sprintf("Resource binding %s updated", binding.Name))
+			// Check the work for the master resource snapshot is not created.
+			work := placementv1beta1.Work{}
+			Consistently(func() bool {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: memberClusterNamespaceName}, &work)
+				return errors.IsNotFound(err)
+			}, duration, interval).Should(BeTrue(), "controller should not create work in hub cluster")
+			By("Work for first resource snapshot is not created in hub cluster")
+			// Check that the work for the secondary resource snapshot is created; its name is crp-subindex.
+			secondWork := placementv1beta1.Work{}
+			Eventually(func() error {
+				return k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), Namespace: memberClusterNamespaceName}, &secondWork)
+			}, timeout, interval).Should(Succeed(), "Failed to get the expected work in hub cluster")
+			By(fmt.Sprintf("Work %s for second resource snapshot is created in %s", secondWork.Name, secondWork.Namespace))
+			// Inspect the work that is created.
+ wantWork := placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.WorkNameWithSubindexFmt, testCRPName, 1), + Namespace: memberClusterNamespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: placementv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: ptr.To(true), + }, + }, + Labels: map[string]string{ + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.ParentResourceSnapshotIndexLabel: "2", + placementv1beta1.ParentBindingLabel: binding.Name, + }, + Annotations: map[string]string{ + placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, + placementv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: emptyHash, + placementv1beta1.ParentResourceOverrideSnapshotHashAnnotation: emptyHash, + }, + }, + Spec: placementv1beta1.WorkSpec{ + Workload: placementv1beta1.WorkloadTemplate{ + Manifests: []placementv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testResourceCRD}}, + {RawExtension: runtime.RawExtension{Raw: testNameSpace}}, + }, + }, + }, + } + diff := cmp.Diff(wantWork, secondWork, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", secondWork.Name, diff)) + // Check the binding status that it should be marked as applied false. + verifyBindingStatusSyncedNotApplied(binding, false, true) + markWorkApplied(&secondWork) + // Check the binding status that it should be marked as applied true eventually. + verifyBindStatusAppliedNotAvailable(binding, false) + markWorkAvailable(&secondWork) + // Check the binding status that it should be marked as applied true eventually. + verifyBindStatusAvail(binding, false, false) + }) + }) + Context("Test Bound ClusterResourceBinding with a single resource snapshot and valid overrides", func() { var masterSnapshot *placementv1beta1.ClusterResourceSnapshot var roHash, croHash string @@ -3643,6 +3862,47 @@ func verifyBindStatusAvail(binding *placementv1beta1.ClusterResourceBinding, has }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got):\n", binding.Name)) } +func verifyBindStatusNotAppliedWithNoResourcesSelected(binding *placementv1beta1.ClusterResourceBinding) { + Eventually(func() string { + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + wantStatus := placementv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Type: string(placementv1beta1.ResourceBindingRolloutStarted), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingOverridden), + Status: metav1.ConditionTrue, + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingWorkSynchronized), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkSyncedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: binding.GetGeneration(), + }, + { + Type: string(placementv1beta1.ResourceBindingAvailable), + Status: metav1.ConditionTrue, + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: binding.GetGeneration(), + }, + }, + } + return 
cmp.Diff(wantStatus, binding.Status, cmpConditionOption) + }, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got)", binding.Name)) +} + func verifyBindStatusNotAppliedWithTwoPlacements(binding *placementv1beta1.ClusterResourceBinding, hasOverride, hasFailedPlacements, hasDiffedPlacements, hasDriftedPlacements bool) { Eventually(func() string { Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) @@ -4160,8 +4420,9 @@ func generateResourceSnapshot(resourceIndex, numberResource, subIndex int, rawCo clusterResourceSnapshot := &placementv1beta1.ClusterResourceSnapshot{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - placementv1beta1.ResourceIndexLabel: strconv.Itoa(resourceIndex), - placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.ResourceIndexLabel: strconv.Itoa(resourceIndex), + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), }, Annotations: map[string]string{ placementv1beta1.NumberOfResourceSnapshotsAnnotation: strconv.Itoa(numberResource), @@ -4654,7 +4915,7 @@ func createClusterResourceBinding(binding **placementv1beta1.ClusterResourceBind } return k8sClient.Status().Update(ctx, *binding) }, timeout, interval).Should(Succeed(), "Failed to update the binding with RolloutStarted condition") - By(fmt.Sprintf("resource binding %s created", (*binding).Name)) + By(fmt.Sprintf("Resource binding %s created", (*binding).Name)) } func updateRolloutStartedGeneration(binding **placementv1beta1.ClusterResourceBinding) { diff --git a/pkg/controllers/workgenerator/controller_test.go b/pkg/controllers/workgenerator/controller_test.go index 4c8495edf..f8bb41423 100644 --- a/pkg/controllers/workgenerator/controller_test.go +++ b/pkg/controllers/workgenerator/controller_test.go @@ -529,6 +529,7 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { tests := map[string]struct { works map[string]*fleetv1beta1.Work generation int64 + resourcesSelected bool wantAppliedCond metav1.Condition wantWorkAppliedCondSummaryStatus workConditionSummarizedStatus }{ @@ -565,7 +566,8 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { }, }, }, - generation: 1, + generation: 1, + resourcesSelected: true, wantAppliedCond: metav1.Condition{ Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingApplied), @@ -607,7 +609,8 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { }, }, }, - generation: 1, + generation: 1, + resourcesSelected: true, wantAppliedCond: metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingApplied), @@ -649,7 +652,8 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { }, }, }, - generation: 1, + generation: 1, + resourcesSelected: true, wantAppliedCond: metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingApplied), @@ -685,7 +689,8 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { }, }, }, - generation: 1, + generation: 1, + resourcesSelected: true, wantAppliedCond: metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingApplied), @@ -694,6 +699,76 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { }, wantWorkAppliedCondSummaryStatus: workConditionSummarizedStatusIncomplete, }, + "applied is successful when no work is created due to no resources being selected": { + works: map[string]*fleetv1beta1.Work{}, + generation: 1, + resourcesSelected: false, + wantAppliedCond: metav1.Condition{ + Status: 
metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: 1, + }, + wantWorkAppliedCondSummaryStatus: workConditionSummarizedStatusTrue, + }, + "applied is successful when no new work is created due to no resources being selected": { + works: map[string]*fleetv1beta1.Work{ + "appliedWork1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Generation: 123, + }, + Status: fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + ObservedGeneration: 123, + }, + }, + }, + }, + // No new work object is created because resource selection was zero + }, + generation: 1, + resourcesSelected: false, + wantAppliedCond: metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: 1, + }, + wantWorkAppliedCondSummaryStatus: workConditionSummarizedStatusTrue, + }, + "should succeed after when no new work is created due to no resources being selected": { + works: map[string]*fleetv1beta1.Work{ + "appliedWork1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Generation: 123, + }, + Status: fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + ObservedGeneration: 123, + }, + }, + }, + }, + // No new work object is created because resource selection was zero + }, + generation: 1, + resourcesSelected: false, + wantAppliedCond: metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: 1, + }, + wantWorkAppliedCondSummaryStatus: workConditionSummarizedStatusTrue, + }, } for name, tt := range tests { t.Run(name, func(t *testing.T) { @@ -703,7 +778,7 @@ func TestSetAllWorkAppliedCondition(t *testing.T) { Generation: tt.generation, }, } - workAppliedCondSummaryStatus := setAllWorkAppliedCondition(tt.works, binding) + workAppliedCondSummaryStatus := setAllWorkAppliedCondition(tt.resourcesSelected, tt.works, binding) if workAppliedCondSummaryStatus != tt.wantWorkAppliedCondSummaryStatus { t.Errorf("setAllWorkAppliedCondition() = %v, want %v", workAppliedCondSummaryStatus, tt.wantWorkAppliedCondSummaryStatus) } @@ -911,6 +986,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { tests := map[string]struct { works map[string]*fleetv1beta1.Work binding *fleetv1beta1.ClusterResourceBinding + resourcesSelected bool wantAvailableCond *metav1.Condition wantWorkAvailableCondSummaryStatus workConditionSummarizedStatus }{ @@ -959,6 +1035,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: &metav1.Condition{ Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingAvailable), @@ -1012,6 +1089,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: &metav1.Condition{ Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingAvailable), @@ -1065,6 +1143,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: &metav1.Condition{ Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingAvailable), @@ -1110,6 +1189,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: 
&metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingAvailable), @@ -1156,6 +1236,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: &metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingAvailable), @@ -1198,6 +1279,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: &metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingAvailable), @@ -1226,6 +1308,7 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { }, }, }, + resourcesSelected: true, wantAvailableCond: nil, wantWorkAvailableCondSummaryStatus: workConditionSummarizedStatusFalse, }, @@ -1242,14 +1325,136 @@ func TestSetAllWorkAvailableCondition(t *testing.T) { Conditions: []metav1.Condition{}, }, }, + resourcesSelected: true, wantAvailableCond: nil, wantWorkAvailableCondSummaryStatus: workConditionSummarizedStatusFalse, }, + "no work created due to no resources being selected but still available": { + works: map[string]*fleetv1beta1.Work{}, + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + resourcesSelected: false, + wantAvailableCond: &metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: 1, + }, + wantWorkAvailableCondSummaryStatus: workConditionSummarizedStatusTrue, + }, + "no NEW work created due to no resources being selected but still available": { + works: map[string]*fleetv1beta1.Work{ + "work1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + }, + Status: fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Reason: "any", + Status: metav1.ConditionTrue, + }, + }, + }, + }, + "work2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + }, + Status: fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Reason: "any", + Status: metav1.ConditionTrue, + }, + }, + }, + }, + // No new work object is created because resource selection was zero + }, + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + resourcesSelected: false, + wantAvailableCond: &metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: 1, + }, + wantWorkAvailableCondSummaryStatus: workConditionSummarizedStatusTrue, + }, + "should be available even with no NEW work created due to no resources being selected": { + works: map[string]*fleetv1beta1.Work{ + "work1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + }, + Status: fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Reason: "any", + Status: metav1.ConditionTrue, + }, + }, + }, + }, + // No new work object is created because 
resource selection was zero + }, + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceBindingApplied), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + }, + resourcesSelected: false, + wantAvailableCond: &metav1.Condition{ + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: 1, + }, + wantWorkAvailableCondSummaryStatus: workConditionSummarizedStatusTrue, + }, } for name, tt := range tests { t.Run(name, func(t *testing.T) { - workAvailableCondSummaryStatus := setAllWorkAvailableCondition(tt.works, tt.binding) + workAvailableCondSummaryStatus := setAllWorkAvailableCondition(tt.resourcesSelected, tt.works, tt.binding) if workAvailableCondSummaryStatus != tt.wantWorkAvailableCondSummaryStatus { t.Errorf("buildAllWorkAvailableCondition() = %v, want %v", workAvailableCondSummaryStatus, tt.wantWorkAvailableCondSummaryStatus) } @@ -1267,6 +1472,7 @@ func TestSetBindingStatus(t *testing.T) { tests := map[string]struct { works map[string]*fleetv1beta1.Work applyStrategy *fleetv1beta1.ApplyStrategy + resourcesSelected bool maxFailedResourcePlacementLimit *int wantFailedResourcePlacements []fleetv1beta1.FailedResourcePlacement maxDriftedResourcePlacementLimit *int @@ -1352,6 +1558,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, + resourcesSelected: true, }, "One work has one not available and one work has one not applied": { works: map[string]*fleetv1beta1.Work{ @@ -1464,6 +1671,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, + resourcesSelected: true, wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ @@ -1604,6 +1812,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, + resourcesSelected: true, maxFailedResourcePlacementLimit: ptr.To(1), wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { @@ -1732,6 +1941,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, + resourcesSelected: true, wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ @@ -1855,6 +2065,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, + resourcesSelected: true, wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ @@ -1995,6 +2206,7 @@ func TestSetBindingStatus(t *testing.T) { }, }, }, + resourcesSelected: true, wantFailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ @@ -2372,7 +2584,7 @@ func TestSetBindingStatus(t *testing.T) { ApplyStrategy: tt.applyStrategy, }, } - setBindingStatus(tt.works, binding) + setBindingStatus(tt.resourcesSelected, tt.works, binding) got := binding.Status.FailedPlacements // setBindingStatus is using map to populate the placements. // There is no default order in traversing the map. diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index b566a009c..8e670e55e 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -94,6 +94,9 @@ const ( // diff reporting has been fully completed. 
DiffReportedStatusTrueReason = "DiffReportingCompleted" + // NoResourcesSelectedReason is the reason string of placement condition when the resources are not selected. + NoResourcesSelectedReason = "NoResourcesSelected" + // TODO: Add a user error reason ) diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 64bac3fca..0294ecdc2 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -419,6 +419,51 @@ func crpRolloutCompletedConditions(generation int64, hasOverride bool) []metav1. } } +func crpNoResourcesSelectedConditions(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementAppliedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ApplySucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementAvailableConditionType), + Status: metav1.ConditionTrue, + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: generation, + }, + } +} + func resourcePlacementSyncPendingConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -556,6 +601,52 @@ func resourcePlacementRolloutCompletedConditions(generation int64, resourceIsTra } } +func resourcePlacementRolloutCompletedConditionsWithNoSelectedResource(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourceScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourceOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourceWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkSyncedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcesAppliedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.AllWorkAppliedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcesAvailableConditionType), + Status: 
metav1.ConditionTrue, + Reason: condition.NoResourcesSelectedReason, + ObservedGeneration: generation, + }, + } +} + func resourcePlacementScheduleFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -826,10 +917,17 @@ func customizedCRPStatusUpdatedActual(crpName string, wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{} for _, name := range wantSelectedClusters { - wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ - ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), - }) + if len(wantSelectedResourceIdentifiers) == 0 || wantSelectedResourceIdentifiers == nil { + wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ + ClusterName: name, + Conditions: resourcePlacementRolloutCompletedConditionsWithNoSelectedResource(crp.Generation, false), + }) + } else { + wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ + ClusterName: name, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), + }) + } } for i := 0; i < len(wantUnselectedClusters); i++ { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ @@ -839,7 +937,11 @@ func customizedCRPStatusUpdatedActual(crpName string, var wantCRPConditions []metav1.Condition if len(wantSelectedClusters) > 0 { - wantCRPConditions = crpRolloutCompletedConditions(crp.Generation, false) + if len(wantSelectedResourceIdentifiers) == 0 || wantSelectedResourceIdentifiers == nil { + wantCRPConditions = crpNoResourcesSelectedConditions(crp.Generation, false) + } else { + wantCRPConditions = crpRolloutCompletedConditions(crp.Generation, false) + } } else { wantCRPConditions = []metav1.Condition{ // we don't set the remaining resource conditions. diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index b2686c3ba..d31b0b5c4 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -18,6 +18,7 @@ package e2e import ( "fmt" + "github.com/google/go-cmp/cmp" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -690,7 +691,53 @@ var _ = Context("creating clusterResourceOverride with delete rules for one clus It("should update CRP status as expected", func() { wantCRONames := []string{fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, croName, 0)} - crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", wantCRONames, nil) + crpStatusUpdatedActual := func() error { + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + return err + } + + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + Conditions: crpRolloutCompletedConditions(crp.Generation, true), + PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, true), + ApplicableClusterResourceOverrides: wantCRONames, + }, + { + ClusterName: memberCluster2EastCanaryName, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, true), + ApplicableClusterResourceOverrides: wantCRONames, + }, + { + ClusterName: memberCluster3WestProdName, + Conditions: resourcePlacementRolloutCompletedConditionsWithNoSelectedResource(crp.Generation, true), + ApplicableClusterResourceOverrides: wantCRONames, + }, + }, + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Kind: "ConfigMap", + Name: appConfigMapName, + Version: "v1", + Namespace: workNamespaceName, + }, + }, + ObservedResourceIndex: "0", + } + if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil + } Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 2cb88220f..dacc3e0af 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -130,7 +130,8 @@ var _ = Describe("creating CRP and selecting resources by name", Ordered, func() }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual(nil, allMemberClusterNames, nil, "0") + // CRP status will complete but with reason "NoResourceSelected" since the resource does not exist and cannot be selected. + crpStatusUpdatedActual := crpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -365,7 +366,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected It("should remove the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) It("should update CRP status as expected", func() { - // If there are no resources selected, the available condition reason will become "AllWorkAreAvailable". 
+		// If there are no resources selected, the available condition reason will become "NoResourcesSelected".
 		crpStatusUpdatedActual := crpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "1")
 		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
 	})