diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 6b3a8f4f3..24b6faf36 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -1280,8 +1280,11 @@ func extractResFromConfigMap(uConfigMap *unstructured.Unstructured) ([]fleetv1be klog.ErrorS(unMarshallErr, "manifest has invalid content", "manifestKey", key, "envelopeResource", klog.KObj(uConfigMap)) return nil, fmt.Errorf("the object with manifest key `%s` in envelope config `%s` is malformatted, err: %w", key, klog.KObj(uConfigMap), unMarshallErr) } + if len(uManifest.GetNamespace()) == 0 { + // Reject any manifest without a namespace: cluster-scoped resources cannot be wrapped in an envelope, and namespaced objects that omit their namespace are also blocked here (previously they would only fail at apply time). + return nil, fmt.Errorf("cannot wrap cluster-scoped resource %s in the envelope %s", uManifest.GetName(), klog.KObj(uConfigMap)) + } if len(uManifest.GetNamespace()) != 0 && uManifest.GetNamespace() != configMap.Namespace { - // if the manifest is a namespaced object but no namespace is specified, it will fail at the apply time instead of here. 
return nil, fmt.Errorf("the namespaced object `%s` in envelope config `%s` is placed in a different namespace `%s` ", uManifest.GetName(), klog.KObj(uConfigMap), uManifest.GetNamespace()) } manifests = append(manifests, fleetv1beta1.Manifest{ diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index ed8cbdf6e..0a035fd3a 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -698,7 +698,6 @@ var _ = Describe("Test Work Generator Controller", func() { Workload: placementv1beta1.WorkloadTemplate{ Manifests: []placementv1beta1.Manifest{ {RawExtension: runtime.RawExtension{Raw: testEnvelopeResourceQuota}}, - {RawExtension: runtime.RawExtension{Raw: testEnvelopeWebhook}}, }, }, }, @@ -823,7 +822,7 @@ var _ = Describe("Test Work Generator Controller", func() { Spec: placementv1beta1.WorkSpec{ Workload: placementv1beta1.WorkloadTemplate{ Manifests: []placementv1beta1.Manifest{ - {RawExtension: runtime.RawExtension{Raw: testEnvelopeWebhook}}, + {RawExtension: runtime.RawExtension{Raw: testEnvelopeResourceQuota2}}, }, }, }, diff --git a/pkg/controllers/workgenerator/controller_test.go b/pkg/controllers/workgenerator/controller_test.go index 3becd2d26..4c8495edf 100644 --- a/pkg/controllers/workgenerator/controller_test.go +++ b/pkg/controllers/workgenerator/controller_test.go @@ -208,6 +208,23 @@ func TestExtractResFromConfigMap(t *testing.T) { want: nil, wantErr: true, }, + "config map with cluster scoped resource should fail": { + uConfigMap: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-config", + "namespace": "default", + }, + "data": map[string]interface{}{ + "resource": `{"apiVersion": "admissionregistration.k8s.io/v1", "kind": "ValidatingWebhookConfiguration", "metadata": {"name": 
"test-webhook"}}`, + }, + }, + }, + want: nil, + wantErr: true, + }, "config map with valid and invalid entries should fail": { uConfigMap: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -226,7 +243,7 @@ func TestExtractResFromConfigMap(t *testing.T) { want: nil, wantErr: true, }, - "config map with cluster and namespace scoped data in the correct namespace should pass": { + "config map with cluster and namespace scoped data in the correct namespace should fail": { uConfigMap: &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "v1", @@ -241,11 +258,8 @@ func TestExtractResFromConfigMap(t *testing.T) { }, }, }, - want: []fleetv1beta1.Manifest{ - {RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod", "namespace": "default"}}`)}}, - {RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion": "v1", "kind": "ClusterRole", "metadata": {"name": "test-role"}}`)}}, - }, - wantErr: false, + want: nil, + wantErr: true, }, "config map with cluster scoped and cross namespaced resources data in a different namespace should fail": { uConfigMap: &unstructured.Unstructured{ diff --git a/pkg/controllers/workgenerator/manifests/resourcequota2.yaml b/pkg/controllers/workgenerator/manifests/resourcequota2.yaml new file mode 100644 index 000000000..4e3224799 --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/resourcequota2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: mem-cpu-demo + namespace: app +spec: + hard: + requests.cpu: "2" + requests.memory: 2Gi + limits.cpu: "4" + limits.memory: 4Gi diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml b/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml index 451947aae..c88877618 100644 --- a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml +++ b/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml @@ -18,34 +18,3 @@ 
data: requests.memory: 1Gi limits.cpu: "2" limits.memory: 2Gi - webhook.yaml: | - apiVersion: admissionregistration.k8s.io/v1 - kind: MutatingWebhookConfiguration - metadata: - creationTimestamp: null - labels: - azure-workload-identity.io/system: "true" - name: azure-wi-webhook-mutating-webhook-configuration - webhooks: - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: azure-wi-webhook-webhook-service - namespace: app - path: /mutate-v1-pod - failurePolicy: Fail - matchPolicy: Equivalent - name: mutation.azure-workload-identity.io - rules: - - apiGroups: - - "" - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - pods - sideEffects: None diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml b/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml index a580d6971..3692ad470 100644 --- a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml +++ b/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml @@ -6,34 +6,15 @@ metadata: annotations: kubernetes-fleet.io/envelope-configmap: "true" data: - webhook.yaml: | - apiVersion: admissionregistration.k8s.io/v1 - kind: MutatingWebhookConfiguration + resourceQuota.yaml: | + apiVersion: v1 + kind: ResourceQuota metadata: - creationTimestamp: null - labels: - azure-workload-identity.io/system: "true" - name: azure-wi-webhook-mutating-webhook-configuration - webhooks: - - admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: azure-wi-webhook-webhook-service - namespace: app - path: /mutate-v1-pod - failurePolicy: Fail - matchPolicy: Equivalent - name: mutation.azure-workload-identity.io - rules: - - apiGroups: - - "" - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - pods - sideEffects: None + name: mem-cpu-demo + namespace: app + spec: + hard: + requests.cpu: "2" + requests.memory: 2Gi + limits.cpu: "4" + limits.memory: 4Gi diff --git 
a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go index 051627144..c611074d6 100644 --- a/pkg/controllers/workgenerator/suite_test.go +++ b/pkg/controllers/workgenerator/suite_test.go @@ -65,7 +65,7 @@ var ( wantOverriddenTestResource []byte // the content of the enveloped resources - testEnvelopeWebhook, testEnvelopeResourceQuota []byte + testEnvelopeResourceQuota, testEnvelopeResourceQuota2 []byte ) func TestAPIs(t *testing.T) { @@ -365,15 +365,15 @@ func readTestManifests() { testPdb, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) - By("Read EnvelopeWebhook") - rawByte, err = os.ReadFile("manifests/webhook.yaml") - Expect(err).Should(Succeed()) - testEnvelopeWebhook, err = yaml.ToJSON(rawByte) - Expect(err).Should(Succeed()) - By("Read ResourceQuota") rawByte, err = os.ReadFile("manifests/resourcequota.yaml") Expect(err).Should(Succeed()) testEnvelopeResourceQuota, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) + + By("Read ResourceQuota2") + rawByte, err = os.ReadFile("manifests/resourcequota2.yaml") + Expect(err).Should(Succeed()) + testEnvelopeResourceQuota2, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) } diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index dd98aa1c3..64bac3fca 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -613,7 +613,11 @@ func resourcePlacementOverrideFailedConditions(generation int64) []metav1.Condit } } -func resourcePlacementWorkSynchronizedFailedConditions(generation int64) []metav1.Condition { +func resourcePlacementWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { + overridenCondReason := condition.OverrideNotSpecifiedReason + if hasOverrides { + overridenCondReason = condition.OverriddenSucceededReason + } return []metav1.Condition{ { Type: string(placementv1beta1.ResourceScheduledConditionType), @@ -631,7 +635,7 @@ func 
resourcePlacementWorkSynchronizedFailedConditions(generation int64) []metav Type: string(placementv1beta1.ResourceOverriddenConditionType), Status: metav1.ConditionTrue, ObservedGeneration: generation, - Reason: condition.OverriddenSucceededReason, + Reason: overridenCondReason, }, { Type: string(placementv1beta1.ResourceWorkSynchronizedConditionType), @@ -642,7 +646,11 @@ func resourcePlacementWorkSynchronizedFailedConditions(generation int64) []metav } } -func crpWorkSynchronizedFailedConditions(generation int64) []metav1.Condition { +func crpWorkSynchronizedFailedConditions(generation int64, hasOverrides bool) []metav1.Condition { + overridenCondReason := condition.OverrideNotSpecifiedReason + if hasOverrides { + overridenCondReason = condition.OverriddenSucceededReason + } return []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -659,7 +667,7 @@ func crpWorkSynchronizedFailedConditions(generation int64) []metav1.Condition { { Type: string(placementv1beta1.ClusterResourcePlacementOverriddenConditionType), Status: metav1.ConditionTrue, - Reason: condition.OverriddenSucceededReason, + Reason: overridenCondReason, ObservedGeneration: generation, }, { @@ -782,17 +790,18 @@ func crpStatusWithWorkSynchronizedUpdatedFailedActual( } var wantPlacementStatus []placementv1beta1.ResourcePlacementStatus + hasOverrides := len(wantResourceOverrides) > 0 || len(wantClusterResourceOverrides) > 0 for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ ClusterName: name, - Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation), + Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, }) } wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ - Conditions: 
crpWorkSynchronizedFailedConditions(crp.Generation), + Conditions: crpWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, ObservedResourceIndex: wantObservedResourceIndex, diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 6e76fce3f..3339fa304 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -22,12 +22,11 @@ import ( "strings" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - admv1 "k8s.io/api/admissionregistration/v1" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" @@ -43,10 +42,16 @@ import ( var ( // pre loaded test manifests testConfigMap, testEnvelopConfigMap corev1.ConfigMap - testEnvelopeWebhook admv1.MutatingWebhookConfiguration testEnvelopeResourceQuota corev1.ResourceQuota ) +const ( + wrapperCMName = "wrapper" + + cmDataKey = "foo" + cmDataVal = "bar" +) + // Note that this container will run in parallel with other containers. 
var _ = Describe("placing wrapped resources using a CRP", func() { Context("Test a CRP place enveloped objects successfully", Ordered, func() { @@ -103,14 +108,14 @@ var _ = Describe("placing wrapped resources using a CRP", func() { It("should update CRP status as expected", func() { // resourceQuota is enveloped so it's not trackable yet - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("should place the resources on all member clusters", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster) + workResourcesPlacedActual := checkEnvelopQuotaPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) @@ -147,14 +152,14 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) It("should update CRP status as success again", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", false) + crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", true) Eventually(crpStatusUpdatedActual, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("should place the resources on all member clusters again", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := 
checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster) + workResourcesPlacedActual := checkEnvelopQuotaPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) @@ -339,6 +344,144 @@ var _ = Describe("placing wrapped resources using a CRP", func() { ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) }) + + Context("Block envelopes that wrap cluster-scoped resources", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + wrappedCMName := "app" + wrappedCBName := "standard" + + BeforeAll(func() { + // Use an envelope to create duplicate resource entries. + ns := appNamespace() + Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) + + // Create an envelope config map. + wrapperCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: wrapperCMName, + Namespace: ns.Name, + Annotations: map[string]string{ + placementv1beta1.EnvelopeConfigMapAnnotation: "true", + }, + }, + Data: map[string]string{}, + } + + // Create a configMap and a clusterRole as wrapped resources. 
+ wrappedCM := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: wrappedCMName, + }, + Data: map[string]string{ + cmDataKey: cmDataVal, + }, + } + wrappedCMBytes, err := json.Marshal(wrappedCM) + Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM.Name) + wrapperCM.Data["cm.yaml"] = string(wrappedCMBytes) + + wrappedCB := &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: rbacv1.SchemeGroupVersion.String(), + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: wrappedCBName, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "list", "watch"}, + }, + }, + } + wrappedCBBytes, err := json.Marshal(wrappedCB) + Expect(err).To(BeNil(), "Failed to marshal clusterRole %s", wrappedCB.Name) + wrapperCM.Data["cb.yaml"] = string(wrappedCBBytes) + + Expect(hubClient.Create(ctx, wrapperCM)).To(Succeed(), "Failed to create configMap %s", wrapperCM.Name) + + // Create a CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should update CRP status as expected", func() { + Eventually(func() error { + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + return err + } + + wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + Conditions: crpWorkSynchronizedFailedConditions(crp.Generation, false), + PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, false), + }, + }, + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Kind: "ConfigMap", + Name: wrapperCMName, + Version: "v1", + Namespace: workNamespaceName, + }, + }, + ObservedResourceIndex: "0", + } + if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + // Note that due to the order in which the work generator handles resources, the synchronization error is + // triggered before the primary work object is applied; that is, the namespace itself will not be created + // 
either. + + AfterAll(func() { + // Remove the CRP and the namespace from the hub cluster. + ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) + }) + }) }) var _ = Describe("Process objects with generate name", Ordered, func() { @@ -346,7 +489,6 @@ var _ = Describe("Process objects with generate name", Ordered, func() { workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) nsGenerateName := "application-" - wrapperCMName := "wrapper" wrappedCMGenerateName := "wrapped-foo-" BeforeAll(func() { @@ -377,7 +519,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Namespace: ns.Name, }, Data: map[string]string{ - "foo": "bar", + cmDataKey: cmDataVal, }, } wrappedCMByte, err := json.Marshal(wrappedCM) @@ -497,7 +639,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { }) }) -func checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster *framework.Cluster) func() error { +func checkEnvelopQuotaPlacement(memberCluster *framework.Cluster) func() error { workNamespaceName := appNamespace().Name return func() error { if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { @@ -523,29 +665,6 @@ func checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster *framework.Clust if diff := cmp.Diff(placedResourceQuota.Spec, testEnvelopeResourceQuota.Spec); diff != "" { return fmt.Errorf("resource quota diff (-got, +want): %s", diff) } - By("check the cluster scoped envelope objects") - placedEnvelopeWebhook := &admv1.MutatingWebhookConfiguration{} - if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: testEnvelopeWebhook.Name}, placedEnvelopeWebhook); err != nil { - return err - } - // the two webhooks are very different since one is a client side yaml and the other is server side generated - if placedEnvelopeWebhook.Webhooks == nil || len(placedEnvelopeWebhook.Webhooks) != 1 { - 
return fmt.Errorf("webhook size does not match") - } - if placedEnvelopeWebhook.Webhooks[0].Name != testEnvelopeWebhook.Webhooks[0].Name || - *placedEnvelopeWebhook.Webhooks[0].FailurePolicy != *testEnvelopeWebhook.Webhooks[0].FailurePolicy || - *placedEnvelopeWebhook.Webhooks[0].SideEffects != *testEnvelopeWebhook.Webhooks[0].SideEffects || - *placedEnvelopeWebhook.Webhooks[0].MatchPolicy != *testEnvelopeWebhook.Webhooks[0].MatchPolicy { - return fmt.Errorf("webhook config does not match") - } - if len(placedEnvelopeWebhook.Webhooks[0].Rules) != 1 { - return fmt.Errorf("webhook rule size does not match") - } - if diff := cmp.Diff(placedEnvelopeWebhook.Webhooks[0].Rules[0], - testEnvelopeWebhook.Webhooks[0].Rules[0], - cmpopts.IgnoreFields(admv1.Rule{}, "Scope")); diff != "" { - return fmt.Errorf("webhook rule diff (-got, +want): %s", diff) - } return nil } } @@ -632,11 +751,6 @@ func readEnvelopTestManifests() { err = utils.GetObjectFromManifest("resources/test-envelop-configmap.yaml", &testEnvelopConfigMap) Expect(err).Should(Succeed()) - By("Read EnvelopeWebhook") - testEnvelopeWebhook = admv1.MutatingWebhookConfiguration{} - err = utils.GetObjectFromManifest("resources/webhook.yaml", &testEnvelopeWebhook) - Expect(err).Should(Succeed()) - By("Read ResourceQuota") testEnvelopeResourceQuota = corev1.ResourceQuota{} err = utils.GetObjectFromManifest("resources/resourcequota.yaml", &testEnvelopeResourceQuota) diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index d9a06c4b0..6bead04d3 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -99,14 +99,14 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun It("should update CRP status as expected", func() { // resourceQuota is not trackable yet - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := 
customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("should place the resources on all member clusters", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopQuotaAndMutationWebhookPlacement(memberCluster) + workResourcesPlacedActual := checkEnvelopQuotaPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) @@ -208,7 +208,7 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun }) It("should update CRP status to applied to all clusters again automatically after rejoining", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", false) + crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) }) diff --git a/test/e2e/resources/test-envelop-configmap.yaml b/test/e2e/resources/test-envelop-configmap.yaml index 4b948ea02..c88877618 100644 --- a/test/e2e/resources/test-envelop-configmap.yaml +++ b/test/e2e/resources/test-envelop-configmap.yaml @@ -18,35 +18,3 @@ data: requests.memory: 1Gi limits.cpu: "2" limits.memory: 2Gi - webhook.yaml: | - apiVersion: admissionregistration.k8s.io/v1 - kind: MutatingWebhookConfiguration - metadata: - creationTimestamp: null - labels: - azure-workload-identity.io/system: "true" - name: azure-wi-webhook-mutating-webhook-configuration - webhooks: - - admissionReviewVersions: - - v1 - - v1beta1 - 
clientConfig: - service: - name: azure-wi-webhook-webhook-service - namespace: app - path: /mutate-v1-pod - failurePolicy: Ignore - matchPolicy: Equivalent - name: mutation.azure-workload-identity.io - rules: - - apiGroups: - - "" - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - pods - sideEffects: None - timeoutSeconds: 1