From c93a54909a350cf97fa570aa816ff52fe990b234 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 1 May 2025 00:01:18 +1000 Subject: [PATCH 1/8] Enabled new envelopes [WIP] Signed-off-by: michaelawyu --- apis/cluster/v1beta1/zz_generated.deepcopy.go | 2 +- apis/placement/v1alpha1/envelope_types.go | 55 +++ .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1beta1/clusterresourceplacement_types.go | 12 +- apis/placement/v1beta1/commons.go | 4 + .../v1beta1/zz_generated.deepcopy.go | 2 +- apis/v1alpha1/zz_generated.deepcopy.go | 2 +- .../resource_selector.go | 10 +- pkg/controllers/workgenerator/controller.go | 251 +++++------- pkg/controllers/workgenerator/envelope.go | 359 ++++++++++++++++++ pkg/utils/common.go | 14 +- test/apis/v1alpha1/zz_generated.deepcopy.go | 2 +- 12 files changed, 559 insertions(+), 156 deletions(-) create mode 100644 pkg/controllers/workgenerator/envelope.go diff --git a/apis/cluster/v1beta1/zz_generated.deepcopy.go b/apis/cluster/v1beta1/zz_generated.deepcopy.go index 7bb7f501c..17e71a1a2 100644 --- a/apis/cluster/v1beta1/zz_generated.deepcopy.go +++ b/apis/cluster/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/placement/v1alpha1/envelope_types.go b/apis/placement/v1alpha1/envelope_types.go index a8a29fc40..f54906957 100644 --- a/apis/placement/v1alpha1/envelope_types.go +++ b/apis/placement/v1alpha1/envelope_types.go @@ -19,6 +19,9 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" ) // +genclient @@ -71,3 +74,55 @@ type ResourceEnvelope struct { // +kubebuilder:validation:Required Spec EnvelopeSpec `json:"spec"` } + +type EnvelopeReader interface { + // GetManifests returns the manifests in the envelope. + GetManifests() map[string]Manifest + + // GetEnvelopeObjRef returns a klog object reference to the envelope. + GetEnvelopeObjRef() klog.ObjectRef + + // GetNamespace returns the namespace of the envelope. + GetNamespace() string + + // GetName returns the name of the envelope. + GetName() string + + // GetEnvelopeType returns the type of the envelope. + GetEnvelopeType() string +} + +// Ensure that both ClusterResourceEnvelope and ResourceEnvelope implement the +// EnvelopeReader interface at compile time. +var ( + _ EnvelopeReader = &ClusterResourceEnvelope{} + _ EnvelopeReader = &ResourceEnvelope{} +) + +// Implements the EnvelopeReader interface for ClusterResourceEnvelope. + +func (e *ClusterResourceEnvelope) GetManifests() map[string]Manifest { + return e.Spec.Manifests +} + +func (e *ClusterResourceEnvelope) GetEnvelopeObjRef() klog.ObjectRef { + return klog.KObj(e) +} + +func (e *ClusterResourceEnvelope) GetEnvelopeType() string { + return string(placementv1beta1.ClusterResourceEnvelopeType) +} + +// Implements the EnvelopeReader interface for ResourceEnvelope. 
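+//
+// For illustration only, a minimal sketch of reading an envelope through the
+// EnvelopeReader interface (the wrapped ConfigMap and all names below are
+// hypothetical):
+//
+//	envelope := &ResourceEnvelope{
+//		ObjectMeta: metav1.ObjectMeta{Name: "example-envelope", Namespace: "app-ns"},
+//		Spec: EnvelopeSpec{
+//			Manifests: map[string]Manifest{
+//				"cm.yaml": {Data: runtime.RawExtension{
+//					Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"wrapped-cm","namespace":"app-ns"}}`),
+//				}},
+//			},
+//		},
+//	}
+//	var reader EnvelopeReader = envelope
+//	for key, manifest := range reader.GetManifests() {
+//		_ = key      // the manifest key within the envelope
+//		_ = manifest // the wrapped resource; its Data field holds the raw JSON
+//	}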
+ +func (e *ResourceEnvelope) GetManifests() map[string]Manifest { + return e.Spec.Manifests +} + +func (e *ResourceEnvelope) GetEnvelopeObjRef() klog.ObjectRef { + return klog.KObj(e) +} + +func (e *ResourceEnvelope) GetEnvelopeType() string { + return string(placementv1beta1.ResourceEnvelopeType) +} diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index bdc1ec2a8..5f8dd6064 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 9b07f5ef5..a2569e4e1 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -884,8 +884,10 @@ type EnvelopeIdentifier struct { // +kubebuilder:validation:Optional Namespace string `json:"namespace,omitempty"` + // TO-DO (chenyu1): drop the enum value ConfigMap after the new envelope forms become fully available. + // Type of the envelope object. - // +kubebuilder:validation:Enum=ConfigMap + // +kubebuilder:validation:Enum=ConfigMap,ClusterResourceEnvelope,ResourceEnvelope // +kubebuilder:default=ConfigMap // +kubebuilder:validation:Optional Type EnvelopeType `json:"type"` @@ -897,7 +899,15 @@ type EnvelopeType string const ( // ConfigMapEnvelopeType means the envelope object is of type `ConfigMap`. + // + // TO-DO (chenyu1): drop this type after the configMap-based envelopes become obsolete. ConfigMapEnvelopeType EnvelopeType = "ConfigMap" + + // ClusterResourceEnvelopeType is the envelope type that represents the ClusterResourceEnvelope custom resource. + ClusterResourceEnvelopeType EnvelopeType = "ClusterResourceEnvelope" + + // ResourceEnvelopeType is the envelope type that represents the ResourceEnvelope custom resource. + ResourceEnvelopeType EnvelopeType = "ResourceEnvelope" ) // ResourcePlacementStatus represents the placement status of selected resources for one target cluster. diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index 02cf2e3ee..8f8fbe3d4 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -78,6 +78,10 @@ const ( // The format is {workPrefix}-configMap-uuid. WorkNameWithConfigEnvelopeFmt = "%s-configmap-%s" + // WorkNameWithEnvelopeCRFmt is the format of the name of a work generated with an envelope CR. + // The format is [WORK-PREFIX]-envelope-[UUID]. + WorkNameWithEnvelopeCRFmt = "%s-envelope-%s" + // ParentClusterResourceOverrideSnapshotHashAnnotation is the annotation to work that contains the hash of the parent cluster resource override snapshot list. ParentClusterResourceOverrideSnapshotHashAnnotation = fleetPrefix + "parent-cluster-resource-override-snapshot-hash" diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index dec32e99d..69f7db476 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 27a862c43..85550ca19 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go index 001d96530..8e396849c 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector.go @@ -463,8 +463,14 @@ func (r *Reconciler) selectResourcesForPlacement(placement *fleetv1beta1.Cluster if err != nil { return 0, nil, nil, err } - if unstructuredObj.GetObjectKind().GroupVersionKind() == utils.ConfigMapGVK && - len(unstructuredObj.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) != 0 { + uGVK := unstructuredObj.GetObjectKind().GroupVersionKind() + switch { + case uGVK == utils.ClusterResourceEnvelopeV1Alpha1GVK: + envelopeObjCount++ + case uGVK == utils.ResourceEnvelopeV1Alpha1GVK: + envelopeObjCount++ + case uGVK == utils.ConfigMapGVK && len(unstructuredObj.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) > 0: + // TO-DO (chenyu1): remove this branch after the configMap-based envelopes become obsolete. envelopeObjCount++ } resources[i] = *rc diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 24b6faf36..d018e32af 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -23,12 +23,10 @@ import ( "fmt" "sort" "strconv" - "strings" "time" "go.uber.org/atomic" "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -36,8 +34,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" @@ -54,6 +50,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" + fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/controllers/workapplier" "github.com/kubefleet-dev/kubefleet/pkg/utils" @@ -498,24 +495,23 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be klog.V(2).InfoS("The resource is deleted by the override rules", "snapshot", klog.KObj(snapshot), "selectedResource", snapshot.Spec.SelectedResources[j]) continue } - // we need to special treat configMap with envelopeConfigMapAnnotation annotation, - // so we need to check the GVK and annotation of the selected resource - var uResource unstructured.Unstructured - if unMarshallErr := uResource.UnmarshalJSON(selectedResource.Raw); unMarshallErr != nil { - klog.ErrorS(unMarshallErr, "work has invalid content", 
"snapshot", klog.KObj(snapshot), "selectedResource", selectedResource.Raw) - return true, false, controller.NewUnexpectedBehaviorError(unMarshallErr) - } - if uResource.GetObjectKind().GroupVersionKind() == utils.ConfigMapGVK && - len(uResource.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) != 0 { - // get a work object for the enveloped configMap - work, err := r.getConfigMapEnvelopWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &uResource, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) - if err != nil { - return true, false, err - } - activeWork[work.Name] = work - newWork = append(newWork, work) - } else { - simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(*selectedResource)) + + // Process the selected resource. + // + // Specifically, + // a) if the selected resource is an envelope (configMap-based or envelope-based; the former will soon + // become obsolete), we will create a work object dedicated for the envelope; + // b) otherwise (the selected resource is a regular resource), the resource will be appended to the list of + // simple manifests. + // + // Note (chenyu1): this method is added to reduce the cyclomatic complexity of the syncAllWork method. + newWork, simpleManifests, err = r.processOneSelectedResource( + ctx, selectedResource, resourceBinding, snapshot, + workNamePrefix, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash, + activeWork, newWork, simpleManifests) + if err != nil { + klog.ErrorS(err, "Failed to process the selected resource", "snapshot", klog.KObj(snapshot), "selectedResourceIdx", j) + return true, false, err } } if len(simpleManifests) == 0 { @@ -571,6 +567,92 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be return true, updateAny.Load(), nil } +// processOneSelectedResource processes a single selected resource from the resource snapshot. +// +// If the selected resource is an envelope (either configMap-based or envelope-based), create a new dedicated +// work object for the envelope. Otherwise, append the selected resource to the list of simple manifests. +func (r *Reconciler) processOneSelectedResource( + ctx context.Context, + selectedResource *fleetv1beta1.ResourceContent, + resourceBinding *fleetv1beta1.ClusterResourceBinding, + snapshot *fleetv1beta1.ClusterResourceSnapshot, + workNamePrefix, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string, + activeWork map[string]*fleetv1beta1.Work, + newWork []*fleetv1beta1.Work, + simpleManifests []fleetv1beta1.Manifest, +) ([]*fleetv1beta1.Work, []fleetv1beta1.Manifest, error) { + // Extract resources from envelopes if one or more ClusterResourceEnvelopes and/or ResourceEnvelopes + // are present. + + // Unmarshal the YAML content into an unstructured object. + var uResource unstructured.Unstructured + if unMarshallErr := uResource.UnmarshalJSON(selectedResource.Raw); unMarshallErr != nil { + klog.ErrorS(unMarshallErr, "work has invalid content", "snapshot", klog.KObj(snapshot), "selectedResource", selectedResource.Raw) + return newWork, simpleManifests, controller.NewUnexpectedBehaviorError(unMarshallErr) + } + + uGVK := uResource.GetObjectKind().GroupVersionKind() + switch { + case uGVK == utils.ClusterResourceEnvelopeV1Alpha1GVK: + // The resource is a ClusterResourceEnvelope; extract its contents. 
+ var clusterResourceEnvelope fleetv1alpha1.ClusterResourceEnvelope + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uResource.Object, &clusterResourceEnvelope); err != nil { + klog.ErrorS(err, "Failed to convert the unstructured object to a ClusterResourceEnvelope", + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(snapshot), + "selectedResource", klog.KObj(&uResource)) + return newWork, simpleManifests, controller.NewUnexpectedBehaviorError(err) + } + + work, err := r.createOrUpdateEnvelopeCRWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &clusterResourceEnvelope, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + if err != nil { + klog.ErrorS(err, "Failed to create or get the work object for the ClusterResourceEnvelope", + "clusterResourceEnvelope", klog.KObj(&clusterResourceEnvelope), + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(snapshot)) + return newWork, simpleManifests, err + } + activeWork[work.Name] = work + newWork = append(newWork, work) + case uGVK == utils.ResourceEnvelopeV1Alpha1GVK: + // The resource is a ResourceEnvelope; extract its contents. + var resourceEnvelope fleetv1alpha1.ResourceEnvelope + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uResource.Object, &resourceEnvelope); err != nil { + klog.ErrorS(err, "Failed to convert the unstructured object to a ResourceEnvelope", + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(snapshot), + "selectedResource", klog.KObj(&uResource)) + return newWork, simpleManifests, controller.NewUnexpectedBehaviorError(err) + } + + work, err := r.createOrUpdateEnvelopeCRWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &resourceEnvelope, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + if err != nil { + klog.ErrorS(err, "Failed to create or get the work object for the ResourceEnvelope", + "resourceEnvelope", klog.KObj(&resourceEnvelope), + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(snapshot)) + return newWork, simpleManifests, err + } + activeWork[work.Name] = work + newWork = append(newWork, work) + case uGVK == utils.ConfigMapGVK && len(uResource.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) > 0: + // The resource is a configMap-based envelope; extract its contents. + // + // TO-DO (chenyu1): drop this branch after the configMap-based envelope becomes obsolete. + work, err := r.getConfigMapEnvelopWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &uResource, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + if err != nil { + return newWork, simpleManifests, err + } + activeWork[work.Name] = work + newWork = append(newWork, work) + default: + // The resource is not an envelope; add it to the list of simple manifests. + simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(*selectedResource)) + } + + return newWork, simpleManifests, nil +} + // syncApplyStrategy syncs the apply strategy specified on a ClusterResourceBinding object // to a Work object. 
func (r *Reconciler) syncApplyStrategy( @@ -630,91 +712,6 @@ func (r *Reconciler) fetchAllResourceSnapshots(ctx context.Context, resourceBind return controller.FetchAllClusterResourceSnapshots(ctx, r.Client, resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], &masterResourceSnapshot) } -// getConfigMapEnvelopWorkObj first try to locate a work object for the corresponding envelopObj of type configMap. -// we create a new one if the work object doesn't exist. We do this to avoid repeatedly delete and create the same work object. -func (r *Reconciler) getConfigMapEnvelopWorkObj(ctx context.Context, workNamePrefix string, resourceBinding *fleetv1beta1.ClusterResourceBinding, - resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, envelopeObj *unstructured.Unstructured, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string) (*fleetv1beta1.Work, error) { - // we group all the resources in one configMap to one work - manifest, err := extractResFromConfigMap(envelopeObj) - if err != nil { - klog.ErrorS(err, "configMap has invalid content", "snapshot", klog.KObj(resourceSnapshot), - "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) - return nil, controller.NewUserError(err) - } - klog.V(2).InfoS("Successfully extract the enveloped resources from the configMap", "numOfResources", len(manifest), - "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) - - // Try to see if we already have a work represent the same enveloped object for this CRP in the same cluster - // The ParentResourceSnapshotIndexLabel can change between snapshots so we have to exclude that label in the match - envelopWorkLabelMatcher := client.MatchingLabels{ - fleetv1beta1.ParentBindingLabel: resourceBinding.Name, - fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], - fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), - fleetv1beta1.EnvelopeNameLabel: envelopeObj.GetName(), - fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(), - } - workList := &fleetv1beta1.WorkList{} - if err := r.Client.List(ctx, workList, envelopWorkLabelMatcher); err != nil { - return nil, controller.NewAPIServerError(true, err) - } - // we need to create a new work object - if len(workList.Items) == 0 { - // we limit the CRP name length to be 63 (DNS1123LabelMaxLength) characters, - // so we have plenty of characters left to fit into 253 (DNS1123SubdomainMaxLength) characters for a CR - workName := fmt.Sprintf(fleetv1beta1.WorkNameWithConfigEnvelopeFmt, workNamePrefix, uuid.NewUUID()) - return &fleetv1beta1.Work{ - ObjectMeta: metav1.ObjectMeta{ - Name: workName, - Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster), - Labels: map[string]string{ - fleetv1beta1.ParentBindingLabel: resourceBinding.Name, - fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], - fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], - fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), - fleetv1beta1.EnvelopeNameLabel: envelopeObj.GetName(), - fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(), - }, - Annotations: map[string]string{ - fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, - fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, - 
fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: fleetv1beta1.GroupVersion.String(), - Kind: resourceBinding.Kind, - Name: resourceBinding.Name, - UID: resourceBinding.UID, - BlockOwnerDeletion: ptr.To(true), // make sure that the k8s will call work delete when the binding is deleted - }, - }, - }, - Spec: fleetv1beta1.WorkSpec{ - Workload: fleetv1beta1.WorkloadTemplate{ - Manifests: manifest, - }, - ApplyStrategy: resourceBinding.Spec.ApplyStrategy, - }, - }, nil - } - if len(workList.Items) > 1 { - // return error here won't get us out of this - klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("find %d work representing configMap", len(workList.Items))), - "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) - } - work := workList.Items[0] - work.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel] - if work.Annotations == nil { - work.Annotations = make(map[string]string) - } - work.Annotations[fleetv1beta1.ParentResourceSnapshotNameAnnotation] = resourceBinding.Spec.ResourceSnapshotName - work.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation] = resourceOverrideSnapshotHash - work.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation] = clusterResourceOverrideSnapshotHash - work.Spec.Workload.Manifests = manifest - work.Spec.ApplyStrategy = resourceBinding.Spec.ApplyStrategy - return &work, nil -} - // generateSnapshotWorkObj generates the work object for the corresponding snapshot func generateSnapshotWorkObj(workName string, resourceBinding *fleetv1beta1.ClusterResourceBinding, resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, manifest []fleetv1beta1.Manifest, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string) *fleetv1beta1.Work { @@ -1261,46 +1258,6 @@ func setAllWorkAvailableCondition(works map[string]*fleetv1beta1.Work, binding * } } -func extractResFromConfigMap(uConfigMap *unstructured.Unstructured) ([]fleetv1beta1.Manifest, error) { - manifests := make([]fleetv1beta1.Manifest, 0) - var configMap corev1.ConfigMap - err := runtime.DefaultUnstructuredConverter.FromUnstructured(uConfigMap.Object, &configMap) - if err != nil { - return nil, err - } - // the list order is not stable as the map traverse is random - for key, value := range configMap.Data { - // so we need to check the GVK and annotation of the selected resource - content, jsonErr := yaml.ToJSON([]byte(value)) - if jsonErr != nil { - return nil, jsonErr - } - var uManifest unstructured.Unstructured - if unMarshallErr := uManifest.UnmarshalJSON(content); unMarshallErr != nil { - klog.ErrorS(unMarshallErr, "manifest has invalid content", "manifestKey", key, "envelopeResource", klog.KObj(uConfigMap)) - return nil, fmt.Errorf("the object with manifest key `%s` in envelope config `%s` is malformatted, err: %w", key, klog.KObj(uConfigMap), unMarshallErr) - } - if len(uManifest.GetNamespace()) == 0 { - // Block cluster-scoped resources. 
- return nil, fmt.Errorf("cannot wrap cluster-scoped resource %s in the envelope %s", uManifest.GetName(), klog.KObj(uConfigMap)) - } - if len(uManifest.GetNamespace()) != 0 && uManifest.GetNamespace() != configMap.Namespace { - return nil, fmt.Errorf("the namespaced object `%s` in envelope config `%s` is placed in a different namespace `%s` ", uManifest.GetName(), klog.KObj(uConfigMap), uManifest.GetNamespace()) - } - manifests = append(manifests, fleetv1beta1.Manifest{ - RawExtension: runtime.RawExtension{Raw: content}, - }) - } - // stable sort the manifests so that we can have a deterministic order - sort.Slice(manifests, func(i, j int) bool { - obj1 := manifests[i].Raw - obj2 := manifests[j].Raw - // order by its json formatted string - return strings.Compare(string(obj1), string(obj2)) > 0 - }) - return manifests, nil -} - // extractFailedResourcePlacementsFromWork extracts the failed resource placements from the work. func extractFailedResourcePlacementsFromWork(work *fleetv1beta1.Work) []fleetv1beta1.FailedResourcePlacement { appliedCond := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) diff --git a/pkg/controllers/workgenerator/envelope.go b/pkg/controllers/workgenerator/envelope.go new file mode 100644 index 000000000..512b8338c --- /dev/null +++ b/pkg/controllers/workgenerator/envelope.go @@ -0,0 +1,359 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workgenerator + +import ( + "context" + "fmt" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" +) + +// createOrUpdateEnvelopeCRWorkObj creates or updates a work object for a given envelope CR. 
+func (r *Reconciler) createOrUpdateEnvelopeCRWorkObj(
+	ctx context.Context,
+	workNamePrefix string,
+	resourceBinding *fleetv1beta1.ClusterResourceBinding,
+	resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
+	envelopeReader fleetv1alpha1.EnvelopeReader,
+	resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string,
+) (*fleetv1beta1.Work, error) {
+	manifests, err := extractManifestsFromEnvelopeCR(envelopeReader)
+	if err != nil {
+		klog.ErrorS(err, "Failed to extract manifests from the envelope spec",
+			"clusterResourceBinding", klog.KObj(resourceBinding),
+			"clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+			"envelope", envelopeReader.GetEnvelopeObjRef())
+		// Surface the error to the caller; otherwise a work object with an empty
+		// manifest list would be created for a malformed envelope.
+		return nil, err
+	}
+	klog.V(2).InfoS("Successfully extracted wrapped manifests from the envelope",
+		"numOfResources", len(manifests),
+		"clusterResourceBinding", klog.KObj(resourceBinding),
+		"clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+		"envelope", envelopeReader.GetEnvelopeObjRef())
+
+	// Check to see if a corresponding work object has been created for the envelope.
+	labelMatcher := client.MatchingLabels{
+		fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
+		fleetv1beta1.CRPTrackingLabel:       resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+		fleetv1beta1.EnvelopeTypeLabel:      envelopeReader.GetEnvelopeType(),
+		fleetv1beta1.EnvelopeNameLabel:      envelopeReader.GetName(),
+		fleetv1beta1.EnvelopeNamespaceLabel: envelopeReader.GetNamespace(),
+	}
+	workList := &fleetv1beta1.WorkList{}
+	if err := r.Client.List(ctx, workList, labelMatcher); err != nil {
+		klog.ErrorS(err, "Failed to list work objects when finding the work object for an envelope",
+			"clusterResourceBinding", klog.KObj(resourceBinding),
+			"clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+			"envelope", envelopeReader.GetEnvelopeObjRef())
+		wrappedErr := fmt.Errorf("failed to list work objects when finding the work object for an envelope %v: %w", envelopeReader.GetEnvelopeObjRef(), err)
+		return nil, controller.NewAPIServerError(true, wrappedErr)
+	}
+
+	var work *fleetv1beta1.Work
+	switch {
+	case len(workList.Items) > 1:
+		// Multiple matching work objects found; this should never occur under normal conditions.
+		wrappedErr := fmt.Errorf("%d work objects found for the same envelope %v, only one expected", len(workList.Items), envelopeReader.GetEnvelopeObjRef())
+		klog.ErrorS(wrappedErr, "Failed to create or update work object for envelope",
+			"clusterResourceBinding", klog.KObj(resourceBinding),
+			"clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+			"envelope", envelopeReader.GetEnvelopeObjRef())
+		return nil, controller.NewUnexpectedBehaviorError(wrappedErr)
+	case len(workList.Items) == 1:
+		klog.V(2).InfoS("Found existing work object for the envelope",
+			"work", klog.KObj(&workList.Items[0]),
+			"clusterResourceBinding", klog.KObj(resourceBinding),
+			"clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+			"envelope", envelopeReader.GetEnvelopeObjRef())
+		work = &workList.Items[0]
+		refreshWorkForEnvelopeCR(work, resourceBinding, resourceSnapshot, manifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash)
+	case len(workList.Items) == 0:
+		// No matching work object found; create a new one.
+ klog.V(2).InfoS("No existing work object found for the envelope; creating a new one", + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(resourceSnapshot), + "envelope", envelopeReader.GetEnvelopeObjRef()) + work = buildNewWorkForEnvelopeCR(workNamePrefix, resourceBinding, resourceSnapshot, envelopeReader, manifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + } + + return work, nil +} + +// getConfigMapEnvelopWorkObj first try to locate a work object for the corresponding envelopObj of type configMap. +// we create a new one if the work object doesn't exist. We do this to avoid repeatedly delete and create the same work object. +func (r *Reconciler) getConfigMapEnvelopWorkObj(ctx context.Context, workNamePrefix string, resourceBinding *fleetv1beta1.ClusterResourceBinding, + resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, envelopeObj *unstructured.Unstructured, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string) (*fleetv1beta1.Work, error) { + // we group all the resources in one configMap to one work + manifest, err := extractResFromConfigMap(envelopeObj) + if err != nil { + klog.ErrorS(err, "configMap has invalid content", "snapshot", klog.KObj(resourceSnapshot), + "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) + return nil, controller.NewUserError(err) + } + klog.V(2).InfoS("Successfully extract the enveloped resources from the configMap", "numOfResources", len(manifest), + "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) + + // Try to see if we already have a work represent the same enveloped object for this CRP in the same cluster + // The ParentResourceSnapshotIndexLabel can change between snapshots so we have to exclude that label in the match + envelopWorkLabelMatcher := client.MatchingLabels{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: envelopeObj.GetName(), + fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(), + } + workList := &fleetv1beta1.WorkList{} + if err := r.Client.List(ctx, workList, envelopWorkLabelMatcher); err != nil { + return nil, controller.NewAPIServerError(true, err) + } + // we need to create a new work object + if len(workList.Items) == 0 { + // we limit the CRP name length to be 63 (DNS1123LabelMaxLength) characters, + // so we have plenty of characters left to fit into 253 (DNS1123SubdomainMaxLength) characters for a CR + workName := fmt.Sprintf(fleetv1beta1.WorkNameWithConfigEnvelopeFmt, workNamePrefix, uuid.NewUUID()) + return &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster), + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: envelopeObj.GetName(), + fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(), + }, + Annotations: map[string]string{ + 
fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, + fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, + fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: resourceBinding.Kind, + Name: resourceBinding.Name, + UID: resourceBinding.UID, + BlockOwnerDeletion: ptr.To(true), // make sure that the k8s will call work delete when the binding is deleted + }, + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: manifest, + }, + ApplyStrategy: resourceBinding.Spec.ApplyStrategy, + }, + }, nil + } + if len(workList.Items) > 1 { + // return error here won't get us out of this + klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("find %d work representing configMap", len(workList.Items))), + "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) + } + work := workList.Items[0] + work.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel] + if work.Annotations == nil { + work.Annotations = make(map[string]string) + } + work.Annotations[fleetv1beta1.ParentResourceSnapshotNameAnnotation] = resourceBinding.Spec.ResourceSnapshotName + work.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation] = resourceOverrideSnapshotHash + work.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation] = clusterResourceOverrideSnapshotHash + work.Spec.Workload.Manifests = manifest + work.Spec.ApplyStrategy = resourceBinding.Spec.ApplyStrategy + return &work, nil +} + +func extractResFromConfigMap(uConfigMap *unstructured.Unstructured) ([]fleetv1beta1.Manifest, error) { + manifests := make([]fleetv1beta1.Manifest, 0) + var configMap corev1.ConfigMap + err := runtime.DefaultUnstructuredConverter.FromUnstructured(uConfigMap.Object, &configMap) + if err != nil { + return nil, err + } + // the list order is not stable as the map traverse is random + for key, value := range configMap.Data { + // so we need to check the GVK and annotation of the selected resource + content, jsonErr := yaml.ToJSON([]byte(value)) + if jsonErr != nil { + return nil, jsonErr + } + var uManifest unstructured.Unstructured + if unMarshallErr := uManifest.UnmarshalJSON(content); unMarshallErr != nil { + klog.ErrorS(unMarshallErr, "manifest has invalid content", "manifestKey", key, "envelopeResource", klog.KObj(uConfigMap)) + return nil, fmt.Errorf("the object with manifest key `%s` in envelope config `%s` is malformatted, err: %w", key, klog.KObj(uConfigMap), unMarshallErr) + } + if len(uManifest.GetNamespace()) == 0 { + // Block cluster-scoped resources. 
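+			// A configMap-based envelope may only wrap namespaced objects that live in
+			// the same namespace as the configMap itself; cluster-scoped objects are
+			// expected to go into a ClusterResourceEnvelope instead.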
+ return nil, fmt.Errorf("cannot wrap cluster-scoped resource %s in the envelope %s", uManifest.GetName(), klog.KObj(uConfigMap)) + } + if len(uManifest.GetNamespace()) != 0 && uManifest.GetNamespace() != configMap.Namespace { + return nil, fmt.Errorf("the namespaced object `%s` in envelope config `%s` is placed in a different namespace `%s` ", uManifest.GetName(), klog.KObj(uConfigMap), uManifest.GetNamespace()) + } + manifests = append(manifests, fleetv1beta1.Manifest{ + RawExtension: runtime.RawExtension{Raw: content}, + }) + } + // stable sort the manifests so that we can have a deterministic order + sort.Slice(manifests, func(i, j int) bool { + obj1 := manifests[i].Raw + obj2 := manifests[j].Raw + // order by its json formatted string + return strings.Compare(string(obj1), string(obj2)) > 0 + }) + return manifests, nil +} + +func extractManifestsFromEnvelopeCR(envelopeReader fleetv1alpha1.EnvelopeReader) ([]fleetv1beta1.Manifest, error) { + manifests := make([]fleetv1beta1.Manifest, 0) + + for k, v := range envelopeReader.GetManifests() { + // Verify if the wrapped manifests in the envelope are valid. + var runtimeObj runtime.Object + var scope conversion.Scope + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&v.Data, &runtimeObj, scope); err != nil { + klog.ErrorS(err, "Failed to parse the wrapped manifest data to a Kubernetes runtime object", + "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef()) + wrappedErr := fmt.Errorf("failed to parse the wrapped manifest data to a Kubernetes runtime object (manifestKey=%s,envelopeObjRef=%v): %w", k, envelopeReader.GetEnvelopeObjRef(), err) + return nil, controller.NewUnexpectedBehaviorError(wrappedErr) + } + + objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(runtimeObj) + if err != nil { + klog.ErrorS(err, "Failed to convert the wrapped manifest data to an unstructured object", + "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef()) + wrappedErr := fmt.Errorf("failed to convert the wrapped manifest data to an unstructured object (manifestKey=%s,envelopeObjRef=%v): %w", k, envelopeReader.GetEnvelopeObjRef(), err) + return nil, controller.NewUnexpectedBehaviorError(wrappedErr) + } + uObj := &unstructured.Unstructured{Object: objMap} + + // Perform some basic validation to make sure that the envelope is used correctly. + switch { + case envelopeReader.GetNamespace() == "" && uObj.GetNamespace() != "": + // Check if a namespaced manifest has been wrapped in a cluster resource envelope. + wrappedErr := fmt.Errorf("a namespaced object %s (%v) has been wrapped in a cluster resource envelope %s", k, klog.KObj(uObj), envelopeReader.GetEnvelopeObjRef()) + klog.ErrorS(wrappedErr, "Found an invalid manifest", "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef()) + return nil, controller.NewUserError(wrappedErr) + case envelopeReader.GetNamespace() != uObj.GetNamespace(): + // Check if the namespace of the wrapped manifest matches the envelope's namespace. 
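+			//
+			// This branch also rejects cluster-scoped objects wrapped in a (namespaced)
+			// ResourceEnvelope, as such objects report an empty namespace.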
+			wrappedErr := fmt.Errorf("a namespaced object %s (%v) has been wrapped in a resource envelope (%v) from another namespace", k, klog.KObj(uObj), envelopeReader.GetEnvelopeObjRef())
+			klog.ErrorS(wrappedErr, "Found an invalid manifest", "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef())
+			return nil, controller.NewUserError(wrappedErr)
+		}
+
+		manifests = append(manifests, fleetv1beta1.Manifest{
+			RawExtension: v.Data,
+		})
+	}
+
+	// Do a stable sort of the extracted manifests to ensure consistent, deterministic ordering.
+	sort.Slice(manifests, func(i, j int) bool {
+		obj1 := manifests[i].Raw
+		obj2 := manifests[j].Raw
+		// order by its json formatted string
+		return strings.Compare(string(obj1), string(obj2)) > 0
+	})
+	return manifests, nil
+}
+
+func refreshWorkForEnvelopeCR(
+	work *fleetv1beta1.Work,
+	resourceBinding *fleetv1beta1.ClusterResourceBinding,
+	resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
+	manifests []fleetv1beta1.Manifest,
+	resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string,
+) {
+	// Update the parent resource snapshot index label.
+	work.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel]
+
+	// Update the annotations.
+	if work.Annotations == nil {
+		work.Annotations = make(map[string]string)
+	}
+	work.Annotations[fleetv1beta1.ParentResourceSnapshotNameAnnotation] = resourceBinding.Spec.ResourceSnapshotName
+	work.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation] = resourceOverrideSnapshotHash
+	work.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation] = clusterResourceOverrideSnapshotHash
+
+	// Update the work spec (the manifests and the apply strategy).
+	work.Spec.Workload.Manifests = manifests
+	work.Spec.ApplyStrategy = resourceBinding.Spec.ApplyStrategy
+}
+
+func buildNewWorkForEnvelopeCR(
+	workNamePrefix string,
+	resourceBinding *fleetv1beta1.ClusterResourceBinding,
+	resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
+	envelopeReader fleetv1alpha1.EnvelopeReader,
+	manifests []fleetv1beta1.Manifest,
+	resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string,
+) *fleetv1beta1.Work {
+	workName := fmt.Sprintf(fleetv1beta1.WorkNameWithEnvelopeCRFmt, workNamePrefix, uuid.NewUUID())
+	workNamespace := fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster)
+
+	return &fleetv1beta1.Work{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      workName,
+			Namespace: workNamespace,
+			Labels: map[string]string{
+				fleetv1beta1.ParentBindingLabel:               resourceBinding.Name,
+				fleetv1beta1.CRPTrackingLabel:                 resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+				fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel],
+				// Use the envelope type reported by the envelope CR itself, so that this
+				// label matches the lookup performed in createOrUpdateEnvelopeCRWorkObj.
+				fleetv1beta1.EnvelopeTypeLabel:      envelopeReader.GetEnvelopeType(),
+				fleetv1beta1.EnvelopeNameLabel:      envelopeReader.GetName(),
+				fleetv1beta1.EnvelopeNamespaceLabel: envelopeReader.GetNamespace(),
+			},
+			Annotations: map[string]string{
+				fleetv1beta1.ParentResourceSnapshotNameAnnotation:                resourceBinding.Spec.ResourceSnapshotName,
+				fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation:        resourceOverrideSnapshotHash,
+				fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash,
+			},
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: fleetv1beta1.GroupVersion.String(),
+					Kind:       resourceBinding.Kind,
+					Name:       resourceBinding.Name,
+					UID:        resourceBinding.UID,
+					//
Make sure that the resource binding can only be deleted after + // all of its managed work objects have been deleted. + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: manifests, + }, + ApplyStrategy: resourceBinding.Spec.ApplyStrategy, + }, + } +} diff --git a/pkg/utils/common.go b/pkg/utils/common.go index fc720cf7b..dd2b26d6b 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -146,7 +146,7 @@ var ( } ) -// Those are the GVR/GVK of the fleet related resources. +// Those are the GVR/GVKs in use by Fleet source code. var ( ClusterResourcePlacementV1Alpha1GVK = schema.GroupVersionKind{ Group: fleetv1alpha1.GroupVersion.Group, @@ -501,6 +501,18 @@ var ( Version: corev1.SchemeGroupVersion.Version, Kind: "PersistentVolumeClaim", } + + ClusterResourceEnvelopeV1Alpha1GVK = schema.GroupVersionKind{ + Group: placementv1alpha1.GroupVersion.Group, + Version: placementv1alpha1.GroupVersion.Version, + Kind: "ClusterResourceEnvelope", + } + + ResourceEnvelopeV1Alpha1GVK = schema.GroupVersionKind{ + Group: placementv1alpha1.GroupVersion.Group, + Version: placementv1alpha1.GroupVersion.Version, + Kind: "ResourceEnvelope", + } ) // RandSecureInt returns a uniform random value in [1, max] or panic. diff --git a/test/apis/v1alpha1/zz_generated.deepcopy.go b/test/apis/v1alpha1/zz_generated.deepcopy.go index 081bec913..143bdee7b 100644 --- a/test/apis/v1alpha1/zz_generated.deepcopy.go +++ b/test/apis/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) From 6e79b7229b89a2a115e7066e90e28685221199e8 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 1 May 2025 00:23:23 +1000 Subject: [PATCH 2/8] Minor fixes Signed-off-by: michaelawyu --- apis/cluster/v1beta1/zz_generated.deepcopy.go | 2 +- apis/placement/v1alpha1/envelope_types.go | 4 ++++ apis/placement/v1alpha1/zz_generated.deepcopy.go | 2 +- apis/placement/v1beta1/clusterresourceplacement_types.go | 2 +- apis/placement/v1beta1/zz_generated.deepcopy.go | 2 +- apis/v1alpha1/zz_generated.deepcopy.go | 2 +- ...ement.kubernetes-fleet.io_clusterresourcebindings.yaml | 6 ++++++ ...ent.kubernetes-fleet.io_clusterresourceplacements.yaml | 8 ++++++++ test/apis/v1alpha1/zz_generated.deepcopy.go | 2 +- 9 files changed, 24 insertions(+), 6 deletions(-) diff --git a/apis/cluster/v1beta1/zz_generated.deepcopy.go b/apis/cluster/v1beta1/zz_generated.deepcopy.go index 17e71a1a2..7bb7f501c 100644 --- a/apis/cluster/v1beta1/zz_generated.deepcopy.go +++ b/apis/cluster/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/placement/v1alpha1/envelope_types.go b/apis/placement/v1alpha1/envelope_types.go index f54906957..f39a590eb 100644 --- a/apis/placement/v1alpha1/envelope_types.go +++ b/apis/placement/v1alpha1/envelope_types.go @@ -75,6 +75,10 @@ type ResourceEnvelope struct { Spec EnvelopeSpec `json:"spec"` } +// +kubebuilder:object:generate=false +// EnvelopeReader is an interface that allows retrieval of common information across all envelope CRs. +// +// Note (chenyu1): controller-gen should skip this type. 
type EnvelopeReader interface { // GetManifests returns the manifests in the envelope. GetManifests() map[string]Manifest diff --git a/apis/placement/v1alpha1/zz_generated.deepcopy.go b/apis/placement/v1alpha1/zz_generated.deepcopy.go index 5f8dd6064..bdc1ec2a8 100644 --- a/apis/placement/v1alpha1/zz_generated.deepcopy.go +++ b/apis/placement/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index a2569e4e1..c4a7420eb 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -887,7 +887,7 @@ type EnvelopeIdentifier struct { // TO-DO (chenyu1): drop the enum value ConfigMap after the new envelope forms become fully available. // Type of the envelope object. - // +kubebuilder:validation:Enum=ConfigMap,ClusterResourceEnvelope,ResourceEnvelope + // +kubebuilder:validation:Enum=ConfigMap;ClusterResourceEnvelope;ResourceEnvelope // +kubebuilder:default=ConfigMap // +kubebuilder:validation:Optional Type EnvelopeType `json:"type"` diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 69f7db476..dec32e99d 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 85550ca19..27a862c43 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml index a933f5f3c..d16b9eb43 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml @@ -831,6 +831,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -944,6 +946,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -1107,6 +1111,8 @@ spec: description: Type of the envelope object. 
enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index 195c11d65..20dd0ab94 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -2189,6 +2189,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -2304,6 +2306,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -2471,6 +2475,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -2524,6 +2530,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name diff --git a/test/apis/v1alpha1/zz_generated.deepcopy.go b/test/apis/v1alpha1/zz_generated.deepcopy.go index 143bdee7b..081bec913 100644 --- a/test/apis/v1alpha1/zz_generated.deepcopy.go +++ b/test/apis/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) From 23dd394a93a5ef40836688df9a80693d55e2e0f8 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 30 Apr 2025 19:16:00 -0700 Subject: [PATCH 3/8] add AI test --- .../workgenerator/envelope_test.go | 663 ++++++++++++++++++ test/e2e/enveloped_object_placement_test.go | 336 ++++++++- 2 files changed, 995 insertions(+), 4 deletions(-) create mode 100644 pkg/controllers/workgenerator/envelope_test.go diff --git a/pkg/controllers/workgenerator/envelope_test.go b/pkg/controllers/workgenerator/envelope_test.go new file mode 100644 index 000000000..d41db7d64 --- /dev/null +++ b/pkg/controllers/workgenerator/envelope_test.go @@ -0,0 +1,663 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workgenerator + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/test/utils/informer" +) + +var ctx = context.Background() + +func TestExtractManifestsFromEnvelopeCR(t *testing.T) { + tests := []struct { + name string + envelopeReader fleetv1alpha1.EnvelopeReader + want []fleetv1beta1.Manifest + wantErr bool + }{ + { + name: "valid ResourceEnvelope with one resource", + envelopeReader: &fleetv1alpha1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-envelope", + Namespace: "default", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "resource1": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + }, + }, + want: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + wantErr: false, + }, + { + name: "valid ClusterResourceEnvelope with one resource", + envelopeReader: &fleetv1alpha1.ClusterResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-envelope", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "clusterrole1": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + }, + }, + want: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + wantErr: false, + }, + { + name: "envelope with multiple resources", + envelopeReader: &fleetv1alpha1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-resource-envelope", + Namespace: "default", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "resource1": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm1","namespace":"default"},"data":{"key1":"value1"}}`), + }, + }, + "resource2": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm2","namespace":"default"},"data":{"key2":"value2"}}`), + }, + }, + }, + }, + }, + want: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm1","namespace":"default"},"data":{"key1":"value1"}}`), + }, + }, + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm2","namespace":"default"},"data":{"key2":"value2"}}`), + }, + }, + }, + wantErr: false, + }, + { + name: 
"envelope with invalid resource JSON", + envelopeReader: &fleetv1alpha1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-resource-envelope", + Namespace: "default", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "invalid": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{invalid_json}`), + }, + }, + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "empty envelope", + envelopeReader: &fleetv1alpha1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-envelope", + Namespace: "default", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{}, + }, + }, + want: []fleetv1beta1.Manifest{}, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := extractManifestsFromEnvelopeCR(tt.envelopeReader) + if (err != nil) != tt.wantErr { + t.Errorf("extractManifestsFromEnvelopeCR() error = %v, wantErr %v", err, tt.wantErr) + return + } + + // Compare manifests by their raw content + if len(got) != len(tt.want) { + t.Fatalf("extractManifestsFromEnvelopeCR() returned %d manifests, want %d", len(got), len(tt.want)) + } + + for i := range got { + var gotObj, wantObj map[string]interface{} + if err := json.Unmarshal(got[i].Raw, &gotObj); err != nil { + t.Fatalf("Failed to unmarshal result: %v", err) + } + if err := json.Unmarshal(tt.want[i].Raw, &wantObj); err != nil { + t.Fatalf("Failed to unmarshal expected: %v", err) + } + if diff := cmp.Diff(wantObj, gotObj); diff != "" { + t.Errorf("extractManifestsFromEnvelopeCR() mismatch (-want +got):\n%s", diff) + } + } + }) + } +} + +func TestCreateOrUpdateEnvelopeCRWorkObj(t *testing.T) { + scheme := serviceScheme(t) + + workNamePrefix := "test-work" + resourceBinding := &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: "test-crp", + }, + }, + Spec: fleetv1beta1.ClusterResourceBinding{}.Spec, + } + resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-snapshot", + }, + Spec: fleetv1beta1.ClusterResourceSnapshot{}.Spec, + } + + resourceEnvelope := &fleetv1alpha1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-envelope", + Namespace: "default", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "configmap": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + }, + } + + clusterResourceEnvelope := &fleetv1alpha1.ClusterResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-envelope", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "clusterrole": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + }, + } + + // Create an existing work for update test + existingWork := &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workNamePrefix, + Namespace: utils.GetClusterNamespace("test-cluster"), + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + 
+func TestCreateOrUpdateEnvelopeCRWorkObj(t *testing.T) {
+	scheme := serviceScheme(t)
+
+	workNamePrefix := "test-work"
+	resourceBinding := &fleetv1beta1.ClusterResourceBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-binding",
+			Labels: map[string]string{
+				fleetv1beta1.CRPTrackingLabel: "test-crp",
+			},
+		},
+	}
+	resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-snapshot",
+		},
+	}
+
+	resourceEnvelope := &fleetv1alpha1.ResourceEnvelope{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-envelope",
+			Namespace: "default",
+		},
+		Spec: fleetv1alpha1.EnvelopeSpec{
+			Manifests: map[string]fleetv1alpha1.Manifest{
+				"configmap": {
+					Data: runtime.RawExtension{
+						Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`),
+					},
+				},
+			},
+		},
+	}
+
+	clusterResourceEnvelope := &fleetv1alpha1.ClusterResourceEnvelope{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-cluster-envelope",
+		},
+		Spec: fleetv1alpha1.EnvelopeSpec{
+			Manifests: map[string]fleetv1alpha1.Manifest{
+				"clusterrole": {
+					Data: runtime.RawExtension{
+						Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`),
+					},
+				},
+			},
+		},
+	}
+
+	// Create an existing work for update test
+	existingWork := &fleetv1beta1.Work{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      workNamePrefix,
+			Namespace: utils.GetClusterNamespace("test-cluster"),
+			Labels: map[string]string{
+				fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
+				fleetv1beta1.CRPTrackingLabel:       resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+				fleetv1beta1.EnvelopeTypeLabel:      string(fleetv1beta1.ResourceEnvelopeType),
+				fleetv1beta1.EnvelopeNameLabel:      resourceEnvelope.Name,
+				fleetv1beta1.EnvelopeNamespaceLabel: resourceEnvelope.Namespace,
+			},
+		},
+		Spec: fleetv1beta1.WorkSpec{
+			Workload: fleetv1beta1.WorkloadTemplate{
+				Manifests: []fleetv1beta1.Manifest{
+					{
+						RawExtension: runtime.RawExtension{
+							Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"old-cm","namespace":"default"},"data":{"key":"old-value"}}`),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	tests := []struct {
+		name                                string
+		envelopeReader                      fleetv1alpha1.EnvelopeReader
+		resourceOverrideSnapshotHash        string
+		clusterResourceOverrideSnapshotHash string
+		existingObjects                     []client.Object
+		want                                *fleetv1beta1.Work
+		wantErr                             bool
+	}{
+		{
+			name:                                "create work for ResourceEnvelope",
+			envelopeReader:                      resourceEnvelope,
+			resourceOverrideSnapshotHash:        "resource-hash",
+			clusterResourceOverrideSnapshotHash: "cluster-resource-hash",
+			existingObjects:                     []client.Object{},
+			want: &fleetv1beta1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
+						fleetv1beta1.CRPTrackingLabel:       resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+						fleetv1beta1.EnvelopeTypeLabel:      string(fleetv1beta1.ResourceEnvelopeType),
+						fleetv1beta1.EnvelopeNameLabel:      resourceEnvelope.Name,
+						fleetv1beta1.EnvelopeNamespaceLabel: resourceEnvelope.Namespace,
+					},
+					Annotations: map[string]string{
+						fleetv1beta1.ParentResourceSnapshotNameAnnotation:                resourceBinding.Spec.ResourceSnapshotName,
+						fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation:        "resource-hash",
+						fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: "cluster-resource-hash",
+					},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name:                                "create work for ClusterResourceEnvelope",
+			envelopeReader:                      clusterResourceEnvelope,
+			resourceOverrideSnapshotHash:        "resource-hash",
+			clusterResourceOverrideSnapshotHash: "cluster-resource-hash",
+			existingObjects:                     []client.Object{},
+			want: &fleetv1beta1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
+						fleetv1beta1.CRPTrackingLabel:       resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+						fleetv1beta1.EnvelopeTypeLabel:      string(fleetv1beta1.ClusterResourceEnvelopeType),
+						fleetv1beta1.EnvelopeNameLabel:      clusterResourceEnvelope.Name,
+						fleetv1beta1.EnvelopeNamespaceLabel: "",
+					},
+					Annotations: map[string]string{
+						fleetv1beta1.ParentResourceSnapshotNameAnnotation:                resourceBinding.Spec.ResourceSnapshotName,
+						fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation:        "resource-hash",
+						fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: "cluster-resource-hash",
+					},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name:                                "update existing work for ResourceEnvelope",
+			envelopeReader:                      resourceEnvelope,
+			resourceOverrideSnapshotHash:        "new-resource-hash",
+			clusterResourceOverrideSnapshotHash: "new-cluster-resource-hash",
+			existingObjects:                     []client.Object{existingWork},
+			want: &fleetv1beta1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      workNamePrefix,
+					Namespace: utils.GetClusterNamespace("test-cluster"),
+					Labels: map[string]string{
+						fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
+						fleetv1beta1.CRPTrackingLabel:       resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+						fleetv1beta1.EnvelopeTypeLabel:      string(fleetv1beta1.ResourceEnvelopeType),
+						fleetv1beta1.EnvelopeNameLabel:      resourceEnvelope.Name,
+
fleetv1beta1.EnvelopeNamespaceLabel: resourceEnvelope.Namespace, + }, + Annotations: map[string]string{ + fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, + fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: "new-resource-hash", + fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: "new-cluster-resource-hash", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set up binding with expected target cluster + resourceBinding.Spec.TargetCluster = "test-cluster" + // Set up binding with expected resource snapshot name + resourceBinding.Spec.ResourceSnapshotName = "test-snapshot" + + // Create fake client with scheme + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tt.existingObjects...). + Build() + + // Create reconciler + r := &Reconciler{ + Client: fakeClient, + recorder: record.NewFakeRecorder(10), + InformerManager: &informer.FakeManager{}, + } + + // Call the function under test + got, err := r.createOrUpdateEnvelopeCRWorkObj( + ctx, + workNamePrefix, + resourceBinding, + resourceSnapshot, + tt.envelopeReader, + tt.resourceOverrideSnapshotHash, + tt.clusterResourceOverrideSnapshotHash, + ) + + if (err != nil) != tt.wantErr { + t.Errorf("createOrUpdateEnvelopeCRWorkObj() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err == nil { + // Verify the basic structure of the created/updated work + if got.Name == "" { + t.Error("createOrUpdateEnvelopeCRWorkObj() returned work with empty name") + } + + if got.Namespace != utils.GetClusterNamespace("test-cluster") { + t.Errorf("createOrUpdateEnvelopeCRWorkObj() returned work with namespace %s, want %s", + got.Namespace, utils.GetClusterNamespace("test-cluster")) + } + + // Check labels + for key, expectedValue := range tt.want.Labels { + if got.Labels[key] != expectedValue { + t.Errorf("createOrUpdateEnvelopeCRWorkObj() returned work with label %s=%s, want %s", + key, got.Labels[key], expectedValue) + } + } + + // Check annotations + for key, expectedValue := range tt.want.Annotations { + if got.Annotations[key] != expectedValue { + t.Errorf("createOrUpdateEnvelopeCRWorkObj() returned work with annotation %s=%s, want %s", + key, got.Annotations[key], expectedValue) + } + } + + // Check that manifests exist + if len(got.Spec.Workload.Manifests) == 0 { + t.Error("createOrUpdateEnvelopeCRWorkObj() returned work with empty manifests") + } + } + }) + } +} + +// Test processOneSelectedResource with both envelope types +func TestProcessOneSelectedResource(t *testing.T) { + scheme := serviceScheme(t) + + workNamePrefix := "test-work" + resourceBinding := &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: "test-crp", + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + TargetCluster: "test-cluster", + }, + } + snapshot := &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-snapshot", + }, + } + + // Convert the envelope objects to ResourceContent + resourceEnvelopeContent := createResourceContent(t, &fleetv1alpha1.ResourceEnvelope{ + TypeMeta: 
metav1.TypeMeta{ + APIVersion: fleetv1alpha1.GroupVersion.String(), + Kind: "ResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-resource-envelope", + Namespace: "default", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "configmap": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + }, + }) + + clusterResourceEnvelopeContent := createResourceContent(t, &fleetv1alpha1.ClusterResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: fleetv1alpha1.GroupVersion.String(), + Kind: "ClusterResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-envelope", + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: map[string]fleetv1alpha1.Manifest{ + "clusterrole": { + Data: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + }, + }) + + configMapEnvelopeContent := createResourceContent(t, &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config-map-envelope", + Namespace: "default", + Annotations: map[string]string{ + fleetv1beta1.EnvelopeConfigMapAnnotation: "true", + }, + }, + Data: map[string]string{ + "resource1": `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"cm1","namespace":"default"},"data":{"key1":"value1"}}`, + }, + }) + + // Regular resource content that's not an envelope + regularResourceContent := createResourceContent(t, &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "regular-config-map", + Namespace: "default", + }, + Data: map[string]string{ + "key": "value", + }, + }) + + tests := []struct { + name string + selectedResource *fleetv1beta1.ResourceContent + resourceOverrideSnapshotHash string + clusterResourceOverrideSnapshotHash string + wantNewWorkLen int + wantSimpleManifestsLen int + wantErr bool + }{ + { + name: "process ResourceEnvelope", + selectedResource: resourceEnvelopeContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 1, // Should create a new work + wantSimpleManifestsLen: 0, // Should not add to simple manifests + wantErr: false, + }, + { + name: "process ClusterResourceEnvelope", + selectedResource: clusterResourceEnvelopeContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 1, // Should create a new work + wantSimpleManifestsLen: 0, // Should not add to simple manifests + wantErr: false, + }, + { + name: "process ConfigMap envelope", + selectedResource: configMapEnvelopeContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 1, // Should create a new work + wantSimpleManifestsLen: 0, // Should not add to simple manifests + wantErr: false, + }, + { + name: "process regular resource", + selectedResource: regularResourceContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 0, // Should NOT create a new work + wantSimpleManifestsLen: 1, // Should add to simple 
manifests + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake client with scheme + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + // Create reconciler + r := &Reconciler{ + Client: fakeClient, + recorder: record.NewFakeRecorder(10), + InformerManager: &informer.FakeManager{}, + } + + // Prepare input parameters + activeWork := make(map[string]*fleetv1beta1.Work) + newWork := make([]*fleetv1beta1.Work, 0) + simpleManifests := make([]fleetv1beta1.Manifest, 0) + + gotNewWork, gotSimpleManifests, err := r.processOneSelectedResource( + ctx, + tt.selectedResource, + resourceBinding, + snapshot, + workNamePrefix, + tt.resourceOverrideSnapshotHash, + tt.clusterResourceOverrideSnapshotHash, + activeWork, + newWork, + simpleManifests, + ) + + if (err != nil) != tt.wantErr { + t.Errorf("processOneSelectedResource() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if len(gotNewWork) != tt.wantNewWorkLen { + t.Errorf("processOneSelectedResource() returned %d new works, want %d", len(gotNewWork), tt.wantNewWorkLen) + } + + if len(gotSimpleManifests) != tt.wantSimpleManifestsLen { + t.Errorf("processOneSelectedResource() returned %d simple manifests, want %d", len(gotSimpleManifests), tt.wantSimpleManifestsLen) + } + + // Check active work got populated + if tt.wantNewWorkLen > 0 && len(activeWork) != tt.wantNewWorkLen { + t.Errorf("processOneSelectedResource() populated %d active works, want %d", len(activeWork), tt.wantNewWorkLen) + } + }) + } +} + +func createResourceContent(t *testing.T, obj runtime.Object) *fleetv1beta1.ResourceContent { + jsonData, err := json.Marshal(obj) + if err != nil { + t.Fatalf("Failed to marshal object: %v", err) + } + return &fleetv1beta1.ResourceContent{ + Raw: jsonData, + } +} + +func serviceScheme(t *testing.T) *runtime.Scheme { + scheme := runtime.NewScheme() + + // Add types needed for testing + if err := fleetv1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add fleetv1alpha1 types to scheme: %v", err) + } + if err := fleetv1beta1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add fleetv1beta1 types to scheme: %v", err) + } + if err := corev1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add corev1 types to scheme: %v", err) + } + + return scheme +} diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 3339fa304..f32a2be9e 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -28,10 +28,12 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/controllers/workapplier" "github.com/kubefleet-dev/kubefleet/pkg/utils" @@ -43,17 +45,22 @@ var ( // pre loaded test manifests testConfigMap, testEnvelopConfigMap corev1.ConfigMap testEnvelopeResourceQuota corev1.ResourceQuota + testClusterRole rbacv1.ClusterRole + testResourceEnvelope fleetv1alpha1.ResourceEnvelope + testClusterResourceEnvelope fleetv1alpha1.ClusterResourceEnvelope ) const ( - wrapperCMName = "wrapper" - - cmDataKey = "foo" - cmDataVal = "bar" + wrapperCMName = "wrapper" + cmDataKey = "foo" + 
cmDataVal                   = "bar"
+	resourceEnvelopeName        = "test-resource-envelope"
+	clusterResourceEnvelopeName = "test-cluster-envelope"
 )
 
 // Note that this container will run in parallel with other containers.
 var _ = Describe("placing wrapped resources using a CRP", func() {
+	// Original test cases for ConfigMap envelope...
 	Context("Test a CRP place enveloped objects successfully", Ordered, func() {
 		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
 		workNamespaceName := appNamespace().Name
@@ -482,6 +489,209 @@ var _ = Describe("placing wrapped resources using a CRP", func() {
 		ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd})
 	})
 })
+
+	Context("Test ResourceEnvelope and ClusterResourceEnvelope placement", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		workNamespaceName := appNamespace().Name
+		var wantSelectedResources []placementv1beta1.ResourceIdentifier
+
+		BeforeAll(func() {
+			// Create the test resources.
+			readAllEnvelopTypes()
+			wantSelectedResources = []placementv1beta1.ResourceIdentifier{
+				{
+					Kind:    "Namespace",
+					Name:    workNamespaceName,
+					Version: "v1",
+				},
+				{
+					Kind:      "ResourceEnvelope",
+					Name:      testResourceEnvelope.Name,
+					Version:   "v1alpha1",
+					Group:     "placement.kubernetes-fleet.io",
+					Namespace: workNamespaceName,
+				},
+				{
+					Kind:    "ClusterResourceEnvelope",
+					Name:    testClusterResourceEnvelope.Name,
+					Version: "v1alpha1",
+					Group:   "placement.kubernetes-fleet.io",
+				},
+			}
+		})
+
+		It("Create the test envelope resources", createAllEnvelopTypeResources)
+
+		It("Create the CRP that selects the namespace and envelopes", func() {
+			crp := &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: crpName,
+					// Add a custom finalizer to better observe controller behavior
+					Finalizers: []string{customDeletionBlockerFinalizer},
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ResourceSelector{
+						{
+							Group:   "",
+							Version: "v1",
+							Kind:    "Namespace",
+							Name:    workNamespaceName,
+						},
+						{
+							Group:     "placement.kubernetes-fleet.io",
+							Version:   "v1alpha1",
+							Kind:      "ResourceEnvelope",
+							Name:      testResourceEnvelope.Name,
+							Namespace: ptr.To(workNamespaceName),
+						},
+						{
+							Group:   "placement.kubernetes-fleet.io",
+							Version: "v1alpha1",
+							Kind:    "ClusterResourceEnvelope",
+							Name:    testClusterResourceEnvelope.Name,
+						},
+					},
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+						RollingUpdate: &placementv1beta1.RollingUpdateConfig{
+							UnavailablePeriodSeconds: ptr.To(2),
+						},
+					},
+				},
+			}
+			Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
+		})
+
+		It("should update CRP status as expected", func() {
+			crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true)
+			Eventually(crpStatusUpdatedActual, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		It("should place the resources from both envelope types on all member clusters", func() {
+			for idx := range allMemberClusters {
+				memberCluster := allMemberClusters[idx]
+				workResourcesPlacedActual := checkBothEnvelopeTypesPlacement(memberCluster)
+				Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName)
+			}
+		})
+
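The next step relies on spec.scopes of a ResourceQuota being immutable after creation: rewriting the envelope so that the wrapped quota changes its scopes makes the member-side apply fail with a "field is immutable" error, which then surfaces in the CRP status as a FailedPlacement. A standalone sketch of the same rejection against any cluster (the helper name, client variable, and quota name are hypothetical):

func demoImmutableScopes(ctx context.Context, c client.Client) error {
	quota := &corev1.ResourceQuota{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: "default", Name: "demo-quota"}, quota); err != nil {
		return err
	}
	// Changing spec.scopes on an existing quota is rejected by API server validation.
	quota.Spec.Scopes = append(quota.Spec.Scopes, corev1.ResourceQuotaScopeNotTerminating)
	err := c.Update(ctx, quota)
	if apierrors.IsInvalid(err) {
		// The rejection message contains "field is immutable", which is exactly
		// what the status check below matches on.
		return nil
	}
	return err
}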
+		It("Update the ResourceEnvelope with invalid content", func() {
+			// Get the current ResourceEnvelope
+			resourceEnvelope := &fleetv1alpha1.ResourceEnvelope{}
+			Expect(hubClient.Get(ctx, types.NamespacedName{
+				Namespace: workNamespaceName,
+				Name:      testResourceEnvelope.Name,
+			}, resourceEnvelope)).To(Succeed(), "Failed to get ResourceEnvelope")
+
+			// Update with a ResourceQuota that changes an immutable field (spec.scopes)
+			badResourceQuota := testEnvelopeResourceQuota.DeepCopy()
+			badResourceQuota.Spec.Scopes = []corev1.ResourceQuotaScope{
+				corev1.ResourceQuotaScopeNotBestEffort,
+				corev1.ResourceQuotaScopeNotTerminating,
+			}
+
+			badQuotaBytes, err := json.Marshal(badResourceQuota)
+			Expect(err).Should(Succeed())
+
+			// Replace the first resource with the invalid one
+			resourceEnvelope.Spec.Manifests["resourceQuota1.yaml"] = fleetv1alpha1.Manifest{
+				Data: runtime.RawExtension{Raw: badQuotaBytes},
+			}
+
+			Expect(hubClient.Update(ctx, resourceEnvelope)).To(Succeed(), "Failed to update ResourceEnvelope")
+		})
+
+		It("should update CRP status showing failure due to invalid ResourceEnvelope content", func() {
+			Eventually(func() error {
+				crp := &placementv1beta1.ClusterResourcePlacement{}
+				if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil {
+					return err
+				}
+
+				// Check for failed conditions
+				if diff := cmp.Diff(crp.Status.Conditions, crpAppliedFailedConditions(crp.Generation), crpStatusCmpOptions...); diff != "" {
+					return fmt.Errorf("CRP conditions don't show application failure: %s", diff)
+				}
+
+				// Verify at least one placement has a failed placement with immutable field error
+				foundFailure := false
+				for _, placementStatus := range crp.Status.PlacementStatuses {
+					for _, failedPlacement := range placementStatus.FailedPlacements {
+						if failedPlacement.ResourceIdentifier.Envelope != nil &&
+							failedPlacement.ResourceIdentifier.Envelope.Type == placementv1beta1.ResourceEnvelopeType &&
+							strings.Contains(failedPlacement.Condition.Message, "field is immutable") {
+							foundFailure = true
+							break
+						}
+					}
+					if foundFailure {
+						break
+					}
+				}
+
+				if !foundFailure {
+					return fmt.Errorf("didn't find expected failure for immutable field in ResourceEnvelope")
+				}
+
+				return nil
+			}, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to see expected failure in CRP status")
+		})
+
+		It("Fix the ResourceEnvelope with valid content", func() {
+			// Get the current ResourceEnvelope
+			resourceEnvelope := &fleetv1alpha1.ResourceEnvelope{}
+			Expect(hubClient.Get(ctx, types.NamespacedName{
+				Namespace: workNamespaceName,
+				Name:      testResourceEnvelope.Name,
+			}, resourceEnvelope)).To(Succeed(), "Failed to get ResourceEnvelope")
+
+			// Reset to valid content
+			goodResourceQuota := testEnvelopeResourceQuota.DeepCopy()
+			goodQuotaBytes, err := json.Marshal(goodResourceQuota)
+			Expect(err).Should(Succeed())
+
+			// Replace the first resource with the valid one
+			resourceEnvelope.Spec.Manifests["resourceQuota1.yaml"] = fleetv1alpha1.Manifest{
+				Data: runtime.RawExtension{Raw: goodQuotaBytes},
+			}
+
+			Expect(hubClient.Update(ctx, resourceEnvelope)).To(Succeed(), "Failed to update ResourceEnvelope")
+		})
+
+		It("should update CRP status as success again", func() {
+			crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", true)
+			Eventually(crpStatusUpdatedActual, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		It("should place the fixed resources on all member clusters", func() {
+			for idx := range allMemberClusters {
+				memberCluster := 
allMemberClusters[idx] + workResourcesPlacedActual := checkBothEnvelopeTypesPlacement(memberCluster) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("can delete the CRP", func() { + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + } + Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") + }) + + It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) + + It("should remove controller finalizers from CRP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + }) }) var _ = Describe("Process objects with generate name", Ordered, func() { @@ -775,3 +985,121 @@ func createWrappedResourcesForEnvelopTest() { testEnvelopConfigMap.Data["resourceQuota.yaml"] = string(resourceQuotaByte) Expect(hubClient.Create(ctx, &testEnvelopConfigMap)).To(Succeed(), "Failed to create testEnvelop config map %s", testEnvelopConfigMap.Name) } + +// readAllEnvelopTypes reads all envelope type test manifests +func readAllEnvelopTypes() { + By("Read the ConfigMap resources") + testConfigMap = corev1.ConfigMap{} + err := utils.GetObjectFromManifest("resources/test-configmap.yaml", &testConfigMap) + Expect(err).Should(Succeed()) + + By("Read ResourceQuota") + testEnvelopeResourceQuota = corev1.ResourceQuota{} + err = utils.GetObjectFromManifest("resources/resourcequota.yaml", &testEnvelopeResourceQuota) + Expect(err).Should(Succeed()) + + By("Read ClusterRole") + testClusterRole = rbacv1.ClusterRole{} + err = utils.GetObjectFromManifest("resources/test-clusterrole.yaml", &testClusterRole) + Expect(err).Should(Succeed()) + + By("Read ResourceEnvelope template") + testResourceEnvelope = fleetv1alpha1.ResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: fleetv1alpha1.GroupVersion.String(), + Kind: "ResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: resourceEnvelopeName, + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: make(map[string]fleetv1alpha1.Manifest), + }, + } + + By("Read ClusterResourceEnvelope template") + testClusterResourceEnvelope = fleetv1alpha1.ClusterResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: fleetv1alpha1.GroupVersion.String(), + Kind: "ClusterResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: clusterResourceEnvelopeName, + }, + Spec: fleetv1alpha1.EnvelopeSpec{ + Manifests: make(map[string]fleetv1alpha1.Manifest), + }, + } +} + +// createAllEnvelopTypeResources creates all types of envelope resources on the hub cluster for testing +func createAllEnvelopTypeResources() { + ns := appNamespace() + Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) + + // Update namespaces for namespaced resources + testConfigMap.Namespace = ns.Name + testEnvelopeResourceQuota.Namespace = ns.Name + testResourceEnvelope.Namespace = ns.Name + + // Create ResourceEnvelope with ResourceQuota inside + quotaBytes, err := json.Marshal(testEnvelopeResourceQuota) + 
Expect(err).Should(Succeed()) + testResourceEnvelope.Spec.Manifests["resourceQuota1.yaml"] = fleetv1alpha1.Manifest{ + Data: runtime.RawExtension{Raw: quotaBytes}, + } + testResourceEnvelope.Spec.Manifests["resourceQuota2.yaml"] = fleetv1alpha1.Manifest{ + Data: runtime.RawExtension{Raw: quotaBytes}, // Include a duplicate to test multiple resources + } + Expect(hubClient.Create(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to create ResourceEnvelope") + + // Create ClusterResourceEnvelope with ClusterRole inside + roleBytes, err := json.Marshal(testClusterRole) + Expect(err).Should(Succeed()) + testClusterResourceEnvelope.Spec.Manifests["clusterRole.yaml"] = fleetv1alpha1.Manifest{ + Data: runtime.RawExtension{Raw: roleBytes}, + } + Expect(hubClient.Create(ctx, &testClusterResourceEnvelope)).To(Succeed(), "Failed to create ClusterResourceEnvelope") +} + +// checkBothEnvelopeTypesPlacement verifies that resources from both envelope types were properly placed +func checkBothEnvelopeTypesPlacement(memberCluster *framework.Cluster) func() error { + workNamespaceName := appNamespace().Name + return func() error { + // Verify namespace exists on target cluster + if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { + return err + } + + // Check that ResourceQuota from ResourceEnvelope was placed + By("Check ResourceQuota from ResourceEnvelope") + placedResourceQuota := &corev1.ResourceQuota{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ + Namespace: workNamespaceName, + Name: testEnvelopeResourceQuota.Name, + }, placedResourceQuota); err != nil { + return fmt.Errorf("failed to find ResourceQuota from ResourceEnvelope: %w", err) + } + + // Verify the ResourceQuota matches expected spec + if diff := cmp.Diff(placedResourceQuota.Spec, testEnvelopeResourceQuota.Spec); diff != "" { + return fmt.Errorf("ResourceQuota from ResourceEnvelope diff (-got, +want): %s", diff) + } + + // Check that ClusterRole from ClusterResourceEnvelope was placed + By("Check ClusterRole from ClusterResourceEnvelope") + placedClusterRole := &rbacv1.ClusterRole{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ + Name: testClusterRole.Name, + }, placedClusterRole); err != nil { + return fmt.Errorf("failed to find ClusterRole from ClusterResourceEnvelope: %w", err) + } + + // Verify the ClusterRole matches expected rules + if diff := cmp.Diff(placedClusterRole.Rules, testClusterRole.Rules); diff != "" { + return fmt.Errorf("ClusterRole from ClusterResourceEnvelope diff (-got, +want): %s", diff) + } + + return nil + } +} From 8dc5d94c871df33390d24ec6904b745b18bfb22f Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 14 May 2025 15:58:05 -0700 Subject: [PATCH 4/8] add CRD envelop support Signed-off-by: Ryan Zhang --- .../v1/clusterresourceplacement_types.go | 10 - apis/placement/v1/commons.go | 74 -- apis/placement/v1/policysnapshot_types.go | 11 - apis/placement/v1/resourcesnapshot_types.go | 24 - apis/placement/v1/work_types.go | 15 - .../v1beta1/clusterresourceplacement_types.go | 9 +- apis/placement/v1beta1/commons.go | 8 + apis/placement/v1beta1/envelope_types.go | 85 +++ .../v1beta1/zz_generated.deepcopy.go | 64 ++ ...tes-fleet.io_clusterresourceenvelopes.yaml | 58 ++ ...kubernetes-fleet.io_resourceenvelopes.yaml | 58 ++ ...etes-fleet.io_clusterresourcebindings.yaml | 6 + ...es-fleet.io_clusterresourceplacements.yaml | 8 + examples/envelopes/namespacescoped.yaml | 7 +- .../resource_selector.go | 7 +- 
pkg/controllers/workgenerator/controller.go | 240 +++--- .../controller_integration_test.go | 218 +++++- .../workgenerator/controller_test.go | 157 ---- pkg/controllers/workgenerator/envelope.go | 225 ++++++ .../workgenerator/envelope_test.go | 716 ++++++++++++++++++ .../workgenerator/manifests/clusterrole.yaml | 8 + .../manifests/test-clusterscoped-envelop.yaml | 33 + ...figmap.yaml => test-resource-envelop.yaml} | 10 +- ...gmap2.yaml => test-resource-envelop2.yaml} | 10 +- .../workgenerator/manifests/webhook.yaml | 41 +- pkg/controllers/workgenerator/suite_test.go | 38 +- pkg/utils/apiresources.go | 106 ++- pkg/utils/common.go | 25 +- pkg/utils/common_test.go | 34 +- pkg/utils/informer/informermanager.go | 2 +- test/e2e/enveloped_object_placement_test.go | 419 ++++++---- test/e2e/join_and_leave_test.go | 29 +- test/e2e/placement_negative_cases_test.go | 43 +- test/e2e/resources/resourcequota.yaml | 8 +- test/e2e/resources/test-clusterrole.yaml | 8 + test/e2e/resources/test-configmap.yaml | 2 + test/e2e/resources/test-daemonset.yaml | 1 + test/e2e/resources/test-deployment.yaml | 7 + test/e2e/resources/test-envelope-object.yaml | 8 +- test/e2e/resources/test-statefulset.yaml | 7 + test/e2e/rollout_test.go | 68 +- test/e2e/utils_test.go | 10 +- 42 files changed, 2110 insertions(+), 807 deletions(-) create mode 100644 charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceenvelopes.yaml create mode 100644 charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_resourceenvelopes.yaml create mode 100644 pkg/controllers/workgenerator/envelope.go create mode 100644 pkg/controllers/workgenerator/envelope_test.go create mode 100644 pkg/controllers/workgenerator/manifests/clusterrole.yaml create mode 100644 pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml rename pkg/controllers/workgenerator/manifests/{test-envelop-configmap.yaml => test-resource-envelop.yaml} (64%) rename pkg/controllers/workgenerator/manifests/{test-envelop-configmap2.yaml => test-resource-envelop2.yaml} (64%) create mode 100644 test/e2e/resources/test-clusterrole.yaml diff --git a/apis/placement/v1/clusterresourceplacement_types.go b/apis/placement/v1/clusterresourceplacement_types.go index 272b8584f..aa48ee012 100644 --- a/apis/placement/v1/clusterresourceplacement_types.go +++ b/apis/placement/v1/clusterresourceplacement_types.go @@ -23,16 +23,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -const ( - // ClusterResourcePlacementCleanupFinalizer is a finalizer added by the CRP controller to all CRPs, to make sure - // that the CRP controller can react to CRP deletions if necessary. - ClusterResourcePlacementCleanupFinalizer = fleetPrefix + "crp-cleanup" - - // SchedulerCRPCleanupFinalizer is a finalizer added by the scheduler to CRPs, to make sure - // that all bindings derived from a CRP can be cleaned up after the CRP is deleted. - SchedulerCRPCleanupFinalizer = fleetPrefix + "scheduler-cleanup" -) - // +genclient // +genclient:nonNamespaced // +kubebuilder:object:root=true diff --git a/apis/placement/v1/commons.go b/apis/placement/v1/commons.go index 45f257558..534c0343c 100644 --- a/apis/placement/v1/commons.go +++ b/apis/placement/v1/commons.go @@ -16,80 +16,6 @@ limitations under the License. 
package v1 -const ( - ClusterResourcePlacementKind = "ClusterResourcePlacement" - ClusterResourcePlacementResource = "clusterresourceplacements" - ClusterResourceBindingKind = "ClusterResourceBinding" - ClusterResourceSnapshotKind = "ClusterResourceSnapshot" - ClusterSchedulingPolicySnapshotKind = "ClusterSchedulingPolicySnapshot" - WorkKind = "Work" - AppliedWorkKind = "AppliedWork" -) - -const ( - // Unprefixed labels/annotations are reserved for end-users - // we will add a kubernetes-fleet.io to designate these labels/annotations as official fleet labels/annotations. - // See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions - fleetPrefix = "kubernetes-fleet.io/" - - // MemberClusterFinalizer is used to make sure that we handle gc of all the member cluster resources on the hub cluster. - MemberClusterFinalizer = fleetPrefix + "membercluster-finalizer" - - // WorkFinalizer is used by the work generator to make sure that the binding is not deleted until the work objects - // it generates are all deleted, or used by the work controller to make sure the work has been deleted in the member - // cluster. - WorkFinalizer = fleetPrefix + "work-cleanup" - - // CRPTrackingLabel is the label that points to the cluster resource policy that creates a resource binding. - CRPTrackingLabel = fleetPrefix + "parent-CRP" - - // IsLatestSnapshotLabel tells if the snapshot is the latest one. - IsLatestSnapshotLabel = fleetPrefix + "is-latest-snapshot" - - // FleetResourceLabelKey is that label that indicates the resource is a fleet resource. - FleetResourceLabelKey = fleetPrefix + "is-fleet-resource" - - // FirstWorkNameFmt is the format of the name of the work generated with first resource snapshot . - // The name of the first work is {crpName}-work. - FirstWorkNameFmt = "%s-work" - - // WorkNameWithSubindexFmt is the format of the name of a work generated with resource snapshot with subindex. - // The name of the first work is {crpName}-{subindex}. - WorkNameWithSubindexFmt = "%s-%d" - - // WorkNameWithConfigEnvelopeFmt is the format of the name of a work generated with config envelop. - // The format is {workPrefix}-configMap-uuid - WorkNameWithConfigEnvelopeFmt = "%s-configmap-%s" - - // ParentResourceSnapshotIndexLabel is the label applied to work that contains the index of the resource snapshot that generates the work. - ParentResourceSnapshotIndexLabel = fleetPrefix + "parent-resource-snapshot-index" - - // ParentBindingLabel is the label applied to work that contains the name of the binding that generates the work. - ParentBindingLabel = fleetPrefix + "parent-resource-binding" - - // CRPGenerationAnnotation is the annotation that indicates the generation of the CRP from - // which an object is derived or last updated. - CRPGenerationAnnotation = fleetPrefix + "CRP-generation" - - // EnvelopeConfigMapAnnotation is the annotation that indicates the configmap is an envelope configmap that contains resources - // we need to apply to the member cluster instead of the configMap itself. - EnvelopeConfigMapAnnotation = fleetPrefix + "envelope-configmap" - - // EnvelopeTypeLabel is the label that marks the work object as generated from an envelope object. - // The value of the annotation is the type of the envelope object. - EnvelopeTypeLabel = fleetPrefix + "envelope-work" - - // EnvelopeNamespaceLabel is the label that contains the namespace of the envelope object that the work is generated from. 
- EnvelopeNamespaceLabel = fleetPrefix + "envelope-namespace" - - // EnvelopeNameLabel is the label that contains the name of the envelope object that the work is generated from. - EnvelopeNameLabel = fleetPrefix + "envelope-name" - - // PreviousBindingStateAnnotation is the annotation that records the previous state of a binding. - // This is used to remember if an "unscheduled" binding was moved from a "bound" state or a "scheduled" state. - PreviousBindingStateAnnotation = fleetPrefix + "previous-binding-state" -) - // NamespacedName comprises a resource name, with a mandatory namespace. type NamespacedName struct { // Name is the name of the namespaced scope resource. diff --git a/apis/placement/v1/policysnapshot_types.go b/apis/placement/v1/policysnapshot_types.go index 72a5a099e..6c117aa89 100644 --- a/apis/placement/v1/policysnapshot_types.go +++ b/apis/placement/v1/policysnapshot_types.go @@ -21,17 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ( - // PolicyIndexLabel is the label that indicate the policy snapshot index of a cluster policy. - PolicyIndexLabel = fleetPrefix + "policy-index" - - // PolicySnapshotNameFmt is clusterPolicySnapshot name format: {CRPName}-{PolicySnapshotIndex}. - PolicySnapshotNameFmt = "%s-%d" - - // NumberOfClustersAnnotation is the annotation that indicates how many clusters should be selected for selectN placement type. - NumberOfClustersAnnotation = fleetPrefix + "number-of-clusters" -) - // +genclient // +genclient:nonNamespaced // +kubebuilder:object:root=true diff --git a/apis/placement/v1/resourcesnapshot_types.go b/apis/placement/v1/resourcesnapshot_types.go index e3b4eb76c..ea930dea8 100644 --- a/apis/placement/v1/resourcesnapshot_types.go +++ b/apis/placement/v1/resourcesnapshot_types.go @@ -22,30 +22,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -const ( - // ResourceIndexLabel is the label that indicate the resource snapshot index of a cluster resource snapshot. - ResourceIndexLabel = fleetPrefix + "resource-index" - - // ResourceGroupHashAnnotation is the annotation that contains the value of the sha-256 hash - // value of all the snapshots belong to the same snapshot index. - ResourceGroupHashAnnotation = fleetPrefix + "resource-hash" - - // NumberOfEnvelopedObjectsAnnotation is the annotation that contains the number of the enveloped objects in the resource snapshot group. - NumberOfEnvelopedObjectsAnnotation = fleetPrefix + "number-of-enveloped-object" - - // NumberOfResourceSnapshotsAnnotation is the annotation that contains the total number of resource snapshots. - NumberOfResourceSnapshotsAnnotation = fleetPrefix + "number-of-resource-snapshots" - - // SubindexOfResourceSnapshotAnnotation is the annotation to store the subindex of resource snapshot in the group. - SubindexOfResourceSnapshotAnnotation = fleetPrefix + "subindex-of-resource-snapshot" - - // ResourceSnapshotNameFmt is resourcePolicySnapshot name format: {CRPName}-{resourceIndex}-snapshot. - ResourceSnapshotNameFmt = "%s-%d-snapshot" - - // ResourceSnapshotNameWithSubindexFmt is resourcePolicySnapshot name with subindex format: {CRPName}-{resourceIndex}-{subindex}. 
- ResourceSnapshotNameWithSubindexFmt = "%s-%d-%d" -) - // +genclient // +genclient:nonNamespaced // +kubebuilder:object:root=true diff --git a/apis/placement/v1/work_types.go b/apis/placement/v1/work_types.go index 3ae1a71c3..8910ff737 100644 --- a/apis/placement/v1/work_types.go +++ b/apis/placement/v1/work_types.go @@ -37,21 +37,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// The following definitions are originally declared in the controllers/workv1alpha1/manager.go file. -const ( - // ManifestHashAnnotation is the annotation that indicates whether the spec of the object has been changed or not. - ManifestHashAnnotation = fleetPrefix + "spec-hash" - - // LastAppliedConfigAnnotation is to record the last applied configuration on the object. - LastAppliedConfigAnnotation = fleetPrefix + "last-applied-configuration" - - // WorkConditionTypeApplied represents workload in Work is applied successfully on the spoke cluster. - WorkConditionTypeApplied = "Applied" - - // WorkConditionTypeAvailable represents workload in Work is available on the spoke cluster. - WorkConditionTypeAvailable = "Available" -) - // This api is copied from https://github.com/kubernetes-sigs/work-api/blob/master/pkg/apis/v1alpha1/work_types.go. // Renamed original "ResourceIdentifier" so that it won't conflict with ResourceIdentifier defined in the clusterresourceplacement_types.go. diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 896c90ddd..db5135b7b 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -891,7 +891,7 @@ type EnvelopeIdentifier struct { Namespace string `json:"namespace,omitempty"` // Type of the envelope object. - // +kubebuilder:validation:Enum=ConfigMap + // +kubebuilder:validation:Enum=ConfigMap;ClusterResourceEnvelope;ResourceEnvelope // +kubebuilder:default=ConfigMap // +kubebuilder:validation:Optional Type EnvelopeType `json:"type"` @@ -903,7 +903,14 @@ type EnvelopeType string const ( // ConfigMapEnvelopeType means the envelope object is of type `ConfigMap`. + // TO-DO (chenyu1): drop this type after the configMap-based envelopes become obsolete. ConfigMapEnvelopeType EnvelopeType = "ConfigMap" + + // ClusterResourceEnvelopeType is the envelope type that represents the ClusterResourceEnvelope custom resource. + ClusterResourceEnvelopeType EnvelopeType = "ClusterResourceEnvelope" + + // ResourceEnvelopeType is the envelope type that represents the ResourceEnvelope custom resource. + ResourceEnvelopeType EnvelopeType = "ResourceEnvelope" ) // ResourcePlacementStatus represents the placement status of selected resources for one target cluster. diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index 02cf2e3ee..f6205cec1 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -41,6 +41,10 @@ const ( ClusterResourcePlacementEvictionKind = "ClusterResourcePlacementEviction" // ClusterResourcePlacementDisruptionBudgetKind is the kind of the ClusterResourcePlacementDisruptionBudget. ClusterResourcePlacementDisruptionBudgetKind = "ClusterResourcePlacementDisruptionBudget" + // ResourceEnvelopeKind is the kind of the ResourceEnvelope. + ResourceEnvelopeKind = "ResourceEnvelope" + // ClusterResourceEnvelopeKind is the kind of the ClusterResourceEnvelope. 
+ ClusterResourceEnvelopeKind = "ClusterResourceEnvelope" ) const ( @@ -78,6 +82,10 @@ const ( // The format is {workPrefix}-configMap-uuid. WorkNameWithConfigEnvelopeFmt = "%s-configmap-%s" + // WorkNameWithEnvelopeCRFmt is the format of the name of a work generated with an envelope CR. + // The format is [WORK-PREFIX]-envelope-[UUID]. + WorkNameWithEnvelopeCRFmt = "%s-envelope-%s" + // ParentClusterResourceOverrideSnapshotHashAnnotation is the annotation to work that contains the hash of the parent cluster resource override snapshot list. ParentClusterResourceOverrideSnapshotHashAnnotation = fleetPrefix + "parent-cluster-resource-override-snapshot-hash" diff --git a/apis/placement/v1beta1/envelope_types.go b/apis/placement/v1beta1/envelope_types.go index eebe296c6..4dc452247 100644 --- a/apis/placement/v1beta1/envelope_types.go +++ b/apis/placement/v1beta1/envelope_types.go @@ -19,6 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" ) // +genclient @@ -43,6 +44,17 @@ type ClusterResourceEnvelope struct { Data map[string]runtime.RawExtension `json:"data"` } +// ClusterResourceEnvelopeList contains a list of ClusterResourceEnvelope objects. +// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterResourceEnvelopeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of ClusterResourceEnvelope objects. + Items []ClusterResourceEnvelope `json:"items"` +} + // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true @@ -64,3 +76,76 @@ type ResourceEnvelope struct { // +kubebuilder:validation:MaxProperties=50 Data map[string]runtime.RawExtension `json:"data"` } + +// ResourceEnvelopeList contains a list of ResourceEnvelope objects. +// +kubebuilder:resource:scope=Namespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ResourceEnvelopeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of ResourceEnvelope objects. + Items []ResourceEnvelope `json:"items"` +} + +func init() { + SchemeBuilder.Register( + &ClusterResourceEnvelope{}, + &ClusterResourceEnvelopeList{}, + &ResourceEnvelope{}, + &ResourceEnvelopeList{}) +} + +// +kubebuilder:object:generate=false +// EnvelopeReader is an interface that allows retrieval of common information across all envelope CRs. +type EnvelopeReader interface { + // GetData returns the raw data in the envelope. + GetData() map[string]runtime.RawExtension + + // GetEnvelopeObjRef returns a klog object reference to the envelope. + GetEnvelopeObjRef() klog.ObjectRef + + // GetNamespace returns the namespace of the envelope. + GetNamespace() string + + // GetName returns the name of the envelope. + GetName() string + + // GetEnvelopeType returns the type of the envelope. + GetEnvelopeType() string +} + +// Ensure that both ClusterResourceEnvelope and ResourceEnvelope implement the +// EnvelopeReader interface at compile time. +var ( + _ EnvelopeReader = &ClusterResourceEnvelope{} + _ EnvelopeReader = &ResourceEnvelope{} +) + +// Implements the EnvelopeReader interface for ClusterResourceEnvelope. 
+ +func (e *ClusterResourceEnvelope) GetData() map[string]runtime.RawExtension { + return e.Data +} + +func (e *ClusterResourceEnvelope) GetEnvelopeObjRef() klog.ObjectRef { + return klog.KObj(e) +} + +func (e *ClusterResourceEnvelope) GetEnvelopeType() string { + return string(ClusterResourceEnvelopeType) +} + +// Implements the EnvelopeReader interface for ResourceEnvelope. + +func (e *ResourceEnvelope) GetData() map[string]runtime.RawExtension { + return e.Data +} + +func (e *ResourceEnvelope) GetEnvelopeObjRef() klog.ObjectRef { + return klog.KObj(e) +} + +func (e *ResourceEnvelope) GetEnvelopeType() string { + return string(ResourceEnvelopeType) +} diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index 9353405ed..c786e93a3 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -448,6 +448,38 @@ func (in *ClusterResourceEnvelope) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceEnvelopeList) DeepCopyInto(out *ClusterResourceEnvelopeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterResourceEnvelope, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceEnvelopeList. +func (in *ClusterResourceEnvelopeList) DeepCopy() *ClusterResourceEnvelopeList { + if in == nil { + return nil + } + out := new(ClusterResourceEnvelopeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceEnvelopeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterResourcePlacement) DeepCopyInto(out *ClusterResourcePlacement) { *out = *in @@ -1567,6 +1599,38 @@ func (in *ResourceEnvelope) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceEnvelopeList) DeepCopyInto(out *ResourceEnvelopeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceEnvelope, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceEnvelopeList. +func (in *ResourceEnvelopeList) DeepCopy() *ResourceEnvelopeList { + if in == nil { + return nil + } + out := new(ResourceEnvelopeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceEnvelopeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceIdentifier) DeepCopyInto(out *ResourceIdentifier) { *out = *in diff --git a/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceenvelopes.yaml b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceenvelopes.yaml new file mode 100644 index 000000000..9b00cfba7 --- /dev/null +++ b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_clusterresourceenvelopes.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: clusterresourceenvelopes.placement.kubernetes-fleet.io +spec: + group: placement.kubernetes-fleet.io + names: + categories: + - fleet + - fleet-placement + kind: ClusterResourceEnvelope + listKind: ClusterResourceEnvelopeList + plural: clusterresourceenvelopes + singular: clusterresourceenvelope + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ClusterResourceEnvelope wraps cluster-scoped resources for placement. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + additionalProperties: + type: object + x-kubernetes-preserve-unknown-fields: true + description: |- + The manifests wrapped in this envelope. + + Each manifest is uniquely identified by a string key, typically a filename that represents + the manifest. The value is the manifest object itself. + maxProperties: 50 + minProperties: 1 + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + required: + - data + type: object + served: true + storage: true diff --git a/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_resourceenvelopes.yaml b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_resourceenvelopes.yaml new file mode 100644 index 000000000..963e66c40 --- /dev/null +++ b/charts/hub-agent/templates/crds/placement.kubernetes-fleet.io_resourceenvelopes.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: resourceenvelopes.placement.kubernetes-fleet.io +spec: + group: placement.kubernetes-fleet.io + names: + categories: + - fleet + - fleet-placement + kind: ResourceEnvelope + listKind: ResourceEnvelopeList + plural: resourceenvelopes + singular: resourceenvelope + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ResourceEnvelope wraps namespaced resources for placement. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + additionalProperties: + type: object + x-kubernetes-preserve-unknown-fields: true + description: |- + The manifests wrapped in this envelope. + + Each manifest is uniquely identified by a string key, typically a filename that represents + the manifest. The value is the manifest object itself. + maxProperties: 50 + minProperties: 1 + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + required: + - data + type: object + served: true + storage: true diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml index 6967766fb..25e47d643 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourcebindings.yaml @@ -831,6 +831,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -944,6 +946,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -1107,6 +1111,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index a8a0933f9..360c0528f 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -2196,6 +2196,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -2311,6 +2313,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -2478,6 +2482,8 @@ spec: description: Type of the envelope object. enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name @@ -2537,6 +2543,8 @@ spec: description: Type of the envelope object. 
enum: - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope type: string required: - name diff --git a/examples/envelopes/namespacescoped.yaml b/examples/envelopes/namespacescoped.yaml index b261e0e99..2aedf6bd6 100644 --- a/examples/envelopes/namespacescoped.yaml +++ b/examples/envelopes/namespacescoped.yaml @@ -2,19 +2,22 @@ apiVersion: placement.kubernetes-fleet.io/v1beta1 kind: ResourceEnvelope metadata: name: example + namespace: app data: "cm.yaml": apiVersion: v1 kind: ConfigMap metadata: - name: app + name: config + namespace: app data: foo: bar "deploy.yaml": apiVersion: apps/v1 kind: Deployment metadata: - name: app + name: ingress + namespace: app spec: replicas: 1 selector: diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go index c6861e1ff..b3373837f 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector.go @@ -465,8 +465,11 @@ func (r *Reconciler) selectResourcesForPlacement(placement *fleetv1beta1.Cluster if err != nil { return 0, nil, nil, err } - if unstructuredObj.GetObjectKind().GroupVersionKind() == utils.ConfigMapGVK && - len(unstructuredObj.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) != 0 { + uGVK := unstructuredObj.GetObjectKind().GroupVersionKind().GroupKind() + switch { + case uGVK == utils.ClusterResourceEnvelopeGK: + envelopeObjCount++ + case uGVK == utils.ResourceEnvelopeGK: envelopeObjCount++ } resources[i] = *rc diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 24b6faf36..8772a2595 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -23,12 +23,10 @@ import ( "fmt" "sort" "strconv" - "strings" "time" "go.uber.org/atomic" "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -36,8 +34,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" @@ -480,16 +476,16 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be // generate work objects for each resource snapshot for i := range resourceSnapshots { snapshot := resourceSnapshots[i] - var newWork []*fleetv1beta1.Work workNamePrefix, err := getWorkNamePrefixFromSnapshotName(snapshot) if err != nil { klog.ErrorS(err, "Encountered a mal-formatted resource snapshot", "resourceSnapshot", klog.KObj(snapshot)) return false, false, err } var simpleManifests []fleetv1beta1.Manifest + var newWork []*fleetv1beta1.Work for j := range snapshot.Spec.SelectedResources { selectedResource := snapshot.Spec.SelectedResources[j].DeepCopy() - // TODO: override the content of the wrapped resource instead of the envelope itself + // TODO: apply the override rules on the envelope resources by applying them on the work instead of the selected resource resourceDeleted, overrideErr := r.applyOverrides(selectedResource, cluster, croMap, roMap) if overrideErr != nil { return false, false, overrideErr @@ -498,24 +494,23 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be klog.V(2).InfoS("The resource is 
deleted by the override rules", "snapshot", klog.KObj(snapshot), "selectedResource", snapshot.Spec.SelectedResources[j])
 continue
 }
- // we need to special treat configMap with envelopeConfigMapAnnotation annotation,
- // so we need to check the GVK and annotation of the selected resource
- var uResource unstructured.Unstructured
- if unMarshallErr := uResource.UnmarshalJSON(selectedResource.Raw); unMarshallErr != nil {
- klog.ErrorS(unMarshallErr, "work has invalid content", "snapshot", klog.KObj(snapshot), "selectedResource", selectedResource.Raw)
- return true, false, controller.NewUnexpectedBehaviorError(unMarshallErr)
- }
- if uResource.GetObjectKind().GroupVersionKind() == utils.ConfigMapGVK &&
- len(uResource.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) != 0 {
- // get a work object for the enveloped configMap
- work, err := r.getConfigMapEnvelopWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &uResource, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash)
- if err != nil {
- return true, false, err
- }
- activeWork[work.Name] = work
- newWork = append(newWork, work)
- } else {
- simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(*selectedResource))
+
+ // Process the selected resource.
+ //
+ // Specifically,
+ // a) if the selected resource is an envelope (configMap-based or envelope CR-based; the former will soon
+ // become obsolete), we will create a work object dedicated to the envelope;
+ // b) otherwise (the selected resource is a regular resource), the resource will be appended to the list of
+ // simple manifests.
+ //
+ // Note (chenyu1): this method is added to reduce the cyclomatic complexity of the syncAllWork method.
+ newWork, simpleManifests, err = r.processOneSelectedResource(
+ ctx, selectedResource, resourceBinding, snapshot,
+ workNamePrefix, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash,
+ activeWork, newWork, simpleManifests)
+ if err != nil {
+ klog.ErrorS(err, "Failed to process the selected resource", "snapshot", klog.KObj(snapshot), "selectedResourceIdx", j)
+ return true, false, err
+ }
 }
 if len(simpleManifests) == 0 {
@@ -571,6 +566,78 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be
 return true, updateAny.Load(), nil
 }
 
+// processOneSelectedResource processes a single selected resource from the resource snapshot.
+//
+// If the selected resource is an envelope (either configMap-based or envelope CR-based), create a new dedicated
+// work object for the envelope. Otherwise, append the selected resource to the list of simple manifests.
+func (r *Reconciler) processOneSelectedResource(
+ ctx context.Context,
+ selectedResource *fleetv1beta1.ResourceContent,
+ resourceBinding *fleetv1beta1.ClusterResourceBinding,
+ snapshot *fleetv1beta1.ClusterResourceSnapshot,
+ workNamePrefix, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string,
+ activeWork map[string]*fleetv1beta1.Work,
+ newWork []*fleetv1beta1.Work,
+ simpleManifests []fleetv1beta1.Manifest,
+) ([]*fleetv1beta1.Work, []fleetv1beta1.Manifest, error) {
+ // Unmarshal the raw JSON content into an unstructured object.
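+ //
+ // For illustration (hypothetical input): a selected resource whose raw JSON reads
+ // {"apiVersion": "placement.kubernetes-fleet.io/v1beta1", "kind": "ResourceEnvelope", ...}
+ // yields the group/kind ResourceEnvelope.placement.kubernetes-fleet.io and is routed to an
+ // envelope branch in the switch below, while a regular object (e.g., a Deployment) falls
+ // through to the default branch and ships as a simple manifest.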
+ var uResource unstructured.Unstructured
+ if unMarshallErr := uResource.UnmarshalJSON(selectedResource.Raw); unMarshallErr != nil {
+ klog.ErrorS(unMarshallErr, "Work has invalid content", "snapshot", klog.KObj(snapshot), "selectedResource", selectedResource.Raw)
+ return nil, nil, controller.NewUnexpectedBehaviorError(unMarshallErr)
+ }
+
+ uGVK := uResource.GetObjectKind().GroupVersionKind().GroupKind()
+ switch {
+ case uGVK == utils.ClusterResourceEnvelopeGK:
+ // The resource is a ClusterResourceEnvelope; extract its contents.
+ var clusterResourceEnvelope fleetv1beta1.ClusterResourceEnvelope
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uResource.Object, &clusterResourceEnvelope); err != nil {
+ klog.ErrorS(err, "Failed to convert the unstructured object to a ClusterResourceEnvelope",
+ "clusterResourceBinding", klog.KObj(resourceBinding),
+ "clusterResourceSnapshot", klog.KObj(snapshot),
+ "selectedResource", klog.KObj(&uResource))
+ return nil, nil, controller.NewUnexpectedBehaviorError(err)
+ }
+ work, err := r.createOrUpdateEnvelopeCRWorkObj(ctx, &clusterResourceEnvelope, workNamePrefix, resourceBinding, snapshot, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash)
+ if err != nil {
+ klog.ErrorS(err, "Failed to create or update the work object for the ClusterResourceEnvelope",
+ "clusterResourceEnvelope", klog.KObj(&clusterResourceEnvelope),
+ "clusterResourceBinding", klog.KObj(resourceBinding),
+ "clusterResourceSnapshot", klog.KObj(snapshot))
+ return nil, nil, err
+ }
+ activeWork[work.Name] = work
+ newWork = append(newWork, work)
+ case uGVK == utils.ResourceEnvelopeGK:
+ // The resource is a ResourceEnvelope; extract its contents.
+ var resourceEnvelope fleetv1beta1.ResourceEnvelope
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uResource.Object, &resourceEnvelope); err != nil {
+ klog.ErrorS(err, "Failed to convert the unstructured object to a ResourceEnvelope",
+ "clusterResourceBinding", klog.KObj(resourceBinding),
+ "clusterResourceSnapshot", klog.KObj(snapshot),
+ "selectedResource", klog.KObj(&uResource))
+ return nil, nil, controller.NewUnexpectedBehaviorError(err)
+ }
+ work, err := r.createOrUpdateEnvelopeCRWorkObj(ctx, &resourceEnvelope, workNamePrefix, resourceBinding, snapshot, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash)
+ if err != nil {
+ klog.ErrorS(err, "Failed to create or update the work object for the ResourceEnvelope",
+ "resourceEnvelope", klog.KObj(&resourceEnvelope),
+ "clusterResourceBinding", klog.KObj(resourceBinding),
+ "clusterResourceSnapshot", klog.KObj(snapshot))
+ return nil, nil, err
+ }
+ activeWork[work.Name] = work
+ newWork = append(newWork, work)
+
+ default:
+ // The resource is not an envelope; add it to the list of simple manifests.
+ simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(*selectedResource))
+ }
+
+ return newWork, simpleManifests, nil
+}
+
 // syncApplyStrategy syncs the apply strategy specified on a ClusterResourceBinding object
 // to a Work object.
 func (r *Reconciler) syncApplyStrategy(
@@ -630,91 +697,6 @@ func (r *Reconciler) fetchAllResourceSnapshots(ctx context.Context, resourceBind
 return controller.FetchAllClusterResourceSnapshots(ctx, r.Client, resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], &masterResourceSnapshot)
 }
 
-// getConfigMapEnvelopWorkObj first try to locate a work object for the corresponding envelopObj of type configMap.
-// we create a new one if the work object doesn't exist.
We do this to avoid repeatedly delete and create the same work object. -func (r *Reconciler) getConfigMapEnvelopWorkObj(ctx context.Context, workNamePrefix string, resourceBinding *fleetv1beta1.ClusterResourceBinding, - resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, envelopeObj *unstructured.Unstructured, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string) (*fleetv1beta1.Work, error) { - // we group all the resources in one configMap to one work - manifest, err := extractResFromConfigMap(envelopeObj) - if err != nil { - klog.ErrorS(err, "configMap has invalid content", "snapshot", klog.KObj(resourceSnapshot), - "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) - return nil, controller.NewUserError(err) - } - klog.V(2).InfoS("Successfully extract the enveloped resources from the configMap", "numOfResources", len(manifest), - "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) - - // Try to see if we already have a work represent the same enveloped object for this CRP in the same cluster - // The ParentResourceSnapshotIndexLabel can change between snapshots so we have to exclude that label in the match - envelopWorkLabelMatcher := client.MatchingLabels{ - fleetv1beta1.ParentBindingLabel: resourceBinding.Name, - fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], - fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), - fleetv1beta1.EnvelopeNameLabel: envelopeObj.GetName(), - fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(), - } - workList := &fleetv1beta1.WorkList{} - if err := r.Client.List(ctx, workList, envelopWorkLabelMatcher); err != nil { - return nil, controller.NewAPIServerError(true, err) - } - // we need to create a new work object - if len(workList.Items) == 0 { - // we limit the CRP name length to be 63 (DNS1123LabelMaxLength) characters, - // so we have plenty of characters left to fit into 253 (DNS1123SubdomainMaxLength) characters for a CR - workName := fmt.Sprintf(fleetv1beta1.WorkNameWithConfigEnvelopeFmt, workNamePrefix, uuid.NewUUID()) - return &fleetv1beta1.Work{ - ObjectMeta: metav1.ObjectMeta{ - Name: workName, - Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster), - Labels: map[string]string{ - fleetv1beta1.ParentBindingLabel: resourceBinding.Name, - fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], - fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], - fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), - fleetv1beta1.EnvelopeNameLabel: envelopeObj.GetName(), - fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(), - }, - Annotations: map[string]string{ - fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, - fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, - fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: fleetv1beta1.GroupVersion.String(), - Kind: resourceBinding.Kind, - Name: resourceBinding.Name, - UID: resourceBinding.UID, - BlockOwnerDeletion: ptr.To(true), // make sure that the k8s will call work delete when the binding is deleted - }, - }, - }, - Spec: fleetv1beta1.WorkSpec{ - Workload: 
fleetv1beta1.WorkloadTemplate{ - Manifests: manifest, - }, - ApplyStrategy: resourceBinding.Spec.ApplyStrategy, - }, - }, nil - } - if len(workList.Items) > 1 { - // return error here won't get us out of this - klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("find %d work representing configMap", len(workList.Items))), - "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) - } - work := workList.Items[0] - work.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel] - if work.Annotations == nil { - work.Annotations = make(map[string]string) - } - work.Annotations[fleetv1beta1.ParentResourceSnapshotNameAnnotation] = resourceBinding.Spec.ResourceSnapshotName - work.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation] = resourceOverrideSnapshotHash - work.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation] = clusterResourceOverrideSnapshotHash - work.Spec.Workload.Manifests = manifest - work.Spec.ApplyStrategy = resourceBinding.Spec.ApplyStrategy - return &work, nil -} - // generateSnapshotWorkObj generates the work object for the corresponding snapshot func generateSnapshotWorkObj(workName string, resourceBinding *fleetv1beta1.ClusterResourceBinding, resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, manifest []fleetv1beta1.Manifest, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string) *fleetv1beta1.Work { @@ -1261,46 +1243,6 @@ func setAllWorkAvailableCondition(works map[string]*fleetv1beta1.Work, binding * } } -func extractResFromConfigMap(uConfigMap *unstructured.Unstructured) ([]fleetv1beta1.Manifest, error) { - manifests := make([]fleetv1beta1.Manifest, 0) - var configMap corev1.ConfigMap - err := runtime.DefaultUnstructuredConverter.FromUnstructured(uConfigMap.Object, &configMap) - if err != nil { - return nil, err - } - // the list order is not stable as the map traverse is random - for key, value := range configMap.Data { - // so we need to check the GVK and annotation of the selected resource - content, jsonErr := yaml.ToJSON([]byte(value)) - if jsonErr != nil { - return nil, jsonErr - } - var uManifest unstructured.Unstructured - if unMarshallErr := uManifest.UnmarshalJSON(content); unMarshallErr != nil { - klog.ErrorS(unMarshallErr, "manifest has invalid content", "manifestKey", key, "envelopeResource", klog.KObj(uConfigMap)) - return nil, fmt.Errorf("the object with manifest key `%s` in envelope config `%s` is malformatted, err: %w", key, klog.KObj(uConfigMap), unMarshallErr) - } - if len(uManifest.GetNamespace()) == 0 { - // Block cluster-scoped resources. 
- return nil, fmt.Errorf("cannot wrap cluster-scoped resource %s in the envelope %s", uManifest.GetName(), klog.KObj(uConfigMap)) - } - if len(uManifest.GetNamespace()) != 0 && uManifest.GetNamespace() != configMap.Namespace { - return nil, fmt.Errorf("the namespaced object `%s` in envelope config `%s` is placed in a different namespace `%s` ", uManifest.GetName(), klog.KObj(uConfigMap), uManifest.GetNamespace()) - } - manifests = append(manifests, fleetv1beta1.Manifest{ - RawExtension: runtime.RawExtension{Raw: content}, - }) - } - // stable sort the manifests so that we can have a deterministic order - sort.Slice(manifests, func(i, j int) bool { - obj1 := manifests[i].Raw - obj2 := manifests[j].Raw - // order by its json formatted string - return strings.Compare(string(obj1), string(obj2)) > 0 - }) - return manifests, nil -} - // extractFailedResourcePlacementsFromWork extracts the failed resource placements from the work. func extractFailedResourcePlacementsFromWork(work *fleetv1beta1.Work) []fleetv1beta1.FailedResourcePlacement { appliedCond := meta.FindStatusCondition(work.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 0a035fd3a..92cfd5561 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -596,12 +596,13 @@ var _ = Describe("Test Work Generator Controller", func() { }) }) - Context("Test Bound ClusterResourceBinding with a single resource snapshot with envelop objects", func() { + Context("Test Bound ClusterResourceBinding with a single resource snapshot with namespaced envelop objects", func() { var masterSnapshot *placementv1beta1.ClusterResourceSnapshot - + envelopedResourceName := "namespaced-resource-envelop" + envelopedResourceNameSpace := "app" BeforeEach(func() { masterSnapshot = generateResourceSnapshot(1, 1, 0, [][]byte{ - testConfigMap, testEnvelopConfigMap, testResourceCRD, testNameSpace, + testConfigMap, testResourceEnvelop, testResourceCRD, testNameSpace, }) Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) By(fmt.Sprintf("master resource snapshot %s created", masterSnapshot.Name)) @@ -664,7 +665,7 @@ var _ = Describe("Test Work Generator Controller", func() { Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", work.Name, diff)) //inspect the envelope work var workList placementv1beta1.WorkList - fetchEnvelopedWork(&workList, binding) + fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) envWork := workList.Items[0] By(fmt.Sprintf("enveloped work %s is created in %s", envWork.Name, envWork.Namespace)) wantWork = placementv1beta1.Work{ @@ -684,9 +685,9 @@ var _ = Describe("Test Work Generator Controller", func() { placementv1beta1.CRPTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, placementv1beta1.ParentResourceSnapshotIndexLabel: "1", - placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ConfigMapEnvelopeType), - placementv1beta1.EnvelopeNameLabel: "envelop-configmap", - placementv1beta1.EnvelopeNamespaceLabel: "app", + placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ResourceEnvelopeType), + placementv1beta1.EnvelopeNameLabel: envelopedResourceName, + placementv1beta1.EnvelopeNamespaceLabel: envelopedResourceNameSpace, }, Annotations: 
map[string]string{ placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, @@ -697,7 +698,7 @@ var _ = Describe("Test Work Generator Controller", func() { Spec: placementv1beta1.WorkSpec{ Workload: placementv1beta1.WorkloadTemplate{ Manifests: []placementv1beta1.Manifest{ - {RawExtension: runtime.RawExtension{Raw: testEnvelopeResourceQuota}}, + {RawExtension: runtime.RawExtension{Raw: testResourceQuotaContent}}, }, }, }, @@ -719,10 +720,10 @@ var _ = Describe("Test Work Generator Controller", func() { It("Should modify the enveloped work object with the same name", func() { // make sure the enveloped work is created var workList placementv1beta1.WorkList - fetchEnvelopedWork(&workList, binding) + fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) // create a second snapshot with a modified enveloped object masterSnapshot = generateResourceSnapshot(2, 1, 0, [][]byte{ - testEnvelopConfigMap2, testResourceCRD, testNameSpace, + testResourceEnvelop2, testResourceCRD, testNameSpace, }) Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) By(fmt.Sprintf("another master resource snapshot %s created", masterSnapshot.Name)) @@ -788,7 +789,7 @@ var _ = Describe("Test Work Generator Controller", func() { diff := cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", work.Name, diff)) // check the enveloped work is updated - fetchEnvelopedWork(&workList, binding) + fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) work = workList.Items[0] By(fmt.Sprintf("envelope work %s is updated in %s", work.Name, work.Namespace)) //inspect the envelope work @@ -809,9 +810,9 @@ var _ = Describe("Test Work Generator Controller", func() { placementv1beta1.CRPTrackingLabel: testCRPName, placementv1beta1.ParentBindingLabel: binding.Name, placementv1beta1.ParentResourceSnapshotIndexLabel: "2", - placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ConfigMapEnvelopeType), - placementv1beta1.EnvelopeNameLabel: "envelop-configmap", - placementv1beta1.EnvelopeNamespaceLabel: "app", + placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ResourceEnvelopeType), + placementv1beta1.EnvelopeNameLabel: envelopedResourceName, + placementv1beta1.EnvelopeNamespaceLabel: envelopedResourceNameSpace, }, Annotations: map[string]string{ placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, @@ -822,7 +823,7 @@ var _ = Describe("Test Work Generator Controller", func() { Spec: placementv1beta1.WorkSpec{ Workload: placementv1beta1.WorkloadTemplate{ Manifests: []placementv1beta1.Manifest{ - {RawExtension: runtime.RawExtension{Raw: testEnvelopeResourceQuota2}}, + {RawExtension: runtime.RawExtension{Raw: testResourceQuota2Content}}, }, }, }, @@ -834,7 +835,178 @@ var _ = Describe("Test Work Generator Controller", func() { It("Should delete the enveloped work object in the target namespace after it's removed from snapshot", func() { // make sure the enveloped work is created var workList placementv1beta1.WorkList - fetchEnvelopedWork(&workList, binding) + fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) + By("create a second snapshot without an enveloped object") + // create a second snapshot without an enveloped 
object + masterSnapshot = generateResourceSnapshot(2, 1, 0, [][]byte{ + testResourceCRD, testNameSpace, + }) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + By(fmt.Sprintf("another master resource snapshot %s created", masterSnapshot.Name)) + // update binding + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + binding.Spec.ResourceSnapshotName = masterSnapshot.Name + Expect(k8sClient.Update(ctx, binding)).Should(Succeed()) + By(fmt.Sprintf("resource binding %s updated", binding.Name)) + updateRolloutStartedGeneration(&binding) + // check the binding status till the bound condition is true for the second binding generation + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding); err != nil { + return false + } + if binding.GetGeneration() <= 1 { + return false + } + // only check the work created status as the applied status reason changes depends on where the reconcile logic is + return condition.IsConditionStatusTrue( + meta.FindStatusCondition(binding.Status.Conditions, string(placementv1beta1.ResourceBindingWorkSynchronized)), binding.GetGeneration()) + }, timeout, interval).Should(BeTrue(), fmt.Sprintf("binding(%s) condition should be true", binding.Name)) + By(fmt.Sprintf("resource binding %s is reconciled", binding.Name)) + // check the enveloped work is deleted + Eventually(func() error { + envelopWorkLabelMatcher := client.MatchingLabels{ + placementv1beta1.ParentBindingLabel: binding.Name, + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ResourceEnvelopeType), + placementv1beta1.EnvelopeNameLabel: envelopedResourceName, + placementv1beta1.EnvelopeNamespaceLabel: envelopedResourceNameSpace, + } + if err := k8sClient.List(ctx, &workList, envelopWorkLabelMatcher); err != nil { + return err + } + if len(workList.Items) != 0 { + return fmt.Errorf("expect to not get any enveloped work but got %d", len(workList.Items)) + } + return nil + }, timeout, interval).Should(Succeed(), "Failed to delete the expected enveloped work in hub cluster") + }) + }) + + Context("Test Bound ClusterResourceBinding with a single resource snapshot with cluster scoped envelop objects", func() { + var masterSnapshot *placementv1beta1.ClusterResourceSnapshot + envelopedResourceName := "clusterscoped-resource-envelop" + envelopedResourceNameSpace := "" + BeforeEach(func() { + masterSnapshot = generateResourceSnapshot(1, 1, 0, [][]byte{ + testClusterScopedEnvelop, testResourceCRD, testNameSpace, + }) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + By(fmt.Sprintf("master resource snapshot %s created", masterSnapshot.Name)) + spec := placementv1beta1.ResourceBindingSpec{ + State: placementv1beta1.BindingStateBound, + ResourceSnapshotName: masterSnapshot.Name, + TargetCluster: memberClusterName, + } + createClusterResourceBinding(&binding, spec) + }) + + AfterEach(func() { + By("Deleting master clusterResourceSnapshot") + Expect(k8sClient.Delete(ctx, masterSnapshot)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + }) + + It("Should create enveloped work object in the target namespace with master resource snapshot only", func() { + // check the work that contains none enveloped object is created by now + work := placementv1beta1.Work{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), Namespace: 
memberClusterNamespaceName}, &work) + }, timeout, interval).Should(Succeed(), "Failed to get the expected work in hub cluster") + By(fmt.Sprintf("normal work %s is created in %s", work.Name, work.Namespace)) + //inspect the work + wantWork := placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(placementv1beta1.FirstWorkNameFmt, testCRPName), + Namespace: memberClusterNamespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: placementv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: ptr.To(true), + }, + }, + Labels: map[string]string{ + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.ParentBindingLabel: binding.Name, + placementv1beta1.ParentResourceSnapshotIndexLabel: "1", + }, + Annotations: map[string]string{ + placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, + placementv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: emptyHash, + placementv1beta1.ParentResourceOverrideSnapshotHashAnnotation: emptyHash, + }, + }, + Spec: placementv1beta1.WorkSpec{ + Workload: placementv1beta1.WorkloadTemplate{ + Manifests: []placementv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testResourceCRD}}, + {RawExtension: runtime.RawExtension{Raw: testNameSpace}}, + }, + }, + }, + } + diff := cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", work.Name, diff)) + //inspect the envelope work + var workList placementv1beta1.WorkList + fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ClusterResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) + envWork := workList.Items[0] + By(fmt.Sprintf("enveloped work %s is created in %s", envWork.Name, envWork.Namespace)) + wantWork = placementv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: envWork.Name, + Namespace: memberClusterNamespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: placementv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: ptr.To(true), + }, + }, + Labels: map[string]string{ + placementv1beta1.CRPTrackingLabel: testCRPName, + placementv1beta1.ParentBindingLabel: binding.Name, + placementv1beta1.ParentResourceSnapshotIndexLabel: "1", + placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ClusterResourceEnvelopeType), + placementv1beta1.EnvelopeNameLabel: envelopedResourceName, + placementv1beta1.EnvelopeNamespaceLabel: envelopedResourceNameSpace, + }, + Annotations: map[string]string{ + placementv1beta1.ParentResourceSnapshotNameAnnotation: binding.Spec.ResourceSnapshotName, + placementv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: emptyHash, + placementv1beta1.ParentResourceOverrideSnapshotHashAnnotation: emptyHash, + }, + }, + Spec: placementv1beta1.WorkSpec{ + Workload: placementv1beta1.WorkloadTemplate{ + Manifests: []placementv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testClusterRoleContent}}, + {RawExtension: runtime.RawExtension{Raw: testWebhookContent}}, + }, + }, + }, + } + diff = cmp.Diff(wantWork, envWork, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("enveloped work(%s) mismatch (-want +got):\n%s", envWork.Name, diff)) + // mark the enveloped work applied + markWorkApplied(&work) + markWorkApplied(&envWork) + // check the binding status that 
it should be marked as applied true eventually + verifyBindStatusAppliedNotAvailable(binding, false) + // mark the enveloped work available + markWorkAvailable(&work) + markWorkAvailable(&envWork) + // check the binding status that it should be marked as available true eventually + verifyBindStatusAvail(binding, false, false) + }) + + It("Should delete the enveloped work object in the target namespace after it's removed from snapshot", func() { + // make sure the enveloped work is created + var workList placementv1beta1.WorkList + fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ClusterResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) By("create a second snapshot without an enveloped object") // create a second snapshot without an enveloped object masterSnapshot = generateResourceSnapshot(2, 1, 0, [][]byte{ @@ -866,9 +1038,9 @@ var _ = Describe("Test Work Generator Controller", func() { envelopWorkLabelMatcher := client.MatchingLabels{ placementv1beta1.ParentBindingLabel: binding.Name, placementv1beta1.CRPTrackingLabel: testCRPName, - placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ConfigMapEnvelopeType), - placementv1beta1.EnvelopeNameLabel: "envelop-configmap", - placementv1beta1.EnvelopeNamespaceLabel: "app", + placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ClusterResourceEnvelopeType), + placementv1beta1.EnvelopeNameLabel: envelopedResourceName, + placementv1beta1.EnvelopeNamespaceLabel: envelopedResourceNameSpace, } if err := k8sClient.List(ctx, &workList, envelopWorkLabelMatcher); err != nil { return err @@ -4090,15 +4262,15 @@ func bindingStatusUpdatedActual( } } -func fetchEnvelopedWork(workList *placementv1beta1.WorkList, binding *placementv1beta1.ClusterResourceBinding) { +func fetchEnvelopedWork(workList *placementv1beta1.WorkList, binding *placementv1beta1.ClusterResourceBinding, envelopeType, envelopeName, envelopeNamespace string) { // try to locate the work that contains enveloped object Eventually(func() error { envelopWorkLabelMatcher := client.MatchingLabels{ placementv1beta1.ParentBindingLabel: binding.Name, placementv1beta1.CRPTrackingLabel: testCRPName, - placementv1beta1.EnvelopeTypeLabel: string(placementv1beta1.ConfigMapEnvelopeType), - placementv1beta1.EnvelopeNameLabel: "envelop-configmap", - placementv1beta1.EnvelopeNamespaceLabel: "app", + placementv1beta1.EnvelopeTypeLabel: envelopeType, + placementv1beta1.EnvelopeNameLabel: envelopeName, + placementv1beta1.EnvelopeNamespaceLabel: envelopeNamespace, } if err := k8sClient.List(ctx, workList, envelopWorkLabelMatcher); err != nil { return err diff --git a/pkg/controllers/workgenerator/controller_test.go b/pkg/controllers/workgenerator/controller_test.go index 4c8495edf..8f40d0d70 100644 --- a/pkg/controllers/workgenerator/controller_test.go +++ b/pkg/controllers/workgenerator/controller_test.go @@ -158,163 +158,6 @@ func TestGetWorkNamePrefixFromSnapshotName(t *testing.T) { } } -func TestExtractResFromConfigMap(t *testing.T) { - tests := map[string]struct { - uConfigMap *unstructured.Unstructured - want []fleetv1beta1.Manifest - wantErr bool - }{ - "valid config map with no entries is fine": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - }, - }, - want: []fleetv1beta1.Manifest{}, - wantErr: false, - }, - "config map with invalid JSON content should fail": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", 
- "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "invalid": "{invalid-json}", - }, - }, - }, - want: nil, - wantErr: true, - }, - "config map with namespaced resource in different namespace should fail": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "resource": `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod", "namespace": "other-namespace"}}`, - }, - }, - }, - want: nil, - wantErr: true, - }, - "config map with cluster scoped resource should fail": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "resource": `{"apiVersion": "admissionregistration.k8s.io/v1", "kind": "ValidatingWebhookConfiguration", "metadata": {"name": "test-webhook"}}`, - }, - }, - }, - want: nil, - wantErr: true, - }, - "config map with valid and invalid entries should fail": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "valid": `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod", "namespace": "default"}}`, - "invalid": "{invalid-json}", - }, - }, - }, - want: nil, - wantErr: true, - }, - "config map with cluster and namespace scoped data in the correct namespace should fail": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "resource": `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod", "namespace": "default"}}`, - "resource2": `{"apiVersion": "v1", "kind": "ClusterRole", "metadata": {"name": "test-role"}}`, - }, - }, - }, - want: nil, - wantErr: true, - }, - "config map with cluster scoped and cross namespaced resources data in a different namespace should fail": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "resource": `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod", "namespace": "not-default"}}`, - "resource2": `{"apiVersion": "v1", "kind": "ClusterRole", "metadata": {"name": "test-role"}}`, - }, - }, - }, - want: nil, - wantErr: true, - }, - "config map with valid entries in different order should be sorted to order": { - uConfigMap: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-config", - "namespace": "default", - }, - "data": map[string]interface{}{ - "resource2": `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod1", "namespace": "default"}}`, - "resource1": `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod2", "namespace": "default"}}`, - }, - }, - }, - want: []fleetv1beta1.Manifest{ - {RawExtension: 
runtime.RawExtension{Raw: []byte(`{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod2", "namespace": "default"}}`)}},
- {RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod1", "namespace": "default"}}`)}},
- },
- wantErr: false,
- },
- }
-
- for name, tt := range tests {
- t.Run(name, func(t *testing.T) {
- got, err := extractResFromConfigMap(tt.uConfigMap)
- if (err != nil) != tt.wantErr {
- t.Fatalf("extractResFromConfigMap() error = %v, wantErr %v", err, tt.wantErr)
- }
- if diff := cmp.Diff(tt.want, got); diff != "" {
- t.Errorf("extractResFromConfigMap() mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
 func TestUpsertWork(t *testing.T) {
 workName := "work"
 namespace := "default"
diff --git a/pkg/controllers/workgenerator/envelope.go b/pkg/controllers/workgenerator/envelope.go
new file mode 100644
index 000000000..22ef5897b
--- /dev/null
+++ b/pkg/controllers/workgenerator/envelope.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2025 The KubeFleet Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workgenerator
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/util/uuid"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
+ "github.com/kubefleet-dev/kubefleet/pkg/utils"
+ "github.com/kubefleet-dev/kubefleet/pkg/utils/controller"
+)
+
+// createOrUpdateEnvelopeCRWorkObj creates or updates a work object for a given envelope CR.
+func (r *Reconciler) createOrUpdateEnvelopeCRWorkObj(
+ ctx context.Context,
+ envelopeReader fleetv1beta1.EnvelopeReader,
+ workNamePrefix string,
+ resourceBinding *fleetv1beta1.ClusterResourceBinding,
+ resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
+ resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string,
+) (*fleetv1beta1.Work, error) {
+ manifests, err := extractManifestsFromEnvelopeCR(envelopeReader)
+ if err != nil {
+ klog.ErrorS(err, "Failed to extract manifests from the envelope spec",
+ "clusterResourceBinding", klog.KObj(resourceBinding),
+ "clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+ "envelope", envelopeReader.GetEnvelopeObjRef())
+ return nil, err
+ }
+ klog.V(2).InfoS("Successfully extracted wrapped manifests from the envelope",
+ "numOfResources", len(manifests),
+ "clusterResourceBinding", klog.KObj(resourceBinding),
+ "clusterResourceSnapshot", klog.KObj(resourceSnapshot),
+ "envelope", envelopeReader.GetEnvelopeObjRef())
+
+ // Check to see if a corresponding work object has been created for the envelope.
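+ //
+ // The lookup below matches on labels rather than on a fixed name, since each envelope work
+ // name embeds a random UUID (see the WorkNameWithEnvelopeCRFmt format used when a new work
+ // object is built). The parent resource snapshot index label is deliberately left out of the
+ // matcher: its value changes from snapshot to snapshot, while the same work object should be
+ // reused to avoid repeatedly deleting and recreating it.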
+ labelMatcher := client.MatchingLabels{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.EnvelopeTypeLabel: envelopeReader.GetEnvelopeType(), + fleetv1beta1.EnvelopeNameLabel: envelopeReader.GetName(), + fleetv1beta1.EnvelopeNamespaceLabel: envelopeReader.GetNamespace(), + } + workList := &fleetv1beta1.WorkList{} + if err = r.Client.List(ctx, workList, labelMatcher); err != nil { + klog.ErrorS(err, "Failed to list work objects when finding the work object for an envelope", + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(resourceSnapshot), + "envelope", envelopeReader.GetEnvelopeObjRef()) + wrappedErr := fmt.Errorf("failed to list work objects when finding the work object for an envelope %v: %w", envelopeReader.GetEnvelopeObjRef(), err) + return nil, controller.NewAPIServerError(true, wrappedErr) + } + + var work *fleetv1beta1.Work + switch { + case len(workList.Items) > 1: + // Multiple matching work objects found; this should never occur under normal conditions. + wrappedErr := fmt.Errorf("%d work objects found for the same envelope %v, only one expected", len(workList.Items), envelopeReader.GetEnvelopeObjRef()) + klog.ErrorS(wrappedErr, "Failed to create or update work object for envelope", + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(resourceSnapshot), + "envelope", envelopeReader.GetEnvelopeObjRef()) + return nil, controller.NewUnexpectedBehaviorError(wrappedErr) + case len(workList.Items) == 1: + klog.V(2).InfoS("Found existing work object for the envelope; updating it", + "work", klog.KObj(&workList.Items[0]), + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(resourceSnapshot), + "envelope", envelopeReader.GetEnvelopeObjRef()) + work = &workList.Items[0] + refreshWorkForEnvelopeCR(work, resourceBinding, resourceSnapshot, manifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + case len(workList.Items) == 0: + // No matching work object found; create a new one. + klog.V(2).InfoS("No existing work object found for the envelope; creating a new one", + "clusterResourceBinding", klog.KObj(resourceBinding), + "clusterResourceSnapshot", klog.KObj(resourceSnapshot), + "envelope", envelopeReader.GetEnvelopeObjRef()) + work = buildNewWorkForEnvelopeCR(workNamePrefix, resourceBinding, resourceSnapshot, envelopeReader, manifests, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash) + } + + return work, nil +} + +func extractManifestsFromEnvelopeCR(envelopeReader fleetv1beta1.EnvelopeReader) ([]fleetv1beta1.Manifest, error) { + manifests := make([]fleetv1beta1.Manifest, 0) + for k, v := range envelopeReader.GetData() { + // Verify if the wrapped manifests in the envelope are valid. 
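+ //
+ // For illustration (hypothetical entry): a data item such as
+ // "cm.yaml": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"name": "config", "namespace": "app"}}
+ // must both parse as a JSON object and pass the scope checks below; a value that fails to
+ // parse surfaces as an unexpected-behavior error, while a manifest in the wrong scope or in
+ // the wrong namespace is reported as a user error.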
+ var uObj unstructured.Unstructured
+ if unMarshallErr := uObj.UnmarshalJSON(v.Raw); unMarshallErr != nil {
+ klog.ErrorS(unMarshallErr, "Failed to parse the wrapped manifest data to a Kubernetes runtime object",
+ "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef())
+ wrappedErr := fmt.Errorf("failed to parse the wrapped manifest data to a Kubernetes runtime object (manifestKey=%s,envelopeObjRef=%v): %w", k, envelopeReader.GetEnvelopeObjRef(), unMarshallErr)
+ return nil, controller.NewUnexpectedBehaviorError(wrappedErr)
+ }
+ resRef := klog.KRef(uObj.GetNamespace(), uObj.GetName())
+ // Perform some basic validation to make sure that the envelope is used correctly.
+ switch {
+ // Check if a namespaced manifest has been wrapped in a cluster resource envelope.
+ case envelopeReader.GetEnvelopeType() == string(fleetv1beta1.ClusterResourceEnvelopeType) && uObj.GetNamespace() != "":
+ wrappedErr := fmt.Errorf("a namespaced object %s (%v) has been wrapped in a cluster resource envelope %s", k, resRef, envelopeReader.GetEnvelopeObjRef())
+ klog.ErrorS(wrappedErr, "Found an invalid manifest", "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef())
+ return nil, controller.NewUserError(wrappedErr)
+
+ // Check if a cluster-scoped manifest has been wrapped in a (namespaced) resource envelope.
+ case envelopeReader.GetEnvelopeType() == string(fleetv1beta1.ResourceEnvelopeType) && uObj.GetNamespace() == "":
+ wrappedErr := fmt.Errorf("a cluster-scoped object %s (%v) has been wrapped in a resource envelope %s", k, resRef, envelopeReader.GetEnvelopeObjRef())
+ klog.ErrorS(wrappedErr, "Found an invalid manifest", "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef())
+ return nil, controller.NewUserError(wrappedErr)
+
+ // Check if the namespace of the wrapped manifest matches the envelope's namespace.
+ case envelopeReader.GetNamespace() != uObj.GetNamespace():
+ wrappedErr := fmt.Errorf("a namespaced object %s (%v) has been wrapped in a resource envelope %v from another namespace", k, resRef, envelopeReader.GetEnvelopeObjRef())
+ klog.ErrorS(wrappedErr, "Found an invalid manifest", "manifestKey", k, "envelope", envelopeReader.GetEnvelopeObjRef())
+ return nil, controller.NewUserError(wrappedErr)
+ }
+
+ manifests = append(manifests, fleetv1beta1.Manifest{
+ RawExtension: v,
+ })
+ }
+
+ // Do a stable sort of the extracted manifests to ensure consistent, deterministic ordering.
+ sort.Slice(manifests, func(i, j int) bool {
+ obj1 := manifests[i].Raw
+ obj2 := manifests[j].Raw
+ // Order by the JSON-formatted string of each manifest.
+ return strings.Compare(string(obj1), string(obj2)) > 0
+ })
+ return manifests, nil
+}
+
+func refreshWorkForEnvelopeCR(
+ work *fleetv1beta1.Work,
+ resourceBinding *fleetv1beta1.ClusterResourceBinding,
+ resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
+ manifests []fleetv1beta1.Manifest,
+ resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string,
+) {
+ // Update the parent resource snapshot index label.
+ work.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel]
+
+ // Update the annotations.
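+ //
+ // Refreshing the parent resource snapshot name along with the override snapshot hashes keeps
+ // the reused work object consistent with the latest resource snapshot and override snapshots,
+ // so one work object per envelope can be kept up to date instead of being deleted and
+ // recreated on every snapshot change.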
+ if work.Annotations == nil { + work.Annotations = make(map[string]string) + } + work.Annotations[fleetv1beta1.ParentResourceSnapshotNameAnnotation] = resourceBinding.Spec.ResourceSnapshotName + work.Annotations[fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation] = resourceOverrideSnapshotHash + work.Annotations[fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation] = clusterResourceOverrideSnapshotHash + // Update the work spec (the manifests and the apply strategy). + work.Spec.Workload.Manifests = manifests + work.Spec.ApplyStrategy = resourceBinding.Spec.ApplyStrategy +} + +func buildNewWorkForEnvelopeCR( + workNamePrefix string, + resourceBinding *fleetv1beta1.ClusterResourceBinding, + resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, + envelopeReader fleetv1beta1.EnvelopeReader, + manifests []fleetv1beta1.Manifest, + resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string, +) *fleetv1beta1.Work { + workName := fmt.Sprintf(fleetv1beta1.WorkNameWithEnvelopeCRFmt, workNamePrefix, uuid.NewUUID()) + workNamespace := fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster) + + return &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: workNamespace, + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], + fleetv1beta1.EnvelopeTypeLabel: envelopeReader.GetEnvelopeType(), + fleetv1beta1.EnvelopeNameLabel: envelopeReader.GetName(), + fleetv1beta1.EnvelopeNamespaceLabel: envelopeReader.GetNamespace(), + }, + Annotations: map[string]string{ + fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, + fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: resourceOverrideSnapshotHash, + fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: clusterResourceOverrideSnapshotHash, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: resourceBinding.Kind, + Name: resourceBinding.Name, + UID: resourceBinding.UID, + // Make sure that the resource binding can only be deleted after + // all of its managed work objects have been deleted. + BlockOwnerDeletion: ptr.To(true), + }, + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: manifests, + }, + ApplyStrategy: resourceBinding.Spec.ApplyStrategy, + }, + } +} diff --git a/pkg/controllers/workgenerator/envelope_test.go b/pkg/controllers/workgenerator/envelope_test.go new file mode 100644 index 000000000..9c0fbdeb5 --- /dev/null +++ b/pkg/controllers/workgenerator/envelope_test.go @@ -0,0 +1,716 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workgenerator + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/test/utils/informer" +) + +func TestExtractManifestsFromEnvelopeCR(t *testing.T) { + tests := []struct { + name string + envelopeReader fleetv1beta1.EnvelopeReader + want []fleetv1beta1.Manifest + wantErr bool + }{ + { + name: "valid ResourceEnvelope with one resource", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "resource1": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + want: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + wantErr: false, + }, + { + name: "config map with valid and invalid entries should fail", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "valid": { + Raw: []byte(`"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "test-pod", "namespace": "default"}}`), + }, + "invalid": { + Raw: []byte("{invalid-json}"), + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "valid ClusterResourceEnvelope with one resource", + envelopeReader: &fleetv1beta1.ClusterResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-envelope", + }, + Data: map[string]runtime.RawExtension{ + "clusterrole1": { + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + want: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + wantErr: false, + }, + { + name: "envelope with multiple resources should have the right order", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-resource-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "resource1": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm1","namespace":"default"},"data":{"key1":"value1"}}`), + }, + "resource2": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm2","namespace":"default"},"data":{"key2":"value2"}}`), + }, + }, + }, + want: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm2","namespace":"default"},"data":{"key2":"value2"}}`), + }, + }, + { + RawExtension: runtime.RawExtension{ + Raw: 
[]byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm1","namespace":"default"},"data":{"key1":"value1"}}`), + }, + }, + }, + wantErr: false, + }, + { + name: "envelope with invalid resource JSON", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-resource-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "invalid": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{invalid_json}`), + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "empty envelope", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{}, + }, + want: []fleetv1beta1.Manifest{}, + wantErr: false, + }, + // New test cases for namespace mismatches + { + name: "ResourceEnvelope with manifest in a different namespace", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace-mismatch-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "resource1": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"other-namespace"},"data":{"key":"value"}}`), + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "ResourceEnvelope containing a cluster-scoped resource", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-resource-in-resource-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "resource1": { + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "ClusterResourceEnvelope with namespaced resource", + envelopeReader: &fleetv1beta1.ClusterResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespaced-in-cluster-envelope", + }, + Data: map[string]runtime.RawExtension{ + "resource1": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "ResourceEnvelope with mixed namespaced resources", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mixed-namespace-resources", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "resource1": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm1","namespace":"default"},"data":{"key1":"value1"}}`), + }, + "resource2": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm2","namespace":"other-namespace"},"data":{"key2":"value2"}}`), + }, + }, + }, + want: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := extractManifestsFromEnvelopeCR(tt.envelopeReader) + if (err != nil) != tt.wantErr { + t.Errorf("extractManifestsFromEnvelopeCR() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + // Use cmp.Diff for comparison + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("extractManifestsFromEnvelopeCR() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestCreateOrUpdateEnvelopeCRWorkObj(t *testing.T) { + ignoreWorkMeta := cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", 
"OwnerReferences") + scheme := serviceScheme(t) + + workNamePrefix := "test-work" + + resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-snapshot", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: "test-crp", + }, + }, + Spec: fleetv1beta1.ClusterResourceSnapshot{}.Spec, + } + resourceBinding := &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: "test-crp", + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + TargetCluster: "test-cluster-1", + ResourceSnapshotName: resourceSnapshot.Name, + }, + } + configMapData := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`) + resourceEnvelope := &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "configmap": { + Raw: configMapData, + }, + }, + } + + clusterroleData := []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`) + clusterResourceEnvelope := &fleetv1beta1.ClusterResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-envelope", + }, + Data: map[string]runtime.RawExtension{ + "clusterrole": { + Raw: clusterroleData, + }, + }, + } + + // Create an existing work for update test + existingWork := &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workNamePrefix, + Namespace: "test-app", + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ResourceEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: resourceEnvelope.Name, + fleetv1beta1.EnvelopeNamespaceLabel: resourceEnvelope.Namespace, + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"old-cm","namespace":"default"},"data":{"key":"old-value"}}`), + }, + }, + }, + }, + }, + } + + tests := []struct { + name string + envelopeReader fleetv1beta1.EnvelopeReader + resourceOverrideSnapshotHash string + clusterResourceOverrideSnapshotHash string + existingObjects []client.Object + want *fleetv1beta1.Work + wantErr bool + }{ + { + name: "create work for ResourceEnvelope", + envelopeReader: resourceEnvelope, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + existingObjects: []client.Object{}, + want: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster), + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ResourceEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: resourceEnvelope.Name, + fleetv1beta1.EnvelopeNamespaceLabel: resourceEnvelope.Namespace, + }, + Annotations: map[string]string{ + 
fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, + fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: "resource-hash", + fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: "cluster-resource-hash", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: configMapData}, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "create work for ClusterResourceEnvelope", + envelopeReader: clusterResourceEnvelope, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + existingObjects: []client.Object{}, + want: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster), + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ClusterResourceEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: clusterResourceEnvelope.Name, + fleetv1beta1.EnvelopeNamespaceLabel: "", + }, + Annotations: map[string]string{ + fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, + fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: "resource-hash", + fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: "cluster-resource-hash", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: clusterroleData}, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "update existing work for ResourceEnvelope", + envelopeReader: resourceEnvelope, + resourceOverrideSnapshotHash: "new-resource-hash", + clusterResourceOverrideSnapshotHash: "new-cluster-resource-hash", + existingObjects: []client.Object{existingWork}, + want: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-app", //copy from the existing work + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ResourceEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: resourceEnvelope.Name, + fleetv1beta1.EnvelopeNamespaceLabel: resourceEnvelope.Namespace, + }, + Annotations: map[string]string{ + fleetv1beta1.ParentResourceSnapshotNameAnnotation: resourceBinding.Spec.ResourceSnapshotName, + fleetv1beta1.ParentResourceOverrideSnapshotHashAnnotation: "new-resource-hash", + fleetv1beta1.ParentClusterResourceOverrideSnapshotHashAnnotation: "new-cluster-resource-hash", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: configMapData, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "error with malformed data in ResourceEnvelope", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "malformed-envelope", + Namespace: "default", + }, + Data: 
map[string]runtime.RawExtension{ + "malformed": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"bad-cm",invalid json}}`), + }, + }, + }, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + existingObjects: []client.Object{}, + want: nil, + wantErr: true, + }, + { + name: "error with ResourceEnvelope containing cluster-scoped object", + envelopeReader: &fleetv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-scope-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "clusterrole": { + Raw: []byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + existingObjects: []client.Object{}, + want: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake client with scheme + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tt.existingObjects...). + Build() + + // Create reconciler + r := &Reconciler{ + Client: fakeClient, + recorder: record.NewFakeRecorder(10), + InformerManager: &informer.FakeManager{}, + } + + // Call the function under test + got, err := r.createOrUpdateEnvelopeCRWorkObj(ctx, tt.envelopeReader, workNamePrefix, + resourceBinding, resourceSnapshot, tt.resourceOverrideSnapshotHash, tt.clusterResourceOverrideSnapshotHash) + + if (err != nil) != tt.wantErr { + t.Errorf("createOrUpdateEnvelopeCRWorkObj() error = %v, wantErr %v", err, tt.wantErr) + return + } + + // Use cmp.Diff for comparison + if diff := cmp.Diff(got, tt.want, ignoreWorkOption, ignoreWorkMeta, ignoreTypeMeta); diff != "" { + t.Errorf("createOrUpdateEnvelopeCRWorkObj() mismatch (-got +want):\n%s", diff) + } + }) + } +} + +// Test processOneSelectedResource with both envelope types +func TestProcessOneSelectedResource(t *testing.T) { + scheme := serviceScheme(t) + + workNamePrefix := "test-work" + resourceBinding := &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: "test-crp", + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + TargetCluster: "test-cluster", + }, + } + snapshot := &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-snapshot", + }, + } + + // Convert the envelope objects to ResourceContent + resourceEnvelopeContent := createResourceContent(t, &fleetv1beta1.ResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: fleetv1beta1.ResourceEnvelopeKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-resource-envelope", + Namespace: "default", + }, + Data: map[string]runtime.RawExtension{ + "configmap": { + Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-cm","namespace":"default"},"data":{"key":"value"}}`), + }, + }, + }) + + clusterResourceEnvelopeContent := createResourceContent(t, &fleetv1beta1.ClusterResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: "ClusterResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-envelope", + }, + Data: map[string]runtime.RawExtension{ + "clusterrole": { + Raw: 
[]byte(`{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"name":"test-role"},"rules":[{"apiGroups":[""],"resources":["pods"],"verbs":["get","list"]}]}`), + }, + }, + }) + + configMapEnvelopeContent := createResourceContent(t, &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config-map-envelope", + Namespace: "default", + Annotations: map[string]string{ + fleetv1beta1.EnvelopeConfigMapAnnotation: "true", + }, + }, + Data: map[string]string{ + "resource1": `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"cm1","namespace":"default"},"data":{"key1":"value1"}}`, + }, + }) + + // Regular resource content that's not an envelope + regularResourceContent := createResourceContent(t, &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "regular-config-map", + Namespace: "default", + }, + Data: map[string]string{ + "key": "value", + }, + }) + + tests := []struct { + name string + selectedResource *fleetv1beta1.ResourceContent + resourceOverrideSnapshotHash string + clusterResourceOverrideSnapshotHash string + wantNewWorkLen int + wantSimpleManifestsLen int + wantErr bool + }{ + { + name: "process ResourceEnvelope", + selectedResource: resourceEnvelopeContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 1, // Should create a new work + wantSimpleManifestsLen: 0, // Should not add to simple manifests + wantErr: false, + }, + { + name: "process ClusterResourceEnvelope", + selectedResource: clusterResourceEnvelopeContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 1, // Should create a new work + wantSimpleManifestsLen: 0, // Should not add to simple manifests + wantErr: false, + }, + { + name: "process ConfigMap envelope that we no longer support", + selectedResource: configMapEnvelopeContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 0, // Should create a new work + wantSimpleManifestsLen: 1, // Should not add to simple manifests + wantErr: false, + }, + { + name: "process regular resource", + selectedResource: regularResourceContent, + resourceOverrideSnapshotHash: "resource-hash", + clusterResourceOverrideSnapshotHash: "cluster-resource-hash", + wantNewWorkLen: 0, // Should NOT create a new work + wantSimpleManifestsLen: 1, // Should add to simple manifests + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake client with scheme + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + // Create reconciler + r := &Reconciler{ + Client: fakeClient, + recorder: record.NewFakeRecorder(10), + InformerManager: &informer.FakeManager{}, + } + + // Prepare input parameters + activeWork := make(map[string]*fleetv1beta1.Work) + newWork := make([]*fleetv1beta1.Work, 0) + simpleManifests := make([]fleetv1beta1.Manifest, 0) + + gotNewWork, gotSimpleManifests, err := r.processOneSelectedResource( + ctx, + tt.selectedResource, + resourceBinding, + snapshot, + workNamePrefix, + tt.resourceOverrideSnapshotHash, + tt.clusterResourceOverrideSnapshotHash, + activeWork, + newWork, + simpleManifests, + ) + + if (err != nil) != tt.wantErr { + t.Errorf("processOneSelectedResource() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if len(gotNewWork) != tt.wantNewWorkLen { + t.Errorf("processOneSelectedResource() returned %d new works, want %d", len(gotNewWork), tt.wantNewWorkLen) + } + + if len(gotSimpleManifests) != tt.wantSimpleManifestsLen { + t.Errorf("processOneSelectedResource() returned %d simple manifests, want %d", len(gotSimpleManifests), tt.wantSimpleManifestsLen) + } + + // Check active work got populated + if tt.wantNewWorkLen > 0 && len(activeWork) != tt.wantNewWorkLen { + t.Errorf("processOneSelectedResource() populated %d active works, want %d", len(activeWork), tt.wantNewWorkLen) + } + }) + } +} + +func createResourceContent(t *testing.T, obj runtime.Object) *fleetv1beta1.ResourceContent { + jsonData, err := json.Marshal(obj) + if err != nil { + t.Fatalf("Failed to marshal object: %v", err) + } + return &fleetv1beta1.ResourceContent{ + RawExtension: runtime.RawExtension{ + Raw: jsonData, + }, + } +} diff --git a/pkg/controllers/workgenerator/manifests/clusterrole.yaml b/pkg/controllers/workgenerator/manifests/clusterrole.yaml new file mode 100644 index 000000000..9bc9a3fbf --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pod-reader +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] \ No newline at end of file diff --git a/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml b/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml new file mode 100644 index 000000000..569b4e1a2 --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml @@ -0,0 +1,33 @@ +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ClusterResourceEnvelope +metadata: + name: clusterscoped-resource-envelop +data: + "webhook.yaml": + apiVersion: admissionregistration.k8s.io/v1 + kind: ValidatingWebhookConfiguration + metadata: + name: guard + webhooks: + - name: guard.example.com + rules: + - operations: ["CREATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*"] + clientConfig: + service: + name: guard + namespace: ops + admissionReviewVersions: ["v1"] + sideEffects: None + timeoutSeconds: 10 + "clusterrole.yaml": + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: pod-reader + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml b/pkg/controllers/workgenerator/manifests/test-resource-envelop.yaml similarity index 64% rename from pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml rename to pkg/controllers/workgenerator/manifests/test-resource-envelop.yaml index c88877618..6a10b2d51 100644 --- 
a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml +++ b/pkg/controllers/workgenerator/manifests/test-resource-envelop.yaml @@ -1,12 +1,10 @@ -apiVersion: v1 -kind: ConfigMap +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourceEnvelope metadata: - name: envelop-configmap + name: namespaced-resource-envelop namespace: app - annotations: - kubernetes-fleet.io/envelope-configmap: "true" data: - resourceQuota.yaml: | + "resourceQuota.yaml": apiVersion: v1 kind: ResourceQuota metadata: diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml b/pkg/controllers/workgenerator/manifests/test-resource-envelop2.yaml similarity index 64% rename from pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml rename to pkg/controllers/workgenerator/manifests/test-resource-envelop2.yaml index 3692ad470..ae3d0b8de 100644 --- a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml +++ b/pkg/controllers/workgenerator/manifests/test-resource-envelop2.yaml @@ -1,12 +1,10 @@ -apiVersion: v1 -kind: ConfigMap +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourceEnvelope metadata: - name: envelop-configmap + name: namespaced-resource-envelop namespace: app - annotations: - kubernetes-fleet.io/envelope-configmap: "true" data: - resourceQuota.yaml: | + "resourceQuota.yaml": apiVersion: v1 kind: ResourceQuota metadata: diff --git a/pkg/controllers/workgenerator/manifests/webhook.yaml b/pkg/controllers/workgenerator/manifests/webhook.yaml index e360fa859..b07d3dce6 100644 --- a/pkg/controllers/workgenerator/manifests/webhook.yaml +++ b/pkg/controllers/workgenerator/manifests/webhook.yaml @@ -1,29 +1,18 @@ apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration +kind: ValidatingWebhookConfiguration metadata: - labels: - azure-workload-identity.io/system: "true" - name: azure-wi-webhook-mutating-webhook-configuration + name: guard webhooks: -- admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: azure-wi-webhook-webhook-service - namespace: app - path: /mutate-v1-pod - failurePolicy: Fail - matchPolicy: Equivalent - name: mutation.azure-workload-identity.io - rules: - - apiGroups: - - "" - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - pods - sideEffects: None + - name: guard.example.com + rules: + - operations: ["CREATE"] + apiGroups: ["*"] + apiVersions: ["*"] + resources: ["*"] + clientConfig: + service: + name: guard + namespace: ops + admissionReviewVersions: ["v1"] + sideEffects: None + timeoutSeconds: 10 \ No newline at end of file diff --git a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go index c611074d6..699cdd5f1 100644 --- a/pkg/controllers/workgenerator/suite_test.go +++ b/pkg/controllers/workgenerator/suite_test.go @@ -59,13 +59,13 @@ var ( cancel context.CancelFunc // pre loaded test manifests - testResourceCRD, testNameSpace, testResource, testConfigMap, testEnvelopConfigMap, testEnvelopConfigMap2, testPdb []byte + testResourceCRD, testNameSpace, testResource, testConfigMap, testResourceEnvelop, testResourceEnvelop2, testClusterScopedEnvelop, testPdb []byte // want overridden manifest which is overridden by cro-1 and ro-1 wantOverriddenTestResource []byte // the content of the enveloped resources - testEnvelopeResourceQuota, testEnvelopeResourceQuota2 []byte + testResourceQuotaContent, testResourceQuota2Content, testWebhookContent, testClusterRoleContent []byte ) func TestAPIs(t *testing.T) { 
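The manifest conversions above and the test fixtures that follow all rely on the same wrapping pattern: each enveloped object is serialized to JSON and stored under its own key in the envelope's Data map of runtime.RawExtension values. A minimal sketch of that pattern, assuming the Data field shape used by the tests in this patch (the helper name wrapInResourceEnvelope is illustrative, not part of the change):

	import (
		"encoding/json"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/runtime"

		placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
	)

	// wrapInResourceEnvelope (hypothetical helper) marshals each object and
	// stores the raw JSON under its own key, mirroring how the suite below
	// builds its envelope fixtures.
	func wrapInResourceEnvelope(name, namespace string, objs map[string]runtime.Object) (*placementv1beta1.ResourceEnvelope, error) {
		envelope := &placementv1beta1.ResourceEnvelope{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
			Data:       make(map[string]runtime.RawExtension, len(objs)),
		}
		for key, obj := range objs {
			raw, err := json.Marshal(obj)
			if err != nil {
				return nil, err
			}
			envelope.Data[key] = runtime.RawExtension{Raw: raw}
		}
		return envelope, nil
	}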
@@ -347,16 +347,22 @@ func readTestManifests() { testConfigMap, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) - By("Read testEnvelopConfigMap resource") - rawByte, err = os.ReadFile("manifests/test-envelop-configmap.yaml") + By("Read testResourceEnvelop resource") + rawByte, err = os.ReadFile("manifests/test-resource-envelop.yaml") Expect(err).Should(Succeed()) - testEnvelopConfigMap, err = yaml.ToJSON(rawByte) + testResourceEnvelop, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) - By("Read testEnvelopConfigMap2 resource") - rawByte, err = os.ReadFile("manifests/test-envelop-configmap2.yaml") + By("Read testResourceEnvelop2 resource") + rawByte, err = os.ReadFile("manifests/test-resource-envelop2.yaml") Expect(err).Should(Succeed()) - testEnvelopConfigMap2, err = yaml.ToJSON(rawByte) + testResourceEnvelop2, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) + + By("Read testClusterScopedEnvelop resource") + rawByte, err = os.ReadFile("manifests/test-clusterscoped-envelop.yaml") + Expect(err).Should(Succeed()) + testClusterScopedEnvelop, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) By("Read PodDisruptionBudget") @@ -368,12 +374,24 @@ func readTestManifests() { By("Read ResourceQuota") rawByte, err = os.ReadFile("manifests/resourcequota.yaml") Expect(err).Should(Succeed()) - testEnvelopeResourceQuota, err = yaml.ToJSON(rawByte) + testResourceQuotaContent, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) By("Read ResourceQuota2") rawByte, err = os.ReadFile("manifests/resourcequota2.yaml") Expect(err).Should(Succeed()) - testEnvelopeResourceQuota2, err = yaml.ToJSON(rawByte) + testResourceQuota2Content, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) + + By("Read testWebhookContent") + rawByte, err = os.ReadFile("manifests/webhook.yaml") + Expect(err).Should(Succeed()) + testWebhookContent, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) + + By("Read testClusterRoleContent") + rawByte, err = os.ReadFile("manifests/clusterrole.yaml") + Expect(err).Should(Succeed()) + testClusterRoleContent, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) } diff --git a/pkg/utils/apiresources.go b/pkg/utils/apiresources.go index 2fc12b97b..03a7e167a 100644 --- a/pkg/utils/apiresources.go +++ b/pkg/utils/apiresources.go @@ -27,6 +27,7 @@ import ( metricsV1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" + placementv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/v1alpha1" ) @@ -60,6 +61,76 @@ var ( Kind: "TrafficManagerBackend", } + ClusterResourcePlacementGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourcePlacementKind, + } + + ClusterResourceBindingGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourceBindingKind, + } + + ClusterResourceSnapshotGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourceSnapshotKind, + } + + ClusterSchedulingPolicySnapshotGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterSchedulingPolicySnapshotKind, + } + + WorkGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.WorkKind, + } + + ClusterStagedUpdateRunGK = schema.GroupKind{ + 
Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterStagedUpdateRunKind, + } + + ClusterStagedUpdateStrategyGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterStagedUpdateStrategyKind, + } + + ClusterApprovalRequestGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterApprovalRequestKind, + } + + ClusterResourcePlacementEvictionGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourcePlacementEvictionKind, + } + + ClusterResourcePlacementDisruptionBudgetGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourcePlacementDisruptionBudgetKind, + } + + ClusterResourceOverrideGK = schema.GroupKind{ + Group: placementv1alpha1.GroupVersion.Group, + Kind: placementv1alpha1.ClusterResourceOverrideKind, + } + + ClusterResourceOverrideSnapshotGK = schema.GroupKind{ + Group: placementv1alpha1.GroupVersion.Group, + Kind: placementv1alpha1.ClusterResourceOverrideSnapshotKind, + } + + ResourceOverrideGK = schema.GroupKind{ + Group: placementv1alpha1.GroupVersion.Group, + Kind: placementv1alpha1.ResourceOverrideKind, + } + + ResourceOverrideSnapshotGK = schema.GroupKind{ + Group: placementv1alpha1.GroupVersion.Group, + Kind: placementv1alpha1.ResourceOverrideSnapshotKind, + } + // we use `;` to separate the different api groups apiGroupSepToken = ";" ) @@ -92,22 +163,43 @@ func NewResourceConfig(isAllowList bool) *ResourceConfig { if r.isAllowList { return r } - // disable fleet related resource by default + // TODO: remove after we remove v1alpha1 support + // disable v1alpha1 related resources by default r.AddGroup(fleetv1alpha1.GroupVersion.Group) - r.AddGroup(placementv1beta1.GroupVersion.Group) - r.AddGroup(clusterv1beta1.GroupVersion.Group) r.AddGroupVersionKind(WorkV1Alpha1GVK) + // disable cluster group by default + r.AddGroup(clusterv1beta1.GroupVersion.Group) + + // disable some fleet networking resources + r.AddGroupKind(serviceImportGK) + r.AddGroupKind(trafficManagerProfileGK) + r.AddGroupKind(trafficManagerBackendGK) + + // disable all fleet placement resources except for the envelope type + r.AddGroupKind(ClusterResourcePlacementGK) + r.AddGroupKind(ClusterResourceBindingGK) + r.AddGroupKind(ClusterResourceSnapshotGK) + r.AddGroupKind(ClusterSchedulingPolicySnapshotGK) + r.AddGroupKind(WorkGK) + r.AddGroupKind(ClusterStagedUpdateRunGK) + r.AddGroupKind(ClusterStagedUpdateStrategyGK) + r.AddGroupKind(ClusterApprovalRequestGK) + r.AddGroupKind(ClusterResourcePlacementEvictionGK) + r.AddGroupKind(ClusterResourcePlacementDisruptionBudgetGK) + // Add v1alpha1 resources to skip to not break when we move them to v1beta1 + r.AddGroupKind(ClusterResourceOverrideGK) + r.AddGroupKind(ClusterResourceOverrideSnapshotGK) + r.AddGroupKind(ResourceOverrideGK) + r.AddGroupKind(ResourceOverrideSnapshotGK) + // disable the below built-in resources r.AddGroup(eventsv1.GroupName) r.AddGroup(coordv1.GroupName) r.AddGroup(metricsV1beta1.GroupName) r.AddGroupVersionKind(corev1PodGVK) r.AddGroupVersionKind(corev1NodeGVK) - // disable networking resources - r.AddGroupKind(serviceImportGK) - r.AddGroupKind(trafficManagerProfileGK) - r.AddGroupKind(trafficManagerBackendGK) + return r } diff --git a/pkg/utils/common.go b/pkg/utils/common.go index fc720cf7b..5a6b0109b 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -24,7 +24,6 @@ import ( "strings" "time" - admissionregistrationv1 
"k8s.io/api/admissionregistration/v1" appv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -146,7 +145,7 @@ var ( } ) -// Those are the GVR/GVK of the fleet related resources. +// Those are the GVR/GVKs in use by Fleet source code. var ( ClusterResourcePlacementV1Alpha1GVK = schema.GroupVersionKind{ Group: fleetv1alpha1.GroupVersion.Group, @@ -292,12 +291,6 @@ var ( Kind: "MemberCluster", } - MutatingWebhookConfigurationGVR = schema.GroupVersionResource{ - Group: admissionregistrationv1.SchemeGroupVersion.Group, - Version: admissionregistrationv1.SchemeGroupVersion.Version, - Resource: "mutatingwebhookconfigurations", - } - NamespaceMetaGVK = metav1.GroupVersionKind{ Group: corev1.GroupName, Version: corev1.SchemeGroupVersion.Version, @@ -400,12 +393,6 @@ var ( Kind: "Work", } - ValidatingWebhookConfigurationGVR = schema.GroupVersionResource{ - Group: admissionregistrationv1.SchemeGroupVersion.Group, - Version: admissionregistrationv1.SchemeGroupVersion.Version, - Resource: "validatingwebhookconfigurations", - } - ClusterResourceOverrideSnapshotKind = schema.GroupVersionKind{ Group: placementv1alpha1.GroupVersion.Group, Version: placementv1alpha1.GroupVersion.Version, @@ -501,6 +488,16 @@ var ( Version: corev1.SchemeGroupVersion.Version, Kind: "PersistentVolumeClaim", } + + ClusterResourceEnvelopeGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourceEnvelopeKind, + } + + ResourceEnvelopeGK = schema.GroupKind{ + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + } ) // RandSecureInt returns a uniform random value in [1, max] or panic. diff --git a/pkg/utils/common_test.go b/pkg/utils/common_test.go index 1630ce310..483a06a42 100644 --- a/pkg/utils/common_test.go +++ b/pkg/utils/common_test.go @@ -51,7 +51,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -111,7 +111,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -139,7 +139,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -161,7 +161,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -185,7 +185,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -207,7 +207,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: 
fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -235,7 +235,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -257,7 +257,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -281,7 +281,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -303,7 +303,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -331,7 +331,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -391,7 +391,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -419,7 +419,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -479,7 +479,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -507,7 +507,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -549,7 +549,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -577,7 +577,7 @@ func TestIsFailedResourcePlacementsEqual(t *testing.T) { Envelope: &fleetv1beta1.EnvelopeIdentifier{ Name: "test-envelope-object", Namespace: "default", - Type: fleetv1beta1.ConfigMapEnvelopeType, + Type: fleetv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ diff --git a/pkg/utils/informer/informermanager.go b/pkg/utils/informer/informermanager.go index d4275e7c9..b36e265a6 100644 --- a/pkg/utils/informer/informermanager.go +++ b/pkg/utils/informer/informermanager.go @@ -173,7 +173,7 @@ func 
(s *informerManagerImpl) AddStaticResource(resource APIResourceMeta, handle if exist { klog.ErrorS(fmt.Errorf("a static resource is added already"), "existing res", staticRes) } - + klog.InfoS("Added an informer for a static resource", "res", resource) resource.isStaticResource = true s.apiResources[resource.GroupVersionKind] = &resource _, _ = s.informerFactory.ForResource(resource.GroupVersionResource).Informer().AddEventHandler(handler) diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 3339fa304..5327d18ba 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -28,6 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,17 +40,20 @@ import ( "github.com/kubefleet-dev/kubefleet/test/e2e/framework" ) -var ( - // pre loaded test manifests - testConfigMap, testEnvelopConfigMap corev1.ConfigMap - testEnvelopeResourceQuota corev1.ResourceQuota -) - const ( - wrapperCMName = "wrapper" + envelopResourceName = "envelop-wrapper" + cmDataKey = "foo" + cmDataVal = "bar" +) - cmDataKey = "foo" - cmDataVal = "bar" +var ( + // pre loaded test manifests + testConfigMap corev1.ConfigMap + testResourceQuota corev1.ResourceQuota + testDeployment appv1.Deployment + testClusterRole rbacv1.ClusterRole + testResourceEnvelope placementv1beta1.ResourceEnvelope + testClusterResourceEnvelope placementv1beta1.ClusterResourceEnvelope ) // Note that this container will run in parallel with other containers. @@ -61,31 +65,12 @@ var _ = Describe("placing wrapped resources using a CRP", func() { BeforeAll(func() { // Create the test resources. 
+ By("Create the test resources in the namespace") readEnvelopTestManifests() - wantSelectedResources = []placementv1beta1.ResourceIdentifier{ - { - Kind: "Namespace", - Name: workNamespaceName, - Version: "v1", - }, - { - Kind: "ConfigMap", - Name: testConfigMap.Name, - Version: "v1", - Namespace: workNamespaceName, - }, - { - Kind: "ConfigMap", - Name: testEnvelopConfigMap.Name, - Version: "v1", - Namespace: workNamespaceName, - }, - } + createWrappedResourcesForEnvelopTest() }) - It("Create the test resources in the namespace", createWrappedResourcesForEnvelopTest) - - It("Create the CRP that select the name space", func() { + It("Create the CRP that select the name space that contains wrapper and clusterresourceenvelope", func() { crp := &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, @@ -94,7 +79,20 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), + ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: "ClusterResourceEnvelope", + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, + }, + }, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -107,60 +105,86 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) It("should update CRP status as expected", func() { - // resourceQuota is enveloped so it's not trackable yet + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Kind: "ConfigMap", + Name: testConfigMap.Name, + Version: "v1", + Namespace: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: "ClusterResourceEnvelope", + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testResourceEnvelope.Name, + Namespace: workNamespaceName, + }, + } crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("should place the resources on all member clusters", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopQuotaPlacement(memberCluster) + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) - It("Update the envelop configMap with bad configuration", func() { + It("Update the resource envelope with bad configuration", func() { // modify the embedded namespaced resource to add a scope but it will be rejected as its immutable 
- badEnvelopeResourceQuota := testEnvelopeResourceQuota.DeepCopy() + badEnvelopeResourceQuota := testResourceQuota.DeepCopy() badEnvelopeResourceQuota.Spec.Scopes = []corev1.ResourceQuotaScope{ corev1.ResourceQuotaScopeNotBestEffort, corev1.ResourceQuotaScopeNotTerminating, } badResourceQuotaByte, err := json.Marshal(badEnvelopeResourceQuota) Expect(err).Should(Succeed()) - // Get the config map. - Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testEnvelopConfigMap.Name}, &testEnvelopConfigMap)).To(Succeed(), "Failed to get config map") - testEnvelopConfigMap.Data["resourceQuota.yaml"] = string(badResourceQuotaByte) - Expect(hubClient.Update(ctx, &testEnvelopConfigMap)).To(Succeed(), "Failed to update the enveloped config map") + // Get the resource envelope + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testResourceEnvelope.Name}, &testResourceEnvelope)).To(Succeed(), "Failed to get the resourceEnvelope") + testResourceEnvelope.Data["resourceQuota.yaml"] = runtime.RawExtension{Raw: badResourceQuotaByte} + Expect(hubClient.Update(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to update the enveloped resource") }) It("should update CRP status with failed to apply resourceQuota", func() { // rolloutStarted is false, but other conditions are true. // "The rollout is being blocked by the rollout strategy in 2 cluster(s)", crpStatusUpdatedActual := checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources) - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("Update the envelop configMap back with good configuration", func() { - // Get the config map. 
- Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testEnvelopConfigMap.Name}, &testEnvelopConfigMap)).To(Succeed(), "Failed to get config map") - resourceQuotaByte, err := json.Marshal(testEnvelopeResourceQuota) + // Get the resource envelope + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testResourceEnvelope.Name}, &testResourceEnvelope)).To(Succeed(), "Failed to get the resourceEnvelope") + // update the resource envelope with a valid resourceQuota + resourceQuotaByte, err := json.Marshal(testResourceQuota) Expect(err).Should(Succeed()) - testEnvelopConfigMap.Data["resourceQuota.yaml"] = string(resourceQuotaByte) - Expect(hubClient.Update(ctx, &testEnvelopConfigMap)).To(Succeed(), "Failed to update the enveloped config map") + testResourceEnvelope.Data["resourceQuota.yaml"] = runtime.RawExtension{Raw: resourceQuotaByte} + Expect(hubClient.Update(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to update the enveloped resource") }) It("should update CRP status as success again", func() { crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", true) - Eventually(crpStatusUpdatedActual, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("should place the resources on all member clusters again", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopQuotaPlacement(memberCluster) - Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) + Eventually(workResourcesPlacedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) @@ -181,6 +205,9 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) AfterAll(func() { + By(fmt.Sprintf("deleting envelop %s", testResourceEnvelope.Name)) + Expect(hubClient.Delete(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to delete ResourceEnvelope") + Expect(hubClient.Delete(ctx, &testClusterResourceEnvelope)).To(Succeed(), "Failed to delete testClusterResourceEnvelope") By(fmt.Sprintf("deleting placement %s and related resources", crpName)) ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) @@ -190,30 +217,15 @@ var _ = Describe("placing wrapped resources using a CRP", func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespace := appNamespace() var wantSelectedResources []placementv1beta1.ResourceIdentifier - var testDeployment appv1.Deployment var testDaemonSet appv1.DaemonSet var testStatefulSet appv1.StatefulSet - var testEnvelopeConfig corev1.ConfigMap BeforeAll(func() { // read the test resources. 
readDeploymentTestManifest(&testDeployment) readDaemonSetTestManifest(&testDaemonSet) readStatefulSetTestManifest(&testStatefulSet, true) - readEnvelopeConfigMapTestManifest(&testEnvelopeConfig) - wantSelectedResources = []placementv1beta1.ResourceIdentifier{ - { - Kind: utils.NamespaceKind, - Name: workNamespace.Name, - Version: corev1.SchemeGroupVersion.Version, - }, - { - Kind: utils.ConfigMapKind, - Name: testEnvelopeConfig.Name, - Version: corev1.SchemeGroupVersion.Version, - Namespace: workNamespace.Name, - }, - } + readEnvelopeResourceTestManifest(&testResourceEnvelope) }) It("Create the namespace", func() { @@ -221,11 +233,11 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) It("Create the wrapped resources in the namespace", func() { - testEnvelopeConfig.Data = make(map[string]string) - constructWrappedResources(&testEnvelopeConfig, &testDeployment, utils.DeploymentKind, workNamespace) - constructWrappedResources(&testEnvelopeConfig, &testDaemonSet, utils.DaemonSetKind, workNamespace) - constructWrappedResources(&testEnvelopeConfig, &testStatefulSet, utils.StatefulSetKind, workNamespace) - Expect(hubClient.Create(ctx, &testEnvelopeConfig)).To(Succeed(), "Failed to create testEnvelop object %s containing workloads", testEnvelopeConfig.Name) + testResourceEnvelope.Data = make(map[string]runtime.RawExtension) + constructWrappedResources(&testResourceEnvelope, &testDeployment, utils.DeploymentKind, workNamespace) + constructWrappedResources(&testResourceEnvelope, &testDaemonSet, utils.DaemonSetKind, workNamespace) + constructWrappedResources(&testResourceEnvelope, &testStatefulSet, utils.StatefulSetKind, workNamespace) + Expect(hubClient.Create(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to create testEnvelop object %s containing workloads", testResourceEnvelope.Name) }) It("Create the CRP that select the namespace", func() { @@ -257,9 +269,9 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Name: testStatefulSet.Name, Namespace: testStatefulSet.Namespace, Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: testEnvelopeConfig.Name, + Name: testResourceEnvelope.Name, Namespace: workNamespace.Name, - Type: placementv1beta1.ConfigMapEnvelopeType, + Type: placementv1beta1.ResourceEnvelopeType, }, } // We only expect the statefulset to not be available all the clusters @@ -319,6 +331,20 @@ var _ = Describe("placing wrapped resources using a CRP", func() { } PlacementStatuses = append(PlacementStatuses, unavailableResourcePlacementStatus) } + wantSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: utils.NamespaceKind, + Name: workNamespace.Name, + Version: corev1.SchemeGroupVersion.Version, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testResourceEnvelope.Name, + Namespace: workNamespace.Name, + }, + } wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ Conditions: crpNotAvailableConditions(1, false), PlacementStatuses: PlacementStatuses, @@ -340,17 +366,17 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) AfterAll(func() { + By(fmt.Sprintf("deleting envelop %s", testResourceEnvelope.Name)) + Expect(hubClient.Delete(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to delete ResourceEnvelope") By(fmt.Sprintf("deleting placement %s and related resources", crpName)) ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) }) - Context("Block envelopes that wrap 
cluster-scoped resources", Ordered, func() { + Context("Block envelopeResource that wrap cluster-scoped resources", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - - wrappedCMName := "app" - wrappedCBName := "standard" + var envelopWrapper *placementv1beta1.ResourceEnvelope BeforeAll(func() { // Use an envelope to create duplicate resource entries. @@ -358,42 +384,39 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) // Create an envelope config map. - wrapperCM := &corev1.ConfigMap{ + envelopWrapper = &placementv1beta1.ResourceEnvelope{ ObjectMeta: metav1.ObjectMeta{ - Name: wrapperCMName, + Name: envelopResourceName, Namespace: ns.Name, - Annotations: map[string]string{ - placementv1beta1.EnvelopeConfigMapAnnotation: "true", - }, }, - Data: map[string]string{}, + Data: make(map[string]runtime.RawExtension), } // Create a configMap and a clusterRole as wrapped resources. - wrappedCM := &corev1.ConfigMap{ + configMap := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ APIVersion: corev1.SchemeGroupVersion.String(), Kind: "ConfigMap", }, ObjectMeta: metav1.ObjectMeta{ Namespace: ns.Name, - Name: wrappedCMName, + Name: "config", }, Data: map[string]string{ cmDataKey: cmDataVal, }, } - wrappedCMBytes, err := json.Marshal(wrappedCM) - Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM.Name) - wrapperCM.Data["cm.yaml"] = string(wrappedCMBytes) + configMapBytes, err := json.Marshal(configMap) + Expect(err).To(BeNil(), "Failed to marshal configMap %s", configMap.Name) + envelopWrapper.Data["cm.yaml"] = runtime.RawExtension{Raw: configMapBytes} - wrappedCB := &rbacv1.ClusterRole{ + clusterRole := &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{ APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: "ClusterRole", }, ObjectMeta: metav1.ObjectMeta{ - Name: wrappedCBName, + Name: "clusterRole", }, Rules: []rbacv1.PolicyRule{ { @@ -403,11 +426,11 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }, }, } - wrappedCBBytes, err := json.Marshal(wrappedCB) - Expect(err).To(BeNil(), "Failed to marshal clusterRole %s", wrappedCB.Name) - wrapperCM.Data["cb.yaml"] = string(wrappedCBBytes) + clusterRoleBytes, err := json.Marshal(clusterRole) + Expect(err).To(BeNil(), "Failed to marshal clusterRole %s", clusterRole.Name) + envelopWrapper.Data["cb.yaml"] = runtime.RawExtension{Raw: clusterRoleBytes} - Expect(hubClient.Create(ctx, wrapperCM)).To(Succeed(), "Failed to create configMap %s", wrapperCM.Name) + Expect(hubClient.Create(ctx, envelopWrapper)).To(Succeed(), "Failed to create wrapper %s", envelopWrapper.Name) // Create a CRP. crp := &placementv1beta1.ClusterResourcePlacement{ @@ -458,9 +481,10 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Version: "v1", }, { - Kind: "ConfigMap", - Name: wrapperCMName, - Version: "v1", + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: envelopResourceName, Namespace: workNamespaceName, }, }, @@ -478,6 +502,8 @@ var _ = Describe("placing wrapped resources using a CRP", func() { // either. 
AfterAll(func() { + By(fmt.Sprintf("deleting envelop %s", envelopWrapper.Name)) + Expect(hubClient.Delete(ctx, envelopWrapper)).To(Succeed(), "Failed to delete ResourceEnvelope") // Remove the CRP and the namespace from the hub cluster. ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) }) @@ -490,6 +516,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { nsGenerateName := "application-" wrappedCMGenerateName := "wrapped-foo-" + var envelop *placementv1beta1.ResourceEnvelope BeforeAll(func() { // Create the namespace with both name and generate name set. @@ -497,16 +524,13 @@ ns.GenerateName = nsGenerateName Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) - // Create an envelope config map. - cm := &corev1.ConfigMap{ + // Create an envelope. + envelop = &placementv1beta1.ResourceEnvelope{ ObjectMeta: metav1.ObjectMeta{ - Name: wrapperCMName, + Name: envelopResourceName, Namespace: ns.Name, - Annotations: map[string]string{ - placementv1beta1.EnvelopeConfigMapAnnotation: "true", - }, }, - Data: map[string]string{}, + Data: map[string]runtime.RawExtension{}, } wrappedCM := &corev1.ConfigMap{ @@ -524,8 +548,8 @@ } wrappedCMByte, err := json.Marshal(wrappedCM) Expect(err).Should(BeNil()) - cm.Data["wrapped.yaml"] = string(wrappedCMByte) - Expect(hubClient.Create(ctx, cm)).To(Succeed(), "Failed to create config map %s", cm.Name) + envelop.Data["wrapped.yaml"] = runtime.RawExtension{Raw: wrappedCMByte} + Expect(hubClient.Create(ctx, envelop)).To(Succeed(), "Failed to create resource envelope %s", envelop.Name) // Create a CRP that selects the namespace.
crp := &placementv1beta1.ClusterResourcePlacement{ @@ -573,9 +597,9 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Namespace: workNamespaceName, Version: "v1", Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: wrapperCMName, + Name: envelopResourceName, Namespace: workNamespaceName, - Type: placementv1beta1.ConfigMapEnvelopeType, + Type: placementv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -596,9 +620,10 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Version: "v1", }, { - Kind: "ConfigMap", - Name: wrapperCMName, - Version: "v1", + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: envelopResourceName, Namespace: workNamespaceName, }, }, @@ -634,41 +659,13 @@ var _ = Describe("Process objects with generate name", Ordered, func() { }) AfterAll(func() { + By(fmt.Sprintf("deleting envelop %s", envelop.Name)) + Expect(hubClient.Delete(ctx, envelop)).To(Succeed(), "Failed to delete ResourceEnvelope") By(fmt.Sprintf("deleting placement %s and related resources", crpName)) ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) }) -func checkEnvelopQuotaPlacement(memberCluster *framework.Cluster) func() error { - workNamespaceName := appNamespace().Name - return func() error { - if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { - return err - } - By("check the placedConfigMap") - placedConfigMap := &corev1.ConfigMap{} - if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testConfigMap.Name}, placedConfigMap); err != nil { - return err - } - hubConfigMap := &corev1.ConfigMap{} - if err := hubCluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testConfigMap.Name}, hubConfigMap); err != nil { - return err - } - if diff := cmp.Diff(placedConfigMap.Data, hubConfigMap.Data); diff != "" { - return fmt.Errorf("configmap diff (-got, +want): %s", diff) - } - By("check the namespaced envelope objects") - placedResourceQuota := &corev1.ResourceQuota{} - if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testEnvelopeResourceQuota.Name}, placedResourceQuota); err != nil { - return err - } - if diff := cmp.Diff(placedResourceQuota.Spec, testEnvelopeResourceQuota.Spec); diff != "" { - return fmt.Errorf("resource quota diff (-got, +want): %s", diff) - } - return nil - } -} - func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placementv1beta1.ResourceIdentifier) func() error { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) @@ -676,13 +673,13 @@ func checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ Kind: "ResourceQuota", - Name: testEnvelopeResourceQuota.Name, + Name: testResourceQuota.Name, Version: "v1", - Namespace: testEnvelopeResourceQuota.Namespace, + Namespace: testResourceQuota.Namespace, Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: testEnvelopConfigMap.Name, + Name: testResourceEnvelope.Name, Namespace: workNamespaceName, - Type: placementv1beta1.ConfigMapEnvelopeType, + Type: placementv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -741,37 +738,137 @@ func 
checkForRolloutStuckOnOneFailedClusterStatus(wantSelectedResources []placem } func readEnvelopTestManifests() { - By("Read the testConfigMap resources") + By("Read the ConfigMap resource, which is no longer treated as an envelope") testConfigMap = corev1.ConfigMap{} err := utils.GetObjectFromManifest("resources/test-configmap.yaml", &testConfigMap) Expect(err).Should(Succeed()) - By("Read testEnvelopConfigMap resource") - testEnvelopConfigMap = corev1.ConfigMap{} - err = utils.GetObjectFromManifest("resources/test-envelop-configmap.yaml", &testEnvelopConfigMap) + By("Read ResourceQuota to be filled in an envelope") + testResourceQuota = corev1.ResourceQuota{} + err = utils.GetObjectFromManifest("resources/resourcequota.yaml", &testResourceQuota) + Expect(err).Should(Succeed()) + + By("Read Deployment to be filled in an envelope") + testDeployment = appv1.Deployment{} + err = utils.GetObjectFromManifest("resources/test-deployment.yaml", &testDeployment) Expect(err).Should(Succeed()) - By("Read ResourceQuota") - testEnvelopeResourceQuota = corev1.ResourceQuota{} - err = utils.GetObjectFromManifest("resources/resourcequota.yaml", &testEnvelopeResourceQuota) + By("Read ClusterRole to be filled in an envelope") + testClusterRole = rbacv1.ClusterRole{} + err = utils.GetObjectFromManifest("resources/test-clusterrole.yaml", &testClusterRole) Expect(err).Should(Succeed()) + + By("Create ResourceEnvelope template") + testResourceEnvelope = placementv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-resource-envelope", + }, + Data: make(map[string]runtime.RawExtension), + } + + By("Create ClusterResourceEnvelope template") + testClusterResourceEnvelope = placementv1beta1.ClusterResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-resource-envelope", + }, + Data: make(map[string]runtime.RawExtension), + } } // createWrappedResourcesForEnvelopTest creates some enveloped resources on the hub cluster for testing purposes.
func createWrappedResourcesForEnvelopTest() { ns := appNamespace() Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) - // modify the configMap according to the namespace + + // Update namespaces for namespaced resources testConfigMap.Namespace = ns.Name - Expect(hubClient.Create(ctx, &testConfigMap)).To(Succeed(), "Failed to create config map %s", testConfigMap.Name) + Expect(hubClient.Create(ctx, &testConfigMap)).To(Succeed(), "Failed to create ConfigMap") - // modify the enveloped configMap according to the namespace - testEnvelopConfigMap.Namespace = ns.Name + testResourceQuota.Namespace = ns.Name + testDeployment.Namespace = ns.Name + testResourceEnvelope.Namespace = ns.Name - // modify the embedded namespaced resource according to the namespace - testEnvelopeResourceQuota.Namespace = ns.Name - resourceQuotaByte, err := json.Marshal(testEnvelopeResourceQuota) + + // Create ResourceEnvelope with ResourceQuota inside + quotaBytes, err := json.Marshal(testResourceQuota) + Expect(err).Should(Succeed()) + testResourceEnvelope.Data["resourceQuota.yaml"] = runtime.RawExtension{Raw: quotaBytes} + deploymentBytes, err := json.Marshal(testDeployment) Expect(err).Should(Succeed()) - testEnvelopConfigMap.Data["resourceQuota.yaml"] = string(resourceQuotaByte) - Expect(hubClient.Create(ctx, &testEnvelopConfigMap)).To(Succeed(), "Failed to create testEnvelop config map %s", testEnvelopConfigMap.Name) + testResourceEnvelope.Data["deployment.yaml"] = runtime.RawExtension{Raw: deploymentBytes} + Expect(hubClient.Create(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to create ResourceEnvelope") + + // Create ClusterResourceEnvelope with ClusterRole inside + roleBytes, err := json.Marshal(testClusterRole) + Expect(err).Should(Succeed()) + testClusterResourceEnvelope.Data["clusterRole.yaml"] = runtime.RawExtension{Raw: roleBytes} + Expect(hubClient.Create(ctx, &testClusterResourceEnvelope)).To(Succeed(), "Failed to create ClusterResourceEnvelope") +} + +func checkAllResourcesPlacement(memberCluster *framework.Cluster) func() error { + workNamespaceName := appNamespace().Name + return func() error { + // Verify namespace exists on target cluster + if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { + return err + } + + // Check that ConfigMap was placed + By("Check ConfigMap") + placedConfigMap := &corev1.ConfigMap{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ + Namespace: workNamespaceName, + Name: testConfigMap.Name, + }, placedConfigMap); err != nil { + return fmt.Errorf("failed to find configMap %s: %w", testConfigMap.Name, err) + } + // Verify the ConfigMap data matches + if diff := cmp.Diff(placedConfigMap.Data, testConfigMap.Data); diff != "" { + return fmt.Errorf("configMap diff (-got, +want): %s", diff) + } + + // Check that ResourceQuota from ResourceEnvelope was placed + By("Check ResourceQuota from ResourceEnvelope") + placedResourceQuota := &corev1.ResourceQuota{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ + Namespace: workNamespaceName, + Name: testResourceQuota.Name, + }, placedResourceQuota); err != nil { + return fmt.Errorf("failed to find resourceQuota %s from ResourceEnvelope: %w", testResourceQuota.Name, err) + } + // Verify the ResourceQuota matches expected spec + if diff := cmp.Diff(placedResourceQuota.Spec, testResourceQuota.Spec); diff != "" { + return fmt.Errorf("ResourceQuota from ResourceEnvelope diff (-got, +want): %s", diff) + } + + // Check that Deployment from ResourceEnvelope was placed + By("Check Deployment from ResourceEnvelope") + placedDeployment := &appv1.Deployment{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ + Namespace: workNamespaceName, + Name: testDeployment.Name, + }, placedDeployment); err != nil { + return fmt.Errorf("failed to find Deployment from ResourceEnvelope: %w", err) + } + + // Verify the deployment matches expected spec + if diff := cmp.Diff(placedDeployment.Spec.Template.Spec.Containers[0].Image, testDeployment.Spec.Template.Spec.Containers[0].Image); diff != "" { + return fmt.Errorf("deployment from ResourceEnvelope diff (-got, +want): %s", diff) + } + + // Check that ClusterRole from ClusterResourceEnvelope was placed + By("Check ClusterRole from ClusterResourceEnvelope") + placedClusterRole := &rbacv1.ClusterRole{} + if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ + Name: testClusterRole.Name, + }, placedClusterRole); err != nil { + return fmt.Errorf("failed to find ClusterRole from ClusterResourceEnvelope: %w", err) + } + + // Verify the ClusterRole matches expected rules + if diff := cmp.Diff(placedClusterRole.Rules, testClusterRole.Rules); diff != "" { + return fmt.Errorf("clusterRole from ClusterResourceEnvelope diff (-got, +want): %s", diff) + } + + return nil + } } diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index 6bead04d3..97f6844fa 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -64,9 +64,10 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun Namespace: workNamespaceName, }, { - Kind: "ConfigMap", - Name: testEnvelopConfigMap.Name, - Version: "v1", + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testResourceEnvelope.Name, Namespace: workNamespaceName, }, } @@ -76,25 +77,7 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun It("Create the test resources in the namespace", createWrappedResourcesForEnvelopTest) It("Create the CRP that select the name space and place it to all clusters", func() { - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers.
- Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - ApplyStrategy: &placementv1beta1.ApplyStrategy{AllowCoOwnership: true}, - }, - }, - } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + createCRP(crpName) }) It("should update CRP status as expected", func() { @@ -106,7 +89,7 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun It("should place the resources on all member clusters", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkEnvelopQuotaPlacement(memberCluster) + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index 14b0b289c..1cdeb15f6 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -13,6 +13,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" @@ -31,7 +32,7 @@ var _ = Describe("handling errors and failures gracefully", func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - wrapperCMName := "wrapper" + envelopeName := "wrapper" wrappedCMName1 := "app-1" wrappedCMName2 := "app-2" @@ -44,20 +45,17 @@ var _ = Describe("handling errors and failures gracefully", func() { ns := appNamespace() Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) - // Create an envelope config map. - wrapperCM := &corev1.ConfigMap{ + // Create an envelope resource to wrap the configMaps. + resourceEnvelop := &placementv1beta1.ResourceEnvelope{ ObjectMeta: metav1.ObjectMeta{ - Name: wrapperCMName, + Name: envelopeName, Namespace: ns.Name, - Annotations: map[string]string{ - placementv1beta1.EnvelopeConfigMapAnnotation: "true", - }, }, - Data: map[string]string{}, + Data: map[string]runtime.RawExtension{}, } // Create configMaps as wrapped resources. - wrappedCM := &corev1.ConfigMap{ + configMap := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ APIVersion: corev1.SchemeGroupVersion.String(), Kind: "ConfigMap", @@ -72,23 +70,23 @@ var _ = Describe("handling errors and failures gracefully", func() { } // Given Fleet's current resource sorting logic, this configMap // will be considered as the duplicated resource entry. 
- wrappedCM1 := wrappedCM.DeepCopy() - wrappedCM1.TypeMeta = metav1.TypeMeta{ + badConfigMap := configMap.DeepCopy() + badConfigMap.TypeMeta = metav1.TypeMeta{ APIVersion: "dummy/v10", Kind: "Fake", } - wrappedCM1Bytes, err := json.Marshal(wrappedCM1) - Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM1.Name) - wrapperCM.Data["cm1.yaml"] = string(wrappedCM1Bytes) + badCMBytes, err := json.Marshal(badConfigMap) + Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name) + resourceEnvelop.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes} - wrappedCM2 := wrappedCM.DeepCopy() + wrappedCM2 := configMap.DeepCopy() wrappedCM2.Name = wrappedCMName2 wrappedCM2.Data[cmDataKey] = cmDataVal2 wrappedCM2Bytes, err := json.Marshal(wrappedCM2) Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM2.Name) - wrapperCM.Data["cm2.yaml"] = string(wrappedCM2Bytes) + resourceEnvelop.Data["cm2.yaml"] = runtime.RawExtension{Raw: wrappedCM2Bytes} - Expect(hubClient.Create(ctx, wrapperCM)).To(Succeed(), "Failed to create configMap %s", wrapperCM.Name) + Expect(hubClient.Create(ctx, resourceEnvelop)).To(Succeed(), "Failed to create configMap %s", resourceEnvelop.Name) // Create a CRP. crp := &placementv1beta1.ClusterResourcePlacement{ @@ -138,9 +136,9 @@ var _ = Describe("handling errors and failures gracefully", func() { Namespace: workNamespaceName, Name: wrappedCMName1, Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: wrapperCMName, + Name: envelopeName, Namespace: workNamespaceName, - Type: placementv1beta1.ConfigMapEnvelopeType, + Type: placementv1beta1.ResourceEnvelopeType, }, }, Condition: metav1.Condition{ @@ -161,9 +159,10 @@ var _ = Describe("handling errors and failures gracefully", func() { Version: "v1", }, { - Kind: "ConfigMap", - Name: wrapperCMName, - Version: "v1", + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: envelopeName, Namespace: workNamespaceName, }, }, diff --git a/test/e2e/resources/resourcequota.yaml b/test/e2e/resources/resourcequota.yaml index 2db32cb36..4e3224799 100644 --- a/test/e2e/resources/resourcequota.yaml +++ b/test/e2e/resources/resourcequota.yaml @@ -5,7 +5,7 @@ metadata: namespace: app spec: hard: - requests.cpu: "1" - requests.memory: 1Gi - limits.cpu: "2" - limits.memory: 2Gi + requests.cpu: "2" + requests.memory: 2Gi + limits.cpu: "4" + limits.memory: 4Gi diff --git a/test/e2e/resources/test-clusterrole.yaml b/test/e2e/resources/test-clusterrole.yaml new file mode 100644 index 000000000..9bc9a3fbf --- /dev/null +++ b/test/e2e/resources/test-clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pod-reader +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] \ No newline at end of file diff --git a/test/e2e/resources/test-configmap.yaml b/test/e2e/resources/test-configmap.yaml index 98396fe81..661825c40 100644 --- a/test/e2e/resources/test-configmap.yaml +++ b/test/e2e/resources/test-configmap.yaml @@ -3,6 +3,8 @@ kind: ConfigMap metadata: name: test-configmap namespace: app + annotations: + kubernetes-fleet.io/envelope-configmap: "true" data: fielda: one fieldb: two diff --git a/test/e2e/resources/test-daemonset.yaml b/test/e2e/resources/test-daemonset.yaml index d28ebacd9..e901b8964 100644 --- a/test/e2e/resources/test-daemonset.yaml +++ b/test/e2e/resources/test-daemonset.yaml @@ -19,6 +19,7 @@ spec: image: 
quay.io/fluentd_elasticsearch/fluentd:v2.5.2 resources: limits: + cpu: 250m memory: 200Mi requests: cpu: 100m diff --git a/test/e2e/resources/test-deployment.yaml b/test/e2e/resources/test-deployment.yaml index 24af05c07..5cb9bec0a 100644 --- a/test/e2e/resources/test-deployment.yaml +++ b/test/e2e/resources/test-deployment.yaml @@ -30,5 +30,12 @@ spec: containers: - name: nginx image: nginx:1.14.2 + resources: + requests: + cpu: 50m + memory: 200Mi + limits: + cpu: 250m + memory: 400Mi ports: - containerPort: 80 diff --git a/test/e2e/resources/test-envelope-object.yaml b/test/e2e/resources/test-envelope-object.yaml index afabac70a..e50a9e4c5 100644 --- a/test/e2e/resources/test-envelope-object.yaml +++ b/test/e2e/resources/test-envelope-object.yaml @@ -1,7 +1,5 @@ -apiVersion: v1 -kind: ConfigMap +apiVersion: placement.kubernetes-fleet.io/v1beta1 +kind: ResourceEnvelope metadata: name: envelop-object - namespace: app - annotations: - kubernetes-fleet.io/envelope-configmap: "true" \ No newline at end of file + namespace: app \ No newline at end of file diff --git a/test/e2e/resources/test-statefulset.yaml b/test/e2e/resources/test-statefulset.yaml index e3aa518f1..ec5f9ff08 100644 --- a/test/e2e/resources/test-statefulset.yaml +++ b/test/e2e/resources/test-statefulset.yaml @@ -19,6 +19,13 @@ spec: containers: - name: nginx image: nginx + resources: + requests: + cpu: 50m + memory: 200Mi + limits: + cpu: 250m + memory: 400Mi ports: - containerPort: 80 protocol: TCP \ No newline at end of file diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index 1c607a929..f53bf56a3 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -32,6 +32,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" @@ -57,12 +58,11 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespace := appNamespace() var wantSelectedResources []placementv1beta1.ResourceIdentifier - var testEnvelopeDeployment corev1.ConfigMap - var testDeployment appv1.Deployment + var testDeploymentEnvelope placementv1beta1.ResourceEnvelope BeforeAll(func() { readDeploymentTestManifest(&testDeployment) - readEnvelopeConfigMapTestManifest(&testEnvelopeDeployment) + readEnvelopeResourceTestManifest(&testDeploymentEnvelope) wantSelectedResources = []placementv1beta1.ResourceIdentifier{ { Kind: utils.NamespaceKind, @@ -70,16 +70,17 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { Version: corev1.SchemeGroupVersion.Version, }, { - Kind: utils.ConfigMapKind, - Name: testEnvelopeDeployment.Name, - Version: corev1.SchemeGroupVersion.Version, + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDeploymentEnvelope.Name, Namespace: workNamespace.Name, }, } }) It("Create the wrapped deployment resources in the namespace", func() { - createWrappedResourcesForRollout(&testEnvelopeDeployment, &testDeployment, utils.DeploymentKind, workNamespace) + createWrappedResourcesForRollout(&testDeploymentEnvelope, &testDeployment, utils.DeploymentKind, workNamespace) }) It("Create the CRP that select the namespace", func() { @@ -163,6 +164,8 @@ var _ = Describe("placing wrapped 
resources using a CRP", Ordered, func() { }) AfterAll(func() { + By(fmt.Sprintf("deleting envelop %s", testDeploymentEnvelope.Name)) + Expect(hubClient.Delete(ctx, &testDeploymentEnvelope)).To(Succeed(), "Failed to delete ResourceEnvelope") // Remove the custom deletion blocker finalizer from the CRP. ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) @@ -172,7 +175,6 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespace := appNamespace() var wantSelectedResources []placementv1beta1.ResourceIdentifier - var testDeployment appv1.Deployment BeforeAll(func() { // Create the test resources. @@ -251,13 +253,13 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespace := appNamespace() var wantSelectedResources []placementv1beta1.ResourceIdentifier - var testEnvelopeDaemonSet corev1.ConfigMap var testDaemonSet appv1.DaemonSet + var testDaemonSetEnvelope placementv1beta1.ResourceEnvelope BeforeAll(func() { // Create the test resources. readDaemonSetTestManifest(&testDaemonSet) - readEnvelopeConfigMapTestManifest(&testEnvelopeDaemonSet) + readEnvelopeResourceTestManifest(&testDaemonSetEnvelope) wantSelectedResources = []placementv1beta1.ResourceIdentifier{ { Kind: utils.NamespaceKind, @@ -265,16 +267,17 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { Version: corev1.SchemeGroupVersion.Version, }, { - Kind: utils.ConfigMapKind, - Name: testEnvelopeDaemonSet.Name, - Version: corev1.SchemeGroupVersion.Version, + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testDaemonSetEnvelope.Name, Namespace: workNamespace.Name, }, } }) It("create the daemonset resource in the namespace", func() { - createWrappedResourcesForRollout(&testEnvelopeDaemonSet, &testDaemonSet, utils.DaemonSetKind, workNamespace) + createWrappedResourcesForRollout(&testDaemonSetEnvelope, &testDaemonSet, utils.DaemonSetKind, workNamespace) }) It("create the CRP that select the namespace", func() { @@ -302,8 +305,8 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { if err != nil { return nil } - testEnvelopeDaemonSet.Data["daemonset.yaml"] = string(daemonSetByte) - return hubClient.Update(ctx, &testEnvelopeDaemonSet) + testDaemonSetEnvelope.Data["daemonset.yaml"] = runtime.RawExtension{Raw: daemonSetByte} + return hubClient.Update(ctx, &testDaemonSetEnvelope) }, eventuallyInterval, eventuallyInterval).Should(Succeed(), "Failed to change the image name of daemonset in envelope object") }) @@ -315,9 +318,9 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { Name: testDaemonSet.Name, Namespace: testDaemonSet.Namespace, Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: testEnvelopeDaemonSet.Name, - Namespace: testEnvelopeDaemonSet.Namespace, - Type: placementv1beta1.ConfigMapEnvelopeType, + Name: testDaemonSetEnvelope.Name, + Namespace: testDaemonSetEnvelope.Namespace, + Type: placementv1beta1.ResourceEnvelopeType, }, } crpStatusActual := safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResources, failedDaemonSetResourceIdentifier, allMemberClusterNames, "1", 2) @@ -334,13 +337,13 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) 
workNamespace := appNamespace() var wantSelectedResources []placementv1beta1.ResourceIdentifier - var testEnvelopeStatefulSet corev1.ConfigMap var testStatefulSet appv1.StatefulSet + var testStatefulSetEnvelope placementv1beta1.ResourceEnvelope BeforeAll(func() { // Create the test resources. readStatefulSetTestManifest(&testStatefulSet, false) - readEnvelopeConfigMapTestManifest(&testEnvelopeStatefulSet) + readEnvelopeResourceTestManifest(&testStatefulSetEnvelope) wantSelectedResources = []placementv1beta1.ResourceIdentifier{ { Kind: utils.NamespaceKind, @@ -348,16 +351,17 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { Version: corev1.SchemeGroupVersion.Version, }, { - Kind: utils.ConfigMapKind, - Name: testEnvelopeStatefulSet.Name, - Version: corev1.SchemeGroupVersion.Version, + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testStatefulSetEnvelope.Name, Namespace: workNamespace.Name, }, } }) It("create the statefulset resource in the namespace", func() { - createWrappedResourcesForRollout(&testEnvelopeStatefulSet, &testStatefulSet, utils.StatefulSetKind, workNamespace) + createWrappedResourcesForRollout(&testStatefulSetEnvelope, &testStatefulSet, utils.StatefulSetKind, workNamespace) }) It("create the CRP that select the namespace", func() { @@ -385,8 +389,8 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { if err != nil { return nil } - testEnvelopeStatefulSet.Data["statefulset.yaml"] = string(daemonSetByte) - return hubClient.Update(ctx, &testEnvelopeStatefulSet) + testStatefulSetEnvelope.Data["statefulset.yaml"] = runtime.RawExtension{Raw: daemonSetByte} + return hubClient.Update(ctx, &testStatefulSetEnvelope) }, eventuallyInterval, eventuallyInterval).Should(Succeed(), "Failed to change the image name in statefulset") }) @@ -398,9 +402,9 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { Name: testStatefulSet.Name, Namespace: testStatefulSet.Namespace, Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: testEnvelopeStatefulSet.Name, - Namespace: testEnvelopeStatefulSet.Namespace, - Type: placementv1beta1.ConfigMapEnvelopeType, + Name: testStatefulSetEnvelope.Name, + Namespace: testStatefulSetEnvelope.Namespace, + Type: placementv1beta1.ResourceEnvelopeType, }, } crpStatusActual := safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResources, failedStatefulSetResourceIdentifier, allMemberClusterNames, "1", 2) @@ -1000,9 +1004,9 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { }) // createWrappedResourcesForRollout creates an enveloped resource on the hub cluster with a workload object for testing purposes. 
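// As the signature change below shows, the rollout tests now wrap workloads in a
// placementv1beta1.ResourceEnvelope rather than in a ConfigMap. A typical call,
// taken from the tests above (sketch for reference):
//
//	createWrappedResourcesForRollout(&testDeploymentEnvelope, &testDeployment, utils.DeploymentKind, workNamespace)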
-func createWrappedResourcesForRollout(testEnvelopeObj *corev1.ConfigMap, obj metav1.Object, kind string, namespace corev1.Namespace) { +func createWrappedResourcesForRollout(testEnvelopeObj *placementv1beta1.ResourceEnvelope, obj metav1.Object, kind string, namespace corev1.Namespace) { Expect(hubClient.Create(ctx, &namespace)).To(Succeed(), "Failed to create namespace %s", namespace.Name) - testEnvelopeObj.Data = make(map[string]string) + testEnvelopeObj.Data = make(map[string]runtime.RawExtension) constructWrappedResources(testEnvelopeObj, obj, kind, namespace) Expect(hubClient.Create(ctx, testEnvelopeObj)).To(Succeed(), "Failed to create testEnvelop object %s containing %s", testEnvelopeObj.Name, kind) } diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 8d9d5da63..07ef31c60 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -1214,14 +1214,14 @@ func readJobTestManifest(testManifest *batchv1.Job) { Expect(err).Should(Succeed()) } -func readEnvelopeConfigMapTestManifest(testEnvelopeObj *corev1.ConfigMap) { +func readEnvelopeResourceTestManifest(testEnvelopeObj *placementv1beta1.ResourceEnvelope) { By("Read testEnvelopConfigMap resource") err := utils.GetObjectFromManifest("resources/test-envelope-object.yaml", testEnvelopeObj) Expect(err).Should(Succeed()) } // constructWrappedResources fill the enveloped resource with the workload object -func constructWrappedResources(testEnvelopeObj *corev1.ConfigMap, workloadObj metav1.Object, kind string, namespace corev1.Namespace) { +func constructWrappedResources(testEnvelopeObj *placementv1beta1.ResourceEnvelope, workloadObj metav1.Object, kind string, namespace corev1.Namespace) { // modify the enveloped configMap according to the namespace testEnvelopeObj.Namespace = namespace.Name @@ -1231,11 +1231,11 @@ func constructWrappedResources(testEnvelopeObj *corev1.ConfigMap, workloadObj me Expect(err).Should(Succeed()) switch kind { case utils.DeploymentKind: - testEnvelopeObj.Data["deployment.yaml"] = string(workloadObjectByte) + testEnvelopeObj.Data["deployment.yaml"] = runtime.RawExtension{Raw: workloadObjectByte} case utils.DaemonSetKind: - testEnvelopeObj.Data["daemonset.yaml"] = string(workloadObjectByte) + testEnvelopeObj.Data["daemonset.yaml"] = runtime.RawExtension{Raw: workloadObjectByte} case utils.StatefulSetKind: - testEnvelopeObj.Data["statefulset.yaml"] = string(workloadObjectByte) + testEnvelopeObj.Data["statefulset.yaml"] = runtime.RawExtension{Raw: workloadObjectByte} } } From 044a175095d365c00fb90a7b02facaae0d15aa4b Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 14 May 2025 16:36:54 -0700 Subject: [PATCH 5/8] fix e2e Signed-off-by: Ryan Zhang --- test/e2e/join_and_leave_test.go | 41 ++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index 97f6844fa..7f0550ca4 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -70,6 +70,12 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun Name: testResourceEnvelope.Name, Namespace: workNamespaceName, }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: "ClusterResourceEnvelope", + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, + }, } }) @@ -77,13 +83,42 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun It("Create the test resources in the namespace", createWrappedResourcesForEnvelopTest) It("Create 
the CRP that select the name space and place it to all clusters", func() { - createCRP(crpName) + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: "ClusterResourceEnvelope", + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") }) It("should update CRP status as expected", func() { - // resourceQuota is not trackable yet crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) It("should place the resources on all member clusters", func() { From 76eb13f2bb62f7cce96406bd97908687e5fb16a6 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Thu, 15 May 2025 00:10:38 -0700 Subject: [PATCH 6/8] fix the minor issues Signed-off-by: Ryan Zhang --- pkg/controllers/workgenerator/manifests/clusterrole.yaml | 2 +- pkg/controllers/workgenerator/manifests/webhook.yaml | 2 +- test/e2e/placement_negative_cases_test.go | 8 ++++---- test/e2e/resources/test-clusterrole.yaml | 2 +- test/e2e/resources/test-envelope-object.yaml | 2 +- test/e2e/resources/test-statefulset.yaml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/controllers/workgenerator/manifests/clusterrole.yaml b/pkg/controllers/workgenerator/manifests/clusterrole.yaml index 9bc9a3fbf..ba11c0b5e 100644 --- a/pkg/controllers/workgenerator/manifests/clusterrole.yaml +++ b/pkg/controllers/workgenerator/manifests/clusterrole.yaml @@ -5,4 +5,4 @@ metadata: rules: - apiGroups: [""] resources: ["pods"] - verbs: ["get", "list", "watch"] \ No newline at end of file + verbs: ["get", "list", "watch"] diff --git a/pkg/controllers/workgenerator/manifests/webhook.yaml b/pkg/controllers/workgenerator/manifests/webhook.yaml index b07d3dce6..c371c2ec0 100644 --- a/pkg/controllers/workgenerator/manifests/webhook.yaml +++ b/pkg/controllers/workgenerator/manifests/webhook.yaml @@ -15,4 +15,4 @@ webhooks: namespace: ops admissionReviewVersions: ["v1"] sideEffects: None - timeoutSeconds: 10 \ No newline at end of file + timeoutSeconds: 10 diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index 1cdeb15f6..de8658742 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -46,7 +46,7 @@ var _ = Describe("handling errors and failures gracefully", func() { Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) // Create an envelope resource to wrap the 
configMaps. - resourceEnvelop := &placementv1beta1.ResourceEnvelope{ + resourceEnvelope := &placementv1beta1.ResourceEnvelope{ ObjectMeta: metav1.ObjectMeta{ Name: envelopeName, Namespace: ns.Name, @@ -77,16 +77,16 @@ var _ = Describe("handling errors and failures gracefully", func() { } badCMBytes, err := json.Marshal(badConfigMap) Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name) - resourceEnvelop.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes} + resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes} wrappedCM2 := configMap.DeepCopy() wrappedCM2.Name = wrappedCMName2 wrappedCM2.Data[cmDataKey] = cmDataVal2 wrappedCM2Bytes, err := json.Marshal(wrappedCM2) Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM2.Name) - resourceEnvelop.Data["cm2.yaml"] = runtime.RawExtension{Raw: wrappedCM2Bytes} + resourceEnvelope.Data["cm2.yaml"] = runtime.RawExtension{Raw: wrappedCM2Bytes} - Expect(hubClient.Create(ctx, resourceEnvelop)).To(Succeed(), "Failed to create configMap %s", resourceEnvelop.Name) + Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create configMap %s", resourceEnvelope.Name) // Create a CRP. crp := &placementv1beta1.ClusterResourcePlacement{ diff --git a/test/e2e/resources/test-clusterrole.yaml b/test/e2e/resources/test-clusterrole.yaml index 9bc9a3fbf..ba11c0b5e 100644 --- a/test/e2e/resources/test-clusterrole.yaml +++ b/test/e2e/resources/test-clusterrole.yaml @@ -5,4 +5,4 @@ metadata: rules: - apiGroups: [""] resources: ["pods"] - verbs: ["get", "list", "watch"] \ No newline at end of file + verbs: ["get", "list", "watch"] diff --git a/test/e2e/resources/test-envelope-object.yaml b/test/e2e/resources/test-envelope-object.yaml index e50a9e4c5..11a5183c2 100644 --- a/test/e2e/resources/test-envelope-object.yaml +++ b/test/e2e/resources/test-envelope-object.yaml @@ -2,4 +2,4 @@ apiVersion: placement.kubernetes-fleet.io/v1beta1 kind: ResourceEnvelope metadata: name: envelop-object - namespace: app \ No newline at end of file + namespace: app diff --git a/test/e2e/resources/test-statefulset.yaml b/test/e2e/resources/test-statefulset.yaml index ec5f9ff08..a3e9b624f 100644 --- a/test/e2e/resources/test-statefulset.yaml +++ b/test/e2e/resources/test-statefulset.yaml @@ -28,4 +28,4 @@ spec: memory: 400Mi ports: - containerPort: 80 - protocol: TCP \ No newline at end of file + protocol: TCP From c75712a222d9762ae10f72aaf16dc6fbf5b6a21f Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 15 May 2025 17:56:05 +1000 Subject: [PATCH 7/8] Minor fixes Signed-off-by: michaelawyu --- pkg/controllers/workgenerator/controller.go | 1 - .../controller_integration_test.go | 16 +- .../workgenerator/manifests/clusterrole.yaml | 2 +- ....yaml => test-clusterscoped-envelope.yaml} | 2 +- ...velop.yaml => test-resource-envelope.yaml} | 2 +- ...lop2.yaml => test-resource-envelope2.yaml} | 2 +- .../workgenerator/manifests/webhook.yaml | 2 +- pkg/controllers/workgenerator/suite_test.go | 20 +- test/e2e/enveloped_object_placement_test.go | 345 +----------------- test/e2e/placement_negative_cases_test.go | 8 +- test/e2e/resources/test-clusterrole.yaml | 2 +- test/e2e/resources/test-envelope-object.yaml | 4 +- test/e2e/resources/test-statefulset.yaml | 2 +- test/e2e/rollout_test.go | 4 +- 14 files changed, 44 insertions(+), 368 deletions(-) rename pkg/controllers/workgenerator/manifests/{test-clusterscoped-envelop.yaml => test-clusterscoped-envelope.yaml} (95%) rename 
pkg/controllers/workgenerator/manifests/{test-resource-envelop.yaml => test-resource-envelope.yaml} (90%) rename pkg/controllers/workgenerator/manifests/{test-resource-envelop2.yaml => test-resource-envelope2.yaml} (90%) diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 2908dd6f6..8772a2595 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -50,7 +50,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" - fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/controllers/workapplier" "github.com/kubefleet-dev/kubefleet/pkg/utils" diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go index 92cfd5561..6a1294056 100644 --- a/pkg/controllers/workgenerator/controller_integration_test.go +++ b/pkg/controllers/workgenerator/controller_integration_test.go @@ -596,13 +596,13 @@ var _ = Describe("Test Work Generator Controller", func() { }) }) - Context("Test Bound ClusterResourceBinding with a single resource snapshot with namespaced envelop objects", func() { + Context("Test Bound ClusterResourceBinding with a single resource snapshot with namespaced envelope objects", func() { var masterSnapshot *placementv1beta1.ClusterResourceSnapshot - envelopedResourceName := "namespaced-resource-envelop" + envelopedResourceName := "namespaced-resource-envelope" envelopedResourceNameSpace := "app" BeforeEach(func() { masterSnapshot = generateResourceSnapshot(1, 1, 0, [][]byte{ - testConfigMap, testResourceEnvelop, testResourceCRD, testNameSpace, + testConfigMap, testResourceEnvelope, testResourceCRD, testNameSpace, }) Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) By(fmt.Sprintf("master resource snapshot %s created", masterSnapshot.Name)) @@ -723,7 +723,7 @@ var _ = Describe("Test Work Generator Controller", func() { fetchEnvelopedWork(&workList, binding, string(placementv1beta1.ResourceEnvelopeType), envelopedResourceName, envelopedResourceNameSpace) // create a second snapshot with a modified enveloped object masterSnapshot = generateResourceSnapshot(2, 1, 0, [][]byte{ - testResourceEnvelop2, testResourceCRD, testNameSpace, + testResourceEnvelope2, testResourceCRD, testNameSpace, }) Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) By(fmt.Sprintf("another master resource snapshot %s created", masterSnapshot.Name)) @@ -829,7 +829,7 @@ var _ = Describe("Test Work Generator Controller", func() { }, } diff = cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) - Expect(diff).Should(BeEmpty(), fmt.Sprintf("envelop work(%s) mismatch (-want +got):\n%s", work.Name, diff)) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("envelope work(%s) mismatch (-want +got):\n%s", work.Name, diff)) }) It("Should delete the enveloped work object in the target namespace after it's removed from snapshot", func() { @@ -882,13 +882,13 @@ var _ = Describe("Test Work Generator Controller", func() { }) }) - Context("Test Bound ClusterResourceBinding with a single resource snapshot with cluster scoped envelop objects", func() { + Context("Test Bound ClusterResourceBinding with a single resource snapshot with cluster scoped envelope objects", func() { var masterSnapshot 
*placementv1beta1.ClusterResourceSnapshot - envelopedResourceName := "clusterscoped-resource-envelop" + envelopedResourceName := "clusterscoped-resource-envelope" envelopedResourceNameSpace := "" BeforeEach(func() { masterSnapshot = generateResourceSnapshot(1, 1, 0, [][]byte{ - testClusterScopedEnvelop, testResourceCRD, testNameSpace, + testClusterScopedEnvelope, testResourceCRD, testNameSpace, }) Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) By(fmt.Sprintf("master resource snapshot %s created", masterSnapshot.Name)) diff --git a/pkg/controllers/workgenerator/manifests/clusterrole.yaml b/pkg/controllers/workgenerator/manifests/clusterrole.yaml index 9bc9a3fbf..ba11c0b5e 100644 --- a/pkg/controllers/workgenerator/manifests/clusterrole.yaml +++ b/pkg/controllers/workgenerator/manifests/clusterrole.yaml @@ -5,4 +5,4 @@ metadata: rules: - apiGroups: [""] resources: ["pods"] - verbs: ["get", "list", "watch"] \ No newline at end of file + verbs: ["get", "list", "watch"] diff --git a/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml b/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelope.yaml similarity index 95% rename from pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml rename to pkg/controllers/workgenerator/manifests/test-clusterscoped-envelope.yaml index 569b4e1a2..a73d536f7 100644 --- a/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelop.yaml +++ b/pkg/controllers/workgenerator/manifests/test-clusterscoped-envelope.yaml @@ -1,7 +1,7 @@ apiVersion: placement.kubernetes-fleet.io/v1beta1 kind: ClusterResourceEnvelope metadata: - name: clusterscoped-resource-envelop + name: clusterscoped-resource-envelope data: "webhook.yaml": apiVersion: admissionregistration.k8s.io/v1 diff --git a/pkg/controllers/workgenerator/manifests/test-resource-envelop.yaml b/pkg/controllers/workgenerator/manifests/test-resource-envelope.yaml similarity index 90% rename from pkg/controllers/workgenerator/manifests/test-resource-envelop.yaml rename to pkg/controllers/workgenerator/manifests/test-resource-envelope.yaml index 6a10b2d51..f003d0f84 100644 --- a/pkg/controllers/workgenerator/manifests/test-resource-envelop.yaml +++ b/pkg/controllers/workgenerator/manifests/test-resource-envelope.yaml @@ -1,7 +1,7 @@ apiVersion: placement.kubernetes-fleet.io/v1beta1 kind: ResourceEnvelope metadata: - name: namespaced-resource-envelop + name: namespaced-resource-envelope namespace: app data: "resourceQuota.yaml": diff --git a/pkg/controllers/workgenerator/manifests/test-resource-envelop2.yaml b/pkg/controllers/workgenerator/manifests/test-resource-envelope2.yaml similarity index 90% rename from pkg/controllers/workgenerator/manifests/test-resource-envelop2.yaml rename to pkg/controllers/workgenerator/manifests/test-resource-envelope2.yaml index ae3d0b8de..772bccbb8 100644 --- a/pkg/controllers/workgenerator/manifests/test-resource-envelop2.yaml +++ b/pkg/controllers/workgenerator/manifests/test-resource-envelope2.yaml @@ -1,7 +1,7 @@ apiVersion: placement.kubernetes-fleet.io/v1beta1 kind: ResourceEnvelope metadata: - name: namespaced-resource-envelop + name: namespaced-resource-envelope namespace: app data: "resourceQuota.yaml": diff --git a/pkg/controllers/workgenerator/manifests/webhook.yaml b/pkg/controllers/workgenerator/manifests/webhook.yaml index b07d3dce6..c371c2ec0 100644 --- a/pkg/controllers/workgenerator/manifests/webhook.yaml +++ b/pkg/controllers/workgenerator/manifests/webhook.yaml @@ -15,4 +15,4 @@ webhooks: namespace: 
ops admissionReviewVersions: ["v1"] sideEffects: None - timeoutSeconds: 10 \ No newline at end of file + timeoutSeconds: 10 diff --git a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go index 699cdd5f1..b18320a50 100644 --- a/pkg/controllers/workgenerator/suite_test.go +++ b/pkg/controllers/workgenerator/suite_test.go @@ -59,7 +59,7 @@ var ( cancel context.CancelFunc // pre loaded test manifests - testResourceCRD, testNameSpace, testResource, testConfigMap, testResourceEnvelop, testResourceEnvelop2, testClusterScopedEnvelop, testPdb []byte + testResourceCRD, testNameSpace, testResource, testConfigMap, testResourceEnvelope, testResourceEnvelope2, testClusterScopedEnvelope, testPdb []byte // want overridden manifest which is overridden by cro-1 and ro-1 wantOverriddenTestResource []byte @@ -347,22 +347,22 @@ func readTestManifests() { testConfigMap, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) - By("Read testResourceEnvelop resource") - rawByte, err = os.ReadFile("manifests/test-resource-envelop.yaml") + By("Read testResourceEnvelope resource") + rawByte, err = os.ReadFile("manifests/test-resource-envelope.yaml") Expect(err).Should(Succeed()) - testResourceEnvelop, err = yaml.ToJSON(rawByte) + testResourceEnvelope, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) - By("Read testResourceEnvelop2 resource") - rawByte, err = os.ReadFile("manifests/test-resource-envelop2.yaml") + By("Read testResourceEnvelope2 resource") + rawByte, err = os.ReadFile("manifests/test-resource-envelope2.yaml") Expect(err).Should(Succeed()) - testResourceEnvelop2, err = yaml.ToJSON(rawByte) + testResourceEnvelope2, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) - By("Read testClusterScopedEnvelop resource") - rawByte, err = os.ReadFile("manifests/test-clusterscoped-envelop.yaml") + By("Read testClusterScopedEnvelope resource") + rawByte, err = os.ReadFile("manifests/test-clusterscoped-envelope.yaml") Expect(err).Should(Succeed()) - testClusterScopedEnvelop, err = yaml.ToJSON(rawByte) + testClusterScopedEnvelope, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) By("Read PodDisruptionBudget") diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index a57dee24b..3d4e38092 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -33,7 +33,6 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - fleetv1alpha1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1alpha1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/controllers/workapplier" "github.com/kubefleet-dev/kubefleet/pkg/utils" @@ -59,7 +58,6 @@ var ( // Note that this container will run in parallel with other containers. var _ = Describe("placing wrapped resources using a CRP", func() { - // Original test cases for ConfigMap envelope... 
Context("Test a CRP place enveloped objects successfully", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := appNamespace().Name @@ -167,7 +165,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) - It("Update the envelop configMap back with good configuration", func() { + It("Update the envelope configMap back with good configuration", func() { // Get the resource envelope Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: testResourceEnvelope.Name}, &testResourceEnvelope)).To(Succeed(), "Failed to get the resourceEnvelope") // update the resource envelope with a valid resourceQuota @@ -207,7 +205,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) AfterAll(func() { - By(fmt.Sprintf("deleting envelop %s", testResourceEnvelope.Name)) + By(fmt.Sprintf("deleting envelope %s", testResourceEnvelope.Name)) Expect(hubClient.Delete(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to delete ResourceEnvelope") Expect(hubClient.Delete(ctx, &testClusterResourceEnvelope)).To(Succeed(), "Failed to delete testClusterResourceEnvelope") By(fmt.Sprintf("deleting placement %s and related resources", crpName)) @@ -239,7 +237,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { constructWrappedResources(&testResourceEnvelope, &testDeployment, utils.DeploymentKind, workNamespace) constructWrappedResources(&testResourceEnvelope, &testDaemonSet, utils.DaemonSetKind, workNamespace) constructWrappedResources(&testResourceEnvelope, &testStatefulSet, utils.StatefulSetKind, workNamespace) - Expect(hubClient.Create(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to create testEnvelop object %s containing workloads", testResourceEnvelope.Name) + Expect(hubClient.Create(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to create testEnvelope object %s containing workloads", testResourceEnvelope.Name) }) It("Create the CRP that select the namespace", func() { @@ -368,7 +366,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { }) AfterAll(func() { - By(fmt.Sprintf("deleting envelop %s", testResourceEnvelope.Name)) + By(fmt.Sprintf("deleting envelope %s", testResourceEnvelope.Name)) Expect(hubClient.Delete(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to delete ResourceEnvelope") By(fmt.Sprintf("deleting placement %s and related resources", crpName)) ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) @@ -504,215 +502,12 @@ var _ = Describe("placing wrapped resources using a CRP", func() { // either. AfterAll(func() { - By(fmt.Sprintf("deleting envelop %s", envelopWrapper.Name)) + By(fmt.Sprintf("deleting envelope %s", envelopWrapper.Name)) Expect(hubClient.Delete(ctx, envelopWrapper)).To(Succeed(), "Failed to delete ResourceEnvelope") // Remove the CRP and the namespace from the hub cluster. ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) }) }) - - Context("Test ResourceEnvelope and ClusterResourceEnvelope placement", Ordered, func() { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - workNamespaceName := appNamespace().Name - var wantSelectedResources []placementv1beta1.ResourceIdentifier - - BeforeAll(func() { - // Create the test resources. 
- readAllEnvelopTypes() - wantSelectedResources = []placementv1beta1.ResourceIdentifier{ - { - Kind: "Namespace", - Name: workNamespaceName, - Version: "v1", - }, - { - Kind: "ResourceEnvelope", - Name: testResourceEnvelope.Name, - Version: "v1alpha1", - Group: "placement.kubefleet", - Namespace: workNamespaceName, - }, - { - Kind: "ClusterResourceEnvelope", - Name: testClusterResourceEnvelope.Name, - Version: "v1alpha1", - Group: "placement.kubefleet", - }, - } - }) - - It("Create the test envelope resources", createAllEnvelopTypeResources) - - It("Create the CRP that selects the namespace and envelopes", func() { - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer to better observe controller behavior - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: []placementv1beta1.ResourceSelector{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: workNamespaceName, - }, - { - Group: "placement.kubefleet", - Version: "v1alpha1", - Kind: "ResourceEnvelope", - Name: testResourceEnvelope.Name, - Namespace: ptr.To(workNamespaceName), - }, - { - Group: "placement.kubefleet", - Version: "v1alpha1", - Kind: "ClusterResourceEnvelope", - Name: testClusterResourceEnvelope.Name, - }, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") - }) - - It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "0", true) - Eventually(crpStatusUpdatedActual, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) - - It("should place the resources from both envelope types on all member clusters", func() { - for idx := range allMemberClusters { - memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkBothEnvelopeTypesPlacement(memberCluster) - Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) - } - }) - - It("Update the ResourceEnvelope with invalid content", func() { - // Get the current ResourceEnvelope - resourceEnvelope := &fleetv1alpha1.ResourceEnvelope{} - Expect(hubClient.Get(ctx, types.NamespacedName{ - Namespace: workNamespaceName, - Name: testResourceEnvelope.Name, - }, resourceEnvelope)).To(Succeed(), "Failed to get ResourceEnvelope") - - // Update with an invalid ConfigMap (immutable field change) - badConfigMap := testEnvelopeResourceQuota.DeepCopy() - badConfigMap.Spec.Scopes = []corev1.ResourceQuotaScope{ - corev1.ResourceQuotaScopeNotBestEffort, - corev1.ResourceQuotaScopeNotTerminating, - } - - badCMBytes, err := json.Marshal(badConfigMap) - Expect(err).Should(Succeed()) - - // Replace the first resource with the invalid one - resourceEnvelope.Spec.Manifests["resourceQuota1.yaml"] = fleetv1alpha1.Manifest{ - Data: runtime.RawExtension{Raw: badCMBytes}, - } - - Expect(hubClient.Update(ctx, resourceEnvelope)).To(Succeed(), "Failed to update ResourceEnvelope") - }) - - It("should update CRP status showing failure due to invalid ResourceEnvelope content", func() { - 
Eventually(func() error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { - return err - } - - // Check for failed conditions - if diff := cmp.Diff(crp.Status.Conditions, crpAppliedFailedConditions(crp.Generation), crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP conditions don't show application failure: %s", diff) - } - - // Verify at least one placement has a failed placement with immutable field error - foundFailure := false - for _, placementStatus := range crp.Status.PlacementStatuses { - for _, failedPlacement := range placementStatus.FailedPlacements { - if failedPlacement.ResourceIdentifier.Envelope != nil && - failedPlacement.ResourceIdentifier.Envelope.Type == placementv1beta1.EnvelopeType(fleetv1alpha1.EnvelopeTypeResource) && - strings.Contains(failedPlacement.Condition.Message, "field is immutable") { - foundFailure = true - break - } - } - if foundFailure { - break - } - } - - if !foundFailure { - return fmt.Errorf("didn't find expected failure for immutable field in ResourceEnvelope") - } - - return nil - }, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to see expected failure in CRP status") - }) - - It("Fix the ResourceEnvelope with valid content", func() { - // Get the current ResourceEnvelope - resourceEnvelope := &fleetv1alpha1.ResourceEnvelope{} - Expect(hubClient.Get(ctx, types.NamespacedName{ - Namespace: workNamespaceName, - Name: testResourceEnvelope.Name, - }, resourceEnvelope)).To(Succeed(), "Failed to get ResourceEnvelope") - - // Reset to valid content - goodCM := testEnvelopeResourceQuota.DeepCopy() - goodCMBytes, err := json.Marshal(goodCM) - Expect(err).Should(Succeed()) - - // Replace the first resource with the valid one - resourceEnvelope.Spec.Manifests["resourceQuota1.yaml"] = fleetv1alpha1.Manifest{ - Data: runtime.RawExtension{Raw: goodCMBytes}, - } - - Expect(hubClient.Update(ctx, resourceEnvelope)).To(Succeed(), "Failed to update ResourceEnvelope") - }) - - It("should update CRP status as success again", func() { - crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, allMemberClusterNames, nil, "2", true) - Eventually(crpStatusUpdatedActual, longEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) - - It("should place the fixed resources on all member clusters", func() { - for idx := range allMemberClusters { - memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkBothEnvelopeTypesPlacement(memberCluster) - Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) - } - }) - - It("can delete the CRP", func() { - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - }, - } - Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") - }) - - It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) - - It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual(crpName) - Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") - }) - - AfterAll(func() { - By(fmt.Sprintf("deleting placement %s and 
related resources", crpName)) - ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) - }) - }) }) var _ = Describe("Process objects with generate name", Ordered, func() { @@ -721,7 +516,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { nsGenerateName := "application-" wrappedCMGenerateName := "wrapped-foo-" - var envelop *placementv1beta1.ResourceEnvelope + var envelope *placementv1beta1.ResourceEnvelope BeforeAll(func() { // Create the namespace with both name and generate name set. @@ -730,7 +525,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) // Create an envelope. - envelop = &placementv1beta1.ResourceEnvelope{ + envelope = &placementv1beta1.ResourceEnvelope{ ObjectMeta: metav1.ObjectMeta{ Name: envelopResourceName, Namespace: ns.Name, @@ -753,8 +548,8 @@ var _ = Describe("Process objects with generate name", Ordered, func() { } wrappedCMByte, err := json.Marshal(wrappedCM) Expect(err).Should(BeNil()) - envelop.Data["wrapped.yaml"] = runtime.RawExtension{Raw: wrappedCMByte} - Expect(hubClient.Create(ctx, envelop)).To(Succeed(), "Failed to create config map %s", envelop.Name) + envelope.Data["wrapped.yaml"] = runtime.RawExtension{Raw: wrappedCMByte} + Expect(hubClient.Create(ctx, envelope)).To(Succeed(), "Failed to create config map %s", envelope.Name) // Create a CRP that selects the namespace. crp := &placementv1beta1.ClusterResourcePlacement{ @@ -864,8 +659,8 @@ var _ = Describe("Process objects with generate name", Ordered, func() { }) AfterAll(func() { - By(fmt.Sprintf("deleting envelop %s", envelop.Name)) - Expect(hubClient.Delete(ctx, envelop)).To(Succeed(), "Failed to delete ResourceEnvelope") + By(fmt.Sprintf("deleting envelope %s", envelope.Name)) + Expect(hubClient.Delete(ctx, envelope)).To(Succeed(), "Failed to delete ResourceEnvelope") By(fmt.Sprintf("deleting placement %s and related resources", crpName)) ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) @@ -1077,121 +872,3 @@ func checkAllResourcesPlacement(memberCluster *framework.Cluster) func() error { return nil } } - -// readAllEnvelopTypes reads all envelope type test manifests -func readAllEnvelopTypes() { - By("Read the ConfigMap resources") - testConfigMap = corev1.ConfigMap{} - err := utils.GetObjectFromManifest("resources/test-configmap.yaml", &testConfigMap) - Expect(err).Should(Succeed()) - - By("Read ResourceQuota") - testEnvelopeResourceQuota = corev1.ResourceQuota{} - err = utils.GetObjectFromManifest("resources/resourcequota.yaml", &testEnvelopeResourceQuota) - Expect(err).Should(Succeed()) - - By("Read ClusterRole") - testClusterRole = rbacv1.ClusterRole{} - err = utils.GetObjectFromManifest("resources/test-clusterrole.yaml", &testClusterRole) - Expect(err).Should(Succeed()) - - By("Read ResourceEnvelope template") - testResourceEnvelope = fleetv1alpha1.ResourceEnvelope{ - TypeMeta: metav1.TypeMeta{ - APIVersion: fleetv1alpha1.GroupVersion.String(), - Kind: "ResourceEnvelope", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: resourceEnvelopeName, - }, - Spec: fleetv1alpha1.EnvelopeSpec{ - Manifests: make(map[string]fleetv1alpha1.Manifest), - }, - } - - By("Read ClusterResourceEnvelope template") - testClusterResourceEnvelope = fleetv1alpha1.ClusterResourceEnvelope{ - TypeMeta: metav1.TypeMeta{ - APIVersion: fleetv1alpha1.GroupVersion.String(), - Kind: "ClusterResourceEnvelope", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: 
clusterResourceEnvelopeName, - }, - Spec: fleetv1alpha1.EnvelopeSpec{ - Manifests: make(map[string]fleetv1alpha1.Manifest), - }, - } -} - -// createAllEnvelopTypeResources creates all types of envelope resources on the hub cluster for testing -func createAllEnvelopTypeResources() { - ns := appNamespace() - Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) - - // Update namespaces for namespaced resources - testConfigMap.Namespace = ns.Name - testEnvelopeResourceQuota.Namespace = ns.Name - testResourceEnvelope.Namespace = ns.Name - - // Create ResourceEnvelope with ResourceQuota inside - quotaBytes, err := json.Marshal(testEnvelopeResourceQuota) - Expect(err).Should(Succeed()) - testResourceEnvelope.Spec.Manifests["resourceQuota1.yaml"] = fleetv1alpha1.Manifest{ - Data: runtime.RawExtension{Raw: quotaBytes}, - } - testResourceEnvelope.Spec.Manifests["resourceQuota2.yaml"] = fleetv1alpha1.Manifest{ - Data: runtime.RawExtension{Raw: quotaBytes}, // Include a duplicate to test multiple resources - } - Expect(hubClient.Create(ctx, &testResourceEnvelope)).To(Succeed(), "Failed to create ResourceEnvelope") - - // Create ClusterResourceEnvelope with ClusterRole inside - roleBytes, err := json.Marshal(testClusterRole) - Expect(err).Should(Succeed()) - testClusterResourceEnvelope.Spec.Manifests["clusterRole.yaml"] = fleetv1alpha1.Manifest{ - Data: runtime.RawExtension{Raw: roleBytes}, - } - Expect(hubClient.Create(ctx, &testClusterResourceEnvelope)).To(Succeed(), "Failed to create ClusterResourceEnvelope") -} - -// checkBothEnvelopeTypesPlacement verifies that resources from both envelope types were properly placed -func checkBothEnvelopeTypesPlacement(memberCluster *framework.Cluster) func() error { - workNamespaceName := appNamespace().Name - return func() error { - // Verify namespace exists on target cluster - if err := validateWorkNamespaceOnCluster(memberCluster, types.NamespacedName{Name: workNamespaceName}); err != nil { - return err - } - - // Check that ResourceQuota from ResourceEnvelope was placed - By("Check ResourceQuota from ResourceEnvelope") - placedResourceQuota := &corev1.ResourceQuota{} - if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ - Namespace: workNamespaceName, - Name: testEnvelopeResourceQuota.Name, - }, placedResourceQuota); err != nil { - return fmt.Errorf("failed to find ResourceQuota from ResourceEnvelope: %w", err) - } - - // Verify the ResourceQuota matches expected spec - if diff := cmp.Diff(placedResourceQuota.Spec, testEnvelopeResourceQuota.Spec); diff != "" { - return fmt.Errorf("ResourceQuota from ResourceEnvelope diff (-got, +want): %s", diff) - } - - // Check that ClusterRole from ClusterResourceEnvelope was placed - By("Check ClusterRole from ClusterResourceEnvelope") - placedClusterRole := &rbacv1.ClusterRole{} - if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{ - Name: testClusterRole.Name, - }, placedClusterRole); err != nil { - return fmt.Errorf("failed to find ClusterRole from ClusterResourceEnvelope: %w", err) - } - - // Verify the ClusterRole matches expected rules - if diff := cmp.Diff(placedClusterRole.Rules, testClusterRole.Rules); diff != "" { - return fmt.Errorf("ClusterRole from ClusterResourceEnvelope diff (-got, +want): %s", diff) - } - - return nil - } -} diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index 1cdeb15f6..de8658742 100644 --- a/test/e2e/placement_negative_cases_test.go +++ 
b/test/e2e/placement_negative_cases_test.go
@@ -46,7 +46,7 @@ var _ = Describe("handling errors and failures gracefully", func() {
 Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name)

 // Create an envelope resource to wrap the configMaps.
- resourceEnvelop := &placementv1beta1.ResourceEnvelope{
+ resourceEnvelope := &placementv1beta1.ResourceEnvelope{
 ObjectMeta: metav1.ObjectMeta{
 Name: envelopeName,
 Namespace: ns.Name,
@@ -77,16 +77,16 @@ var _ = Describe("handling errors and failures gracefully", func() {
 }
 badCMBytes, err := json.Marshal(badConfigMap)
 Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name)
- resourceEnvelop.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes}
+ resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes}

 wrappedCM2 := configMap.DeepCopy()
 wrappedCM2.Name = wrappedCMName2
 wrappedCM2.Data[cmDataKey] = cmDataVal2
 wrappedCM2Bytes, err := json.Marshal(wrappedCM2)
 Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM2.Name)
- resourceEnvelop.Data["cm2.yaml"] = runtime.RawExtension{Raw: wrappedCM2Bytes}
+ resourceEnvelope.Data["cm2.yaml"] = runtime.RawExtension{Raw: wrappedCM2Bytes}

- Expect(hubClient.Create(ctx, resourceEnvelop)).To(Succeed(), "Failed to create configMap %s", resourceEnvelop.Name)
+ Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create resource envelope %s", resourceEnvelope.Name)

 // Create a CRP.
 crp := &placementv1beta1.ClusterResourcePlacement{
diff --git a/test/e2e/resources/test-clusterrole.yaml b/test/e2e/resources/test-clusterrole.yaml
index 9bc9a3fbf..ba11c0b5e 100644
--- a/test/e2e/resources/test-clusterrole.yaml
+++ b/test/e2e/resources/test-clusterrole.yaml
@@ -5,4 +5,4 @@ metadata:
 rules:
 - apiGroups: [""]
 resources: ["pods"]
- verbs: ["get", "list", "watch"]
\ No newline at end of file
+ verbs: ["get", "list", "watch"]
diff --git a/test/e2e/resources/test-envelope-object.yaml b/test/e2e/resources/test-envelope-object.yaml
index e50a9e4c5..6ed7c9561 100644
--- a/test/e2e/resources/test-envelope-object.yaml
+++ b/test/e2e/resources/test-envelope-object.yaml
@@ -1,5 +1,5 @@
 apiVersion: placement.kubernetes-fleet.io/v1beta1
 kind: ResourceEnvelope
 metadata:
- name: envelop-object
- namespace: app
\ No newline at end of file
+ name: envelope-object
+ namespace: app
diff --git a/test/e2e/resources/test-statefulset.yaml b/test/e2e/resources/test-statefulset.yaml
index ec5f9ff08..a3e9b624f 100644
--- a/test/e2e/resources/test-statefulset.yaml
+++ b/test/e2e/resources/test-statefulset.yaml
@@ -28,4 +28,4 @@ spec:
 memory: 400Mi
 ports:
 - containerPort: 80
- protocol: TCP
\ No newline at end of file
+ protocol: TCP
diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go
index f53bf56a3..9fd6ce615 100644
--- a/test/e2e/rollout_test.go
+++ b/test/e2e/rollout_test.go
@@ -164,7 +164,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() {
 })

 AfterAll(func() {
- By(fmt.Sprintf("deleting envelop %s", testDeploymentEnvelope.Name))
+ By(fmt.Sprintf("deleting envelope %s", testDeploymentEnvelope.Name))
 Expect(hubClient.Delete(ctx, &testDeploymentEnvelope)).To(Succeed(), "Failed to delete ResourceEnvelope")
 // Remove the custom deletion blocker finalizer from the CRP.
ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters)
@@ -1008,7 +1008,7 @@ func createWrappedResourcesForRollout(testEnvelopeObj *placementv1beta1.Resource
 Expect(hubClient.Create(ctx, &namespace)).To(Succeed(), "Failed to create namespace %s", namespace.Name)
 testEnvelopeObj.Data = make(map[string]runtime.RawExtension)
 constructWrappedResources(testEnvelopeObj, obj, kind, namespace)
- Expect(hubClient.Create(ctx, testEnvelopeObj)).To(Succeed(), "Failed to create testEnvelop object %s containing %s", testEnvelopeObj.Name, kind)
+ Expect(hubClient.Create(ctx, testEnvelopeObj)).To(Succeed(), "Failed to create test envelope object %s containing %s", testEnvelopeObj.Name, kind)
 }

 func checkCluster(cluster *framework.Cluster, name, namespace string) bool {

From da750f2f33f1f4eba38b30ae6955e1319828d69d Mon Sep 17 00:00:00 2001
From: michaelawyu
Date: Thu, 15 May 2025 18:03:26 +1000
Subject: [PATCH 8/8] Minor fixes

Signed-off-by: michaelawyu
---
 test/e2e/enveloped_object_placement_test.go | 16 ++++++++--------
 ...nfigmap.yaml => test-envelope-configmap.yaml} | 2 +-
 2 files changed, 9 insertions(+), 9 deletions(-)
 rename test/e2e/resources/{test-envelop-configmap.yaml => test-envelope-configmap.yaml} (93%)

diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go
index 3d4e38092..e7194b224 100644
--- a/test/e2e/enveloped_object_placement_test.go
+++ b/test/e2e/enveloped_object_placement_test.go
@@ -41,9 +41,9 @@ import (
 )

 const (
- envelopResourceName = "envelop-wrapper"
- cmDataKey = "foo"
- cmDataVal = "bar"
+ envelopeResourceName = "envelope-wrapper"
+ cmDataKey = "foo"
+ cmDataVal = "bar"
 )

 var (
@@ -386,7 +386,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() {
 // Create an envelope config map.
 envelopWrapper = &placementv1beta1.ResourceEnvelope{
 ObjectMeta: metav1.ObjectMeta{
- Name: envelopResourceName,
+ Name: envelopeResourceName,
 Namespace: ns.Name,
 },
 Data: make(map[string]runtime.RawExtension),
@@ -484,7 +484,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() {
 Group: placementv1beta1.GroupVersion.Group,
 Kind: placementv1beta1.ResourceEnvelopeKind,
 Version: placementv1beta1.GroupVersion.Version,
- Name: envelopResourceName,
+ Name: envelopeResourceName,
 Namespace: workNamespaceName,
 },
 },
@@ -527,7 +527,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() {

 // Create an envelope.
envelope = &placementv1beta1.ResourceEnvelope{ ObjectMeta: metav1.ObjectMeta{ - Name: envelopResourceName, + Name: envelopeResourceName, Namespace: ns.Name, }, Data: map[string]runtime.RawExtension{}, @@ -597,7 +597,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Namespace: workNamespaceName, Version: "v1", Envelope: &placementv1beta1.EnvelopeIdentifier{ - Name: envelopResourceName, + Name: envelopeResourceName, Namespace: workNamespaceName, Type: placementv1beta1.ResourceEnvelopeType, }, @@ -623,7 +623,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { Group: placementv1beta1.GroupVersion.Group, Kind: placementv1beta1.ResourceEnvelopeKind, Version: placementv1beta1.GroupVersion.Version, - Name: envelopResourceName, + Name: envelopeResourceName, Namespace: workNamespaceName, }, }, diff --git a/test/e2e/resources/test-envelop-configmap.yaml b/test/e2e/resources/test-envelope-configmap.yaml similarity index 93% rename from test/e2e/resources/test-envelop-configmap.yaml rename to test/e2e/resources/test-envelope-configmap.yaml index c88877618..4b577f56d 100644 --- a/test/e2e/resources/test-envelop-configmap.yaml +++ b/test/e2e/resources/test-envelope-configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: envelop-configmap + name: envelope-configmap namespace: app annotations: kubernetes-fleet.io/envelope-configmap: "true"
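
For context on the pattern exercised throughout this series: every updated test wraps a manifest by JSON-serializing it into the envelope's Data map, and the envelope (not the wrapped object) is what the CRP selects on the hub. Below is a minimal sketch of that flow, assuming the v1beta1 ResourceEnvelope type with a Data map as used in the tests above and a controller-runtime client; the package name, helper name, the "app" namespace, and the object names are illustrative placeholders only.

// Package envelopedemo sketches how a ConfigMap is wrapped in a
// v1beta1 ResourceEnvelope, mirroring the e2e tests in this series.
package envelopedemo

import (
	"context"
	"encoding/json"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// createWrappedConfigMap builds an envelope around a ConfigMap manifest and
// creates the envelope on the hub cluster. Names are placeholders.
func createWrappedConfigMap(ctx context.Context, hubClient client.Client) error {
	// The manifest to be wrapped; it is serialized to raw JSON rather than
	// created directly, so TypeMeta must be populated.
	wrappedCM := corev1.ConfigMap{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Name: "wrapped-cm", Namespace: "app"},
		Data:       map[string]string{"foo": "bar"},
	}
	raw, err := json.Marshal(wrappedCM)
	if err != nil {
		return err
	}
	// The envelope is a namespaced object; each Data entry carries one
	// wrapped manifest keyed by an arbitrary file-like name.
	envelope := &placementv1beta1.ResourceEnvelope{
		ObjectMeta: metav1.ObjectMeta{Name: "envelope-wrapper", Namespace: "app"},
		Data:       map[string]runtime.RawExtension{"wrapped.yaml": {Raw: raw}},
	}
	// Once a CRP selects the envelope's namespace, the wrapped manifest,
	// not the envelope itself, is what gets placed on member clusters.
	return hubClient.Create(ctx, envelope)
}

The cluster-scoped ClusterResourceEnvelope follows the same wrapping idea; the exact field layout differs across the API versions touched by this series, so the sketch mirrors only the v1beta1 usage shown in the tests.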