-
Notifications
You must be signed in to change notification settings - Fork 20
feat: make hub statefulset work by stripping some properties from generated PVCs #347
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 3 commits
6e509d4
676df2a
3e06c8d
1befc44
5914312
b3edb75
6ec57d7
d6e48ca
540fe6f
d37ed21
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -37,12 +37,14 @@ var _ = Describe("placing workloads using a CRP with PickAll policy", Label("res | |
| var testDeployment appsv1.Deployment | ||
| var testDaemonSet appsv1.DaemonSet | ||
| var testJob batchv1.Job | ||
| var testStatefulSet appsv1.StatefulSet | ||
|
|
||
| BeforeAll(func() { | ||
| // Read the test manifests | ||
| readDeploymentTestManifest(&testDeployment) | ||
| readDaemonSetTestManifest(&testDaemonSet) | ||
| readJobTestManifest(&testJob) | ||
| readStatefulSetTestManifest(&testStatefulSet, StatefulSetWithStorage) | ||
| workNamespace := appNamespace() | ||
|
|
||
| // Create namespace and workloads | ||
|
|
@@ -51,9 +53,11 @@ var _ = Describe("placing workloads using a CRP with PickAll policy", Label("res | |
| testDeployment.Namespace = workNamespace.Name | ||
| testDaemonSet.Namespace = workNamespace.Name | ||
| testJob.Namespace = workNamespace.Name | ||
| testStatefulSet.Namespace = workNamespace.Name | ||
| Expect(hubClient.Create(ctx, &testDeployment)).To(Succeed(), "Failed to create test deployment %s", testDeployment.Name) | ||
| Expect(hubClient.Create(ctx, &testDaemonSet)).To(Succeed(), "Failed to create test daemonset %s", testDaemonSet.Name) | ||
| Expect(hubClient.Create(ctx, &testJob)).To(Succeed(), "Failed to create test job %s", testJob.Name) | ||
| Expect(hubClient.Create(ctx, &testStatefulSet)).To(Succeed(), "Failed to create test statefulset %s", testStatefulSet.Name) | ||
|
|
||
| // Create the CRP that selects the namespace | ||
| By("creating CRP that selects the namespace") | ||
|
|
@@ -105,9 +109,30 @@ var _ = Describe("placing workloads using a CRP with PickAll policy", Label("res | |
| Name: testJob.Name, | ||
| Namespace: workNamespace.Name, | ||
| }, | ||
| { | ||
| Group: "apps", | ||
| Version: "v1", | ||
| Kind: "StatefulSet", | ||
| Name: testStatefulSet.Name, | ||
| Namespace: workNamespace.Name, | ||
| }, | ||
| // PVCs created by StatefulSet controller from volumeClaimTemplates | ||
| // Kubernetes StatefulSet controller uses naming convention: <volumeClaimTemplate-name>-<statefulset-name>-<replica-index> | ||
| { | ||
| Version: "v1", | ||
| Kind: "PersistentVolumeClaim", | ||
| Name: fmt.Sprintf("%s-%s-%d", testStatefulSet.Spec.VolumeClaimTemplates[0].Name, testStatefulSet.Name, 0), | ||
| Namespace: workNamespace.Name, | ||
| }, | ||
| { | ||
| Version: "v1", | ||
| Kind: "PersistentVolumeClaim", | ||
| Name: fmt.Sprintf("%s-%s-%d", testStatefulSet.Spec.VolumeClaimTemplates[0].Name, testStatefulSet.Name, 1), | ||
| Namespace: workNamespace.Name, | ||
| }, | ||
|
||
| } | ||
| // Use customizedPlacementStatusUpdatedActual with resourceIsTrackable=false | ||
| // because Jobs don't have availability tracking like Deployments/DaemonSets do | ||
| // because Jobs and PVCs don't have availability tracking like Deployments/DaemonSets do | ||
|
||
| crpKey := types.NamespacedName{Name: crpName} | ||
| crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(crpKey, wantSelectedResources, allMemberClusterNames, nil, "0", false) | ||
| Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") | ||
|
|
@@ -170,6 +195,13 @@ var _ = Describe("placing workloads using a CRP with PickAll policy", Label("res | |
| "Hub job should complete successfully") | ||
| }) | ||
|
|
||
| It("should verify hub statefulset is ready", func() { | ||
| By("checking hub statefulset status") | ||
| statefulSetReadyActual := waitForStatefulSetToBeReady(hubClient, &testStatefulSet) | ||
| Eventually(statefulSetReadyActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), | ||
| "Hub statefulset should be ready before placement") | ||
| }) | ||
|
|
||
| It("should place the deployment on all member clusters", func() { | ||
| By("verifying deployment is placed and ready on all member clusters") | ||
| for idx := range allMemberClusters { | ||
|
|
@@ -206,6 +238,24 @@ var _ = Describe("placing workloads using a CRP with PickAll policy", Label("res | |
| } | ||
| }) | ||
|
|
||
| It("should place the statefulset on all member clusters", func() { | ||
| By("verifying statefulset is placed and ready on all member clusters") | ||
| for idx := range allMemberClusters { | ||
| memberCluster := allMemberClusters[idx] | ||
| statefulsetPlacedActual := waitForStatefulSetPlacementToReady(memberCluster, &testStatefulSet) | ||
| Eventually(statefulsetPlacedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place statefulset on member cluster %s", memberCluster.ClusterName) | ||
| } | ||
| }) | ||
|
|
||
| It("should verify statefulset replicas are ready on all clusters", func() { | ||
| By("checking statefulset status on each cluster") | ||
| for _, cluster := range allMemberClusters { | ||
| statefulSetReadyActual := waitForStatefulSetToBeReady(cluster.KubeClient, &testStatefulSet) | ||
| Eventually(statefulSetReadyActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), | ||
| "StatefulSet should be ready on cluster %s", cluster.ClusterName) | ||
| } | ||
| }) | ||
|
|
||
| It("should verify deployment replicas are ready on all clusters", func() { | ||
| By("checking deployment status on each cluster") | ||
| for _, cluster := range allMemberClusters { | ||
|
|
@@ -232,6 +282,46 @@ var _ = Describe("placing workloads using a CRP with PickAll policy", Label("res | |
| }) | ||
| }) | ||
|
|
||
| func waitForStatefulSetToBeReady(kubeClient client.Client, testStatefulSet *appsv1.StatefulSet) func() error { | ||
| return func() error { | ||
| var statefulSet appsv1.StatefulSet | ||
| if err := kubeClient.Get(ctx, types.NamespacedName{ | ||
| Name: testStatefulSet.Name, | ||
| Namespace: testStatefulSet.Namespace, | ||
| }, &statefulSet); err != nil { | ||
| return err | ||
| } | ||
|
|
||
| // Verify statefulset is ready | ||
| if statefulSet.Status.ObservedGeneration != statefulSet.Generation { | ||
| return fmt.Errorf("statefulset has stale status: observed generation %d != generation %d", | ||
| statefulSet.Status.ObservedGeneration, statefulSet.Generation) | ||
| } | ||
|
|
||
| requiredReplicas := int32(1) | ||
| if statefulSet.Spec.Replicas != nil { | ||
| requiredReplicas = *statefulSet.Spec.Replicas | ||
| } | ||
|
|
||
| if statefulSet.Status.CurrentReplicas != requiredReplicas { | ||
| return fmt.Errorf("statefulset not ready: %d/%d current replicas", | ||
| statefulSet.Status.CurrentReplicas, requiredReplicas) | ||
| } | ||
|
|
||
| if statefulSet.Status.UpdatedReplicas != requiredReplicas { | ||
| return fmt.Errorf("statefulset not updated: %d/%d updated replicas", | ||
| statefulSet.Status.UpdatedReplicas, requiredReplicas) | ||
| } | ||
|
|
||
| if statefulSet.Status.CurrentReplicas != statefulSet.Status.UpdatedReplicas { | ||
| return fmt.Errorf("statefulset replicas not synchronized: %d current != %d updated", | ||
| statefulSet.Status.CurrentReplicas, statefulSet.Status.UpdatedReplicas) | ||
| } | ||
|
|
||
weng271190436 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| return nil | ||
| } | ||
| } | ||
|
|
||
| func waitForJobToComplete(kubeClient client.Client, testJob *batchv1.Job) func() error { | ||
| return func() error { | ||
| var job batchv1.Job | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
# Test StatefulSet with persistent storage: two replicas, each of which claims
# a 100Mi PVC generated from the volumeClaimTemplates entry. The StatefulSet
# controller names those PVCs <template-name>-<statefulset-name>-<ordinal>,
# e.g. test-ss-pvc-test-ss-0 and test-ss-pvc-test-ss-1.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: test-ss
spec:
  selector:
    matchLabels:
      app: test-ss
  serviceName: "test-ss-svc"
  replicas: 2
  template:
    metadata:
      labels:
        app: test-ss
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: pause
          # registry.k8s.io replaces the deprecated and frozen k8s.gcr.io
          # registry; images published after April 2023 are only on the new host.
          image: registry.k8s.io/pause:3.8
  volumeClaimTemplates:
    - metadata:
        name: test-ss-pvc
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "standard"
        resources:
          requests:
            storage: 100Mi
Uh oh!
There was an error while loading. Please reload this page.