diff --git a/go.mod b/go.mod index f0fd72de..a0f7dcb9 100644 --- a/go.mod +++ b/go.mod @@ -6,19 +6,20 @@ require ( bou.ke/monkey v1.0.2 github.com/blang/semver v3.5.1+incompatible github.com/go-ini/ini v1.62.0 + github.com/go-logr/logr v0.4.0 github.com/go-sql-driver/mysql v1.6.0 github.com/go-test/deep v1.0.7 + github.com/golang/glog v1.0.0 github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365 github.com/imdario/mergo v0.3.12 - github.com/onsi/ginkgo v1.16.4 - github.com/onsi/gomega v1.13.0 + github.com/onsi/ginkgo v1.16.5 + github.com/onsi/gomega v1.17.0 github.com/presslabs/controller-util v0.3.0 github.com/spf13/cobra v1.1.1 github.com/stretchr/testify v1.7.0 k8s.io/api v0.21.2 k8s.io/apimachinery v0.21.2 k8s.io/client-go v0.21.2 - k8s.io/component-base v0.21.2 k8s.io/klog/v2 v2.8.0 sigs.k8s.io/controller-runtime v0.9.2 ) diff --git a/go.sum b/go.sum index 981ec73e..98ab4e6c 100644 --- a/go.sum +++ b/go.sum @@ -152,6 +152,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -322,14 +324,16 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= diff --git a/test/e2e/cluster/cluster.go b/test/e2e/cluster/cluster.go new file mode 100644 index 00000000..b340a6c5 --- /dev/null +++ b/test/e2e/cluster/cluster.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 Pressinfra SRL + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "fmt" + "math/rand" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + api "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" + "github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework" + "github.com/radondb/radondb-mysql-kubernetes/utils" +) + +const ( + POLLING = 2 * time.Second +) + +var _ = Describe("MySQL Cluster E2E Tests", func() { + f := framework.NewFramework("mc-1") + two := int32(2) + three := int32(3) + five := int32(5) + + sysbenchOptions := framework.SysbenchOptions{ + Timeout: 10 * time.Minute, + Threads: 8, + Tables: 4, + TableSize: 10000, + } + + var ( + cluster *api.MysqlCluster + clusterKey types.NamespacedName + name string + ) + + BeforeEach(func() { + // be careful, mysql allowed hostname length is <63 + name = fmt.Sprintf("cl-%d", rand.Int31()/1000) + + By("creating a new cluster") + cluster = framework.NewCluster(name, f.Namespace.Name) + clusterKey = types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} + Expect(f.Client.Create(context.TODO(), cluster)).To(Succeed(), "failed to create cluster '%s'", cluster.Name) + + By("testing the cluster readiness") + testClusterReadiness(f, cluster) + + Expect(f.Client.Get(context.TODO(), clusterKey, cluster)).To(Succeed(), "failed to get cluster %s", cluster.Name) + + f.PrepareData(cluster, &sysbenchOptions) + // By("testing the data readiness") + // f.WaitDataReady(cluster, sysbenchOptions.Tables, sysbenchOptions.TableSize) + }) + + It("scale
out/in a cluster, 2 -> 3 -> 5 -> 3 -> 2", func() { + f.Client.Get(context.TODO(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cluster) + + // start oltp test before testing scale in/out + f.RunOltpTest(cluster, &sysbenchOptions) + + By("test cluster is ready after scale out 2 -> 3") + cluster.Spec.Replicas = &three + Expect(f.Client.Update(context.TODO(), cluster)).To(Succeed()) + testClusterReadiness(f, cluster) + testXenonReadiness(cluster) + + By("test cluster is ready after scale out 3 -> 5") + cluster.Spec.Replicas = &five + Expect(f.Client.Update(context.TODO(), cluster)).To(Succeed()) + testClusterReadiness(f, cluster) + testXenonReadiness(cluster) + + By("test cluster is ready after scale in 5 -> 3") + cluster.Spec.Replicas = &three + Expect(f.Client.Update(context.TODO(), cluster)).To(Succeed()) + testClusterReadiness(f, cluster) + testXenonReadiness(cluster) + + By("test cluster is ready after scale in 3 -> 2") + cluster.Spec.Replicas = &two + Expect(f.Client.Update(context.TODO(), cluster)).To(Succeed()) + testClusterReadiness(f, cluster) + testXenonReadiness(cluster) + }) +}) + +// testClusterReadiness determine whether the cluster is ready. +func testClusterReadiness(f *framework.Framework, cluster *api.MysqlCluster) { + timeout := f.Timeout + if *cluster.Spec.Replicas > 0 { + timeout = time.Duration(*cluster.Spec.Replicas) * f.Timeout + } + + // wait for pods to be ready + Eventually(func() int { + cl := &api.MysqlCluster{} + f.Client.Get(context.TODO(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cl) + return cl.Status.ReadyNodes + }, timeout, POLLING).Should(Equal(int(*cluster.Spec.Replicas)), "Not ready replicas of cluster '%s'", cluster.Name) + + f.ClusterEventuallyCondition(cluster, api.ConditionReady, corev1.ConditionTrue, f.Timeout) +} + +// testXenonReadiness determine whether the role of the cluster is normal. 
+func testXenonReadiness(cluster *api.MysqlCluster) { + leader := []string{} + follower := []string{} + for _, node := range cluster.Status.Nodes { + if node.RaftStatus.Role == string(utils.Leader) { + leader = append(leader, node.Name) + } else if node.RaftStatus.Role == string(utils.Follower) { + follower = append(follower, node.Name) + } else { + Expect(node).Should(BeEmpty(), "some nodes not ready") + } + } + Expect(len(leader)).Should(Equal(1), "cluster need have only one leader") + Expect(len(follower)).Should(Equal(len(cluster.Status.Nodes)-len(leader)), "in addition to the leader node, the cluster has only follower nodes") +} diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 49956126..b61a6401 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -17,9 +17,136 @@ limitations under the License. package e2e import ( + "context" + "fmt" + "os" + "path" + "strings" "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/golang/glog" + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/gomega" + runtimeutils "k8s.io/apimachinery/pkg/util/runtime" + clientset "k8s.io/client-go/kubernetes" + + "github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework" + "github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework/ginkgowrapper" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +const ( + operatorNamespace = "mysql-operator" + releaseName = "demo" ) +var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { + kubeCfg, err := framework.LoadConfig() + gomega.Expect(err).To(gomega.Succeed()) + // restClient := core.NewForConfigOrDie(kubeCfg).RESTClient() + + c, err := client.New(kubeCfg, client.Options{}) + if err != nil { + ginkgo.Fail(fmt.Sprintf("can't instantiate k8s client: %s", err)) + } + + // ginkgo node 1 + ginkgo.By("Install operator") + operatorNsObj := &corev1.Namespace{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: operatorNamespace, + }, + } + if err := c.Create(context.TODO(), operatorNsObj); err != nil { + if !strings.Contains(err.Error(), "already exists") { + ginkgo.Fail(fmt.Sprintf("can't create mysql-operator namespace: %s", err)) + } + } + framework.HelmInstallChart(releaseName, operatorNamespace) + + return nil + +}, func(data []byte) { + // all other nodes + framework.Logf("Running BeforeSuite actions on all node") +}) + +// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs). +// Here, the order of functions is reversed; first, the function which runs everywhere, +// and then the function that only runs on the first Ginkgo node. +var _ = ginkgo.SynchronizedAfterSuite(func() { + // Run on all Ginkgo nodes + framework.Logf("Running AfterSuite actions on all node") + framework.RunCleanupActions() + + // get the kubernetes client + kubeCfg, err := framework.LoadConfig() + gomega.Expect(err).To(gomega.Succeed()) + + client, err := clientset.NewForConfig(kubeCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Remove operator release") + framework.HelmPurgeRelease(releaseName, operatorNamespace) + + ginkgo.By("Delete operator namespace") + + if err := framework.DeleteNS(client, operatorNamespace, framework.DefaultNamespaceDeletionTimeout); err != nil { + framework.Failf(fmt.Sprintf("Can't delete namespace: %s", err)) + } +}, func() { + // Run only on Ginkgo node 1 + framework.Logf("Running AfterSuite actions on node 1") +}) + +// RunE2ETests checks configuration parameters (specified through flags) and then runs +// E2E tests using the Ginkgo runner. +// If a "report directory" is specified, one or more JUnit test reports will be +// generated in this directory, and cluster logs will also be saved. +// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) { - // empty now + runtimeutils.ReallyCrash = true + + gomega.RegisterFailHandler(ginkgowrapper.Fail) + // Disable skipped tests unless they are explicitly requested. + if len(config.GinkgoConfig.FocusStrings) == 0 && len(config.GinkgoConfig.SkipStrings) == 0 { + config.GinkgoConfig.SkipStrings = []string{`\[Flaky\]`, `\[Feature:.+\]`} + } + + rps := func() (rps []ginkgo.Reporter) { + // Run tests through the Ginkgo runner with output to console + JUnit for Jenkins + if framework.TestContext.ReportDir != "" { + // TODO: we should probably only be trying to create this directory once + // rather than once-per-Ginkgo-node. + if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil { + glog.Errorf("Failed creating report directory: %v", err) + return + } + // add junit report + rps = append(rps, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", "mysql_o_", config.GinkgoConfig.ParallelNode)))) + + // add logs dumper + if framework.TestContext.DumpLogsOnFailure { + rps = append(rps, NewLogsPodReporter(operatorNamespace, path.Join(framework.TestContext.ReportDir, + fmt.Sprintf("pods_logs_%d_%d.txt", config.GinkgoConfig.RandomSeed, config.GinkgoConfig.ParallelNode)))) + } + } else { + // if reportDir is not specified then print logs to stdout + if framework.TestContext.DumpLogsOnFailure { + rps = append(rps, NewLogsPodReporter(operatorNamespace, "")) + } + } + return + }() + + glog.Infof("Starting e2e run on Ginkgo node %d", config.GinkgoConfig.ParallelNode) + + ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "MySQL Operator E2E Suite", rps) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 46c39523..e4d384b1 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -17,38 +17,17 @@ limitations under the License. 
package e2e import ( - "flag" - "fmt" - "math/rand" - "os" "testing" - "time" - - "k8s.io/component-base/version" "github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework" -) - -// handleFlags sets up all flags and parses the command line. -func handleFlags() { - framework.RegisterCommonFlags(flag.CommandLine) - flag.Parse() -} -func TestMain(m *testing.M) { - var versionFlag bool - flag.CommandLine.BoolVar(&versionFlag, "version", false, "Displays version information.") - - // Register test flags, then parse flags. - handleFlags() - - if versionFlag { - fmt.Printf("%s\n", version.Get()) - os.Exit(0) - } + _ "github.com/radondb/radondb-mysql-kubernetes/test/e2e/cluster" +) - rand.Seed(time.Now().UnixNano()) - os.Exit(m.Run()) +func init() { + // framework.ViperizeFlags() + testing.Init() + framework.RegisterParseFlags() } func TestE2E(t *testing.T) { diff --git a/test/e2e/framework/cleanup.go b/test/e2e/framework/cleanup.go new file mode 100644 index 00000000..55fa1d4a --- /dev/null +++ b/test/e2e/framework/cleanup.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import "sync" + +type CleanupActionHandle *int + +var cleanupActionsLock sync.Mutex +var cleanupActions = map[CleanupActionHandle]func(){} + +// AddCleanupAction installs a function that will be called in the event of the +// whole test being terminated. 
This allows arbitrary pieces of the overall +// test to hook into SynchronizedAfterSuite(). +func AddCleanupAction(fn func()) CleanupActionHandle { + p := CleanupActionHandle(new(int)) + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + cleanupActions[p] = fn + return p +} + +// RemoveCleanupAction removes a function that was installed by +// AddCleanupAction. +func RemoveCleanupAction(p CleanupActionHandle) { + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + delete(cleanupActions, p) +} + +// RunCleanupActions runs all functions installed by AddCleanupAction. It does +// not remove them (see RemoveCleanupAction) but it does run unlocked, so they +// may remove themselves. +func RunCleanupActions() { + list := []func(){} + func() { + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + for _, fn := range cleanupActions { + list = append(list, fn) + } + }() + // Run unlocked. + for _, fn := range list { + fn() + } +} diff --git a/test/e2e/framework/cluster_util.go b/test/e2e/framework/cluster_util.go new file mode 100644 index 00000000..bcc14810 --- /dev/null +++ b/test/e2e/framework/cluster_util.go @@ -0,0 +1,180 @@ +/* +Copyright 2018 Pressinfra SRL + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "database/sql" + "fmt" + "time" + + _ "github.com/go-sql-driver/mysql" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + gomegatypes "github.com/onsi/gomega/types" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + k8score "k8s.io/client-go/kubernetes/typed/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + api "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" + pf "github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework/portforward" +) + +var ( + POLLING = 2 * time.Second + // SQLPOLLING = 10 * time.Second +) + +func (f *Framework) ClusterEventuallyCondition(cluster *api.MysqlCluster, + condType api.ClusterConditionType, status corev1.ConditionStatus, timeout time.Duration) { + Eventually(func() []api.ClusterCondition { + key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} + if err := f.Client.Get(context.TODO(), key, cluster); err != nil { + return nil + } + return cluster.Status.Conditions + }, timeout, POLLING).Should(ContainElement(MatchFields(IgnoreExtras, Fields{ + "Type": Equal(condType), + "Status": Equal(status), + })), "Testing cluster '%s' for condition %s to be %s", cluster.Name, condType, status) + +} + +func (f *Framework) NodeEventuallyCondition(cluster *api.MysqlCluster, nodeName string, + condType api.NodeConditionType, status corev1.ConditionStatus, timeout time.Duration) { + Eventually(func() []api.NodeCondition { + key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} + if err := f.Client.Get(context.TODO(), key, cluster); err != nil { + return nil + } + + for _, ns := range cluster.Status.Nodes { + if ns.Name == nodeName { + return ns.Conditions + } + } + + return nil + }, timeout, POLLING).Should(ContainElement(MatchFields(IgnoreExtras, Fields{ + "Type": Equal(condType), + "Status": Equal(status), + })), "Testing node '%s' of the cluster '%s'", cluster.Name, nodeName) +} + +// HaveClusterCond is a helper func that returns a matcher to check for an existing condition in a ClusterCondition list. 
+func HaveClusterCond(condType api.ClusterConditionType, status corev1.ConditionStatus) gomegatypes.GomegaMatcher { + return PointTo(MatchFields(IgnoreExtras, Fields{ + "Status": MatchFields(IgnoreExtras, Fields{ + "Conditions": ContainElement(MatchFields(IgnoreExtras, Fields{ + "Type": Equal(condType), + "Status": Equal(status), + })), + })}, + )) +} + +func (f *Framework) RefreshClusterFn(cluster *api.MysqlCluster) func() *api.MysqlCluster { + return func() *api.MysqlCluster { + key := types.NamespacedName{ + Name: cluster.Name, + Namespace: cluster.Namespace, + } + c := &api.MysqlCluster{} + f.Client.Get(context.TODO(), key, c) + return c + } +} + +// HaveClusterReplicas matcher for replicas +func HaveClusterReplicas(replicas int) gomegatypes.GomegaMatcher { + return PointTo(MatchFields(IgnoreExtras, Fields{ + "Status": MatchFields(IgnoreExtras, Fields{ + "ReadyNodes": Equal(replicas), + }), + })) +} + +// GetClusterLabels returns labels.Set for the given cluster +func GetClusterLabels(cluster *api.MysqlCluster) labels.Set { + labels := labels.Set{ + "mysql.radondb.com/cluster": cluster.Name, + "app.kubernetes.io/name": "mysql", + } + + return labels +} + +func (f *Framework) GetClusterPVCsFn(cluster *api.MysqlCluster) func() []corev1.PersistentVolumeClaim { + return func() []corev1.PersistentVolumeClaim { + pvcList := &corev1.PersistentVolumeClaimList{} + lo := &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: labels.SelectorFromSet(GetClusterLabels(cluster)), + } + f.Client.List(context.TODO(), pvcList, lo) + return pvcList.Items + } +} + +// func (f *Framework) TestDataReadiness(cluster *api.MysqlCluster, tables, tableSize int) bool { +// count := 0 +// podName := fmt.Sprintf("%s-mysql-0", cluster.Name) +// rows, err := f.ExecSQLOnNode(*cluster, podName, fmt.Sprintf("select count(*) from sbtest%d;", tables)) +// if err != nil { +// return false +// } +// defer rows.Close() + +// if rows.Next() { +// rows.Scan(&count) +// } +// if count ==
tableSize { +// return true +// } +// return false +// } + +func (f *Framework) ExecSQLOnNode(cluster api.MysqlCluster, podName, query string) (*sql.Rows, error) { + kubeCfg, err := LoadConfig() + Expect(err).NotTo(HaveOccurred()) + + user := cluster.Spec.MysqlOpts.User + password := cluster.Spec.MysqlOpts.Password + + client := k8score.NewForConfigOrDie(kubeCfg).RESTClient() + tunnel := pf.NewTunnel(client, kubeCfg, cluster.Namespace, + podName, + 3306, + ) + defer tunnel.Close() + + err = tunnel.ForwardPort() + Expect(err).NotTo(HaveOccurred(), "Failed setting up port-forarding for pod: %s", podName) + + dsn := fmt.Sprintf("%s:%s@tcp(localhost:%d)/radondb?timeout=20s&multiStatements=true", user, password, tunnel.Local) + db, err := sql.Open("mysql", dsn) + Expect(err).To(Succeed(), "Failed connection to mysql DSN: %s", dsn) + defer db.Close() + + rows, err := db.Query(query) + if err != nil { + return nil, fmt.Errorf("err: %s, query: %s", err, query) + } + return rows, nil +} diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go new file mode 100644 index 00000000..6dee2481 --- /dev/null +++ b/test/e2e/framework/framework.go @@ -0,0 +1,127 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + core "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + + apis "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" +) + +const ( + maxKubectlExecRetries = 5 + DefaultNamespaceDeletionTimeout = 10 * time.Minute +) + +type Framework struct { + BaseName string + Namespace *core.Namespace + + Client client.Client + ClientSet clientset.Interface + + cleanupHandle CleanupActionHandle + SkipNamespaceCreation bool + + Timeout time.Duration + + Log logr.Logger +} + +func NewFramework(baseName string) *Framework { + By(fmt.Sprintf("Creating framework with timeout: %v", TestContext.TimeoutSeconds)) + f := &Framework{ + BaseName: baseName, + SkipNamespaceCreation: false, + Log: log, + } + + BeforeEach(f.BeforeEach) + AfterEach(f.AfterEach) + + return f +} + +// BeforeEach gets a client and makes a namespace. +func (f *Framework) BeforeEach() { + // The fact that we need this feels like a bug in ginkgo. + // https://github.com/onsi/ginkgo/issues/222 + f.cleanupHandle = AddCleanupAction(f.AfterEach) + f.Timeout = time.Duration(TestContext.TimeoutSeconds) * time.Second + + By("creating a kubernetes client") + cfg, err := LoadConfig() + Expect(err).NotTo(HaveOccurred()) + + apis.AddToScheme(scheme.Scheme) + + f.Client, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + + f.ClientSet, err = clientset.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + if !f.SkipNamespaceCreation { + namespace, err := f.CreateNamespace(map[string]string{ + "e2e-framework": f.BaseName, + }) + Expect(err).NotTo(HaveOccurred()) + By(fmt.Sprintf("create a namespace api object (%s)", namespace.Name)) + + f.Namespace = namespace + } + +} + +// AfterEach deletes the namespace, after reading its events. 
+func (f *Framework) AfterEach() { + By("Collecting logs") + if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure { + logFunc := Logf + // TODO: log in file if ReportDir is set + LogPodsWithLabels(f.ClientSet, f.Namespace.Name, map[string]string{}, logFunc) + } + + By("Run cleanup actions") + RemoveCleanupAction(f.cleanupHandle) + + By("Delete testing namespace") + err := DeleteNS(f.ClientSet, f.Namespace.Name, DefaultNamespaceDeletionTimeout) + if err != nil { + Failf(fmt.Sprintf("Can't delete namespace: %s", err)) + } +} + +func (f *Framework) CreateNamespace(labels map[string]string) (*core.Namespace, error) { + return CreateTestingNS(f.BaseName, f.ClientSet, labels) +} + +// WaitForPodReady waits for the pod to flip to ready in the namespace. +func (f *Framework) WaitForPodReady(podName string) error { + return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, + f.Namespace.Name, PodStartTimeout) +} diff --git a/test/e2e/framework/ginkgowrapper/BUILD b/test/e2e/framework/ginkgowrapper/BUILD new file mode 100644 index 00000000..b5eea398 --- /dev/null +++ b/test/e2e/framework/ginkgowrapper/BUILD @@ -0,0 +1,26 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["wrapper.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper", + deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/test/e2e/framework/ginkgowrapper/wrapper.go b/test/e2e/framework/ginkgowrapper/wrapper.go new file mode 100644 index 00000000..1cb3de1a --- /dev/null +++ b/test/e2e/framework/ginkgowrapper/wrapper.go @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic +// with structured data instead of a constant string. +package ginkgowrapper + +import ( + "bufio" + "bytes" + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo" +) + +// FailurePanic is the value that will be panicked from Fail. +type FailurePanic struct { + Message string // The failure message passed to Fail + Filename string // The filename that is the source of the failure + Line int // The line number of the filename that is the source of the failure + FullStackTrace string // A full stack trace starting at the source of the failure +} + +// String makes FailurePanic look like the old Ginkgo panic when printed. +func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC } + +// Fail wraps ginkgo.Fail so that it panics with more useful +// information about the failure. This function will panic with a +// FailurePanic. +func Fail(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + + _, file, line, _ := runtime.Caller(skip) + fp := FailurePanic{ + Message: message, + Filename: file, + Line: line, + FullStackTrace: pruneStack(skip), + } + + defer func() { + e := recover() + if e != nil { + panic(fp) + } + }() + + ginkgo.Fail(message, skip) +} + +// SkipPanic is the value that will be panicked from Skip. 
+type SkipPanic struct { + Message string // The failure message passed to Fail + Filename string // The filename that is the source of the failure + Line int // The line number of the filename that is the source of the failure + FullStackTrace string // A full stack trace starting at the source of the failure +} + +// String makes SkipPanic look like the old Ginkgo panic when printed. +func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC } + +// Skip wraps ginkgo.Skip so that it panics with more useful +// information about why the test is being skipped. This function will +// panic with a SkipPanic. +func Skip(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + + _, file, line, _ := runtime.Caller(skip) + sp := SkipPanic{ + Message: message, + Filename: file, + Line: line, + FullStackTrace: pruneStack(skip), + } + + defer func() { + e := recover() + if e != nil { + panic(sp) + } + }() + + ginkgo.Skip(message, skip) +} + +// ginkgo adds a lot of test running infrastructure to the stack, so +// we filter those out +var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`) + +func pruneStack(skip int) string { + skip += 2 // one for pruneStack and one for debug.Stack + stack := debug.Stack() + scanner := bufio.NewScanner(bytes.NewBuffer(stack)) + var prunedStack []string + + // skip the top of the stack + for i := 0; i < 2*skip+1; i++ { + scanner.Scan() + } + + for scanner.Scan() { + if stackSkipPattern.Match(scanner.Bytes()) { + scanner.Scan() // these come in pairs + } else { + prunedStack = append(prunedStack, scanner.Text()) + scanner.Scan() // these come in pairs + prunedStack = append(prunedStack, scanner.Text()) + } + } + + return strings.Join(prunedStack, "\n") +} diff --git a/test/e2e/framework/helm.go b/test/e2e/framework/helm.go new file mode 100644 index 00000000..bbb441fc --- /dev/null +++ b/test/e2e/framework/helm.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 Pressinfra SRL + +Licensed under the 
Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "os" + "os/exec" + + . "github.com/onsi/gomega" +) + +func HelmInstallChart(release, ns string) { + args := []string{ + "install", release, "./" + TestContext.ChartPath, + "--namespace", ns, + "--values", TestContext.ChartValues, "--wait", + "--kube-context", TestContext.KubeContext, + "--set", fmt.Sprintf("manager.tag=%s", TestContext.OperatorImageTag), + } + + cmd := exec.Command("helm", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + Expect(cmd.Run()).Should(Succeed()) +} + +func HelmPurgeRelease(release, ns string) { + args := []string{ + "delete", release, + "--namespace", ns, + "--kube-context", TestContext.KubeContext, + } + cmd := exec.Command("helm", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + Expect(cmd.Run()).Should(Succeed()) +} diff --git a/test/e2e/framework/portforward/portforward.go b/test/e2e/framework/portforward/portforward.go new file mode 100644 index 00000000..7a5c5f2d --- /dev/null +++ b/test/e2e/framework/portforward/portforward.go @@ -0,0 +1,100 @@ +package portforward + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "strconv" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" +) + +type Tunnel struct { + Local int + Remote int + Namespace string + PodName string + Out io.Writer + stopChan chan struct{} + readyChan chan struct{} + config *rest.Config + client rest.Interface +} + +func NewTunnel(client rest.Interface, config *rest.Config, namespace, podName string, remote int) *Tunnel { + return &Tunnel{ + config: config, + client: client, + Namespace: namespace, + PodName: podName, + Remote: remote, + stopChan: make(chan struct{}, 1), + readyChan: make(chan struct{}, 1), + Out: ioutil.Discard, + } +} + +func (t *Tunnel) ForwardPort() error { + u := t.client.Post(). + Resource("pods"). + Namespace(t.Namespace). + Name(t.PodName). 
+ SubResource("portforward").URL() + + transport, upgrader, err := spdy.RoundTripperFor(t.config) + if err != nil { + return err + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", u) + + local, err := getAvailablePort() + if err != nil { + return fmt.Errorf("could not find an available port: %s", err) + } + t.Local = local + + ports := []string{fmt.Sprintf("%d:%d", t.Local, t.Remote)} + + pf, err := portforward.New(dialer, ports, t.stopChan, t.readyChan, t.Out, t.Out) + if err != nil { + return err + } + + errChan := make(chan error) + go func() { + errChan <- pf.ForwardPorts() + }() + + select { + case err = <-errChan: + return fmt.Errorf("forwarding ports: %v", err) + case <-pf.Ready: + return nil + } +} + +func (t *Tunnel) Close() { + close(t.stopChan) +} + +func getAvailablePort() (int, error) { + l, err := net.Listen("tcp", ":0") + if err != nil { + return 0, err + } + defer l.Close() + + _, p, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + return 0, err + } + port, err := strconv.Atoi(p) + if err != nil { + return 0, err + } + return port, err +} diff --git a/test/e2e/framework/sysbench.go b/test/e2e/framework/sysbench.go new file mode 100644 index 00000000..365f16e6 --- /dev/null +++ b/test/e2e/framework/sysbench.go @@ -0,0 +1,147 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + api "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" +) + +type SysbenchOptions struct { + Timeout time.Duration + Threads int + Tables int + TableSize int +} + +func (o *SysbenchOptions) cmd(leaderHost string) string { + return fmt.Sprintf(`sysbench \ + --db-driver=mysql \ + --mysql-host=%s \ + --mysql-port=3306 \ + --mysql-user=radondb_usr \ + --mysql-password=RadonDB@123 \ + --mysql-db=radondb \ + --report-interval=10 \ + --time=%d \ + --threads=%d \ + --tables=%d \ + --table_size=%d \ + /usr/share/sysbench/oltp_read_write.lua \ + `, + leaderHost, + int(o.Timeout.Seconds()), + o.Threads, + o.Tables, + o.TableSize, + ) +} + +func (f Framework) WaitPodReady(pod *corev1.Pod) { + Eventually(func() bool { + f.Client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, pod) + return pod.Status.ContainerStatuses[0].Ready + }, f.Timeout, POLLING).Should(BeTrue(), "Not ready replicas of pod '%s'", pod.Name) +} + +// func (f Framework) WaitDataReady(cluster *api.MysqlCluster, tables, tableSize int) { +// Eventually(f.TestDataReadiness(cluster, tables, tableSize), 2 * time.Minute, SQLPOLLING).Should(BeTrue(), "data not ready") +// } + +func (f Framework) WaitPrepareFinished(pod *corev1.Pod) { + Eventually(func() *corev1.ContainerStateTerminated { + f.Client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, pod) + return pod.Status.ContainerStatuses[0].State.Terminated + }, f.Timeout, POLLING).ShouldNot(BeNil(), "Not ready replicas of pod '%s'", pod.Name) +} + +func (f Framework) PrepareData(mysqlcluster *api.MysqlCluster, options *SysbenchOptions) { + name := "sysbench-prepare" + cmd := options.cmd(fmt.Sprintf("%s-leader", mysqlcluster.Name)) + cmd = fmt.Sprintf("%s prepare", cmd) + args := []string{cmd} + + By("create 
sysbench pod for preparing data") + pod, err := f.createSysbenchPod(mysqlcluster.Namespace, name, args) + Expect(err).Should(BeNil()) + + By("wait pod ready") + f.WaitPodReady(pod) + Expect(f.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: mysqlcluster.Namespace}, pod)).To(Succeed(), "failed to prepare data") + + By("testing the data readiness") + f.WaitPrepareFinished(pod) +} + +func (f Framework) RunOltpTest(mysqlcluster *api.MysqlCluster, options *SysbenchOptions) { + name := "sysbench-run" + cmd := options.cmd(fmt.Sprintf("%s-leader", mysqlcluster.Name)) + cmd = fmt.Sprintf("%s run", cmd) + args := []string{cmd} + + By("create sysbench pod for testing oltp") + pod, err := f.createSysbenchPod(mysqlcluster.Namespace, name, args) + Expect(err).Should(BeNil()) + + By("wait pod ready") + f.WaitPodReady(pod) + Expect(f.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: mysqlcluster.Namespace}, pod)).To(Succeed(), "failed to run oltp test") +} + +func (f Framework) createSysbenchPod(ns, name string, args []string) (*corev1.Pod, error) { + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + container := corev1.Container{ + Name: "sysbench", + Image: "severalnines/sysbench", + Command: []string{"/bin/bash", "-c", "--"}, + Args: args, + } + + pod.Spec.Containers = append(pod.Spec.Containers, container) + pod.Spec.RestartPolicy = corev1.RestartPolicyNever + + var got *corev1.Pod + if err := wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { + var err error + got, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + if err != nil { + Logf("Unexpected error while creating sysbench pod: %v", err) + return false, nil + } + return true, nil + }); err != nil { + return nil, err + } + return got, nil +} diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 
8fcec266..4db20f57 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -1,12 +1,9 @@ /* Copyright 2016 The Kubernetes Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,82 +15,57 @@ package framework import ( "flag" - "fmt" "os" - "k8s.io/client-go/tools/clientcmd" -) + "github.com/onsi/ginkgo/config" -const ( - // TODO(gry): make sure the default port, do we need it? - defaultHost = "https://127.0.0.1:6443" - - // DefaultNumNodes is the number of nodes. If not specified, then number of nodes is auto-detected - DefaultNumNodes = -1 + "k8s.io/client-go/tools/clientcmd" ) -// TestContextType contains test settings and global state. Due to -// historic reasons, it is a mixture of items managed by the test -// framework itself, cloud providers and individual tests. -// The goal is to move anything not required by the framework -// into the code which uses the settings. -// -// The recommendation for those settings is: -// - They are stored in their own context structure or local -// variables. -// - The standard `flag` package is used to register them. -// The flag name should follow the pattern ..... -// where the prefix is unlikely to conflict with other tests or -// standard packages and each part is in lower camel case. For -// example, test/e2e/storage/csi/context.go could define -// storage.csi.numIterations. 
-// - framework/config can be used to simplify the registration of -// multiple options with a single function call: -// var storageCSI { -// NumIterations `default:"1" usage:"number of iterations"` -// } -// _ config.AddOptions(&storageCSI, "storage.csi") -// - The direct use Viper in tests is possible, but discouraged because -// it only works in test suites which use Viper (which is not -// required) and the supported options cannot be -// discovered by a test suite user. -// -// Test suite authors can use framework/viper to make all command line -// parameters also configurable via a configuration file. type TestContextType struct { + KubeHost string KubeConfig string KubeContext string - Host string - OutputDir string + ReportDir string + + ChartPath string + ChartValues string + + OperatorImageTag string + SidecarImage string - // If set to true test will dump data about the namespace in which test was running. + TimeoutSeconds int DumpLogsOnFailure bool - // Disables dumping cluster log from master and nodes after all tests. - DisableLogDump bool - TimeoutSeconds int } -// TestContext should be used by all tests to access common context data. var TestContext TestContextType -// RegisterCommonFlags registers flags common to all e2e test suites. -// The flag set can be flag.CommandLine (if desired) or a custom -// flag set that then gets passed to viperconfig.ViperizeFlags. -// -// The other Register*Flags methods below can be used to add more -// test-specific flags. However, those settings then get added -// regardless whether the test is actually in the test suite. -// -// For tests that have been converted to registering their -// options themselves, copy flags from test/e2e/framework/config -// as shown in HandleFlags. 
-func RegisterCommonFlags(flags *flag.FlagSet) { - flags.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.") - flags.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'") - flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost)) - flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") - flags.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.") - flags.BoolVar(&TestContext.DisableLogDump, "disable-log-dump", false, "If set to true, logs from master and nodes won't be gathered after test run.") - flag.IntVar(&TestContext.TimeoutSeconds, "pod-wait-timeout", 100, "Timeout to wait for a pod to be ready.") +// Register flags common to all e2e test suites. +func RegisterCommonFlags() { + // Turn on verbose by default to get spec names + config.DefaultReporterConfig.Verbose = true + + // Turn on EmitSpecProgress to get spec progress (especially on interrupt) + config.GinkgoConfig.EmitSpecProgress = true + + // Randomize specs as well as suites + config.GinkgoConfig.RandomizeAllSpecs = true + + flag.StringVar(&TestContext.KubeHost, "kubernetes-host", "", "The kubernetes host, or apiserver, to connect to") + flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. 
Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar)
+	flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kubernetes. If unset, will use value from 'current-context'")
+
+	flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Optional directory to store junit and pod logs output in. If not specified, no junit or logs files will be output")
+
+	flag.StringVar(&TestContext.ChartPath, "operator-chart-path", "../../charts/mysql-operator", "The chart name or path for mysql operator")
+	flag.StringVar(&TestContext.OperatorImageTag, "operator-image-tag", "latest", "Image tag for mysql operator.")
+
+	flag.IntVar(&TestContext.TimeoutSeconds, "pod-wait-timeout", 1200, "Timeout to wait for a pod to be ready.")
+	flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "Dump pods logs when a test fails.")
+}
+
+func RegisterParseFlags() {
+	RegisterCommonFlags()
+	flag.Parse()
+}
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
new file mode 100644
index 00000000..68062fd4
--- /dev/null
+++ b/test/e2e/framework/util.go
@@ -0,0 +1,256 @@
+package framework
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/labels"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+
+	api "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1"
+	"github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework/ginkgowrapper"
+)
+
+const (
+	PodStartTimeout = 20 * time.Minute
+	// How often to Poll pods, nodes and claims.
+ Poll = 2 * time.Second +) + +var log = logf.Log.WithName("framework.util") + +// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. +// Please see NewFramework instead of using this directly. +func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + if labels == nil { + labels = map[string]string{} + } + namespaceObj := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + // use a short name because long names produce long hostnames but + // maximum allowed length by mysql is 60. + // https://dev.mysql.com/doc/refman/8.0/en/change-master-to.html + GenerateName: fmt.Sprintf("e2e-%v-", baseName), + Namespace: "", + Labels: labels, + }, + Status: corev1.NamespaceStatus{}, + } + // Be robust about making the namespace creation call. + var got *corev1.Namespace + if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { + var err error + got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{}) + if err != nil { + Logf("Unexpected error while creating namespace: %v", err) + return false, nil + } + return true, nil + }); err != nil { + return nil, err + } + + return got, nil +} + +func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) { + if TestContext.KubeConfig == "" { + return nil, fmt.Errorf("KubeConfig must be specified to load client config") + } + c, err := clientcmd.LoadFromFile(TestContext.KubeConfig) + if err != nil { + return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error()) + } + if kubeContext != "" { + c.CurrentContext = kubeContext + } + return c, nil +} + +func LoadConfig() (*restclient.Config, error) { + c, err := RestclientConfig(TestContext.KubeContext) + if err != nil { + if TestContext.KubeConfig == "" { + return restclient.InClusterConfig() + } else { + return nil, err + } + } + + return clientcmd.NewDefaultClientConfig(*c, 
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.KubeHost}}).ClientConfig() +} + +func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace)) +} + +func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + return podRunningAndReadyByPhase(*pod) + } +} + +func podRunningAndReadyByPhase(pod corev1.Pod) (bool, error) { + switch pod.Status.Phase { + case corev1.PodFailed, corev1.PodSucceeded: + return false, errors.New("pod completed") + case corev1.PodRunning: + for _, cond := range pod.Status.Conditions { + if cond.Type != corev1.PodReady { + continue + } + return cond.Status == corev1.ConditionTrue, nil + } + return false, errors.New("pod ready condition not found") + } + return false, nil +} + +// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks +// whether there are any pods remaining in a non-terminating state. +func DeleteNS(c clientset.Interface, namespace string, timeout time.Duration) error { + startTime := time.Now() + if err := c.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}); err != nil { + return err + } + + // wait for namespace to delete or timeout. 
+ // err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + // if _, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil { + // if apierrs.IsNotFound(err) { + // return true, nil + // } + // Logf("Error while waiting for namespace to be terminated: %v", err) + // return false, nil + // } + // return false, nil + // }) + + Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime)) + return nil +} + +func Logf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.Info(msg) +} + +func Failf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + ginkgowrapper.Fail(nowStamp()+": "+msg, 2) +} + +func nowStamp() string { + return time.Now().Format(time.StampMilli) +} + +func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(c, namespace, podName, containerName, false) +} + +func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(c, namespace, podName, containerName, true) +} + +// utility function for gomega Eventually +func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { + logs, err := c.CoreV1().RESTClient().Get(). + Resource("pods"). + Namespace(namespace). + Name(podName).SubResource("log"). + Param("container", containerName). + Param("previous", strconv.FormatBool(previous)). + Do(context.TODO()). 
+ Raw() + if err != nil { + return "", err + } + if err == nil && strings.Contains(string(logs), "Internal Error") { + return "", fmt.Errorf("fetched log contains \"Internal Error\": %q", string(logs)) + } + return string(logs), err +} + +func kubectlLogPod(c clientset.Interface, pod corev1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { + for _, container := range pod.Spec.Containers { + if strings.Contains(container.Name, containerNameSubstr) { + // Contains() matches all strings if substr is empty + logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name) + if err != nil { + logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err) + } + plogs, err := getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) + plogs = "PREVIOUS\n" + plogs + if err != nil { + plogs = fmt.Sprintf("Failed to get previous logs for pod %v, container %v, err: %v", pod.Name, container.Name, err) + } + logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName) + logFunc("%s : %s \nSTARTLOG\n%s\nENDLOG for container %v:%v:%v", plogs, containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name) + } + } +} + +func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) + if err != nil { + logFunc("Error getting pods in namespace %q: %v", ns, err) + return + } + logFunc("Running kubectl logs on pods with labels %v in %v", match, ns) + for _, pod := range podList.Items { + kubectlLogPod(c, pod, "", logFunc) + } +} + +func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), 
metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) + if err != nil { + Logf("Error getting pods in namespace %q: %v", ns, err) + return + } + for _, pod := range podList.Items { + kubectlLogPod(c, pod, containerSubstr, logFunc) + } +} + +func NewCluster(name, ns string) *api.MysqlCluster { + two := int32(2) + return &api.MysqlCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.MysqlClusterSpec{ + Replicas: &two, + }, + } + +} + +func NewClusterSecret(name, ns, pw string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + StringData: map[string]string{ + "ROOT_PASSWORD": pw, + }, + } +} diff --git a/test/e2e/reporter.go b/test/e2e/reporter.go new file mode 100644 index 00000000..2481eed1 --- /dev/null +++ b/test/e2e/reporter.go @@ -0,0 +1,151 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "io" + "os" + "strconv" + "time" + + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + clientset "k8s.io/client-go/kubernetes" + + "github.com/radondb/radondb-mysql-kubernetes/test/e2e/framework" +) + +type podLogReporter struct { + namespace string + + logPath string + logFile *os.File + + out io.Writer +} + +// NewLogsPodReporter writes the logs for all pods in the specified namespace. +// if path is specified then the logs are written to that path, else logs are +// written to GinkgoWriter +func NewLogsPodReporter(ns, path string) reporters.Reporter { + return &podLogReporter{ + namespace: ns, + logPath: path, + out: ginkgo.GinkgoWriter, + } +} + +// called when suite starts +func (r *podLogReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, s *types.SuiteSummary) { + if r.logPath != "" { + var err error + r.logFile, err = os.OpenFile(r.logPath, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + fmt.Printf("Failed to open file: %s with error: %s\n", r.logPath, err) + return + } + + r.out = r.logFile + } +} + +// called before BeforeSuite before starting tests +func (r *podLogReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {} + +// called before every test +func (r *podLogReporter) SpecWillRun(specSummary *types.SpecSummary) {} + +// called after every test +func (r *podLogReporter) SpecDidComplete(specSummary *types.SpecSummary) { + // don't output logs if test didn't failed + if specSummary.State <= types.SpecStatePassed { + return + } + + // get the kubernetes client + kubeCfg, err := framework.LoadConfig() + if err != nil { + fmt.Println("Failed to get kubeconfig!") + return + } + + client, err := clientset.NewForConfig(kubeCfg) + if err != nil { + fmt.Println("Failed to create k8s client!") + return + } + + 
fmt.Fprintf(r.out, "## Start test: %v\n", specSummary.ComponentTexts)
+
+	LogPodsWithLabels(client, r.namespace, map[string]string{}, specSummary.RunTime, r.out)
+
+	fmt.Fprintf(r.out, "## END test\n")
+
+}
+
+// called before AfterSuite runs
+func (r *podLogReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}
+
+// called at the end
+func (r *podLogReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	if r.logFile != nil {
+		r.logFile.Close()
+	}
+}
+
+func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, since time.Duration, out io.Writer) {
+	podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
+	if err != nil {
+		fmt.Fprintf(out, "error listing pods: %s", err)
+		return
+	}
+
+	for _, pod := range podList.Items {
+		for _, container := range pod.Spec.Containers {
+			fmt.Fprintf(out, "\n\n===============\nSTART LOGS for %s (%s):\n", pod.Name, container.Name)
+			runLogs(c, ns, pod.Name, container.Name, false, since, out)
+			fmt.Fprintf(out, "\n\n===============\nSTOP LOGS for %s (%s):\n", pod.Name, container.Name)
+		}
+	}
+}
+
+func runLogs(client clientset.Interface, namespace, name, container string, previous bool, sinceStart time.Duration, out io.Writer) error {
+	req := client.CoreV1().RESTClient().Get().
+		Namespace(namespace).
+		Name(name).
+		Resource("pods").
+		SubResource("log").
+		Param("container", container).
+		Param("previous", strconv.FormatBool(previous)).
+ Param("since", strconv.FormatInt(int64(sinceStart.Round(time.Second).Seconds()), 10)) + + readCloser, err := req.Stream(context.TODO()) + if err != nil { + return err + } + + defer readCloser.Close() + _, err = io.Copy(out, readCloser) + return err + +} diff --git a/test/e2e/staticcheck.conf b/test/e2e/staticcheck.conf new file mode 100644 index 00000000..f32065c0 --- /dev/null +++ b/test/e2e/staticcheck.conf @@ -0,0 +1 @@ +checks=[ "all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1001"]