From e9fd6ade6ee977f32237964888cf3a9e9f057b04 Mon Sep 17 00:00:00 2001 From: Andrew Peabody Date: Fri, 1 Nov 2024 18:41:33 +0000 Subject: [PATCH 1/3] chore(CI): migrate tests to CFT --- .kitchen.yml | 10 - build/int.cloudbuild.yaml | 6 +- examples/node_pool/main.tf | 1 + .../safer_cluster_iap_bastion/example.tf | 2 +- test/integration/node_pool/controls/gcloud.rb | 566 ------------------ .../integration/node_pool/controls/kubectl.rb | 102 ---- test/integration/node_pool/inspec.yml | 34 -- test/integration/node_pool/node_pool_test.go | 165 +++++ test/integration/testutils/cai.go | 72 +++ test/setup/iam.tf | 2 + test/setup/main.tf | 3 +- 11 files changed, 246 insertions(+), 717 deletions(-) delete mode 100644 test/integration/node_pool/controls/gcloud.rb delete mode 100644 test/integration/node_pool/controls/kubectl.rb delete mode 100644 test/integration/node_pool/inspec.yml create mode 100644 test/integration/node_pool/node_pool_test.go create mode 100644 test/integration/testutils/cai.go diff --git a/.kitchen.yml b/.kitchen.yml index 5e640e5e5f..62beeda102 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -85,13 +85,3 @@ suites: systems: - name: workload_metadata_config backend: local - - name: "node_pool" - transport: - root_module_directory: test/fixtures/node_pool - verifier: - systems: - - name: node_pool - backend: local - controls: - - gcloud - - kubectl diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index e92fb577a5..6c1322aa1f 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -309,17 +309,17 @@ steps: waitFor: - create-all name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge node-pool-local'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage apply --verbose'] - id: verify node-pool-local waitFor: - converge node-pool-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify node-pool-local'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage verify --verbose'] - id: destroy node-pool-local waitFor: - verify node-pool-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy node-pool-local'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage destroy --verbose'] - id: apply sandbox-enabled-local waitFor: - create-all diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 0efa052976..3713311de6 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -43,6 +43,7 @@ module "gke" { disable_legacy_metadata_endpoints = false cluster_autoscaling = var.cluster_autoscaling deletion_protection = false + service_account = "default" node_pools = [ { diff --git a/test/fixtures/safer_cluster_iap_bastion/example.tf b/test/fixtures/safer_cluster_iap_bastion/example.tf index b4ea3d7650..767e10eac1 100644 --- a/test/fixtures/safer_cluster_iap_bastion/example.tf +++ b/test/fixtures/safer_cluster_iap_bastion/example.tf @@ -1,5 +1,5 @@ /** - * Copyright 2020 Google LLC + * Copyright 2020-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb deleted file mode 100644 index bd2e756b0b..0000000000 --- a/test/integration/node_pool/controls/gcloud.rb +++ /dev/null @@ -1,566 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -project_id = attribute('project_id') -location = attribute('location') -cluster_name = attribute('cluster_name') - -expected_accelerators_count = "1" -expected_accelerators_type = "nvidia-tesla-p4" - -control "gcloud" do - title "Google Compute Engine GKE configuration" - describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - - describe "cluster-autoscaling" do - it "has the expected cluster autoscaling settings" do - expect(data['autoscaling']).to include({ - "autoprovisioningNodePoolDefaults" => including({ - "imageType"=>"COS_CONTAINERD", - "oauthScopes" => %w(https://www.googleapis.com/auth/cloud-platform), - "serviceAccount" => "default" - }), - "autoscalingProfile" => "OPTIMIZE_UTILIZATION", - "enableNodeAutoprovisioning" => true, - "resourceLimits" => [ - { - "maximum" => "20", - "minimum" => "5", - "resourceType" => "cpu" - }, - { - "maximum" => "30", - "minimum" => "10", - "resourceType" => "memory" - } - ] - }) - end - end - - describe "node pools" do - let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" || p['name'] =~ %r{^nap-.*} } } - - it "has 5" do - expect(node_pools.count).to eq 5 - end - - describe "pool-01" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - ) - ) - end - - it "is the expected machine type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "machineType" => "e2-medium", - ), - ) - ) - end - - it "has the expected image type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "imageType" => "COS_CONTAINERD", - ), - ) - ) - end - - it "has autoscaling enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected minimum node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "autoscaling" => including( - "minNodeCount" => 1, - ), - ) - ) - end - - it "has autorepair enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "management" => including( - "autoRepair" => true, - ), - ) - ) - end - - it "has automatic upgrades enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "management" => including( - "autoUpgrade" => true, - ), - ) - ) - end - - it "has the expected metadata" do - 
expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "metadata" => including( - "shutdown-script" => "kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", - "disable-legacy-endpoints" => "false", - ), - ), - ) - ) - end - - it "has the expected labels" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "labels" => { - "all-pools-example" => "true", - "pool-01-example" => "true", - "cluster_name" => cluster_name, - "node_pool" => "pool-01", - }, - ), - ) - ) - end - - it "has the expected network tags" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "tags" => match_array([ - "all-node-example", - "pool-01-example", - "gke-#{cluster_name}", - "gke-#{cluster_name}-pool-01", - ]), - ), - ) - ) - end - - it "has the expected linux node config sysctls" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "linuxNodeConfig" => including( - "sysctls" => including( - "net.core.netdev_max_backlog" => "10000", - "net.core.rmem_max" => "10000" - ) - ) - ) - ) - ) - end - end - - describe "pool-02" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - ) - ) - end - - it "is the expected machine type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "machineType" => "n1-standard-2", - ), - ) - ) - end - - it "has autoscaling enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected minimum node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "autoscaling" => including( - "minNodeCount" => 1, - ), - ) - ) - end - - it "has the expected maximum node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "autoscaling" => including( - "maxNodeCount" => 2, - ), - ) - ) - end - -# TODO: Update/fix this test (manually tested) -# it "has the expected accelerators" do -# expect(data['nodePools']).to include( -# including( -# "name" => "pool-02", -# "config" => including( -# "accelerators" => [{"acceleratorCount" => expected_accelerators_count, -# "acceleratorType" => expected_accelerators_type}], -# ), -# ) -# ) -# end - - it "has the expected disk size" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "diskSizeGb" => 30, - ), - ) - ) - end - - it "has the expected disk type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "diskType" => "pd-standard", - ), - ) - ) - end - - it "has the expected image type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "imageType" => "COS_CONTAINERD", - ), - ) - ) - end - - it "has the expected labels" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "labels" => including( - "all-pools-example" => "true", - "cluster_name" => cluster_name, - "node_pool" => "pool-02", - ) - ), - ) - ) - end - - it "has the expected network tags" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "tags" => match_array([ - "all-node-example", 
- "gke-#{cluster_name}", - "gke-#{cluster_name}-pool-02", - ]) - ), - ) - ) - end - - it "has the expected linux node config sysctls" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "linuxNodeConfig" => including( - "sysctls" => including( - "net.core.netdev_max_backlog" => "10000" - ) - ) - ) - ) - ) - end - end - - describe "pool-03" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - ) - ) - end - - it "is the expected machine type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "machineType" => "n1-standard-2", - ), - ) - ) - end - - it "has autoscaling disabled" do - expect(data['nodePools']).not_to include( - including( - "name" => "pool-03", - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "initialNodeCount" => 2 - ) - ) - end - - it "has autorepair enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "management" => including( - "autoRepair" => true, - ), - ) - ) - end - - it "has automatic upgrades enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "management" => including( - "autoUpgrade" => true, - ), - ) - ) - end - - it "has the expected labels" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "labels" => { - "all-pools-example" => "true", - "cluster_name" => cluster_name, - "node_pool" => "pool-03", - "sandbox.gke.io/runtime"=>"gvisor" - }, - ), - ) - ) - end - - it "has the expected network tags" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "tags" => match_array([ - "all-node-example", - "gke-#{cluster_name}", - "gke-#{cluster_name}-pool-03", - ]), - ), - ) - ) - end - - it "has the expected pod range" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "networkConfig" => including( - "podIpv4CidrBlock" => "172.16.0.0/18", - "podRange" => "test" - ) - ) - ) - end - - it "has the expected image" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "imageType" => "COS_CONTAINERD", - ), - ) - ) - end - - it "has the expected kubelet config" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "kubeletConfig" => including( - "cpuManagerPolicy" => "static", - "cpuCfsQuota" => true - ) - ) - ) - ) - end - - it "has the expected linux node config sysctls" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "linuxNodeConfig" => including( - "sysctls" => including( - "net.core.netdev_max_backlog" => "20000" - ) - ) - ) - ) - ) - end - end - - describe "pool-04" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-04", - ) - ) - end - - it "has queued_provisioning enabled" do - expect(data['nodePools']).not_to include( - including( - "name" => "pool-04", - "queued_provisioning" => including( - "enabled" => true, - ), - ) - ) - end - end - - describe "pool-05" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-05", - ) - ) - end - - it "has enable_nested_virtualization enabled" do - expect(data['nodePools']).not_to include( - including( 
- "name" => "pool-05", - "advanced_machine_features" => including( - "enable_nested_virtualization" => true, - ), - ) - ) - end - end - end - end - - describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - - it "pool-03 has nodes in correct locations" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "locations" => match_array([ - "#{location}-b", - "#{location}-c", - ]), - ) - ) - end - end -end diff --git a/test/integration/node_pool/controls/kubectl.rb b/test/integration/node_pool/controls/kubectl.rb deleted file mode 100644 index 811ebcda0f..0000000000 --- a/test/integration/node_pool/controls/kubectl.rb +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -require 'kubeclient' -require 'rest-client' - -require 'base64' - -kubernetes_endpoint = attribute('kubernetes_endpoint') -client_token = attribute('client_token') -ca_certificate = attribute('ca_certificate') - -control "kubectl" do - title "Kubernetes configuration" - - describe "kubernetes" do - let(:kubernetes_http_endpoint) { "https://#{kubernetes_endpoint}/api" } - let(:client) do - cert_store = OpenSSL::X509::Store.new - cert_store.add_cert(OpenSSL::X509::Certificate.new(Base64.decode64(ca_certificate))) - Kubeclient::Client.new( - kubernetes_http_endpoint, - "v1", - ssl_options: { - cert_store: cert_store, - verify_ssl: OpenSSL::SSL::VERIFY_PEER, - }, - auth_options: { - bearer_token: Base64.decode64(client_token), - }, - ) - end - - describe "nodes" do - let(:all_nodes) { client.get_nodes } - let(:taints) { nodes.first.spec.taints.map { |t| t.to_h.select { |k, v| [:effect, :key, :value].include?(k.to_sym) } } } - - describe "pool-01" do - let(:nodes) do - all_nodes.select { |n| n.metadata.labels.node_pool == "pool-01" } - end - - it "has the expected taints" do - expect(taints).to eq([ - { - effect: "PreferNoSchedule", - key: "all-pools-example", - value: "true", - }, - { - effect: "PreferNoSchedule", - key: "pool-01-example", - value: "true", - }, - ]) - end - end - - describe "pool-02" do - let(:nodes) do - all_nodes.select { |n| n.metadata.labels.node_pool == "pool-02" } - end - - it "has the expected taints" do - expect(taints).to include( - { - effect: "PreferNoSchedule", - key: "all-pools-example", - value: "true", - } - ) - end - end - describe "pool-03" do - let(:nodes) do - all_nodes.select { |n| n.metadata.labels.node_pool == "pool-03" } - end - - it "has the expected taints" do - expect(taints).to include( - { - effect: "PreferNoSchedule", - key: "all-pools-example", - value: "true", - } - ) - end - end - end - end -end diff --git a/test/integration/node_pool/inspec.yml b/test/integration/node_pool/inspec.yml deleted file mode 100644 index b915e7d119..0000000000 --- 
a/test/integration/node_pool/inspec.yml +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: node_pool -attributes: - - name: project_id - required: true - type: string - - name: location - required: true - type: string - - name: cluster_name - required: true - type: string - - name: kubernetes_endpoint - required: true - type: string - - name: client_token - required: true - type: string - - name: ca_certificate - required: true - type: string diff --git a/test/integration/node_pool/node_pool_test.go b/test/integration/node_pool/node_pool_test.go new file mode 100644 index 0000000000..9aae6f5143 --- /dev/null +++ b/test/integration/node_pool/node_pool_test.go @@ -0,0 +1,165 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
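+// Package node_pool verifies the node_pool example with the CFT blueprint-test
+// framework, replacing the InSpec controls removed above.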
+package node_pool + +import ( + "fmt" + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/stretchr/testify/assert" + "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils" + gkeutils "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/utils" +) + +func TestNodePool(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t, + tft.WithRetryableTerraformErrors(testutils.RetryableTransientErrors, 3, 2*time.Minute), + ) + + bpt.DefineVerify(func(assert *assert.Assertions) { + // Skipping Default Verify as the Verify Stage fails due to change in Client Cert Token + // bpt.DefaultVerify(assert) + gkeutils.TGKEVerify(t, bpt, assert) // Verify Resources + + projectId := bpt.GetStringOutput("project_id") + location := bpt.GetStringOutput("location") + clusterName := bpt.GetStringOutput("cluster_name") + + //cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId) + clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) + cluster := gkeutils.GetProjectResources(t, projectId, gkeutils.WithAssetType("container.googleapis.com/Cluster")).Get("#(name=\"" + clusterResourceName + "\").resource.data") + + // Cluster + assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running") + assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type") + assert.Equal("[\n \"https://www.googleapis.com/auth/cloud-platform\"\n ]", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes").String(), "has the expected oauth scopes") + assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account") + assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile") + assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning") + assert.JSONEq(`[ + { + "maximum": "20", + "minimum": "5", + "resourceType": "cpu" + }, + { + "maximum": "30", + "minimum": "10", + "resourceType": "memory" + } + ]`, + cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits") + + // Pool-01 + assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists") + assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type") + assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image") + assert.True(cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.enabled").Bool(), "has autoscaling enabled") + assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") + assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoRepair").Bool(), "has autorepair enabled") + assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") + 
assert.Equal("kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.shutdown-script").String(), "pool-2 exists") + assert.Equal("false", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.disable-legacy-endpoints").String(), "pool-2 exists") + assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "pool-01-example": "true", "cluster_name": "%s", "node_pool": "pool-01"}`, clusterName), + cluster.Get("nodePools.#(name==\"pool-01\").config.labels").String(), "has the expected labels") + assert.ElementsMatch([]string{"all-node-example", "pool-01-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-01", clusterName)}, + cluster.Get("nodePools.#(name==\"pool-01\").config.tags").Value().([]interface{}), "has the expected network tags") + assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl") + assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl") + + // Pool-02 + assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists") + assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type") + assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled") + assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") + assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.maxNodeCount").Int(), "has the expected maximum node count") + assert.Equal(int64(30), cluster.Get("nodePools.#(name==\"pool-02\").config.diskSizeGb").Int(), "has the expected disk size") + assert.Equal("pd-standard", cluster.Get("nodePools.#(name==\"pool-02\").config.diskType").String(), "has the expected disk type") + assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-02\").config.imageType").String(), "has the expected image") + assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-02"}`, clusterName), + cluster.Get("nodePools.#(name==\"pool-02\").config.labels").String(), "has the expected labels") + assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-02", clusterName)}, + cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags") + assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") + + // Pool-03 + assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists") + assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations") + assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type") + assert.False(cluster.Get("nodePools.#(name==\"pool-03\").autoscaling.enabled").Bool(), "has 
autoscaling enabled") + assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-03\").initialNodeCount").Int(), "has the expected inital node count") + assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoRepair").Bool(), "has autorepair enabled") + assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") + assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-03", "sandbox.gke.io/runtime": "gvisor"}`, clusterName), + cluster.Get("nodePools.#(name==\"pool-03\").config.labels").String(), "has the expected labels") + assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-03", clusterName)}, + cluster.Get("nodePools.#(name==\"pool-03\").config.tags").Value().([]interface{}), "has the expected network tags") + assert.Equal("172.16.0.0/18", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podIpv4CidrBlock").String(), "has the expected pod range") + assert.Equal("test", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podRange").String(), "has the expected pod range") + assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-03\").config.imageType").String(), "has the expected image") + assert.Equal("static", cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuManagerPolicy").String(), "has the expected cpuManagerPolicy kubelet config") + assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config") + assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") + + // Pool-04 + assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists") + assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled") + + // Pool-05 + assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists") + assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled") + + // K8s + gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId) + k8sOpts := k8s.KubectlOptions{} + clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json") + assert.NoError(err) + clusterNodes := testutils.ParseKubectlJSONResult(t, clusterNodesOp) + assert.JSONEq(`[ + { + "effect": "PreferNoSchedule", + "key": "all-pools-example", + "value": "true" + }, + { + "effect": "PreferNoSchedule", + "key": "pool-01-example", + "value": "true" + } + ]`, + clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-01\").spec.taints").String(), "has the expected taints") + assert.JSONEq(`[ + { + "effect": "PreferNoSchedule", + "key": "all-pools-example", + "value": "true" + } + ]`, + clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint") + assert.JSONEq(`[ + { + "effect": "PreferNoSchedule", + "key": "all-pools-example", + "value": "true" + } + ]`, + clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint") + }) + + 
+	bpt.Test()
+}
diff --git a/test/integration/testutils/cai.go b/test/integration/testutils/cai.go
new file mode 100644
index 0000000000..69f819d67d
--- /dev/null
+++ b/test/integration/testutils/cai.go
@@ -0,0 +1,72 @@
+/**
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package utils provides helpers for interacting with Cloud Asset Inventory (CAI)
+package utils
+
+import (
+	"testing"
+	"time"
+
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud"
+	"github.com/tidwall/gjson"
+)
+
+type CmdCfg struct {
+	sleep     int    // minutes to sleep prior to CAI retrieval. default: 2
+	assetType string // asset type to retrieve. default: all
+}
+
+type cmdOption func(*CmdCfg)
+
+// newCmdConfig sets defaults and applies any options
+func newCmdConfig(opts ...cmdOption) *CmdCfg {
+	caiOpts := &CmdCfg{
+		sleep:     2,
+		assetType: "",
+	}
+
+	for _, opt := range opts {
+		opt(caiOpts)
+	}
+
+	return caiOpts
+}
+
+// WithSleep sets a custom sleep in minutes
+func WithSleep(sleep int) cmdOption {
+	return func(f *CmdCfg) {
+		f.sleep = sleep
+	}
+}
+
+// WithAssetType sets the asset type to retrieve
+func WithAssetType(assetType string) cmdOption {
+	return func(f *CmdCfg) {
+		f.assetType = assetType
+	}
+}
+
+// GetProjectResources returns the cloud asset inventory resources for a project as a gjson.Result
+func GetProjectResources(t testing.TB, project string, opts ...cmdOption) gjson.Result {
+	caiOpts := newCmdConfig(opts...)
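+	// CAI indexing lags resource creation, so pause (default 2 minutes) before
+	// listing assets to give the inventory time to catch up.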
+ time.Sleep(time.Duration(caiOpts.sleep) * time.Minute) + if caiOpts.assetType != "" { + return gcloud.Runf(t, "asset list --project=%s --asset-types=%s --content-type=resource", project, caiOpts.assetType) + } else { + return gcloud.Runf(t, "asset list --project=%s --content-type=resource", project) + } +} diff --git a/test/setup/iam.tf b/test/setup/iam.tf index fe97685dd8..fb9f30eb04 100644 --- a/test/setup/iam.tf +++ b/test/setup/iam.tf @@ -34,6 +34,8 @@ locals { "roles/iam.roleAdmin", "roles/iap.admin", "roles/gkehub.admin", + "roles/cloudasset.viewer", + "roles/serviceusage.serviceUsageConsumer" ] # roles as documented https://cloud.google.com/service-mesh/docs/installation-permissions diff --git a/test/setup/main.tf b/test/setup/main.tf index b94c404385..cef4cd1c41 100644 --- a/test/setup/main.tf +++ b/test/setup/main.tf @@ -39,7 +39,8 @@ locals { "iamcredentials.googleapis.com", "gkeconnect.googleapis.com", "privateca.googleapis.com", - "gkehub.googleapis.com" + "gkehub.googleapis.com", + "cloudasset.googleapis.com" ] } From 8b7ddd484068b821704e4afc522a2b5c1f90ec3c Mon Sep 17 00:00:00 2001 From: Andrew Peabody Date: Fri, 15 Nov 2024 00:43:39 +0000 Subject: [PATCH 2/3] multi asset types validate all paths --- build/int.cloudbuild.yaml | 282 +----- test/fixtures/node_pool/outputs.tf | 4 + .../safer_cluster_iap_bastion/example.tf | 2 +- test/integration/node_pool/node_pool_test.go | 72 +- .../node_pool/testdata/TestNodePool.json | 807 ++++++++++++++++++ test/integration/testutils/cai.go | 72 -- 6 files changed, 874 insertions(+), 365 deletions(-) create mode 100644 test/integration/node_pool/testdata/TestNodePool.json delete mode 100644 test/integration/testutils/cai.go diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index 6c1322aa1f..c8bab6c35e 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -24,290 +24,14 @@ steps: - 'TF_VAR_org_id=$_ORG_ID' - 'TF_VAR_folder_id=$_FOLDER_ID' - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' -- id: init-all +- id: init node-pool-local waitFor: - prepare name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run all --stage init --verbose'] -- id: create-all - waitFor: - - init-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] -- id: apply disable-client-cert - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage apply --verbose --test-dir test/integration'] -- id: verify disable-client-cert - waitFor: - - apply disable-client-cert - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage verify --verbose --test-dir test/integration'] -- id: teardown disable-client-cert - waitFor: - - verify disable-client-cert - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage teardown --verbose --test-dir test/integration'] -- id: apply shared-vpc-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: 
['/bin/bash', '-c', 'cft test run TestSharedVPC --stage apply --verbose --test-dir test/integration'] -- id: verify shared-vpc-local - waitFor: - - apply shared-vpc-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage verify --verbose --test-dir test/integration'] -- id: destroy shared-vpc-local - waitFor: - - verify shared-vpc-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage teardown --verbose --test-dir test/integration'] -- id: apply safer-cluster-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage apply --verbose'] -- id: verify safer-cluster-local - waitFor: - - apply safer-cluster-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage verify --verbose'] -- id: destroy safer-cluster-local - waitFor: - - verify safer-cluster-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage destroy --verbose'] -- id: apply simple-regional-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage apply --verbose'] -- id: verify simple-regional-local - waitFor: - - apply simple-regional-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage verify --verbose'] -- id: destroy simple-regional-local - waitFor: - - verify simple-regional-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage teardown --verbose'] -- id: apply simple-regional-private-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage apply --verbose'] -- id: verify simple-regional-private-local - waitFor: - - apply simple-regional-private-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage verify --verbose'] -- id: destroy simple-regional-private-local - waitFor: - - verify simple-regional-private-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage teardown --verbose'] -- id: apply simple-regional-cluster-autoscaling - waitFor: - - create-all - - destroy simple-regional-private-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage apply --verbose'] -- id: verify simple-regional-cluster-autoscaling - waitFor: - - apply 
simple-regional-cluster-autoscaling - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage verify --verbose'] -- id: destroy simple-regional-cluster-autoscaling - waitFor: - - verify simple-regional-cluster-autoscaling - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage teardown --verbose'] -- id: apply simple-regional-with-kubeconfig-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage apply --verbose'] -- id: verify simple-regional-with-kubeconfig-local - waitFor: - - apply simple-regional-with-kubeconfig-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage verify --verbose'] -- id: destroy simple-regional-with-kubeconfig-local - waitFor: - - verify simple-regional-with-kubeconfig-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage teardown --verbose'] -- id: converge simple-regional-with-gateway-api-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-gateway-api-local'] -- id: verify simple-regional-with-gateway-api-local - waitFor: - - converge simple-regional-with-gateway-api-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-gateway-api-local'] -- id: destroy simple-regional-with-gateway-api-local - waitFor: - - verify simple-regional-with-gateway-api-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-gateway-api-local'] -- id: apply simple-regional-with-networking-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage apply --verbose'] -- id: verify simple-regional-with-networking-local - waitFor: - - apply simple-regional-with-networking-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage verify --verbose'] -- id: destroy simple-regional-with-networking-local - waitFor: - - verify simple-regional-with-networking-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage teardown --verbose'] -- id: apply simple-zonal-local - waitFor: - - create-all - name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage apply --verbose'] -- id: verify simple-zonal-local - waitFor: - - apply simple-zonal-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage verify --verbose'] -- id: destroy simple-zonal-local - waitFor: - - verify simple-zonal-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage teardown --verbose'] -- id: apply simple-zonal-private-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage apply --verbose'] -- id: verify simple-zonal-private-local - waitFor: - - apply simple-zonal-private-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage verify --verbose'] -- id: destroy simple-zonal-private-local - waitFor: - - verify simple-zonal-private-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage teardown --verbose'] -- id: converge stub-domains-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-local'] -- id: verify stub-domains-local - waitFor: - - converge stub-domains-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-local'] -- id: destroy stub-domains-local - waitFor: - - verify stub-domains-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-local'] -- id: converge upstream-nameservers-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge upstream-nameservers-local'] -- id: verify upstream-nameservers-local - waitFor: - - converge upstream-nameservers-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify upstream-nameservers-local'] -- id: destroy upstream-nameservers-local - waitFor: - - verify upstream-nameservers-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy upstream-nameservers-local'] -- id: converge stub-domains-upstream-nameservers-local - waitFor: - - create-all - name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-upstream-nameservers-local'] -- id: verify stub-domains-upstream-nameservers-local - waitFor: - - converge stub-domains-upstream-nameservers-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-upstream-nameservers-local'] -- id: destroy stub-domains-upstream-nameservers-local - waitFor: - - verify stub-domains-upstream-nameservers-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-upstream-nameservers-local'] -- id: converge workload-metadata-config-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-metadata-config-local'] -- id: verify workload-metadata-config-local - waitFor: - - converge workload-metadata-config-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-metadata-config-local'] -- id: destroy workload-metadata-config-local - waitFor: - - verify workload-metadata-config-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] -- id: apply beta-cluster - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage apply --verbose --test-dir test/integration'] -- id: verify beta-cluster - waitFor: - - apply beta-cluster - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage verify --verbose --test-dir test/integration'] -- id: teardown beta-cluster - waitFor: - - verify beta-cluster - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage teardown --verbose --test-dir test/integration'] -- id: apply simple-windows-node-pool-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage apply --verbose --test-dir test/integration'] -- id: verify simple-windows-node-pool-local - waitFor: - - apply simple-windows-node-pool-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage verify --verbose --test-dir test/integration'] -- id: destroy simple-windows-node-pool-local - waitFor: - - verify simple-windows-node-pool-local - name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage teardown --verbose --test-dir test/integration'] -- id: apply deploy-service-local - waitFor: - - create-all - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage apply --verbose'] -- id: verify deploy-service-local - waitFor: - - apply deploy-service-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage verify --verbose'] -- id: destroy deploy-service-local - waitFor: - - verify deploy-service-local - name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage destroy --verbose'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage init --verbose'] - id: converge node-pool-local waitFor: - - create-all + - init node-pool-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage apply --verbose'] - id: verify node-pool-local diff --git a/test/fixtures/node_pool/outputs.tf b/test/fixtures/node_pool/outputs.tf index a62317bf64..74103ff0be 100644 --- a/test/fixtures/node_pool/outputs.tf +++ b/test/fixtures/node_pool/outputs.tf @@ -83,3 +83,7 @@ output "service_account" { output "registry_project_ids" { value = var.registry_project_ids } + +output "random_string" { + value = random_string.suffix.result +} diff --git a/test/fixtures/safer_cluster_iap_bastion/example.tf b/test/fixtures/safer_cluster_iap_bastion/example.tf index 767e10eac1..b4ea3d7650 100644 --- a/test/fixtures/safer_cluster_iap_bastion/example.tf +++ b/test/fixtures/safer_cluster_iap_bastion/example.tf @@ -1,5 +1,5 @@ /** - * Copyright 2020-2024 Google LLC + * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/test/integration/node_pool/node_pool_test.go b/test/integration/node_pool/node_pool_test.go index 9aae6f5143..92d6caaf53 100644 --- a/test/integration/node_pool/node_pool_test.go +++ b/test/integration/node_pool/node_pool_test.go @@ -15,15 +15,18 @@ package node_pool import ( "fmt" + "slices" "testing" "time" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/assert" "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils" - gkeutils "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/utils" ) func TestNodePool(t *testing.T) { @@ -34,20 +37,33 @@ func TestNodePool(t *testing.T) { bpt.DefineVerify(func(assert *assert.Assertions) { // Skipping Default Verify as the Verify Stage fails due to change in Client Cert Token // bpt.DefaultVerify(assert) - gkeutils.TGKEVerify(t, bpt, assert) // Verify Resources + testutils.TGKEVerify(t, bpt, assert) // Verify Resources projectId := bpt.GetStringOutput("project_id") location := bpt.GetStringOutput("location") clusterName := bpt.GetStringOutput("cluster_name") + randomString := bpt.GetStringOutput("random_string") + kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint") - //cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId) + // Retrieve Project CAI + projectCAI := cai.GetProjectResources(t, projectId, cai.WithAssetTypes([]string{"container.googleapis.com/Cluster", "k8s.io/Node"})) + t.Log(projectCAI.Raw) + // Retrieve Cluster from CAI clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) - cluster := gkeutils.GetProjectResources(t, projectId, gkeutils.WithAssetType("container.googleapis.com/Cluster")).Get("#(name=\"" + clusterResourceName + "\").resource.data") - // Cluster + if !projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data").Exists() { + t.Fatalf("Cluster not found: %s", clusterResourceName) + } + + cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data") + t.Log(cluster.Raw) + // Equivalent gcloud describe command (classic) + // cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId) + + // Cluster Assertions (classic) assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running") assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type") - assert.Equal("[\n \"https://www.googleapis.com/auth/cloud-platform\"\n ]", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes").String(), "has the expected oauth scopes") + assert.Equal("https://www.googleapis.com/auth/cloud-platform", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes.0").String(), "has the expected oauth scopes") assert.Equal("default", 
cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account") assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile") assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning") @@ -65,7 +81,27 @@ func TestNodePool(t *testing.T) { ]`, cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits") - // Pool-01 + // Cluster Assertions using golden image (TestNodePool.json) with sanitizer + g := golden.NewOrUpdate(t, cluster.String(), + golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")), + golden.WithSanitizer(golden.StringSanitizer(randomString, "RANDOM_STRING")), + golden.WithSanitizer(golden.StringSanitizer(kubernetesEndpoint, "KUBERNETES_ENDPOINT")), + ) + checkPaths := utils.GetTerminalJSONPaths(g.GetJSON()) + + exemptPaths := []string{"nodePools"} + checkPaths = slices.DeleteFunc(checkPaths, func(s string) bool { + return slices.Contains(exemptPaths, s) + }) + g.JSONPathEqs(assert, cluster, checkPaths) + + // NodePool Assertions + nodePools := []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"} + for _, nodePool := range nodePools { + g.JSONPathEqs(assert, cluster.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)), utils.GetTerminalJSONPaths(g.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)))) + } + + // nodePool-01 Assertions assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists") assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type") assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image") @@ -82,7 +118,7 @@ func TestNodePool(t *testing.T) { assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl") assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl") - // Pool-02 + // nodePool-02 Assertions assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists") assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type") assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled") @@ -97,7 +133,7 @@ func TestNodePool(t *testing.T) { cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags") assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") - // Pool-03 + // nodwPool-03 Assertions assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists") assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations") assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type") @@ 
-116,20 +152,20 @@ func TestNodePool(t *testing.T) { assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config") assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") - // Pool-04 + // nodePool-04 Assertions assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists") assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled") - // Pool-05 + // nodePool-05 Assertions assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists") assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled") - // K8s + // K8s Assertions gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId) k8sOpts := k8s.KubectlOptions{} clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json") assert.NoError(err) - clusterNodes := testutils.ParseKubectlJSONResult(t, clusterNodesOp) + clusterNodes := utils.ParseKubectlJSONResult(t, clusterNodesOp) assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -148,6 +184,11 @@ func TestNodePool(t *testing.T) { "effect": "PreferNoSchedule", "key": "all-pools-example", "value": "true" + }, + { + "effect": "NoSchedule", + "key": "nvidia.com/gpu", + "value": "present" } ]`, clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint") @@ -156,6 +197,11 @@ func TestNodePool(t *testing.T) { "effect": "PreferNoSchedule", "key": "all-pools-example", "value": "true" + }, + { + "effect": "NoSchedule", + "key": "sandbox.gke.io/runtime", + "value": "gvisor" } ]`, clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint") diff --git a/test/integration/node_pool/testdata/TestNodePool.json b/test/integration/node_pool/testdata/TestNodePool.json new file mode 100644 index 0000000000..15a7a12de3 --- /dev/null +++ b/test/integration/node_pool/testdata/TestNodePool.json @@ -0,0 +1,807 @@ +{ + "addonsConfig": { + "configConnectorConfig": {}, + "dnsCacheConfig": {}, + "gcePersistentDiskCsiDriverConfig": { + "enabled": true + }, + "gcpFilestoreCsiDriverConfig": {}, + "gkeBackupAgentConfig": {}, + "horizontalPodAutoscaling": {}, + "httpLoadBalancing": {}, + "kubernetesDashboard": { + "disabled": true + }, + "networkPolicyConfig": { + "disabled": true + } + }, + "autopilot": {}, + "autoscaling": { + "autoprovisioningNodePoolDefaults": { + "diskSizeGb": 100, + "diskType": "pd-standard", + "imageType": "COS_CONTAINERD", + "management": { + "autoRepair": true, + "autoUpgrade": true, + "upgradeOptions": {} + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "serviceAccount": "default", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "upgradeSettings": { + "strategy": "SURGE" + } + }, + "autoscalingProfile": "OPTIMIZE_UTILIZATION", + "enableNodeAutoprovisioning": true, + "resourceLimits": [ + { + "maximum": "20", + "minimum": "5", + "resourceType": "cpu" + }, + { + "maximum": "30", + "minimum": "10", + "resourceType": "memory" + } + ] + }, + 
"binaryAuthorization": {}, + "clusterIpv4Cidr": "192.168.0.0/18", + "controlPlaneEndpointsConfig": { + "dnsEndpointConfig": { + "allowExternalTraffic": false + }, + "ipEndpointsConfig": { + "authorizedNetworksConfig": { + "gcpPublicCidrsAccessEnabled": true + }, + "enablePublicEndpoint": true, + "enabled": true, + "privateEndpoint": "10.0.0.2", + "publicEndpoint": "KUBERNETES_ENDPOINT" + } + }, + "currentNodeCount": 8, + "databaseEncryption": { + "currentState": "CURRENT_STATE_DECRYPTED", + "state": "DECRYPTED" + }, + "defaultMaxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "endpoint": "KUBERNETES_ENDPOINT", + "enterpriseConfig": { + "clusterTier": "STANDARD" + }, + "identityServiceConfig": {}, + "ipAllocationPolicy": { + "clusterIpv4Cidr": "192.168.0.0/18", + "clusterIpv4CidrBlock": "192.168.0.0/18", + "clusterSecondaryRangeName": "cft-gke-test-pods-RANDOM_STRING", + "defaultPodIpv4RangeUtilization": 0.0624, + "podCidrOverprovisionConfig": {}, + "servicesIpv4Cidr": "192.168.64.0/18", + "servicesIpv4CidrBlock": "192.168.64.0/18", + "servicesSecondaryRangeName": "cft-gke-test-services-RANDOM_STRING", + "stackType": "IPV4", + "useIpAliases": true + }, + "labelFingerprint": "78cdf2f6", + "legacyAbac": {}, + "location": "europe-west4", + "locations": [ + "europe-west4-b" + ], + "loggingConfig": { + "componentConfig": { + "enableComponents": [ + "SYSTEM_COMPONENTS", + "WORKLOADS" + ] + } + }, + "loggingService": "logging.googleapis.com/kubernetes", + "maintenancePolicy": { + "resourceVersion": "ce912209", + "window": { + "dailyMaintenanceWindow": { + "duration": "PT4H0M0S", + "startTime": "05:00" + } + } + }, + "masterAuth": { + "clientCertificateConfig": {} + }, + "masterAuthorizedNetworksConfig": { + "gcpPublicCidrsAccessEnabled": true + }, + "meshCertificates": { + "enableCertificates": false + }, + "monitoringConfig": { + "advancedDatapathObservabilityConfig": {}, + "componentConfig": { + "enableComponents": [ + "SYSTEM_COMPONENTS", + "STORAGE", + "HPA", + "POD", + "DAEMONSET", + "DEPLOYMENT", + "STATEFULSET", + "CADVISOR", + "KUBELET" + ] + }, + "managedPrometheusConfig": { + "enabled": true + } + }, + "monitoringService": "monitoring.googleapis.com/kubernetes", + "name": "node-pool-cluster-RANDOM_STRING", + "network": "cft-gke-test-RANDOM_STRING", + "networkConfig": { + "defaultSnatStatus": {}, + "network": "projects/PROJECT_ID/global/networks/cft-gke-test-RANDOM_STRING", + "serviceExternalIpsConfig": {}, + "subnetwork": "projects/PROJECT_ID/regions/europe-west4/subnetworks/cft-gke-test-RANDOM_STRING" + }, + "nodeConfig": { + "diskSizeGb": 100, + "diskType": "pd-balanced", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "gcfsConfig": {}, + "imageType": "COS_CONTAINERD", + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "e2-medium", + "metadata": { + "disable-legacy-endpoints": "true" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/cloud-platform" + ], + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-default-pool", + "all-node-example", + "pool-01-example" + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "nodePoolDefaults": { + "nodeConfigDefaults": { + "gcfsConfig": {}, + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "nodeKubeletConfig": {} + } + }, + "nodePools": [ + { + "autoscaling": 
{}, + "config": { + "diskSizeGb": 100, + "diskType": "pd-balanced", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "gcfsConfig": {}, + "imageType": "COS_CONTAINERD", + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "e2-medium", + "metadata": { + "disable-legacy-endpoints": "true" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/cloud-platform" + ], + "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-default-pool", + "all-node-example", + "pool-01-example" + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "locations": [ + "europe-west4-b" + ], + "management": { + "autoRepair": true, + "autoUpgrade": true + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "default-pool", + "networkConfig": { + "podIpv4CidrBlock": "192.168.0.0/18", + "podIpv4RangeUtilization": 0.0624, + "podRange": "cft-gke-test-pods-RANDOM_STRING" + }, + "podIpv4CidrSize": 24, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/default-pool", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + }, + { + "autoscaling": { + "autoprovisioned": true, + "enabled": true, + "locationPolicy": "BALANCED", + "maxNodeCount": 1000 + }, + "config": { + "diskSizeGb": 100, + "diskType": "pd-standard", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "imageType": "COS_CONTAINERD", + "machineType": "e2-medium", + "metadata": { + "disable-legacy-endpoints": "true" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "serviceAccount": "default", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "locations": [ + "europe-west4-b" + ], + "management": { + "autoRepair": true, + "autoUpgrade": true, + "upgradeOptions": {} + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "nap-e2-medium-1d469r1p", + "networkConfig": { + "podIpv4CidrBlock": "192.168.0.0/18", + "podIpv4RangeUtilization": 0.0624, + "podRange": "cft-gke-test-pods-RANDOM_STRING" + }, + "placementPolicy": {}, + "podIpv4CidrSize": 24, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/nap-e2-medium-1d469r1p", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + }, + { + "autoscaling": { + "enabled": true, + "locationPolicy": "BALANCED", + "maxNodeCount": 2, + "minNodeCount": 1 + }, + "config": { + "diskSizeGb": 100, + "diskType": "pd-standard", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "gcfsConfig": {}, + "imageType": "COS_CONTAINERD", + "labels": { + "all-pools-example": "true", + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "node_pool": "pool-01", + "pool-01-example": "true" + }, + "linuxNodeConfig": { + "sysctls": { + "net.core.netdev_max_backlog": "10000", + "net.core.rmem_max": "10000" + } + }, + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "e2-medium", + "metadata": { + "cluster_name": "node-pool-cluster-RANDOM_STRING", + 
"disable-legacy-endpoints": "false", + "node_pool": "pool-01", + "shutdown-script": "kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-pool-01", + "all-node-example", + "pool-01-example" + ], + "taints": [ + { + "effect": "PREFER_NO_SCHEDULE", + "key": "all-pools-example", + "value": "true" + }, + { + "effect": "PREFER_NO_SCHEDULE", + "key": "pool-01-example", + "value": "true" + } + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "initialNodeCount": 1, + "locations": [ + "europe-west4-b" + ], + "management": { + "autoRepair": true, + "autoUpgrade": true + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "pool-01", + "networkConfig": { + "podIpv4CidrBlock": "192.168.0.0/18", + "podIpv4RangeUtilization": 0.0624, + "podRange": "cft-gke-test-pods-RANDOM_STRING" + }, + "podIpv4CidrSize": 24, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/pool-01", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + }, + { + "autoscaling": { + "enabled": true, + "locationPolicy": "BALANCED", + "maxNodeCount": 2, + "minNodeCount": 1 + }, + "config": { + "accelerators": [ + { + "acceleratorCount": "1", + "acceleratorType": "nvidia-tesla-p4", + "gpuDriverInstallationConfig": { + "gpuDriverVersion": "DEFAULT" + } + } + ], + "diskSizeGb": 30, + "diskType": "pd-standard", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "imageType": "COS_CONTAINERD", + "labels": { + "all-pools-example": "true", + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "node_pool": "pool-02" + }, + "linuxNodeConfig": { + "sysctls": { + "net.core.netdev_max_backlog": "10000" + } + }, + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "n1-standard-2", + "metadata": { + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "disable-legacy-endpoints": "false", + "node_pool": "pool-02" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-pool-02", + "all-node-example" + ], + "taints": [ + { + "effect": "PREFER_NO_SCHEDULE", + "key": "all-pools-example", + "value": "true" + }, + { + "effect": "NO_SCHEDULE", + "key": "nvidia.com/gpu", + "value": "present" + } + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "initialNodeCount": 1, + "locations": [ + "europe-west4-b" + ], + "management": { + "autoUpgrade": true + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "pool-02", + "networkConfig": { + "podIpv4CidrBlock": "192.168.0.0/18", + "podIpv4RangeUtilization": 0.0624, + "podRange": "cft-gke-test-pods-RANDOM_STRING" + }, + "podIpv4CidrSize": 24, + "selfLink": 
"https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/pool-02", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + }, + { + "config": { + "diskSizeGb": 100, + "diskType": "pd-standard", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "imageType": "COS_CONTAINERD", + "kubeletConfig": { + "cpuCfsQuota": true, + "cpuManagerPolicy": "static", + "insecureKubeletReadonlyPortEnabled": false, + "podPidsLimit": "4096" + }, + "labels": { + "all-pools-example": "true", + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "node_pool": "pool-03", + "sandbox.gke.io/runtime": "gvisor" + }, + "linuxNodeConfig": { + "sysctls": { + "net.core.netdev_max_backlog": "20000" + } + }, + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "n1-standard-2", + "metadata": { + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "disable-legacy-endpoints": "false", + "node_pool": "pool-03" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "sandboxConfig": { + "type": "GVISOR" + }, + "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-pool-03", + "all-node-example" + ], + "taints": [ + { + "effect": "PREFER_NO_SCHEDULE", + "key": "all-pools-example", + "value": "true" + }, + { + "effect": "NO_SCHEDULE", + "key": "sandbox.gke.io/runtime", + "value": "gvisor" + } + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "initialNodeCount": 2, + "locations": [ + "europe-west4-b", + "europe-west4-c" + ], + "management": { + "autoRepair": true, + "autoUpgrade": true + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "pool-03", + "networkConfig": { + "enablePrivateNodes": false, + "podIpv4CidrBlock": "172.16.0.0/18", + "podIpv4RangeUtilization": 0.0625, + "podRange": "test" + }, + "podIpv4CidrSize": 24, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/pool-03", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + }, + { + "autoscaling": { + "enabled": true, + "locationPolicy": "ANY", + "maxNodeCount": 100 + }, + "config": { + "diskSizeGb": 100, + "diskType": "pd-standard", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "imageType": "COS_CONTAINERD", + "labels": { + "all-pools-example": "true", + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "node_pool": "pool-04" + }, + "linuxNodeConfig": { + "sysctls": { + "net.core.netdev_max_backlog": "10000" + } + }, + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "e2-medium", + "metadata": { + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "disable-legacy-endpoints": "false", + "node_pool": "pool-04" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "reservationAffinity": { + "consumeReservationType": "NO_RESERVATION" + }, + "serviceAccount": "default", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-pool-04", + "all-node-example" + ], + "taints": [ + { + "effect": "PREFER_NO_SCHEDULE", + "key": 
"all-pools-example", + "value": "true" + }, + { + "effect": "NO_SCHEDULE", + "key": "cloud.google.com/gke-queued", + "value": "true" + } + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "locations": [ + "europe-west4-b" + ], + "management": { + "autoRepair": true, + "autoUpgrade": true + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "pool-04", + "networkConfig": { + "podIpv4CidrBlock": "192.168.0.0/18", + "podIpv4RangeUtilization": 0.0624, + "podRange": "cft-gke-test-pods-RANDOM_STRING" + }, + "podIpv4CidrSize": 24, + "queuedProvisioning": { + "enabled": true + }, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/pool-04", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + }, + { + "autoscaling": { + "enabled": true, + "locationPolicy": "BALANCED", + "maxNodeCount": 100, + "minNodeCount": 1 + }, + "config": { + "advancedMachineFeatures": { + "enableNestedVirtualization": true + }, + "diskSizeGb": 100, + "diskType": "pd-standard", + "effectiveCgroupMode": "EFFECTIVE_CGROUP_MODE_V2", + "imageType": "COS_CONTAINERD", + "labels": { + "all-pools-example": "true", + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "node_pool": "pool-05" + }, + "linuxNodeConfig": { + "sysctls": { + "net.core.netdev_max_backlog": "10000" + } + }, + "loggingConfig": { + "variantConfig": { + "variant": "DEFAULT" + } + }, + "machineType": "n1-standard-2", + "metadata": { + "cluster_name": "node-pool-cluster-RANDOM_STRING", + "disable-legacy-endpoints": "false", + "node_pool": "pool-05" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true + }, + "tags": [ + "gke-node-pool-cluster-RANDOM_STRING", + "gke-node-pool-cluster-RANDOM_STRING-pool-05", + "all-node-example" + ], + "taints": [ + { + "effect": "PREFER_NO_SCHEDULE", + "key": "all-pools-example", + "value": "true" + } + ], + "windowsNodeConfig": {}, + "workloadMetadataConfig": { + "mode": "GKE_METADATA" + } + }, + "initialNodeCount": 1, + "locations": [ + "europe-west4-b" + ], + "management": { + "autoRepair": true, + "autoUpgrade": true + }, + "maxPodsConstraint": { + "maxPodsPerNode": "110" + }, + "name": "pool-05", + "networkConfig": { + "podIpv4CidrBlock": "192.168.0.0/18", + "podIpv4RangeUtilization": 0.0624, + "podRange": "cft-gke-test-pods-RANDOM_STRING" + }, + "podIpv4CidrSize": 24, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING/nodePools/pool-05", + "status": "RUNNING", + "upgradeSettings": { + "maxSurge": 1, + "strategy": "SURGE" + } + } + ], + "notificationConfig": { + "pubsub": {} + }, + "privateClusterConfig": { + "privateEndpoint": "10.0.0.2", + "publicEndpoint": "KUBERNETES_ENDPOINT" + }, + "rbacBindingConfig": { + "enableInsecureBindingSystemAuthenticated": true, + "enableInsecureBindingSystemUnauthenticated": true + }, + "releaseChannel": { + "channel": "REGULAR" + }, + "resourceLabels": { + "goog-terraform-provisioned": "true" + }, + "securityPostureConfig": { + "mode": "DISABLED", + "vulnerabilityMode": "VULNERABILITY_DISABLED" + }, + "selfLink": "https://container.googleapis.com/v1/projects/PROJECT_ID/locations/europe-west4/clusters/node-pool-cluster-RANDOM_STRING", + 
"servicesIpv4Cidr": "192.168.64.0/18", + "shieldedNodes": { + "enabled": true + }, + "status": "RUNNING", + "subnetwork": "cft-gke-test-RANDOM_STRING", + "verticalPodAutoscaling": {}, + "workloadIdentityConfig": { + "workloadPool": "PROJECT_ID.svc.id.goog" + }, + "zone": "europe-west4" +} diff --git a/test/integration/testutils/cai.go b/test/integration/testutils/cai.go deleted file mode 100644 index 69f819d67d..0000000000 --- a/test/integration/testutils/cai.go +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package cai provides a set of helpers to interact with Cloud Asset Inventory -package utils - -import ( - "testing" - "time" - - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" - "github.com/tidwall/gjson" -) - -type CmdCfg struct { - sleep int // minutes to sleep prior to CAI retreval. default: 2 - assetType string // asset type to retrieve. default: all -} - -type cmdOption func(*CmdCfg) - -// newCmdConfig sets defaults and options -func newCmdConfig(opts ...cmdOption) (*CmdCfg) { - caiOpts := &CmdCfg{ - sleep: 2, - assetType: "", - } - - for _, opt := range opts { - opt(caiOpts) - } - - return caiOpts -} - -// Set custom sleep minutes -func WithSleep(sleep int) cmdOption { - return func(f *CmdCfg) { - f.sleep = sleep - } -} - -// Set asset type -func WithAssetType(assetType string) cmdOption { - return func(f *CmdCfg) { - f.assetType = assetType - } -} - -// GetProjectResources returns the cloud asset inventory resources for a project as a gjson.Result -func GetProjectResources(t testing.TB, project string, opts ...cmdOption) gjson.Result { - caiOpts := newCmdConfig(opts...) 
- time.Sleep(time.Duration(caiOpts.sleep) * time.Minute) - if caiOpts.assetType != "" { - return gcloud.Runf(t, "asset list --project=%s --asset-types=%s --content-type=resource", project, caiOpts.assetType) - } else { - return gcloud.Runf(t, "asset list --project=%s --content-type=resource", project) - } -} From 1c64a8aa27ae46c31b5eb89a725ce0ccfb2e74e8 Mon Sep 17 00:00:00 2001 From: Andrew Peabody Date: Wed, 18 Dec 2024 19:27:35 +0000 Subject: [PATCH 3/3] final work --- build/int.cloudbuild.yaml | 286 +++++++++++++++++- test/fixtures/node_pool/example.tf | 6 +- test/fixtures/node_pool/outputs.tf | 4 + test/integration/go.mod | 4 +- test/integration/node_pool/node_pool_test.go | 125 +------- .../node_pool/testdata/TestNodePool.json | 12 +- .../safer_cluster_iap_bastion_test.go | 10 +- test/integration/testutils/utils.go | 46 +++ 8 files changed, 360 insertions(+), 133 deletions(-) diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index c8bab6c35e..6a9d7d0c24 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -24,19 +24,295 @@ steps: - 'TF_VAR_org_id=$_ORG_ID' - 'TF_VAR_folder_id=$_FOLDER_ID' - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' -- id: init node-pool-local +- id: init-all waitFor: - prepare name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage init --verbose'] -- id: converge node-pool-local + args: ['/bin/bash', '-c', 'cft test run all --stage init --verbose'] +- id: create-all waitFor: - - init node-pool-local + - init-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] +- id: apply disable-client-cert + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage apply --verbose --test-dir test/integration'] +- id: verify disable-client-cert + waitFor: + - apply disable-client-cert + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage verify --verbose --test-dir test/integration'] +- id: teardown disable-client-cert + waitFor: + - verify disable-client-cert + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage teardown --verbose --test-dir test/integration'] +- id: apply shared-vpc-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage apply --verbose --test-dir test/integration'] +- id: verify shared-vpc-local + waitFor: + - apply shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage verify --verbose --test-dir test/integration'] +- id: destroy shared-vpc-local + waitFor: + - verify shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage teardown --verbose --test-dir 
test/integration'] +- id: apply safer-cluster-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage apply --verbose'] +- id: verify safer-cluster-local + waitFor: + - apply safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage verify --verbose'] +- id: destroy safer-cluster-local + waitFor: + - verify safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage destroy --verbose'] +- id: apply simple-regional-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage apply --verbose'] +- id: verify simple-regional-local + waitFor: + - apply simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage verify --verbose'] +- id: destroy simple-regional-local + waitFor: + - verify simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage teardown --verbose'] +- id: apply simple-regional-private-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage apply --verbose'] +- id: verify simple-regional-private-local + waitFor: + - apply simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage verify --verbose'] +- id: destroy simple-regional-private-local + waitFor: + - verify simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage teardown --verbose'] +- id: apply simple-regional-cluster-autoscaling + waitFor: + - create-all + - destroy simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage apply --verbose'] +- id: verify simple-regional-cluster-autoscaling + waitFor: + - apply simple-regional-cluster-autoscaling + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage verify --verbose'] +- id: destroy simple-regional-cluster-autoscaling + waitFor: + - verify simple-regional-cluster-autoscaling + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage teardown --verbose'] +- id: apply simple-regional-with-kubeconfig-local + waitFor: + - create-all + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage apply --verbose'] +- id: verify simple-regional-with-kubeconfig-local + waitFor: + - apply simple-regional-with-kubeconfig-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage verify --verbose'] +- id: destroy simple-regional-with-kubeconfig-local + waitFor: + - verify simple-regional-with-kubeconfig-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage teardown --verbose'] +- id: converge simple-regional-with-gateway-api-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-gateway-api-local'] +- id: verify simple-regional-with-gateway-api-local + waitFor: + - converge simple-regional-with-gateway-api-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-gateway-api-local'] +- id: destroy simple-regional-with-gateway-api-local + waitFor: + - verify simple-regional-with-gateway-api-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-gateway-api-local'] +- id: apply simple-regional-with-networking-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage apply --verbose'] +- id: verify simple-regional-with-networking-local + waitFor: + - apply simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage verify --verbose'] +- id: destroy simple-regional-with-networking-local + waitFor: + - verify simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage teardown --verbose'] +- id: apply simple-zonal-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage apply --verbose'] +- id: verify simple-zonal-local + waitFor: + - apply simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage verify --verbose'] +- id: destroy simple-zonal-local + waitFor: + - verify simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft 
test run TestSimpleZonal --stage teardown --verbose'] +- id: apply simple-zonal-private-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage apply --verbose'] +- id: verify simple-zonal-private-local + waitFor: + - apply simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage verify --verbose'] +- id: destroy simple-zonal-private-local + waitFor: + - verify simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage teardown --verbose'] +- id: converge stub-domains-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-local'] +- id: verify stub-domains-local + waitFor: + - converge stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-local'] +- id: destroy stub-domains-local + waitFor: + - verify stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-local'] +- id: converge upstream-nameservers-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge upstream-nameservers-local'] +- id: verify upstream-nameservers-local + waitFor: + - converge upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify upstream-nameservers-local'] +- id: destroy upstream-nameservers-local + waitFor: + - verify upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy upstream-nameservers-local'] +- id: converge stub-domains-upstream-nameservers-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-upstream-nameservers-local'] +- id: verify stub-domains-upstream-nameservers-local + waitFor: + - converge stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-upstream-nameservers-local'] +- id: destroy stub-domains-upstream-nameservers-local + waitFor: + - verify stub-domains-upstream-nameservers-local + 
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-upstream-nameservers-local'] +- id: converge workload-metadata-config-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-metadata-config-local'] +- id: verify workload-metadata-config-local + waitFor: + - converge workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-metadata-config-local'] +- id: destroy workload-metadata-config-local + waitFor: + - verify workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] +- id: apply beta-cluster + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage apply --verbose --test-dir test/integration'] +- id: verify beta-cluster + waitFor: + - apply beta-cluster + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage verify --verbose --test-dir test/integration'] +- id: teardown beta-cluster + waitFor: + - verify beta-cluster + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage teardown --verbose --test-dir test/integration'] +- id: apply simple-windows-node-pool-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage apply --verbose --test-dir test/integration'] +- id: verify simple-windows-node-pool-local + waitFor: + - apply simple-windows-node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage verify --verbose --test-dir test/integration'] +- id: destroy simple-windows-node-pool-local + waitFor: + - verify simple-windows-node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage teardown --verbose --test-dir test/integration'] +- id: apply deploy-service-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage apply --verbose'] +- id: verify deploy-service-local + waitFor: + - apply deploy-service-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage verify --verbose'] +- 
id: destroy deploy-service-local + waitFor: + - verify deploy-service-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage destroy --verbose'] +- id: apply node-pool-local + waitFor: + - create-all name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage apply --verbose'] - id: verify node-pool-local waitFor: - - converge node-pool-local + - apply node-pool-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage verify --verbose'] - id: destroy node-pool-local diff --git a/test/fixtures/node_pool/example.tf b/test/fixtures/node_pool/example.tf index 633c276133..673298e488 100644 --- a/test/fixtures/node_pool/example.tf +++ b/test/fixtures/node_pool/example.tf @@ -14,6 +14,10 @@ * limitations under the License. */ +locals { + compute_engine_service_account = var.compute_engine_service_accounts[0] +} + module "example" { source = "../../../examples/node_pool" @@ -25,7 +29,7 @@ module "example" { subnetwork = google_compute_subnetwork.main.name ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name - compute_engine_service_account = var.compute_engine_service_accounts[0] + compute_engine_service_account = local.compute_engine_service_account cluster_autoscaling = { enabled = true diff --git a/test/fixtures/node_pool/outputs.tf b/test/fixtures/node_pool/outputs.tf index 74103ff0be..4df1f503f0 100644 --- a/test/fixtures/node_pool/outputs.tf +++ b/test/fixtures/node_pool/outputs.tf @@ -87,3 +87,7 @@ output "registry_project_ids" { output "random_string" { value = random_string.suffix.result } + +output "compute_engine_service_account" { + value = local.compute_engine_service_account +} diff --git a/test/integration/go.mod b/test/integration/go.mod index c42f2394a6..cd75b51e9b 100644 --- a/test/integration/go.mod +++ b/test/integration/go.mod @@ -9,6 +9,8 @@ require ( github.com/gruntwork-io/terratest v0.48.1 github.com/hashicorp/terraform-json v0.24.0 github.com/stretchr/testify v1.10.0 + github.com/tidwall/gjson v1.18.0 + golang.org/x/sync v0.10.0 ) require ( @@ -103,7 +105,6 @@ require ( github.com/pquerna/otp v1.4.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect @@ -116,7 +117,6 @@ require ( golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.31.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect diff --git a/test/integration/node_pool/node_pool_test.go b/test/integration/node_pool/node_pool_test.go index 92d6caaf53..1e4afa1941 100644 --- a/test/integration/node_pool/node_pool_test.go +++ b/test/integration/node_pool/node_pool_test.go @@ -15,16 +15,12 @@ package node_pool import ( "fmt" - "slices" "testing" "time" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai" - 
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" - "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/assert" "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils" ) @@ -44,128 +40,29 @@ func TestNodePool(t *testing.T) { clusterName := bpt.GetStringOutput("cluster_name") randomString := bpt.GetStringOutput("random_string") kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint") + nodeServiceAccount := bpt.GetStringOutput("compute_engine_service_account") // Retrieve Project CAI projectCAI := cai.GetProjectResources(t, projectId, cai.WithAssetTypes([]string{"container.googleapis.com/Cluster", "k8s.io/Node"})) - t.Log(projectCAI.Raw) - // Retrieve Cluster from CAI - clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) - - if !projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data").Exists() { - t.Fatalf("Cluster not found: %s", clusterResourceName) - } - cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data") - t.Log(cluster.Raw) + // Retrieve Cluster from CAI // Equivalent gcloud describe command (classic) // cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId) + clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) + cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data") - // Cluster Assertions (classic) - assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running") - assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type") - assert.Equal("https://www.googleapis.com/auth/cloud-platform", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes.0").String(), "has the expected oauth scopes") - assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account") - assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile") - assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning") - assert.JSONEq(`[ - { - "maximum": "20", - "minimum": "5", - "resourceType": "cpu" - }, - { - "maximum": "30", - "minimum": "10", - "resourceType": "memory" - } - ]`, - cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits") - - // Cluster Assertions using golden image (TestNodePool.json) with sanitizer + // Setup golden image with sanitizers g := golden.NewOrUpdate(t, cluster.String(), + golden.WithSanitizer(golden.StringSanitizer(nodeServiceAccount, "NODE_SERVICE_ACCOUNT")), golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")), golden.WithSanitizer(golden.StringSanitizer(randomString, "RANDOM_STRING")), golden.WithSanitizer(golden.StringSanitizer(kubernetesEndpoint, "KUBERNETES_ENDPOINT")), ) - checkPaths := 
utils.GetTerminalJSONPaths(g.GetJSON()) - - exemptPaths := []string{"nodePools"} - checkPaths = slices.DeleteFunc(checkPaths, func(s string) bool { - return slices.Contains(exemptPaths, s) - }) - g.JSONPathEqs(assert, cluster, checkPaths) - - // NodePool Assertions - nodePools := []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"} - for _, nodePool := range nodePools { - g.JSONPathEqs(assert, cluster.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)), utils.GetTerminalJSONPaths(g.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)))) - } - - // nodePool-01 Assertions - assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists") - assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type") - assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image") - assert.True(cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.enabled").Bool(), "has autoscaling enabled") - assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") - assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoRepair").Bool(), "has autorepair enabled") - assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") - assert.Equal("kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.shutdown-script").String(), "pool-2 exists") - assert.Equal("false", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.disable-legacy-endpoints").String(), "pool-2 exists") - assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "pool-01-example": "true", "cluster_name": "%s", "node_pool": "pool-01"}`, clusterName), - cluster.Get("nodePools.#(name==\"pool-01\").config.labels").String(), "has the expected labels") - assert.ElementsMatch([]string{"all-node-example", "pool-01-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-01", clusterName)}, - cluster.Get("nodePools.#(name==\"pool-01\").config.tags").Value().([]interface{}), "has the expected network tags") - assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl") - assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl") - - // nodePool-02 Assertions - assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists") - assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type") - assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled") - assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") - assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.maxNodeCount").Int(), "has the expected maximum node count") - assert.Equal(int64(30), 
cluster.Get("nodePools.#(name==\"pool-02\").config.diskSizeGb").Int(), "has the expected disk size") - assert.Equal("pd-standard", cluster.Get("nodePools.#(name==\"pool-02\").config.diskType").String(), "has the expected disk type") - assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-02\").config.imageType").String(), "has the expected image") - assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-02"}`, clusterName), - cluster.Get("nodePools.#(name==\"pool-02\").config.labels").String(), "has the expected labels") - assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-02", clusterName)}, - cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags") - assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") - - // nodwPool-03 Assertions - assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists") - assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations") - assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type") - assert.False(cluster.Get("nodePools.#(name==\"pool-03\").autoscaling.enabled").Bool(), "has autoscaling enabled") - assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-03\").initialNodeCount").Int(), "has the expected inital node count") - assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoRepair").Bool(), "has autorepair enabled") - assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") - assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-03", "sandbox.gke.io/runtime": "gvisor"}`, clusterName), - cluster.Get("nodePools.#(name==\"pool-03\").config.labels").String(), "has the expected labels") - assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-03", clusterName)}, - cluster.Get("nodePools.#(name==\"pool-03\").config.tags").Value().([]interface{}), "has the expected network tags") - assert.Equal("172.16.0.0/18", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podIpv4CidrBlock").String(), "has the expected pod range") - assert.Equal("test", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podRange").String(), "has the expected pod range") - assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-03\").config.imageType").String(), "has the expected image") - assert.Equal("static", cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuManagerPolicy").String(), "has the expected cpuManagerPolicy kubelet config") - assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config") - assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") - - // nodePool-04 Assertions - assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists") - 
assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled") - // nodePool-05 Assertions - assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists") - assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled") + // Cluster (and listed node pools) Assertions + testutils.TGKEAssertGolden(assert, g, &cluster, []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"}, []string{"monitoringConfig.componentConfig.enableComponents"}) // TODO: enableComponents is UL // K8s Assertions - gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId) - k8sOpts := k8s.KubectlOptions{} - clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json") - assert.NoError(err) - clusterNodes := utils.ParseKubectlJSONResult(t, clusterNodesOp) assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -178,7 +75,7 @@ func TestNodePool(t *testing.T) { "value": "true" } ]`, - clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-01\").spec.taints").String(), "has the expected taints") + projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-01\").resource.data.spec.taints").String(), "has the expected taints") assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -191,7 +88,7 @@ func TestNodePool(t *testing.T) { "value": "present" } ]`, - clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint") + projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-02\").resource.data.spec.taints").String(), "has the expected all-pools-example taint") assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -204,7 +101,7 @@ func TestNodePool(t *testing.T) { "value": "gvisor" } ]`, - clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint") + projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-03\").resource.data.spec.taints").String(), "has the expected all-pools-example taint") }) bpt.Test() diff --git a/test/integration/node_pool/testdata/TestNodePool.json b/test/integration/node_pool/testdata/TestNodePool.json index 15a7a12de3..413d0ce050 100644 --- a/test/integration/node_pool/testdata/TestNodePool.json +++ b/test/integration/node_pool/testdata/TestNodePool.json @@ -221,7 +221,7 @@ "https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -354,7 +354,7 @@ "oauthScopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -451,7 +451,7 @@ "oauthScopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -541,7 +541,7 @@ "sandboxConfig": { "type": "GVISOR" }, - "serviceAccount": 
"gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -632,7 +632,7 @@ "reservationAffinity": { "consumeReservationType": "NO_RESERVATION" }, - "serviceAccount": "default", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -724,7 +724,7 @@ "oauthScopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "default", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, diff --git a/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go b/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go index aa912771dd..d577451121 100644 --- a/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go +++ b/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go @@ -35,20 +35,20 @@ func TestSaferClusterIapBastion(t *testing.T) { // bpt.DefaultVerify(assert) testutils.TGKEVerify(t, bpt, assert) // Verify Resources - test_command, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ") + testCommand, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ") // pre run ssh command so that ssh-keygen can run - gcloud.RunCmd(t, test_command, + gcloud.RunCmd(t, testCommand, gcloud.WithCommonArgs([]string{}), ) - cluster_version := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version")) + clusterVersion := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version")) - op := gcloud.Run(t, test_command, + op := gcloud.Run(t, testCommand, gcloud.WithCommonArgs([]string{}), ) - assert.Equal(cluster_version, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE") + assert.Equal(clusterVersion, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE") }) bpt.Test() diff --git a/test/integration/testutils/utils.go b/test/integration/testutils/utils.go index 1554678b91..0711bca793 100644 --- a/test/integration/testutils/utils.go +++ b/test/integration/testutils/utils.go @@ -15,14 +15,19 @@ package testutils import ( + "fmt" "slices" "strings" "testing" "time" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" tfjson "github.com/hashicorp/terraform-json" "github.com/stretchr/testify/assert" + "github.com/tidwall/gjson" + "golang.org/x/sync/errgroup" ) var ( @@ -36,6 +41,8 @@ var ( // API Rate limit exceeded errors can be retried. 
".*rateLimitExceeded.*": "Rate limit exceeded.", } + + ClusterAlwaysExemptPaths = []string{"nodePools"} // node pools are separately checked by name ) func GetTestProjectFromSetup(t *testing.T, idx int) string { @@ -67,3 +74,42 @@ func TGKEVerifyExemptResources(t *testing.T, b *tft.TFBlueprintTest, assert *ass assert.Equal(tfjson.Actions{tfjson.ActionNoop}, r.Change.Actions, "Plan must be no-op for resource: %s", r.Address) } } + +// TGKEAssertGolden asserts a cluster and listed node pools against paths in golden image +func TGKEAssertGolden(assert *assert.Assertions, golden *golden.GoldenFile, clusterJson *gjson.Result, nodePools []string, exemptClusterPaths []string) { + // Retrieve golden paths + clusterCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON()) + + // Remove exempt cluster paths + exemptPaths := slices.Concat(exemptClusterPaths, ClusterAlwaysExemptPaths) + clusterCheckPaths = slices.DeleteFunc(clusterCheckPaths, func(s string) bool { + for _, exempPath := range exemptPaths { + if strings.HasPrefix(s, exempPath) { + return true + } + } + return false + }) + + // Cluster assertions + golden.JSONPathEqs(assert, *clusterJson, clusterCheckPaths) + + // NodePool assertions + for _, nodePool := range nodePools { + assert.Truef(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)).Exists(), "NodePool not found: %s", nodePool) + + nodeCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool))) + + syncGroup := new(errgroup.Group) + syncGroup.SetLimit(24) + for _, nodeCheckPath := range nodeCheckPaths { + nodeCheckPath := nodeCheckPath + syncGroup.Go(func() error { + gotData := golden.ApplySanitizers(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String()) + gfData := golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String() + assert.Equalf(gfData, gotData, "For node %s path %q expected %q to match fixture %q", nodePool, nodeCheckPath, gotData, gfData) + return nil + }) + } + } +}