diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..30e551a122
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,60 @@
+---
+name: Report a Bug
+about: Found an issue? Let us fix it.
+---
+
+Please ensure you do the following when reporting a bug:
+
+- [ ] Provide a concise description of what the bug is.
+- [ ] Provide information about your environment.
+- [ ] Provide clear steps to reproduce the bug.
+- [ ] Attach applicable logs. Please do not attach screenshots showing logs unless you are unable to copy and paste the log data.
+- [ ] Ensure any code / output examples are [properly formatted](https://docs.github.com/en/github/writing-on-github/basic-writing-and-formatting-syntax#quoting-code) for legibility.
+
+Note that some logs needed to troubleshoot may be found in the `/pgdata//pg_log` directory on your Postgres instance.
+
+An incomplete bug report can lead to delays in resolving the issue or the closing of a ticket, so please be as detailed as possible.
+
+If you are looking for [general support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/), please view the [support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) page for where you can ask questions.
+
+Thanks for reporting the issue; we're looking forward to helping you!
+
+## Overview
+
+Add a concise description of what the bug is.
+
+## Environment
+
+Please provide the following details:
+
+- Platform: (`Kubernetes`, `OpenShift`, `Rancher`, `GKE`, `EKS`, `AKS`, etc.)
+- Platform Version: (e.g. `1.20.3`, `4.7.0`)
+- PGO Image Tag: (e.g. `ubi8-5.x.y-0`)
+- Postgres Version: (e.g. `15`)
+- Storage: (e.g. `hostpath`, `nfs`, or the name of your storage class)
+
+## Steps to Reproduce
+
+### REPRO
+
+Provide steps to get to the error condition:
+
+1. Run `...`
+1. Do `...`
+1. Try `...`
+
+### EXPECTED
+
+1. Provide the behavior that you expected.
+
+### ACTUAL
+
+1. Describe what actually happens.
+
+## Logs
+
+Please provide appropriate log output or any configuration files that may help troubleshoot the issue. **DO NOT** include sensitive information, such as passwords.
+
+## Additional Information
+
+Please provide any additional information that may be helpful.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..4de2077c77
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,42 @@
+---
+name: Feature Request
+about: Help us improve PGO!
+---
+
+Have an idea to improve PGO? We'd love to hear it! We'll need some information from you to learn more about your feature request.
+
+Please be sure you've done the following:
+
+- [ ] Provide a concise description of your feature request.
+- [ ] Describe your use case. Detail the problem you are trying to solve.
+- [ ] Describe how you envision the feature working.
+- [ ] Provide general information about your current PGO environment.
+
+## Overview
+
+Provide a concise description of your feature request.
+
+## Use Case
+
+Describe your use case. Why do you want this feature? What problem will it solve? How will it help you? How will it make PGO easier to use?
+
+## Desired Behavior
+
+Describe how the feature would work. How do you envision interfacing with it?
+
+## Environment
+
+Tell us about your environment.
+
+Please provide the following details:
+
+- Platform: (`Kubernetes`, `OpenShift`, `Rancher`, `GKE`, `EKS`, `AKS`, etc.)
+- Platform Version: (e.g. `1.20.3`, `4.7.0`)
+- PGO Image Tag: (e.g. `ubi8-5.x.y-0`)
+- Postgres Version: (e.g. `15`)
+- Storage: (e.g. `hostpath`, `nfs`, or the name of your storage class)
+- Number of Postgres clusters: (`XYZ`)
+
+## Additional Information
+
+Please provide any additional information that may be helpful.
diff --git a/.github/ISSUE_TEMPLATE/support---question-and-answer.md b/.github/ISSUE_TEMPLATE/support---question-and-answer.md
new file mode 100644
index 0000000000..271caa9029
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/support---question-and-answer.md
@@ -0,0 +1,35 @@
+---
+name: Support
+about: "Learn how to interact with the PGO community"
+---
+
+If you believe you have found a bug, please open up a [Bug Report](https://github.com/CrunchyData/postgres-operator/issues/new?template=bug_report.md).
+
+If you have a feature request, please open up a [Feature Request](https://github.com/CrunchyData/postgres-operator/issues/new?template=feature_request.md).
+
+You can find information about general PGO [support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) at:
+
+[https://access.crunchydata.com/documentation/postgres-operator/latest/support/](https://access.crunchydata.com/documentation/postgres-operator/latest/support/)
+
+## Questions
+
+For questions that are neither bugs nor feature requests, please be sure to:
+
+- [ ] Provide information about your environment (see below for more information).
+- [ ] Provide any steps or other relevant details related to your question.
+- [ ] Attach logs, where applicable. Please do not attach screenshots showing logs unless you are unable to copy and paste the log data.
+- [ ] Ensure any code / output examples are [properly formatted](https://docs.github.com/en/github/writing-on-github/basic-writing-and-formatting-syntax#quoting-code) for legibility.
+
+Besides Pod logs, logs may also be found in the `/pgdata/pg/log` directory on your Postgres instance.
+
+If you are looking for [general support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/), please view the [support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) page for where you can ask questions.
+
+### Environment
+
+Please provide the following details:
+
+- Platform: (`Kubernetes`, `OpenShift`, `Rancher`, `GKE`, `EKS`, `AKS`, etc.)
+- Platform Version: (e.g. `1.20.3`, `4.7.0`)
+- PGO Image Tag: (e.g. `ubi8-5.x.y-0`)
+- Postgres Version: (e.g. `15`)
+- Storage: (e.g. `hostpath`, `nfs`, or the name of your storage class)
diff --git a/.github/actions/k3d/action.yaml b/.github/actions/k3d/action.yaml
new file mode 100644
index 0000000000..b6e6ed5c2b
--- /dev/null
+++ b/.github/actions/k3d/action.yaml
@@ -0,0 +1,94 @@
+name: k3d
+description: Start k3s using k3d
+inputs:
+  k3d-tag:
+    default: latest
+    required: true
+    description: >
+      Git tag from https://github.com/k3d-io/k3d/releases or "latest"
+  k3s-channel:
+    default: latest
+    required: true
+    description: >
+      https://docs.k3s.io/upgrades/manual#release-channels
+  prefetch-images:
+    required: true
+    description: >
+      Each line is the name of an image to fetch onto all Kubernetes nodes
+  prefetch-timeout:
+    default: 3m
+    required: true
+    description: >
+      Amount of time to wait for images to be fetched
+
+outputs:
+  k3d-version:
+    value: ${{ steps.k3d.outputs.k3d }}
+    description: >
+      K3d version
+  kubernetes-version:
+    value: ${{ steps.k3s.outputs.server }}
+    description: >
+      Kubernetes server version, as reported by the Kubernetes API
+  pause-image:
+    value: ${{ steps.k3s.outputs.pause-image }}
+    description: >
+      Pause image for prefetch images DaemonSet
+
+runs:
+  using: composite
+  steps:
+    - id: k3d
+      name: Install k3d
+      shell: bash
+      env:
+        K3D_TAG: ${{ inputs.k3d-tag }}
+      run: |
+        curl --fail --silent https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh |
+          TAG="${K3D_TAG#latest}" bash
+        k3d version | awk '{ print tolower($1) "=" $3 }' >> "${GITHUB_OUTPUT}"
+
+    - id: k3s
+      name: Start k3s
+      shell: bash
+      run: |
+        k3d cluster create --image '+${{ inputs.k3s-channel }}' --no-lb --timeout=2m --wait
+        kubectl version | awk '{ print tolower($1) "=" $3 }' >> "${GITHUB_OUTPUT}"
+
+        PAUSE_IMAGE=$(docker exec $(k3d node list --output json | jq --raw-output 'first.name') \
+          k3s agent --help | awk '$1 == "--pause-image" {
+            match($0, /default: "[^"]*"/);
+            print substr($0, RSTART+10, RLENGTH-11)
+          }')
+        echo "pause-image=${PAUSE_IMAGE}" >> $GITHUB_OUTPUT
+
+    - name: Prefetch container images
+      shell: bash
+      env:
+        INPUT_IMAGES: ${{ inputs.prefetch-images }}
+        INPUT_TIMEOUT: ${{ inputs.prefetch-timeout }}
+      run: |
+        jq <<< "$INPUT_IMAGES" --raw-input 'select(. != "")' |
+          jq --slurp \
+            --arg pause '${{ steps.k3s.outputs.pause-image }}' \
+            --argjson labels '{"name":"image-prefetch"}' \
+            --argjson name '"image-prefetch"' \
+          '{
+            apiVersion: "apps/v1", kind: "DaemonSet",
+            metadata: { name: $name, labels: $labels },
+            spec: {
+              selector: { matchLabels: $labels },
+              template: {
+                metadata: { labels: $labels },
+                spec: {
+                  initContainers: to_entries | map({
+                    name: "c\(.key)", image: .value, command: ["true"]
+                  }),
+                  containers: [{ name: "pause", image: $pause }]
+                }
+              }
+            }
+          }' |
+          kubectl create --filename=-
+        kubectl rollout status daemonset.apps/image-prefetch --timeout "$INPUT_TIMEOUT" ||
+          kubectl describe daemonset.apps/image-prefetch
diff --git a/.github/actions/trivy/action.yaml b/.github/actions/trivy/action.yaml
new file mode 100644
index 0000000000..bcc67421cb
--- /dev/null
+++ b/.github/actions/trivy/action.yaml
@@ -0,0 +1,138 @@
+# Copyright 2024 - 2025 Crunchy Data Solutions, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# +# schema documentation: https://docs.github.com/actions/sharing-automations/creating-actions/metadata-syntax-for-github-actions +# yaml-language-server: $schema=https://json.schemastore.org/github-action.json + +name: Trivy +description: Scan this project using Trivy + +# The Trivy team maintains an action, but it has trouble caching its vulnerability data: +# https://github.com/aquasecurity/trivy-action/issues/389 +# +# 1. It caches vulnerability data once per calendar day, despite Trivy wanting +# to download more frequently than that. +# 2. When it fails to download the data, it fails the workflow *and* caches +# the incomplete data. +# 3. When (1) and (2) coincide, every following run that day *must* update the data, +# producing more opportunities for (2) and more failed workflows. +# +# The action below uses any recent cache matching `cache-prefix` and calculates a cache key +# derived from the data Trivy downloads. An older database is better than no scans at all. +# When a run successfully updates the data, that data is cached and available to other runs. + +inputs: + cache: + default: restore,success,use + description: >- + What Trivy data to cache; one or more of restore, save, success, or use. + The value "use" instructs Trivy to read and write to its cache. + The value "restore" loads the Trivy cache from GitHub. + The value "success" saves the Trivy cache to GitHub when Trivy succeeds. + The value "save" saves the Trivy cache to GitHub regardless of Trivy. + + database: + default: update + description: >- + How Trivy should handle its data; one of update or skip. + The value "skip" fetches no Trivy data at all. + + setup: + default: v0.65.0,cache + description: >- + How to install Trivy; one or more of version, none, or cache. + The value "none" does not install Trivy at all. + + cache-directory: + default: ${{ github.workspace }}/.cache/trivy + description: >- + Directory where Trivy should store its data + + cache-prefix: + default: cache-trivy + description: >- + Name (key) where Trivy data should be stored in the GitHub cache + + scan-target: + default: . + description: >- + What Trivy should scan + + scan-type: + default: repository + description: >- + How Trivy should interpret scan-target; one of filesystem, image, repository, or sbom. + +runs: + using: composite + steps: + # Parse list inputs as separated by commas and spaces. + # Select the maximum version-looking string from `inputs.setup`. + - id: parsed + shell: bash + run: | + # Validate inputs + ( + <<< '${{ inputs.cache }}' jq -rRsS '"cache=\(split("[,\\s]+"; "") - [""])"' + <<< '${{ inputs.setup }}' jq -rRsS ' + "setup=\(split("[,\\s]+"; "") - [""])", + "version=\(split("[,\\s]+"; "") | max_by(split("[v.]"; "") | map(tonumber?)))" + ' + ) | tee --append "${GITHUB_OUTPUT}" + + # Install Trivy as requested. + # NOTE: `setup-trivy` can download a "latest" version but cannot cache it. + - if: ${{ ! contains(fromJSON(steps.parsed.outputs.setup), 'none') }} + uses: aquasecurity/setup-trivy@v0.2.4 + with: + cache: ${{ contains(fromJSON(steps.parsed.outputs.setup), 'cache') }} + version: ${{ steps.parsed.outputs.version }} + + # Restore a recent cache beginning with the prefix. 
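+    # A sketch of the key derivation (for illustration only, not part of the
+    # action): with the default cache-prefix, the trivy step below computes
+    #   checksum=$(cat "${TRIVY_CACHE_DIR}/"*/metadata.json | sha256sum)
+    #   cache-key=cache-trivy-${checksum%% *}
+    # so freshly downloaded vulnerability data yields a new cache key, while
+    # unchanged data reuses the entry restored here.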
+ - id: restore + if: ${{ contains(fromJSON(steps.parsed.outputs.cache), 'restore') }} + uses: actions/cache/restore@v4 + with: + path: ${{ inputs.cache-directory }} + key: ${{ inputs.cache-prefix }}- + + - id: trivy + shell: bash + env: + TRIVY_CACHE_DIR: >- + ${{ contains(fromJSON(steps.parsed.outputs.cache), 'use') && inputs.cache-directory || '' }} + TRIVY_SKIP_CHECK_UPDATE: ${{ inputs.database == 'skip' }} + TRIVY_SKIP_DB_UPDATE: ${{ inputs.database == 'skip' }} + TRIVY_SKIP_JAVA_DB_UPDATE: ${{ inputs.database == 'skip' }} + TRIVY_SKIP_VEX_REPO_UPDATE: ${{ inputs.database == 'skip' }} + run: | + # Run Trivy + trivy '${{ inputs.scan-type }}' '${{ inputs.scan-target }}' || result=$? + + checksum=$([[ -z "${TRIVY_CACHE_DIR}" ]] || cat "${TRIVY_CACHE_DIR}/"*/metadata.json | sha256sum) + echo 'cache-key=${{ inputs.cache-prefix }}-'"${checksum%% *}" >> "${GITHUB_OUTPUT}" + + exit "${result-0}" + + # Save updated data to the cache when requested. + - if: >- + ${{ + steps.restore.outcome == 'success' && + steps.restore.outputs.cache-matched-key == steps.trivy.outputs.cache-key + }} + shell: bash + run: | + # Cache hit on ${{ steps.restore.outputs.cache-matched-key }} + - if: >- + ${{ + steps.restore.outputs.cache-matched-key != steps.trivy.outputs.cache-key && + ( + (contains(fromJSON(steps.parsed.outputs.cache), 'save') && !cancelled()) || + (contains(fromJSON(steps.parsed.outputs.cache), 'success') && success()) + ) + }} + uses: actions/cache/save@v4 + with: + key: ${{ steps.trivy.outputs.cache-key }} + path: ${{ inputs.cache-directory }} diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..8a16fc8d6f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,63 @@ +# Copyright 2024 - 2025 Crunchy Data Solutions, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# documentation: https://docs.github.com/code-security/dependabot/dependabot-version-updates +# schema documentation: https://docs.github.com/code-security/dependabot/working-with-dependabot/dependabot-options-reference +# yaml-language-server: $schema=https://json.schemastore.org/dependabot-2.0.json +# +# Dependabot allows only one schedule per package-ecosystem, directory, and target-branch. +# Configurations that lack a "target-branch" field also affect security updates. 
+# +# There is a hack to have *two* schedules: https://github.com/dependabot/dependabot-core/issues/1778#issuecomment-1988140219 +--- +version: 2 + +updates: + - package-ecosystem: github-actions + directories: + # "/" is a special case that includes ".github/workflows/*" + - '/' + - '.github/actions/*' + registries: '*' + schedule: + interval: weekly + day: tuesday + labels: + - dependencies + groups: + # Group security updates into one pull request + action-vulnerabilities: + applies-to: security-updates + patterns: ['*'] + + # Group version updates into one pull request + github-actions: + applies-to: version-updates + patterns: ['*'] + + - package-ecosystem: gomod + directory: '/' + registries: '*' + schedule: + interval: weekly + day: wednesday + labels: + - dependencies + groups: + # Group security updates into one pull request + go-vulnerabilities: + applies-to: security-updates + patterns: ['*'] + + # Group Kubernetes and OpenTelemetry version updates into separate pull requests + kubernetes: + patterns: ['k8s.io/*', 'sigs.k8s.io/*'] + opentelemetry: + patterns: ['go.opentelemetry.io/*'] + go-dependencies: + patterns: ['*'] + exclude-patterns: + - 'k8s.io/*' + - 'sigs.k8s.io/*' + - 'go.opentelemetry.io/*' diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..b03369bf09 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,30 @@ +**Checklist:** + + + - [ ] Have you added an explanation of what your changes do and why you'd like them to be included? + - [ ] Have you updated or added documentation for the change, as applicable? + - [ ] Have you tested your changes on all related environments with successful results, as applicable? + - [ ] Have you added automated tests? + + + +**Type of Changes:** + + + - [ ] New feature + - [ ] Bug fix + - [ ] Documentation + - [ ] Testing enhancement + - [ ] Other + + +**What is the current behavior (link to any open issues here)?** + + + +**What is the new behavior (if this is a feature change)?** +- [ ] Breaking change (fix or feature that would cause existing functionality to change) + + + +**Other Information**: diff --git a/.github/workflows/codeql-analysis.yaml b/.github/workflows/codeql-analysis.yaml new file mode 100644 index 0000000000..0c268c33a8 --- /dev/null +++ b/.github/workflows/codeql-analysis.yaml @@ -0,0 +1,37 @@ +# https://codeql.github.com +name: CodeQL + +on: + pull_request: + branches: + - REL_5_7 + push: + branches: + - REL_5_7 + schedule: + - cron: '10 18 * * 2' + +jobs: + analyze: + if: ${{ github.repository == 'CrunchyData/postgres-operator' }} + permissions: + actions: read + contents: read + security-events: write + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: stable } + + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: { languages: go } + + - name: Autobuild + # This action calls `make` which runs our "help" target. 
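+        # (Running `make` with no arguments executes the default target, which
+        # is `help` in this Makefile, so autobuild performs no compilation by
+        # itself.)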
+ uses: github/codeql-action/autobuild@v4 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/govulncheck.yaml b/.github/workflows/govulncheck.yaml new file mode 100644 index 0000000000..6684f0c937 --- /dev/null +++ b/.github/workflows/govulncheck.yaml @@ -0,0 +1,50 @@ +# https://go.dev/security/vuln +name: govulncheck + +on: + pull_request: + branches: + - REL_5_7 + push: + branches: + - REL_5_7 + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + # + # TODO(govulncheck): Remove when "golang/govulncheck-action" uses "actions/setup-go" v6 or newer + GOTOOLCHAIN: local + +jobs: + vulnerabilities: + if: ${{ github.repository == 'CrunchyData/postgres-operator' }} + permissions: + security-events: write + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + # Install Go and produce a SARIF report. This fails only when the tool is + # unable to scan. + - name: Prepare report + uses: golang/govulncheck-action@v1 + with: + output-file: 'govulncheck-results.sarif' + output-format: 'sarif' + repo-checkout: false + + # Submit the SARIF report to GitHub code scanning. Pull request checks + # succeed or fail according to branch protection rules. + # - https://docs.github.com/en/code-security/code-scanning + - name: Upload results to GitHub + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: 'govulncheck-results.sarif' + + # Print any detected vulnerabilities to the workflow log. This step fails + # when the tool detects a vulnerability in code that is called. + # - https://go.dev/blog/govulncheck + - name: Log results + run: govulncheck --format text --show verbose ./... diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000000..c6c90b7662 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,32 @@ +name: Linters + +on: + pull_request: + branches: + - REL_5_7 + +jobs: + golangci-lint: + runs-on: ubuntu-24.04 + permissions: + contents: read + checks: write + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: stable } + + - uses: golangci/golangci-lint-action@v9 + with: + version: latest + args: --timeout=5m + + # Count issues reported by disabled linters. The command always + # exits zero to ensure it does not fail the pull request check. 
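+      # (In the script below, the trailing "||:" is shell shorthand for
+      # "|| true": the ":" builtin always exits zero, so the step succeeds
+      # even when golangci-lint reports issues.)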
+ - name: Count non-blocking issues + run: | + golangci-lint run --config .golangci.next.yaml --show-stats >> "${GITHUB_STEP_SUMMARY}" \ + --max-issues-per-linter=0 \ + --max-same-issues=0 \ + --uniq-by-line=0 \ + --output.text.path=/dev/null ||: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 0000000000..41a7b91ac0 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,201 @@ +name: Tests + +on: + pull_request: + branches: + - REL_5_7 + push: + branches: + - REL_5_7 + +jobs: + go-test: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: 1.24 } + + - name: Ensure go.mod is tidy + run: go mod tidy --diff + - name: Ensure generated files are committed + run: make check-generate + - run: make check + + kubernetes-api: + runs-on: ubuntu-24.04 + needs: [go-test] + strategy: + fail-fast: false + matrix: + kubernetes: ['default'] + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: stable } + + - run: go mod download + - run: ENVTEST_K8S_VERSION="${KUBERNETES#default}" make check-envtest + env: + KUBERNETES: "${{ matrix.kubernetes }}" + GO_TEST: go test --coverprofile 'envtest.coverage' --coverpkg ./internal/... + + # Upload coverage to GitHub + - run: gzip envtest.coverage + - uses: actions/upload-artifact@v5 + with: + name: "~coverage~kubernetes-api=${{ matrix.kubernetes }}" + path: envtest.coverage.gz + retention-days: 1 + + kubernetes-k3d: + if: "${{ github.repository == 'CrunchyData/postgres-operator' }}" + runs-on: ubuntu-24.04 + needs: [go-test] + strategy: + fail-fast: false + matrix: + kubernetes: [v1.28, v1.34] + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: stable } + + - name: Start k3s + uses: ./.github/actions/k3d + with: + k3s-channel: "${{ matrix.kubernetes }}" + prefetch-images: | + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547 + + - run: make createnamespaces check-envtest-existing + env: + PGO_TEST_TIMEOUT_SCALE: 1.2 + GO_TEST: go test --coverprofile 'envtest-existing.coverage' --coverpkg ./internal/... 
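+        # (GO_TEST above overrides the Makefile default `GO_TEST ?= $(GO) test`,
+        # adding a coverage profile; the coverage-report job at the end of this
+        # workflow merges it with the profiles from the other jobs.)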
+ + # Upload coverage to GitHub + - run: gzip envtest-existing.coverage + - uses: actions/upload-artifact@v5 + with: + name: "~coverage~kubernetes-k3d=${{ matrix.kubernetes }}" + path: envtest-existing.coverage.gz + retention-days: 1 + + e2e-k3d-kuttl: + runs-on: ubuntu-24.04 + needs: [go-test] + strategy: + fail-fast: false + matrix: + kubernetes: [v1.28, v1.34] + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: stable } + + - name: Start k3s + uses: ./.github/actions/k3d + with: + k3s-channel: "${{ matrix.kubernetes }}" + prefetch-timeout: 5m + prefetch-images: | + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.7-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.18.1-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-18.1-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.8-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-2547 + + - name: Get pgMonitor files. + run: make get-pgmonitor + env: + PGMONITOR_DIR: "${{ github.workspace }}/hack/tools/pgmonitor" + QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + + - run: go mod download + - name: Build executable + run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator + + # Start a Docker container with the working directory mounted. + - name: Start PGO + run: | + kubectl apply --server-side -k ./config/namespace + kubectl apply --server-side -k ./config/dev + hack/create-kubeconfig.sh postgres-operator pgo + docker run --detach --network host --read-only \ + --volume "$(pwd):/mnt" --workdir '/mnt' --env 'PATH=/mnt/bin' \ + --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ + --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ + --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-2547' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2547' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2547' \ + --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.18.1-2547' \ + --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-18.1-2547' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.7-2547' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.8-2547' \ + --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.5-0' \ + --env 'PGO_FEATURE_GATES=TablespaceVolumes=true' \ + --name 'postgres-operator' ubuntu \ + postgres-operator + + - run: make generate-kuttl + env: + KUTTL_PG_UPGRADE_FROM_VERSION: '16' + KUTTL_PG_UPGRADE_TO_VERSION: '17' + KUTTL_PG_VERSION: '16' + KUTTL_POSTGIS_VERSION: '3.4' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547' + - run: | + make 
check-kuttl && exit + failed=$? + echo '::group::PGO logs'; docker logs 'postgres-operator'; echo '::endgroup::' + exit $failed + + - name: Stop PGO + run: docker stop 'postgres-operator' || true + + coverage-report: + if: ${{ success() || contains(needs.*.result, 'success') }} + runs-on: ubuntu-24.04 + needs: + - kubernetes-api + - kubernetes-k3d + - e2e-k3d-kuttl + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: stable } + - uses: actions/download-artifact@v6 + with: { path: download } + + # Combine the coverage profiles by taking the mode line from any one file + # and the data from all files. Write a list of functions with less than + # 100% coverage to the job summary, and upload a complete HTML report. + - name: Generate report + run: | + gunzip --keep download/*/*.gz + ( sed -e '1q' download/*/*.coverage + tail -qn +2 download/*/*.coverage ) > total.coverage + go tool cover --func total.coverage -o total-coverage.txt + go tool cover --html total.coverage -o total-coverage.html + + awk < total-coverage.txt ' + END { print "
Total Coverage: " $3 " " $2 }
+          ' >> "${GITHUB_STEP_SUMMARY}"
+
+          sed < total-coverage.txt -e '/100.0%/d' -e "s,$(go list -m)/,," | column -t | awk '
+            NR == 1 { print "\n\n```" } { print } END { if (NR > 0) print "```\n\n" }
" } + ' >> "${GITHUB_STEP_SUMMARY}" + + # Upload coverage to GitHub + - run: gzip total-coverage.html + - uses: actions/upload-artifact@v5 + with: + name: coverage-report=html + path: total-coverage.html.gz + retention-days: 15 diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml new file mode 100644 index 0000000000..f8d7734c6f --- /dev/null +++ b/.github/workflows/trivy.yaml @@ -0,0 +1,127 @@ +# https://aquasecurity.github.io/trivy +name: Trivy + +on: + pull_request: + branches: + - REL_5_7 + push: + branches: + - REL_5_7 + +env: + # Use the committed Trivy configuration files. + TRIVY_IGNOREFILE: .trivyignore.yaml + TRIVY_SECRET_CONFIG: trivy-secret.yaml + +jobs: + cache: + # Run only one of these jobs at a time across the entire project. + concurrency: { group: trivy-cache } + # Do not fail this workflow when this job fails. + continue-on-error: true + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - name: Download Trivy + uses: ./.github/actions/trivy + env: + TRIVY_DEBUG: true + TRIVY_DOWNLOAD_DB_ONLY: true + TRIVY_NO_PROGRESS: true + TRIVY_SCANNERS: license,secret,vuln + with: + cache: restore,success,use + database: update + + licenses: + # Run this job after the cache job regardless of its success or failure. + needs: [cache] + if: >- + ${{ !cancelled() }} + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + # Trivy needs a populated Go module cache to detect Go module licenses. + - uses: actions/setup-go@v6 + with: { go-version: stable } + - run: go mod download + + # Report success only when detected licenses are listed in [.trivyignore.yaml]. + - name: Scan licenses + uses: ./.github/actions/trivy + env: + TRIVY_DEBUG: true + TRIVY_EXIT_CODE: 1 + TRIVY_SCANNERS: license + with: + cache: restore,use + database: skip + + secrets: + # Run this job after the cache job regardless of its success or failure. + needs: [cache] + if: >- + ${{ !cancelled() }} + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + # Report success only when detected secrets are listed in [.trivyignore.yaml]. + - name: Scan secrets + uses: ./.github/actions/trivy + env: + TRIVY_EXIT_CODE: 1 + TRIVY_SCANNERS: secret + with: + cache: restore,use + database: skip + + vulnerabilities: + # Run this job after the cache job regardless of its success or failure. + needs: [cache] + if: >- + ${{ github.repository == 'CrunchyData/postgres-operator' && !cancelled() }} + permissions: + security-events: write + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + # Print any detected secrets or vulnerabilities to the workflow log for + # human consumption. This step fails only when Trivy is unable to scan. + # A later step uploads results to GitHub as a pull request check. + - name: Log detected vulnerabilities + uses: ./.github/actions/trivy + env: + TRIVY_SCANNERS: secret,vuln + with: + cache: restore,use + database: skip + + # Produce a SARIF report of actionable results. This step fails only when + # Trivy is unable to scan. + - name: Report actionable vulnerabilities + uses: ./.github/actions/trivy + env: + TRIVY_IGNORE_UNFIXED: true + TRIVY_FORMAT: 'sarif' + TRIVY_OUTPUT: 'trivy-results.sarif' + TRIVY_SCANNERS: secret,vuln + with: + cache: use + database: skip + setup: none + + # Submit the SARIF report to GitHub code scanning. Pull requests checks + # succeed or fail according to branch protection rules. 
+ # - https://docs.github.com/en/code-security/code-scanning + - name: Upload results to GitHub + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: 'trivy-results.sarif' diff --git a/.golangci.next.yaml b/.golangci.next.yaml index 95b3f63347..2aa389e841 100644 --- a/.golangci.next.yaml +++ b/.golangci.next.yaml @@ -4,37 +4,95 @@ # Rules that should be enforced immediately belong in [.golangci.yaml]. # # Both files are used by [.github/workflows/lint.yaml]. +version: "2" +# https://golangci-lint.run/usage/linters linters: - disable-all: true - enable: - - contextcheck - - err113 + default: all + disable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - copyloopvar + - depguard + - dupword + - durationcheck - errchkjson - - gocritic - - godot - - godox - - gofumpt - - gosec # exclude-use-default - - nilnil + - errname + - errorlint + - exhaustive + - exptostd + - fatcontext + - forbidigo + - ginkgolinter + - gocheckcompilerdirectives + - gochecksumtype + - goheader + - gomoddirectives + - gomodguard + - goprintffuncname + - gosmopolitan + - grouper + - iface + - importas + - interfacebloat + - intrange + - loggercheck + - makezero + - mirror + - misspell + - musttag + - nilerr + - nilnesserr + - noctx - nolintlint - - predeclared - - revive - - staticcheck # exclude-use-default - - tenv - - thelper - - tparallel + - nosprintfhostport + - prealloc + - promlinter + - protogetter + - reassign + - recvcheck + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - tagalign + - testifylint + - unconvert + - unparam + - usestdlibvars + - usetesting - wastedassign + - wsl + - zerologlint -issues: - # https://github.com/golangci/golangci-lint/issues/2239 - exclude-use-default: false + settings: + thelper: + # https://github.com/kulti/thelper/issues/27 + tb: { begin: true, first: true } + test: { begin: true, first: true, name: true } + + exclusions: + warn-unused: true + # Ignore built-in exclusions + presets: [] + rules: + # We call external linters when they are installed: Flake8, ShellCheck, etc. 
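+      # For illustration only, the kind of test code this rule permits
+      # (hypothetical names): exec.Command(shellcheckPath, "--format=gcc", file)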
+ - linters: [gosec] + path: '_test[.]go$' + text: 'G204: Subprocess launched with variable' -linters-settings: - errchkjson: - check-error-free-encoding: true +# https://golangci-lint.run/usage/formatters +formatters: + enable: + - gofumpt + +issues: + # Fix only when requested + fix: false - thelper: - # https://github.com/kulti/thelper/issues/27 - tb: { begin: true, first: true } - test: { begin: true, first: true, name: true } + # Show all issues at once + max-issues-per-linter: 0 + max-same-issues: 0 + uniq-by-line: false diff --git a/.golangci.yaml b/.golangci.yaml index cbf66e1e83..99a2ec576b 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,92 +1,202 @@ # https://golangci-lint.run/usage/configuration/ +version: "2" +# https://golangci-lint.run/usage/linters linters: - disable: - - contextcheck - - errchkjson - - gci - - gofumpt + default: standard enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - copyloopvar - depguard + - dupword + - durationcheck + - errchkjson + - errname + - errorlint + - exhaustive + - exptostd + - fatcontext + - forbidigo + - ginkgolinter + - gocheckcompilerdirectives + - gochecksumtype - goheader + - gomoddirectives - gomodguard - - gosimple + - goprintffuncname + - gosec + - gosmopolitan + - grouper + - iface - importas + - interfacebloat + - intrange + - loggercheck + - makezero + - mirror - misspell + - musttag + - nilerr + - nilnesserr + - noctx + - nolintlint + - nosprintfhostport + - prealloc + - promlinter + - protogetter + - reassign + - recvcheck + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - tagalign + - testifylint - unconvert - presets: - - bugs - - format - - unused + - unparam + - usestdlibvars + - usetesting + - wastedassign + - zerologlint + + settings: + dupword: + ignore: + # We might see duplicate instances of 'fi' if we end two bash 'if' statements + - fi + # We use "local all all peer" in some hba tests + - all + + depguard: + rules: + everything: + files: ['$all'] + list-mode: lax + allow: + - go.opentelemetry.io/otel/semconv/v1.27.0 + deny: + - pkg: go.opentelemetry.io/otel/semconv + desc: Use "go.opentelemetry.io/otel/semconv/v1.27.0" instead. + - pkg: io/ioutil + desc: Use the "io" and "os" packages instead. See https://go.dev/doc/go1.16#ioutil + - pkg: math/rand$ + desc: Use the "math/rand/v2" package instead. See https://go.dev/doc/go1.22#math_rand_v2 + not-tests: + files: ['!$test','!**/internal/testing/**'] + list-mode: lax + deny: + - pkg: net/http/httptest + desc: Should be used only in tests. + - pkg: testing/* + desc: The "testing" packages should be used only in tests. + - pkg: github.com/crunchydata/postgres-operator/internal/crd/* + desc: The "internal/crd" packages should be used only in tests. + - pkg: github.com/crunchydata/postgres-operator/internal/testing/* + desc: The "internal/testing" packages should be used only in tests. + + tests: + files: ['$test'] + list-mode: lax + deny: + - pkg: github.com/pkg/errors + desc: Use the "errors" package unless you are interacting with stack traces. + + errchkjson: + check-error-free-encoding: true + + goheader: + template: |- + Copyright {{ DATES }} Crunchy Data Solutions, Inc. 
+ + SPDX-License-Identifier: Apache-2.0 + values: + regexp: + DATES: ((201[7-9]|202[0-4]) - 2025|2025) + + gomodguard: + blocked: + modules: + - go.yaml.in/yaml/v2: { recommendations: [sigs.k8s.io/yaml] } + - go.yaml.in/yaml/v3: { recommendations: [sigs.k8s.io/yaml] } + - gopkg.in/yaml.v2: { recommendations: [sigs.k8s.io/yaml] } + - gopkg.in/yaml.v3: { recommendations: [sigs.k8s.io/yaml] } + - gotest.tools: { recommendations: [gotest.tools/v3] } + - k8s.io/kubernetes: + reason: k8s.io/kubernetes is for building kubelet, kubeadm, etc. + + importas: + no-unaliased: true + alias: + - pkg: k8s.io/api/(\w+)/(v[\w\w]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/apis/(\w+)/(v[\w\d]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors -linters-settings: - depguard: + spancheck: + checks: [end, record-error] + extra-start-span-signatures: + - github.com/crunchydata/postgres-operator/internal/tracing.Start:opentelemetry + ignore-check-signatures: + - tracing.Escape + + exclusions: + warn-unused: true + presets: + - common-false-positives + - legacy + - std-error-handling rules: - everything: - deny: - - pkg: io/ioutil - desc: > - Use the "io" and "os" packages instead. - See https://go.dev/doc/go1.16#ioutil - - not-tests: - files: ['!$test'] - deny: - - pkg: net/http/httptest - desc: Should be used only in tests. - - - pkg: testing/* - desc: The "testing" packages should be used only in tests. - - - pkg: github.com/crunchydata/postgres-operator/internal/testing/* - desc: The "internal/testing" packages should be used only in tests. - - exhaustive: - default-signifies-exhaustive: true - - goheader: - template: |- - Copyright {{ DATES }} Crunchy Data Solutions, Inc. - - SPDX-License-Identifier: Apache-2.0 - values: - regexp: - DATES: '((201[7-9]|202[0-4]) - 2025|2025)' - - goimports: - local-prefixes: github.com/crunchydata/postgres-operator - - gomodguard: - blocked: - modules: - - gopkg.in/yaml.v2: { recommendations: [sigs.k8s.io/yaml] } - - gopkg.in/yaml.v3: { recommendations: [sigs.k8s.io/yaml] } - - gotest.tools: { recommendations: [gotest.tools/v3] } - - k8s.io/kubernetes: - reason: > - k8s.io/kubernetes is for managing dependencies of the Kubernetes - project, i.e. building kubelet and kubeadm. - - gosec: - excludes: - # Flags for potentially-unsafe casting of ints, similar problem to globally-disabled G103 - - G115 - - importas: - alias: - - pkg: k8s.io/api/(\w+)/(v[\w\w]+) - alias: $1$2 - - pkg: k8s.io/apimachinery/pkg/apis/(\w+)/(v[\w\d]+) - alias: $1$2 - - pkg: k8s.io/apimachinery/pkg/api/errors - alias: apierrors - no-unaliased: true + # It is fine for tests to use "math/rand" packages. + - linters: [gosec] + path: '(.+)_test[.]go' + text: weak random number generator + + # This internal package is the one place we want to do API discovery. + - linters: [depguard] + path: internal/kubernetes/discovery.go + text: k8s.io/client-go/discovery + + # Postgres HBA rules often include "all all all" + - linters: [dupword] + path: /(hba|postgres)[^/]+$ + text: words \(all\) found + + # These value types have unmarshal methods. 
+ # https://github.com/raeperd/recvcheck/issues/7 + - linters: [recvcheck] + path: internal/pki/pki.go + text: methods of "(Certificate|PrivateKey)" + + - linters: [staticcheck] + text: corev1.(Endpoints|EndpointSubset) is deprecated + + - linters: [staticcheck] + path: internal/controller/ + text: >- + deprecated: Use `RequeueAfter` instead + +# https://golangci-lint.run/usage/formatters +formatters: + enable: + - gci + - gofmt + settings: + gci: + sections: + - standard + - default + - localmodule issues: - exclude-generated: strict - exclude-rules: - # These value types have unmarshal methods. - # https://github.com/raeperd/recvcheck/issues/7 - - linters: [recvcheck] - path: internal/pki/pki.go - text: 'methods of "(Certificate|PrivateKey)"' + # Fix only when requested + fix: false + + # Show all issues at once + max-issues-per-linter: 0 + max-same-issues: 0 + uniq-by-line: false diff --git a/Makefile b/Makefile index f75e501474..a493694fcc 100644 --- a/Makefile +++ b/Makefile @@ -18,8 +18,9 @@ BUILDAH_BUILD ?= buildah bud GO ?= go GO_BUILD = $(GO) build GO_TEST ?= $(GO) test -KUTTL ?= kubectl-kuttl +KUTTL ?= $(GO) run github.com/kudobuilder/kuttl/cmd/kubectl-kuttl@latest KUTTL_TEST ?= $(KUTTL) test +ENVTEST_K8S_VERSION ?= 1.34 ##@ General diff --git a/go.mod b/go.mod index 11afb9279e..7ea5e074f3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crunchydata/postgres-operator -go 1.23.0 +go 1.24.0 require ( github.com/go-logr/logr v1.4.2 @@ -19,7 +19,7 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 go.opentelemetry.io/otel/sdk v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 - golang.org/x/crypto v0.36.0 + golang.org/x/crypto v0.45.0 gotest.tools/v3 v3.1.0 k8s.io/api v0.30.2 k8s.io/apimachinery v0.30.2 @@ -71,13 +71,13 @@ require ( go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect - golang.org/x/net v0.38.0 // indirect + golang.org/x/net v0.47.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.38.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/go.sum b/go.sum index 15358229a9..5a8052bdc6 100644 --- a/go.sum +++ b/go.sum @@ -153,8 +153,8 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp 
v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -163,8 +163,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -176,15 +176,15 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -192,8 +192,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools 
v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/bridge/client.go b/internal/bridge/client.go index b1d3992506..3e3c4c3b4c 100644 --- a/internal/bridge/client.go +++ b/internal/bridge/client.go @@ -280,7 +280,7 @@ func (c *Client) doWithBackoff( request.Header = headers.Clone() //nolint:bodyclose // This response is returned to the caller. - response, err = c.Client.Do(request) + response, err = c.Do(request) } // An error indicates there was no response from the server, and the @@ -327,7 +327,7 @@ func (c *Client) doWithRetry( // Retry the request when the server responds with "Too many requests". // - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes // - https://docs.crunchybridge.com/api-concepts/getting-started/#rate-limiting - for err == nil && response.StatusCode == 429 { + for err == nil && response.StatusCode == http.StatusTooManyRequests { seconds, _ := strconv.Atoi(response.Header.Get("Retry-After")) // Only retry when the response indicates how long to wait. @@ -378,11 +378,11 @@ func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthOb } // 401, Unauthorized - case response.StatusCode == 401: + case response.StatusCode == http.StatusUnauthorized: err = fmt.Errorf("%w: %s", errAuthentication, body) default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -409,7 +409,7 @@ func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -445,7 +445,7 @@ func (c *Client) ListClusters(ctx context.Context, apiKey, teamId string) ([]*Cl } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -486,7 +486,7 @@ func (c *Client) CreateCluster( } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -524,14 +524,14 @@ func (c *Client) DeleteCluster(ctx context.Context, apiKey, id string) (*Cluster // --https://docs.crunchybridge.com/api-concepts/idempotency#delete-semantics // But also, if we can't find it... // Maybe if no ID we return already deleted? - case response.StatusCode == 410: + case response.StatusCode == http.StatusGone: fallthrough - case response.StatusCode == 404: + case response.StatusCode == http.StatusNotFound: deletedAlready = true err = nil default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. 
err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -565,7 +565,7 @@ func (c *Client) GetCluster(ctx context.Context, apiKey, id string) (*ClusterApi } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -599,7 +599,7 @@ func (c *Client) GetClusterStatus(ctx context.Context, apiKey, id string) (*Clus } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -633,7 +633,7 @@ func (c *Client) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*Clu } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -674,7 +674,7 @@ func (c *Client) UpgradeCluster( } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -709,7 +709,7 @@ func (c *Client) UpgradeClusterHA(ctx context.Context, apiKey, id, action string } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -724,10 +724,7 @@ func (c *Client) UpdateCluster( ) (*ClusterApiResource, error) { result := &ClusterApiResource{} - clusterbyte, err := json.Marshal(clusterRequestPayload) - if err != nil { - return result, err - } + clusterbyte, _ := json.Marshal(clusterRequestPayload) response, err := c.doWithRetry(ctx, "PATCH", "/clusters/"+id, nil, clusterbyte, http.Header{ "Accept": []string{"application/json"}, @@ -750,7 +747,7 @@ func (c *Client) UpdateCluster( } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -780,7 +777,7 @@ func (c *Client) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -810,7 +807,7 @@ func (c *Client) ListClusterRoles(ctx context.Context, apiKey, id string) ([]*Cl } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go index 6b464c05b3..f1aa1c8ddd 100644 --- a/internal/bridge/client_test.go +++ b/internal/bridge/client_test.go @@ -31,8 +31,8 @@ func TestClientBackoff(t *testing.T) { client := NewClient("", "") var total time.Duration - for i := 1; i <= 50 && client.Backoff.Steps > 0; i++ { - step := client.Backoff.Step() + for i := 1; i <= 50 && client.Steps > 0; i++ { + step := client.Step() total += step t.Logf("%02d:%20v%20v", i, step, total) @@ -68,7 +68,7 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with one attempt, i.e. no backoff. client := NewClient(server.URL, "xyz") - client.Backoff.Steps = 1 + client.Steps = 1 assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() @@ -113,8 +113,8 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with brief backoff. 
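		// (Note: setting client.Duration and client.Steps directly works on the
		// assumption that Client embeds its wait.Backoff, so those fields are
		// promoted; earlier revisions wrote client.Backoff.Duration and .Steps.)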
client := NewClient(server.URL, "") - client.Backoff.Duration = time.Millisecond - client.Backoff.Steps = 5 + client.Duration = time.Millisecond + client.Steps = 5 assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() @@ -170,8 +170,8 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with brief backoff. client := NewClient(server.URL, "") - client.Backoff.Duration = time.Millisecond - client.Backoff.Steps = 5 + client.Duration = time.Millisecond + client.Steps = 5 assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() @@ -190,8 +190,8 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with lots of brief backoff. client := NewClient(server.URL, "") - client.Backoff.Duration = time.Millisecond - client.Backoff.Steps = 100 + client.Duration = time.Millisecond + client.Steps = 100 assert.Equal(t, client.BaseURL.String(), server.URL) ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go index baffd16516..6edd870790 100644 --- a/internal/bridge/crunchybridgecluster/apply.go +++ b/internal/bridge/crunchybridgecluster/apply.go @@ -22,7 +22,7 @@ func (r *CrunchyBridgeClusterReconciler) patch( patch client.Patch, options ...client.PatchOption, ) error { options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) + return r.Patch(ctx, object, patch, options...) } // apply sends an apply patch to object's endpoint in the Kubernetes API and diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go index 98816cc0c9..c83c7a0c39 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -23,7 +23,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/controller/runtime" - pgoRuntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -57,7 +56,7 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager( // Potentially replace with different requeue times, remove the Watch function // Smarter: retry after a certain time for each cluster: https://gist.github.com/cbandy/a5a604e3026630c5b08cfbcdfffd2a13 WatchesRawSource( - pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), + runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), ). 
// Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters Watches( @@ -80,7 +79,7 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager( func (r *CrunchyBridgeClusterReconciler) setControllerReference( owner *v1beta1.CrunchyBridgeCluster, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, r.Scheme()) } //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update} @@ -671,7 +670,7 @@ func (r *CrunchyBridgeClusterReconciler) GetSecretKeys( }} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if err == nil { if existing.Data["key"] != nil && existing.Data["team"] != nil { @@ -694,7 +693,7 @@ func (r *CrunchyBridgeClusterReconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Delete(ctx, object, exactly) } return nil diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go index b0a957a0ec..ae44c8036b 100644 --- a/internal/bridge/crunchybridgecluster/delete.go +++ b/internal/bridge/crunchybridgecluster/delete.go @@ -28,7 +28,7 @@ func (r *CrunchyBridgeClusterReconciler) handleDelete( log := ctrl.LoggerFrom(ctx) // If the CrunchyBridgeCluster isn't being deleted, add the finalizer - if crunchybridgecluster.ObjectMeta.DeletionTimestamp.IsZero() { + if crunchybridgecluster.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { controllerutil.AddFinalizer(crunchybridgecluster, finalizer) if err := r.Update(ctx, crunchybridgecluster); err != nil { diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go index c04daaa131..c86746ef1b 100644 --- a/internal/bridge/crunchybridgecluster/delete_test.go +++ b/internal/bridge/crunchybridgecluster/delete_test.go @@ -65,7 +65,7 @@ func TestHandleDeleteCluster(t *testing.T) { // Get cluster from kubernetes and assert that the deletion timestamp was added assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) - assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + assert.Check(t, !cluster.DeletionTimestamp.IsZero()) // Note: We must run handleDelete multiple times because we don't want to remove the // finalizer until we're sure that the cluster has been deleted from Bridge, so we @@ -107,7 +107,7 @@ func TestHandleDeleteCluster(t *testing.T) { // Get cluster from kubernetes and assert that the deletion timestamp was added assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) - assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + assert.Check(t, !cluster.DeletionTimestamp.IsZero()) // Run handleDelete again to attempt to delete from Bridge, but provide bad api key cluster.Status.ID = "2345" diff --git a/internal/bridge/crunchybridgecluster/mock_bridge_api.go b/internal/bridge/crunchybridgecluster/mock_bridge_api.go index f0841dee44..f0439531d1 100644 --- a/internal/bridge/crunchybridgecluster/mock_bridge_api.go +++ b/internal/bridge/crunchybridgecluster/mock_bridge_api.go @@ -13,7 +13,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/bridge" 
"github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go index a1431ca93f..80096de91b 100644 --- a/internal/bridge/crunchybridgecluster/postgres.go +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -11,7 +11,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -93,7 +92,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( // Make sure that this cluster's role secret names are not being used by any other // secrets in the namespace allSecretsInNamespace := &corev1.SecretList{} - err := errors.WithStack(r.Client.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) + err := errors.WithStack(r.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) if err != nil { return nil, nil, err } @@ -116,7 +115,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( selector, err := naming.AsSelector(naming.CrunchyBridgeClusterPostgresRoles(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, secrets, + r.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go index e9454bd4ee..6fae4fe26a 100644 --- a/internal/bridge/crunchybridgecluster/postgres_test.go +++ b/internal/bridge/crunchybridgecluster/postgres_test.go @@ -8,12 +8,11 @@ import ( "context" "testing" - "sigs.k8s.io/controller-runtime/pkg/client" - "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/testing/require" diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go index 28317e07f4..7f35781820 100644 --- a/internal/bridge/installation_test.go +++ b/internal/bridge/installation_test.go @@ -99,7 +99,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -155,7 +155,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -289,7 +289,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -343,7 +343,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -426,7 +426,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return 
c } diff --git a/internal/config/config.go b/internal/config/config.go index ff3c6507d0..5186f1626a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -152,7 +152,7 @@ func VerifyImageValues(cluster *v1beta1.PostgresCluster) error { } if len(images) > 0 { - return fmt.Errorf("Missing image(s): %s", images) + return fmt.Errorf("missing image(s): %s", images) } return nil diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index fd1571f109..30a2334a18 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -338,7 +338,7 @@ func pgUpgradeContainerImage(upgrade *v1beta1.PGUpgrade) string { // spec is defined. If it is undefined, an error is returned. func verifyUpgradeImageValue(upgrade *v1beta1.PGUpgrade) error { if pgUpgradeContainerImage(upgrade) == "" { - return fmt.Errorf("Missing crunchy-upgrade image") + return fmt.Errorf("missing crunchy-upgrade image") } return nil } diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index 85dbca995d..d2c77ceb27 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -151,6 +151,14 @@ func TestServerSideApply(t *testing.T) { MatchLabels: map[string]string{"select": name}, } sts.Spec.Template.Labels = map[string]string{"select": name} + sts.Spec.Template.Spec = corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + } return &sts } diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index 1516836c66..5e35cc285f 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -8,7 +8,6 @@ import ( "context" "testing" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" @@ -91,7 +90,7 @@ func TestCustomLabels(t *testing.T) { ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
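// Kubernetes will not finish deleting an object while any finalizer remains
// on it, and a namespace cannot terminate until the objects in it are gone,
// so each test cleanup strips the finalizer list before deleting the cluster.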
assert.Check(t, client.IgnoreNotFound( @@ -141,8 +140,8 @@ func TestCustomLabels(t *testing.T) { t.Run("Cluster", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "global-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "global-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "daisy-instance1", Replicas: initialize.Int32(1), @@ -191,8 +190,8 @@ func TestCustomLabels(t *testing.T) { t.Run("Instance", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "instance-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "instance-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "max-instance", Replicas: initialize.Int32(1), @@ -245,8 +244,8 @@ func TestCustomLabels(t *testing.T) { t.Run("PGBackRest", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbackrest-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbackrest-cluster" + cluster.Namespace = ns.Name cluster.Spec.Backups.PGBackRest.Metadata = &v1beta1.Metadata{ Labels: map[string]string{"my.pgbackrest.label": "lucy"}, } @@ -292,8 +291,8 @@ func TestCustomLabels(t *testing.T) { t.Run("PGBouncer", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbouncer-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbouncer-cluster" + cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Metadata = &v1beta1.Metadata{ Labels: map[string]string{"my.pgbouncer.label": "lucy"}, } @@ -344,7 +343,7 @@ func TestCustomAnnotations(t *testing.T) { ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( @@ -394,8 +393,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("Cluster", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "global-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "global-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "daisy-instance1", Replicas: initialize.Int32(1), @@ -445,8 +444,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("Instance", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "instance-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "instance-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "max-instance", Replicas: initialize.Int32(1), @@ -499,8 +498,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("PGBackRest", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbackrest-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbackrest-cluster" + cluster.Namespace = ns.Name cluster.Spec.Backups.PGBackRest.Metadata = &v1beta1.Metadata{ Annotations: map[string]string{"my.pgbackrest.annotation": "lucy"}, } @@ -546,8 +545,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("PGBouncer", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbouncer-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbouncer-cluster" + cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Metadata = &v1beta1.Metadata{ Annotations: map[string]string{"my.pgbouncer.annotation": "lucy"}, } @@ -798,12 +797,12 @@ type: ClusterIP assert.NilError(t, err) // Annotations present in the metadata. - assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Annotations, ` + assert.Assert(t, cmp.MarshalMatches(service.Annotations, ` some: note `)) // Labels present in the metadata. - assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Labels, ` + assert.Assert(t, cmp.MarshalMatches(service.Labels, ` happy: label postgres-operator.crunchydata.com/cluster: pg2 postgres-operator.crunchydata.com/role: replica diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index fb2c39d9c3..4086fe15dc 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -87,7 +87,7 @@ func (r *Reconciler) claimObject(ctx context.Context, postgresCluster *v1beta1.P // At this point the resource has no controller ref and is therefore an orphan. 
Ignore if // either the PostgresCluster resource or the orphaned resource is being deleted, or if the selector - // for the orphaned resource doesn't doesn't include the proper PostgresCluster label + // for the orphaned resource doesn't include the proper PostgresCluster label _, hasPGClusterLabel := obj.GetLabels()[naming.LabelCluster] if postgresCluster.GetDeletionTimestamp() != nil || !hasPGClusterLabel { return nil diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index fa8450c5d9..758daf2ef3 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -46,6 +46,14 @@ func TestManageControllerRefs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"label1": "val1"}, }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 91ca725f0e..e2397c24bb 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -13,8 +13,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - "github.com/pkg/errors" - + "github.com/pkg/errors" //nolint:depguard // This legacy test covers so much code, it logs the origin of unexpected errors. "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 60db9ae9d0..6bc3f9e9ec 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -336,7 +336,7 @@ func (r *Reconciler) observeInstances( status.DesiredPGDataVolume = make(map[string]string) for _, instance := range observed.bySet[name] { - status.Replicas += int32(len(instance.Pods)) //nolint:gosec + status.Replicas += int32(len(instance.Pods)) if ready, known := instance.IsReady(); known && ready { status.ReadyReplicas++ @@ -754,7 +754,7 @@ func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, } // Determine whether or not the PVC is associated with an existing instance within the same - // instance set. If not, then the instance name associated with that PVC can be be reused. + // instance set. If not, then the instance name associated with that PVC can be reused. for _, pvc := range setVolumes { pvcInstanceName := pvc.GetLabels()[naming.LabelInstance] instance := observedInstances.byName[pvcInstanceName] @@ -1090,7 +1090,7 @@ func (r *Reconciler) scaleUpInstances( // and append it. for len(instances) < int(*set.Replicas) { var span trace.Span - ctx, span = r.Tracer.Start(ctx, "generateInstanceName") + _, span = r.Tracer.Start(ctx, "generateInstanceName") next := naming.GenerateInstance(cluster, set) // if there are any available instance names (as determined by observing any PVCs for the // instance set that are not currently associated with an instance, e.g. 
in the event the diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 2c2901dd46..7039e9274f 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "os" "sort" @@ -15,7 +16,6 @@ import ( "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" @@ -1219,7 +1219,7 @@ func TestDeleteInstance(t *testing.T) { cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( @@ -1269,9 +1269,9 @@ func TestDeleteInstance(t *testing.T) { err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, errors.WithStack(reconciler.Client.List(ctx, uList, + assert.NilError(t, reconciler.Client.List(ctx, uList, client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector}))) + client.MatchingLabelsSelector{Selector: selector})) if len(uList.Items) == 0 { return true, nil @@ -1582,7 +1582,6 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { `)) }, }} { - test := test t.Run(test.name, func(t *testing.T) { cluster := test.ip.cluster diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index 44f0104c57..50a762977d 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "io" "os" @@ -14,7 +15,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -97,12 +97,12 @@ ownerReferences: assert.NilError(t, err) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "pg2", "postgres-operator.crunchydata.com/patroni": "pg2-ha", @@ -125,13 +125,13 @@ ownerReferences: assert.NilError(t, err) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", "c": "v3", }) // Labels present in the metadata. 
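// corev1.Service embeds metav1.ObjectMeta, so Go field promotion makes
// service.Labels and service.ObjectMeta.Labels refer to the same map; for
// example:
//
//	var s corev1.Service
//	s.Labels = map[string]string{"k": "v"} // identical to s.ObjectMeta.Labels
//
// The shorter spelling below is purely a readability change.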
- assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "d": "v4", "postgres-operator.crunchydata.com/cluster": "pg2", @@ -472,15 +472,15 @@ func TestReconcilePatroniStatus(t *testing.T) { ObjectMeta: naming.PatroniDistributedConfiguration(postgresCluster), } if writeAnnotation { - endpoints.ObjectMeta.Annotations = make(map[string]string) - endpoints.ObjectMeta.Annotations["initialize"] = systemIdentifier + endpoints.Annotations = make(map[string]string) + endpoints.Annotations["initialize"] = systemIdentifier } assert.NilError(t, tClient.Create(ctx, endpoints, &client.CreateOptions{})) instance := &Instance{ Name: instanceName, Runner: runner, } - for i := 0; i < readyReplicas; i++ { + for range readyReplicas { instance.Pods = append(instance.Pods, &corev1.Pod{ Status: corev1.PodStatus{ Conditions: []corev1.PodCondition{{ diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index 592333003e..4fca974241 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -6,11 +6,11 @@ package postgrescluster import ( "context" + "errors" "io" "strconv" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -104,12 +104,12 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, configmap.Annotations, map[string]string{ "a": "v5", "b": "v2", "e": "v6", }) // Labels present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, configmap.Labels, map[string]string{ "c": "v7", "d": "v4", "f": "v8", "postgres-operator.crunchydata.com/cluster": "pg1", "postgres-operator.crunchydata.com/role": "pgadmin", @@ -194,12 +194,12 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "my-cluster", "postgres-operator.crunchydata.com/role": "pgadmin", @@ -225,13 +225,13 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", "c": "v3", }) // Labels present in the metadata. 
- assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "d": "v4", "postgres-operator.crunchydata.com/cluster": "my-cluster", diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 6ee2c96f2e..059a2f4ec9 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1202,7 +1202,7 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, var deltaOptFound, foundTarget bool for _, opt := range opts { switch { - case targetRegex.Match([]byte(opt)): + case targetRegex.MatchString(opt): foundTarget = true case strings.Contains(opt, "--delta"): deltaOptFound = true @@ -2284,7 +2284,7 @@ func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, if isCreate { r.Recorder.Eventf(postgresCluster, corev1.EventTypeNormal, EventRepoHostCreated, - "created pgBackRest repository host %s/%s", repoHost.TypeMeta.Kind, repoHostName) + "created pgBackRest repository host %s/%s", repoHost.Kind, repoHostName) } return repoHost, nil @@ -2470,7 +2470,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob := &batchv1.Job{} backupJob.ObjectMeta = naming.PGBackRestBackupJob(postgresCluster) if currentBackupJob != nil { - backupJob.ObjectMeta.Name = currentBackupJob.ObjectMeta.Name + backupJob.Name = currentBackupJob.Name } var labels, annotations map[string]string @@ -2483,8 +2483,8 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, map[string]string{ naming.PGBackRestBackup: manualAnnotation, }) - backupJob.ObjectMeta.Labels = labels - backupJob.ObjectMeta.Annotations = annotations + backupJob.Labels = labels + backupJob.Annotations = annotations spec := r.generateBackupJobSpecIntent(ctx, postgresCluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) @@ -2647,7 +2647,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, backupJob := &batchv1.Job{} backupJob.ObjectMeta = naming.PGBackRestBackupJob(postgresCluster) if job != nil { - backupJob.ObjectMeta.Name = job.ObjectMeta.Name + backupJob.Name = job.Name } var labels, annotations map[string]string @@ -2661,8 +2661,8 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, naming.PGBackRestCurrentConfig: containerName, naming.PGBackRestConfigHash: configHash, }) - backupJob.ObjectMeta.Labels = labels - backupJob.ObjectMeta.Annotations = annotations + backupJob.Labels = labels + backupJob.Annotations = annotations spec := r.generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, serviceAccount.GetName(), labels, annotations) @@ -2829,7 +2829,7 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, } // Don't record event or return an error if configHashMismatch is true, since this just means // configuration changes in ConfigMaps/Secrets have not yet propagated to the container. - // Therefore, just log an an info message and return an error to requeue and try again. + // Therefore, just log an info message and return an error to requeue and try again. 
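// Mounted ConfigMap and Secret volumes are refreshed by the kubelet on its
// own sync interval, so a stale configuration hash typically resolves itself
// once the updated content reaches the container.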
if configHashMismatch { return true, nil diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 2b0b0e7db4..74ac58bc39 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -468,9 +468,10 @@ topologySpreadConstraints: var instanceConfFound, dedicatedRepoConfFound bool for k, v := range config.Data { if v != "" { - if k == pgbackrest.CMInstanceKey { + switch k { + case pgbackrest.CMInstanceKey: instanceConfFound = true - } else if k == pgbackrest.CMRepoKey { + case pgbackrest.CMRepoKey: dedicatedRepoConfFound = true } } @@ -962,7 +963,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { var foundOwnershipRef bool // verify ownership refs - for _, ref := range backupJob.ObjectMeta.GetOwnerReferences() { + for _, ref := range backupJob.GetOwnerReferences() { if ref.Name == clusterName { foundOwnershipRef = true break @@ -1679,7 +1680,14 @@ func TestGetPGBackRestResources(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: corev1.PodSpec{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, }, @@ -1717,7 +1725,14 @@ func TestGetPGBackRestResources(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: corev1.PodSpec{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, }, @@ -1753,7 +1768,14 @@ func TestGetPGBackRestResources(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: corev1.PodSpec{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, }, @@ -3208,11 +3230,11 @@ func TestGenerateRestoreJobIntent(t *testing.T) { t.Run(fmt.Sprintf("openshift-%v", openshift), func(t *testing.T) { t.Run("ObjectMeta", func(t *testing.T) { t.Run("Name", func(t *testing.T) { - assert.Equal(t, job.ObjectMeta.Name, + assert.Equal(t, job.Name, naming.PGBackRestRestoreJob(cluster).Name) }) t.Run("Namespace", func(t *testing.T) { - assert.Equal(t, job.ObjectMeta.Namespace, + assert.Equal(t, job.Namespace, naming.PGBackRestRestoreJob(cluster).Namespace) }) t.Run("Annotations", func(t *testing.T) { @@ -3435,15 +3457,15 @@ func TestObserveRestoreEnv(t *testing.T) { createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) - fakeLeaderEP.ObjectMeta.Namespace = namespace + fakeLeaderEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) - fakeDCSEP.ObjectMeta.Namespace = namespace + fakeDCSEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) - fakeFailoverEP.ObjectMeta.Namespace = namespace + fakeFailoverEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false)) @@ -3459,15 +3481,15 @@ func TestObserveRestoreEnv(t *testing.T) { createResources: func(t *testing.T, cluster 
*v1beta1.PostgresCluster) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) - fakeLeaderEP.ObjectMeta.Namespace = namespace + fakeLeaderEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) - fakeDCSEP.ObjectMeta.Namespace = namespace + fakeDCSEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) - fakeFailoverEP.ObjectMeta.Namespace = namespace + fakeFailoverEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) }, result: testResult{ @@ -3637,15 +3659,15 @@ func TestPrepareForRestore(t *testing.T) { cluster *v1beta1.PostgresCluster) (*batchv1.Job, []corev1.Endpoints) { fakeLeaderEP := corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) - fakeLeaderEP.ObjectMeta.Namespace = namespace + fakeLeaderEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeLeaderEP)) fakeDCSEP := corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) - fakeDCSEP.ObjectMeta.Namespace = namespace + fakeDCSEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeDCSEP)) fakeFailoverEP := corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) - fakeFailoverEP.ObjectMeta.Namespace = namespace + fakeFailoverEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeFailoverEP)) return nil, []corev1.Endpoints{fakeLeaderEP, fakeDCSEP, fakeFailoverEP} }, diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index b6d35907a8..e18388882c 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -6,10 +6,10 @@ package postgrescluster import ( "context" + "errors" "strconv" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -105,12 +105,12 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "pg7", "postgres-operator.crunchydata.com/role": "pgbouncer", @@ -136,13 +136,13 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", "c": "v3", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "d": "v4", "postgres-operator.crunchydata.com/cluster": "pg7", @@ -420,12 +420,12 @@ namespace: ns3 assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, deploy.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, deploy.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. 
- assert.DeepEqual(t, deploy.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, deploy.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "test-cluster", "postgres-operator.crunchydata.com/role": "pgbouncer", diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index 956a99bffd..a4a6ff64f0 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -346,7 +346,7 @@ func addPGMonitorExporterToInstancePodSpec( }, }, } - configVolume.VolumeSource.Projected.Sources = append(configVolume.VolumeSource.Projected.Sources, + configVolume.Projected.Sources = append(configVolume.Projected.Sources, defaultConfigVolumeProjection) } diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index 787daef212..d52d6a75da 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -63,7 +63,7 @@ func (r *Reconciler) reconcileRootCertificate( intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Namespace, intent.Name = cluster.Namespace, naming.RootCertSecret intent.Data = make(map[string][]byte) - intent.ObjectMeta.OwnerReferences = existing.ObjectMeta.OwnerReferences + intent.OwnerReferences = existing.OwnerReferences // A root secret is scoped to the namespace where postgrescluster(s) // are deployed. For operator deployments with postgresclusters in more than @@ -140,7 +140,7 @@ func (r *Reconciler) reconcileClusterCertificate( intent := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Data = make(map[string][]byte) - intent.ObjectMeta.OwnerReferences = existing.ObjectMeta.OwnerReferences + intent.OwnerReferences = existing.OwnerReferences intent.Annotations = naming.Merge(cluster.Spec.Metadata.GetAnnotationsOrNil()) intent.Labels = naming.Merge( diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index e2332c5800..0cb5f15a99 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -12,7 +12,6 @@ import ( "strings" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -90,7 +89,7 @@ func TestReconcileCerts(t *testing.T) { err := tClient.Get(ctx, client.ObjectKeyFromObject(rootSecret), rootSecret) assert.NilError(t, err) - assert.Check(t, len(rootSecret.ObjectMeta.OwnerReferences) == 1, "first owner reference not set") + assert.Check(t, len(rootSecret.OwnerReferences) == 1, "first owner reference not set") expectedOR := metav1.OwnerReference{ APIVersion: "postgres-operator.crunchydata.com/v1beta1", @@ -99,8 +98,8 @@ func TestReconcileCerts(t *testing.T) { UID: cluster1.UID, } - if len(rootSecret.ObjectMeta.OwnerReferences) > 0 { - assert.Equal(t, rootSecret.ObjectMeta.OwnerReferences[0], expectedOR) + if len(rootSecret.OwnerReferences) > 0 { + assert.Equal(t, rootSecret.OwnerReferences[0], expectedOR) } }) @@ -115,7 +114,7 @@ func TestReconcileCerts(t *testing.T) { clist := &v1beta1.PostgresClusterList{} assert.NilError(t, tClient.List(ctx, clist)) - assert.Check(t, len(rootSecret.ObjectMeta.OwnerReferences) == 2, "second owner reference not set") + assert.Check(t, len(rootSecret.OwnerReferences) == 2, "second owner reference not set") expectedOR := 
metav1.OwnerReference{ APIVersion: "postgres-operator.crunchydata.com/v1beta1", @@ -124,8 +123,8 @@ func TestReconcileCerts(t *testing.T) { UID: cluster2.UID, } - if len(rootSecret.ObjectMeta.OwnerReferences) > 1 { - assert.Equal(t, rootSecret.ObjectMeta.OwnerReferences[1], expectedOR) + if len(rootSecret.OwnerReferences) > 1 { + assert.Equal(t, rootSecret.OwnerReferences[1], expectedOR) } }) @@ -145,8 +144,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret, creating a new root cert returnedRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -206,7 +204,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret newRootCert, err := r.reconcileRootCertificate(ctx, cluster1) @@ -303,7 +301,7 @@ func TestReconcileCerts(t *testing.T) { testSecret := &corev1.Secret{} testSecret.Namespace, testSecret.Name = namespace, "newcustomsecret" // simulate cluster spec update - cluster2.Spec.CustomTLSSecret.LocalObjectReference.Name = "newcustomsecret" + cluster2.Spec.CustomTLSSecret.Name = "newcustomsecret" // get the expected secret projection testSecretProjection := clusterCertSecretProjection(testSecret) @@ -331,8 +329,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret, creating a new root cert returnedRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -392,7 +389,7 @@ func getCertFromSecret( // get the cert from the secret secretCRT, ok := secret.Data[dataKey] if !ok { - return nil, errors.New(fmt.Sprintf("could not retrieve %s", dataKey)) + return nil, fmt.Errorf("could not retrieve %s", dataKey) } // parse the cert from binary encoded data diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index 7ce28b6507..f8987b8332 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -429,7 +429,7 @@ func (r *Reconciler) reconcilePostgresUserSecrets( // If both secrets have "pguser" or neither have "pguser", // sort by creation timestamp - return secrets.Items[i].CreationTimestamp.Time.After(secrets.Items[j].CreationTimestamp.Time) + return secrets.Items[i].CreationTimestamp.After(secrets.Items[j].CreationTimestamp.Time) }) // Index secrets by PostgreSQL user name and delete any that are not in the @@ -502,11 +502,11 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( running, known := instance.IsRunning(container) if running && known && len(instance.Pods) > 0 { pod := instance.Pods[0] - ctx = 
logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } break diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 79dd61fe30..9e52bc38c1 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -6,14 +6,16 @@ package postgrescluster import ( "context" + "errors" "fmt" "io" + "os" + "strings" "testing" "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -282,6 +284,9 @@ volumeMode: Filesystem }) t.Run("DataVolumeSourceClusterWithGoodSnapshot", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } cluster := testCluster() ns := setupNamespace(t, tClient) cluster.Namespace = ns.Name diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index 8e0debc507..281cab2d39 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -10,14 +10,13 @@ import ( "strings" "time" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "github.com/pkg/errors" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index bd0512a6f9..b73ad76ded 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -6,10 +6,12 @@ package postgrescluster import ( "context" + "os" + "strings" "testing" "time" - "github.com/pkg/errors" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -28,8 +30,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/testing/events" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ) func TestReconcileVolumeSnapshots(t *testing.T) { @@ -59,7 +59,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Create cluster (without snapshots spec) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, 
r.Client.Delete(ctx, cluster)) }) @@ -72,34 +72,29 @@ func TestReconcileVolumeSnapshots(t *testing.T) { volumeSnapshotClassName := "my-snapshotclass" snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) assert.NilError(t, err) - err = errors.WithStack(r.apply(ctx, snapshot)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot)) // Get all snapshots for this cluster and assert 1 exists selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) // Reconcile snapshots - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Get all snapshots for this cluster and assert 0 exist - assert.NilError(t, err) snapshots = &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 0) }) @@ -131,8 +126,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert warning event was created and has expected attributes if assert.Check(t, len(recorder.Events) > 0) { @@ -173,23 +167,24 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert no snapshots exist selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 0) }) t.Run("SnapshotsEnabledReadySnapshotsExist", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create a volume snapshot class volumeSnapshotClassName := "my-snapshotclass" volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ @@ -204,7 +199,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Create a cluster with snapshots enabled cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } @@ -244,18 +239,15 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(cluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) // Update snapshot status truePtr := initialize.Bool(true) snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - err = 
r.Client.Status().Update(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, snapshot1)) // Create second snapshot with different annotation value snapshot2 := &volumesnapshotv1.VolumeSnapshot{ @@ -279,38 +271,32 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, }, } - err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, r.apply(ctx, snapshot2)) // Update second snapshot's status snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - err = r.Client.Status().Update(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, snapshot2)) // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert first snapshot exists and second snapshot was deleted selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) assert.Equal(t, snapshots.Items[0].Name, "first-snapshot") // Cleanup - err = r.deleteControlled(ctx, cluster, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.deleteControlled(ctx, cluster, snapshot1)) }) t.Run("SnapshotsEnabledCreateSnapshot", func(t *testing.T) { @@ -328,7 +314,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Create a cluster with snapshots enabled cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } @@ -347,19 +333,17 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert that a snapshot was created selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) assert.Equal(t, snapshots.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], "another-backup-timestamp") @@ -392,7 +376,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -413,21 +397,18 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { }, Spec: testVolumeClaimSpec(), } - err = errors.WithStack(r.setControllerReference(cluster, pvc)) - assert.NilError(t, err) - err = r.apply(ctx, pvc) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, pvc)) + assert.NilError(t, 
r.apply(ctx, pvc)) // Assert that the pvc was created selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) - assert.NilError(t, err) assert.Equal(t, len(pvcs.Items), 1) // Create volumes for reconcile @@ -452,7 +433,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -471,21 +452,23 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) - assert.NilError(t, err) assert.Equal(t, len(pvcs.Items), 1) }) t.Run("SnapshotsEnabledBackupExistsCreateRestore", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create cluster with snapshots enabled ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -494,10 +477,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) currentTime := metav1.Now() startTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) @@ -518,22 +499,24 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(restoreJobs.Items), 1) assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion] != "") }) t.Run("SnapshotsEnabledSuccessfulRestoreExists", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create cluster with snapshots enabled ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -548,10 +531,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - 
assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) backupJob.Status = succeededJobStatus(earlierStartTime, earlierTime) assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) @@ -561,10 +542,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJob.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } - err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, err) - err = r.apply(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, r.apply(ctx, restoreJob)) restoreJob.Status = succeededJobStatus(currentStartTime, currentTime) assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) @@ -583,12 +562,11 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(restoreJobs.Items), 0) // Assert pvc was annotated @@ -596,11 +574,14 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { }) t.Run("SnapshotsEnabledFailedRestoreExists", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create cluster with snapshots enabled ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -614,10 +595,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) backupJob.Status = succeededJobStatus(startTime, earlierTime) assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) @@ -627,17 +606,14 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJob.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } - err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, err) - err = r.apply(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, r.apply(ctx, restoreJob)) restoreJob.Status = batchv1.JobStatus{ Succeeded: 0, Failed: 1, } - err = r.Client.Status().Update(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) // Setup instances and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -672,7 +648,7 @@ func TestCreateDedicatedSnapshotVolume(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" labelMap := map[string]string{ naming.LabelCluster: cluster.Name, @@ -700,7 
+676,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -714,19 +690,17 @@ backupJob := testBackupJob(cluster) backupJob.Status.CompletionTime = &currentTime - err := r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob)) // Assert a restore job was created that has the correct annotation jobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, jobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(jobs.Items), 1) assert.Equal(t, jobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], backupJob.Status.CompletionTime.Format(time.RFC3339)) @@ -788,7 +762,7 @@ func TestGenerateVolumeSnapshot(t *testing.T) { assert.Equal(t, *snapshot.Spec.VolumeSnapshotClassName, "my-snapshot") assert.Equal(t, *snapshot.Spec.Source.PersistentVolumeClaimName, "dedicated-snapshot-volume") assert.Equal(t, snapshot.Labels[naming.LabelCluster], "hippo") - assert.Equal(t, snapshot.ObjectMeta.OwnerReferences[0].Name, "hippo") + assert.Equal(t, snapshot.OwnerReferences[0].Name, "hippo") } func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { @@ -814,9 +788,7 @@ t.Run("NoDsvRestoreJobs", func(t *testing.T) { job1 := testRestoreJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -830,16 +802,12 @@ job2.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: "backup-timestamp", } - - err := r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) job3 := testRestoreJob(cluster) job3.Name = "restore-job-3" job3.Namespace = ns.Name - - err = r.apply(ctx, job3) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job3)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -851,7 +819,6 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { ctx := context.Background() _, cc := setupKubernetes(t) - // require.ParallelCapacity(t, 1) r := &Reconciler{ Client: cc, @@ -871,9 +838,7 @@ t.Run("NoCompleteJobs", func(t *testing.T) { job1 := testBackupJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -881,25 +846,23 @@ }) t.Run("OneCompleteBackupJob", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } currentTime := metav1.Now() currentStartTime :=
metav1.NewTime(currentTime.AddDate(0, 0, -1)) job1 := testBackupJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) job2 := testBackupJob(cluster) job2.Namespace = ns.Name job2.Name = "backup-job-2" - - err = r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = succeededJobStatus(currentStartTime, currentTime) assert.NilError(t, r.Client.Status().Update(ctx, job1)) @@ -910,6 +873,9 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { }) t.Run("TwoCompleteBackupJobs", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } currentTime := metav1.Now() currentStartTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) @@ -918,28 +884,20 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job1 := testBackupJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) job2 := testBackupJob(cluster) job2.Namespace = ns.Name job2.Name = "backup-job-2" - - err = r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) - assert.NilError(t, err) - + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = succeededJobStatus(currentStartTime, currentTime) assert.NilError(t, r.Client.Status().Update(ctx, job1)) // Get job2 and update Status. 
- err = r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2) - assert.NilError(t, err) - + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2)) job2.Status = succeededJobStatus(earlierStartTime, earlierTime) assert.NilError(t, r.Client.Status().Update(ctx, job2)) @@ -1016,7 +974,7 @@ func TestGetSnapshotWithLatestError(t *testing.T) { }, } snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) - assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "bad-snapshot") + assert.Equal(t, snapshotWithLatestError.Name, "bad-snapshot") }) t.Run("TwoSnapshotsWithErrors", func(t *testing.T) { @@ -1051,7 +1009,7 @@ func TestGetSnapshotWithLatestError(t *testing.T) { }, } snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) - assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "second-bad-snapshot") + assert.Equal(t, snapshotWithLatestError.Name, "second-bad-snapshot") }) } @@ -1091,8 +1049,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1115,8 +1072,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1133,8 +1089,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1158,8 +1113,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1176,8 +1130,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1250,7 +1203,7 @@ func TestGetLatestReadySnapshot(t *testing.T) { }, } latestReadySnapshot := getLatestReadySnapshot(snapshotList) - assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "good-snapshot") + assert.Equal(t, latestReadySnapshot.Name, "good-snapshot") }) t.Run("TwoReadySnapshots", func(t *testing.T) { @@ -1281,7 +1234,7 @@ func TestGetLatestReadySnapshot(t *testing.T) { }, } latestReadySnapshot := getLatestReadySnapshot(snapshotList) - assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, 
"second-good-snapshot") + assert.Equal(t, latestReadySnapshot.Name, "second-good-snapshot") }) } @@ -1300,13 +1253,13 @@ func TestDeleteSnapshots(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" assert.NilError(t, r.Client.Create(ctx, cluster)) rhinoCluster := testCluster() rhinoCluster.Name = "rhino" rhinoCluster.Namespace = ns.Name - rhinoCluster.ObjectMeta.UID = "the-uid-456" + rhinoCluster.UID = "the-uid-456" assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) t.Cleanup(func() { @@ -1337,24 +1290,20 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ *snapshot1, }, } - err = r.deleteSnapshots(ctx, cluster, snapshotList) - assert.NilError(t, err) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) - assert.NilError(t, err) assert.Equal(t, len(existingSnapshots.Items), 1) }) @@ -1375,10 +1324,8 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1395,24 +1342,20 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ *snapshot1, *snapshot2, }, } - err = r.deleteSnapshots(ctx, cluster, snapshotList) - assert.NilError(t, err) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) - assert.NilError(t, err) assert.Equal(t, len(existingSnapshots.Items), 1) assert.Equal(t, existingSnapshots.Items[0].Name, "first-snapshot") }) diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index ffbdbe9d7d..d7f8bece32 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -15,9 +15,7 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/discovery" - - // Google Kubernetes Engine / Google Cloud Platform authentication provider - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Google Kubernetes Engine / Google Cloud Platform authentication provider "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" diff --git a/internal/controller/postgrescluster/util_test.go b/internal/controller/postgrescluster/util_test.go index c7332eea4e..8e7d5c434f 100644 --- a/internal/controller/postgrescluster/util_test.go +++ b/internal/controller/postgrescluster/util_test.go @@ -79,7 +79,7 @@ func TestAddDevSHM(t *testing.T) { // check there is an empty dir mounted under the dshm volume for _, v := range template.Spec.Volumes { - if v.Name == "dshm" && v.VolumeSource.EmptyDir != nil && v.VolumeSource.EmptyDir.Medium == corev1.StorageMediumMemory { + if v.Name == "dshm" && v.EmptyDir != nil && v.EmptyDir.Medium == corev1.StorageMediumMemory { found = true break } @@ -221,15 +221,14 @@ func TestAddNSSWrapper(t *testing.T) { // Each container that requires the nss_wrapper envs should be updated var actualUpdatedContainerCount int for i, c := range template.Spec.Containers { - if c.Name == naming.ContainerDatabase || - c.Name == naming.PGBackRestRepoContainerName || - c.Name == naming.PGBackRestRestoreContainerName { + switch c.Name { + case naming.ContainerDatabase, naming.PGBackRestRepoContainerName, naming.PGBackRestRestoreContainerName: assert.DeepEqual(t, expectedEnv, c.Env) actualUpdatedContainerCount++ - } else if c.Name == "pgadmin" { + case "pgadmin": assert.DeepEqual(t, expectedPGAdminEnv, c.Env) actualUpdatedContainerCount++ - } else { + default: assert.DeepEqual(t, beforeAddNSS[i], c) } } diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index db093d96e5..962b8878a4 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -220,7 +220,7 @@ func (r *Reconciler) configureExistingPGVolumes( Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, } - volume.ObjectMeta.Labels = map[string]string{ + volume.Labels = map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, naming.LabelInstance: instanceName, @@ -273,7 +273,7 @@ func (r *Reconciler) configureExistingPGWALVolume( Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, } - volume.ObjectMeta.Labels = map[string]string{ + volume.Labels = map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, naming.LabelInstance: instanceName, @@ -427,14 +427,14 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, // at this point, the Job either wasn't found or it has failed, so the it // should be created - moveDirJob.ObjectMeta.Annotations = naming.Merge(cluster.Spec.Metadata. + moveDirJob.Annotations = naming.Merge(cluster.Spec.Metadata. GetAnnotationsOrNil()) labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), naming.DirectoryMoveJobLabels(cluster.Name), map[string]string{ naming.LabelMovePGDataDir: "", }) - moveDirJob.ObjectMeta.Labels = labels + moveDirJob.Labels = labels // `patroni.dynamic.json` holds the previous state of the DCS. Since we are // migrating the volumes, we want to clear out any obsolete configuration info. 
@@ -548,14 +548,14 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, } } - moveDirJob.ObjectMeta.Annotations = naming.Merge(cluster.Spec.Metadata. + moveDirJob.Annotations = naming.Merge(cluster.Spec.Metadata. GetAnnotationsOrNil()) labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), naming.DirectoryMoveJobLabels(cluster.Name), map[string]string{ naming.LabelMovePGWalDir: "", }) - moveDirJob.ObjectMeta.Labels = labels + moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" echo "pg_wal_pvc=%s" @@ -570,7 +570,7 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, cluster.Spec.DataSource.Volumes.PGWALVolume.PVCName, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, - cluster.ObjectMeta.Name) + cluster.Name) container := corev1.Container{ Command: []string{"bash", "-ceu", script}, @@ -665,14 +665,14 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, } } - moveDirJob.ObjectMeta.Annotations = naming.Merge( + moveDirJob.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil()) labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), naming.DirectoryMoveJobLabels(cluster.Name), map[string]string{ naming.LabelMovePGBackRestRepoDir: "", }) - moveDirJob.ObjectMeta.Labels = labels + moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s pgBackRest repo volume for PGO v5.x" echo "repo_pvc=%s" diff --git a/internal/controller/runtime/runtime.go b/internal/controller/runtime/runtime.go index 7d17304657..9d9e59854c 100644 --- a/internal/controller/runtime/runtime.go +++ b/internal/controller/runtime/runtime.go @@ -7,6 +7,7 @@ package runtime import ( "context" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -18,8 +19,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ) type ( diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go index 1108853e7f..0cc3191967 100644 --- a/internal/controller/standalone_pgadmin/apply.go +++ b/internal/controller/standalone_pgadmin/apply.go @@ -22,7 +22,7 @@ func (r *PGAdminReconciler) patch( patch client.Patch, options ...client.PatchOption, ) error { options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) + return r.Patch(ctx, object, patch, options...) 
} // apply sends an apply patch to object's endpoint in the Kubernetes API and diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go index 0324a45976..150f7ecb12 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -12,9 +12,8 @@ import ( "sort" "strconv" - corev1 "k8s.io/api/core/v1" - "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" diff --git a/internal/controller/standalone_pgadmin/configmap_test.go b/internal/controller/standalone_pgadmin/configmap_test.go index 5e58a0be04..6e79c4a316 100644 --- a/internal/controller/standalone_pgadmin/configmap_test.go +++ b/internal/controller/standalone_pgadmin/configmap_test.go @@ -197,12 +197,12 @@ namespace: some-ns assert.NilError(t, err) // Annotations present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, configmap.Annotations, map[string]string{ "a": "v1", "b": "v2", }) // Labels present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, configmap.Labels, map[string]string{ "c": "v3", "d": "v4", "postgres-operator.crunchydata.com/pgadmin": "pg1", "postgres-operator.crunchydata.com/role": "pgadmin", diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 54143cdbed..d81823a8c2 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -158,7 +158,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct func (r *PGAdminReconciler) setControllerReference( owner *v1beta1.PGAdmin, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, r.Scheme()) } // deleteControlled safely deletes object when it is controlled by pgAdmin. 
@@ -170,7 +170,7 @@ func (r *PGAdminReconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Delete(ctx, object, exactly) } return nil diff --git a/internal/controller/standalone_pgadmin/pod.go b/internal/controller/standalone_pgadmin/pod.go index a0855b415d..5231771538 100644 --- a/internal/controller/standalone_pgadmin/pod.go +++ b/internal/controller/standalone_pgadmin/pod.go @@ -170,7 +170,7 @@ func pod( // Check the configmap to see if we think TLS is enabled // If so, update the readiness check scheme to HTTPS if strings.Contains(gunicornData, "certfile") && strings.Contains(gunicornData, "keyfile") { - readinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + readinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS } container.ReadinessProbe = readinessProbe diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/postgrescluster.go index 4fa1f39165..b9c6a55551 100644 --- a/internal/controller/standalone_pgadmin/postgrescluster.go +++ b/internal/controller/standalone_pgadmin/postgrescluster.go @@ -7,12 +7,12 @@ package standalone_pgadmin import ( "context" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go index b465dadb97..bfdc04c6ec 100644 --- a/internal/controller/standalone_pgadmin/service.go +++ b/internal/controller/standalone_pgadmin/service.go @@ -7,16 +7,14 @@ package standalone_pgadmin import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - apierrors "k8s.io/apimachinery/pkg/api/errors" - - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -38,7 +36,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService( // need to delete any existing service(s). At the start of every reconcile // get all services that match the current pgAdmin labels. 
services := corev1.ServiceList{} - if err := r.Client.List(ctx, &services, + if err := r.List(ctx, &services, client.InNamespace(pgadmin.Namespace), client.MatchingLabels{ naming.LabelStandalonePGAdmin: pgadmin.Name, @@ -64,7 +62,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService( if pgadmin.Spec.ServiceName != "" { // Look for an existing service with name ServiceName in the namespace existingService := &corev1.Service{} - err := r.Client.Get(ctx, types.NamespacedName{ + err := r.Get(ctx, types.NamespacedName{ Name: pgadmin.Spec.ServiceName, Namespace: pgadmin.GetNamespace(), }, existingService) diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index d300bfc113..cf2cb1b89d 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -7,14 +7,13 @@ package standalone_pgadmin import ( "context" + "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -33,7 +32,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if err := errors.WithStack(r.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { if !apierrors.IsNotFound(err) { return err } @@ -46,7 +45,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + return errors.WithStack(client.IgnoreNotFound(r.Delete(ctx, existing, exactly, propagate))) } } diff --git a/internal/controller/standalone_pgadmin/statefulset_test.go b/internal/controller/standalone_pgadmin/statefulset_test.go index 4047a94d30..9b7e11e8d8 100644 --- a/internal/controller/standalone_pgadmin/statefulset_test.go +++ b/internal/controller/standalone_pgadmin/statefulset_test.go @@ -35,6 +35,7 @@ func TestReconcilePGAdminStatefulSet(t *testing.T) { pgadmin := new(v1beta1.PGAdmin) pgadmin.Name = "test-standalone-pgadmin" pgadmin.Namespace = ns.Name + pgadmin.Spec.Image = initialize.String("some-image") assert.NilError(t, cc.Create(ctx, pgadmin)) t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pgadmin)) }) @@ -103,6 +104,7 @@ terminationGracePeriodSeconds: 30 // add pod level customizations custompgadmin.Name = "custom-pgadmin" custompgadmin.Namespace = ns.Name + custompgadmin.Spec.Image = initialize.String("some-image") // annotation and label custompgadmin.Spec.Metadata = &v1beta1.Metadata{ diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 7f9de76452..027960e90c 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -53,7 +53,7 @@ func (r 
*PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * pod := &corev1.Pod{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} pod.Name += "-0" - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + err := errors.WithStack(r.Get(ctx, client.ObjectKeyFromObject(pod), pod)) if err != nil { return client.IgnoreNotFound(err) } @@ -142,7 +142,7 @@ func (r *PGAdminReconciler) writePGAdminUsers(ctx context.Context, pgadmin *v1be existingUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret)) + r.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret)) if client.IgnoreNotFound(err) != nil { return err } @@ -204,10 +204,10 @@ cd $PGADMIN_DIR // Get password from secret userPasswordSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ Namespace: pgadmin.Namespace, - Name: user.PasswordRef.LocalObjectReference.Name, + Name: user.PasswordRef.Name, }} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret)) + r.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret)) if err != nil { log.Error(err, "Could not get user password secret") continue @@ -323,11 +323,7 @@ cd $PGADMIN_DIR // to add a user, that user will not be in intentUsers. If errors occurred when attempting to // update a user, the user will be in intentUsers as it existed before. We now want to marshal the // intentUsers to json and write the users.json file to the secret. - usersJSON, err := json.Marshal(intentUsers) - if err != nil { - return err - } - intentUserSecret.Data["users.json"] = usersJSON + intentUserSecret.Data["users.json"], _ = json.Marshal(intentUsers) err = errors.WithStack(r.setControllerReference(pgadmin, intentUserSecret)) if err == nil { diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go index 72b2c4d071..57910b0926 100644 --- a/internal/controller/standalone_pgadmin/users_test.go +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -7,12 +7,12 @@ package standalone_pgadmin import ( "context" "encoding/json" + "errors" "fmt" "io" "strings" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -330,8 +330,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, calls, 1, "PodExec should be called once") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -390,8 +390,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, 
json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -462,8 +462,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -507,8 +507,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, calls, 0, "PodExec should be called zero times") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -549,8 +549,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -576,8 +576,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, calls, 2, "PodExec should be called once more") // User in users.json should be unchanged - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -629,8 +629,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -657,8 +657,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -685,8 +685,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - 
assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -714,8 +714,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go index ea95e0f22b..b37c21ed9a 100644 --- a/internal/controller/standalone_pgadmin/volume.go +++ b/internal/controller/standalone_pgadmin/volume.go @@ -7,14 +7,13 @@ package standalone_pgadmin import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go index b822fc6fca..b0113cba64 100644 --- a/internal/controller/standalone_pgadmin/volume_test.go +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -6,6 +6,7 @@ package standalone_pgadmin import ( "context" + "errors" "testing" "gotest.tools/v3/assert" @@ -16,8 +17,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" diff --git a/internal/controller/standalone_pgadmin/watches.go b/internal/controller/standalone_pgadmin/watches.go index 748cfafef2..8991c71244 100644 --- a/internal/controller/standalone_pgadmin/watches.go +++ b/internal/controller/standalone_pgadmin/watches.go @@ -91,7 +91,7 @@ func (r *PGAdminReconciler) findPGAdminsForSecret( }); err == nil { for i := range pgadmins.Items { for j := range pgadmins.Items[i].Spec.Users { - if pgadmins.Items[i].Spec.Users[j].PasswordRef.LocalObjectReference.Name == secret.Name { + if pgadmins.Items[i].Spec.Users[j].PasswordRef.Name == secret.Name { matching = append(matching, &pgadmins.Items[i]) break } diff --git a/internal/logging/logrus_test.go b/internal/logging/logrus_test.go index 071bbd0664..d74a3a0e01 100644 --- a/internal/logging/logrus_test.go +++ b/internal/logging/logrus_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" + "github.com/pkg/errors" //nolint:depguard // This is testing the logging of stack frames. 
"gotest.tools/v3/assert" ) diff --git a/internal/patroni/config.go b/internal/patroni/config.go index 67ff9405e8..c9487dfa52 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -493,7 +493,7 @@ func instanceYAML( // created. That value should be injected using the downward API and the // PATRONI_KUBERNETES_POD_IP environment variable. - // Missing here is "ports" which is is connascent with "postgresql.connect_address". + // Missing here is "ports" which is connascent with "postgresql.connect_address". // See the PATRONI_KUBERNETES_PORTS env variable. }, diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index 590f3bf81a..9187dc3572 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -999,7 +999,7 @@ func TestPGBackRestCreateReplicaCommand(t *testing.T) { file := filepath.Join(dir, "command.sh") assert.NilError(t, os.WriteFile(file, []byte(command), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", "--shell=sh", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", "--shell=sh", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -1021,7 +1021,7 @@ func TestPGBackRestCreateReplicaCommand(t *testing.T) { file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -1100,7 +1100,6 @@ func TestProbeTiming(t *testing.T) { FailureThreshold: 1, }}, } { - tt := tt actual := probeTiming(&v1beta1.PatroniSpec{ LeaderLeaseDurationSeconds: &tt.lease, SyncPeriodSeconds: &tt.sync, diff --git a/internal/pgadmin/config_test.go b/internal/pgadmin/config_test.go index e634aee361..0e659c7070 100644 --- a/internal/pgadmin/config_test.go +++ b/internal/pgadmin/config_test.go @@ -77,7 +77,7 @@ func TestStartupCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) }) @@ -94,7 +94,7 @@ func TestStartupCommand(t *testing.T) { // Expect flake8 to be happy. Ignore "E401 multiple imports on one line" // in addition to the defaults. The file contents appear in PodSpec, so // allow lines longer than the default to save some vertical space. - cmd := exec.Command(flake8, "--extend-ignore=E401", "--max-line-length=99", file) + cmd := exec.CommandContext(t.Context(), flake8, "--extend-ignore=E401", "--max-line-length=99", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) }) diff --git a/internal/pgadmin/users_test.go b/internal/pgadmin/users_test.go index 17bec23204..18bfd1efd3 100644 --- a/internal/pgadmin/users_test.go +++ b/internal/pgadmin/users_test.go @@ -180,7 +180,7 @@ with create_app().app_context(): // Expect flake8 to be happy. Ignore "E402 module level import not // at top of file" in addition to the defaults. 
- cmd := exec.Command(flake8, "--extend-ignore=E402", file) + cmd := exec.CommandContext(ctx, flake8, "--extend-ignore=E402", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index bbaca45b04..8dec601144 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -614,7 +614,7 @@ func TestReloadCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -642,7 +642,7 @@ func TestRestoreCommand(t *testing.T) { file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -679,7 +679,7 @@ func TestDedicatedSnapshotVolumeRestoreCommand(t *testing.T) { file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } diff --git a/internal/pgbackrest/pgbackrest_test.go b/internal/pgbackrest/pgbackrest_test.go index 4df29b8449..5bb1d571bb 100644 --- a/internal/pgbackrest/pgbackrest_test.go +++ b/internal/pgbackrest/pgbackrest_test.go @@ -13,12 +13,10 @@ import ( "testing" "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/api/resource" - corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -94,7 +92,7 @@ fi assert.NilError(t, os.WriteFile(file, []byte(shellCheckScript), 0o600)) // Expect shellcheck to be happy. 
- cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(ctx, shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index c7abfef900..fcdb131cce 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -128,7 +128,7 @@ func TestAddRepoVolumesToPod(t *testing.T) { for _, r := range tc.repos { var foundVolume bool for _, v := range template.Spec.Volumes { - if v.Name == r.Name && v.VolumeSource.PersistentVolumeClaim.ClaimName == + if v.Name == r.Name && v.PersistentVolumeClaim.ClaimName == naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { foundVolume = true break diff --git a/internal/pgbackrest/util_test.go b/internal/pgbackrest/util_test.go index e3c98e0dd7..d2fd93455c 100644 --- a/internal/pgbackrest/util_test.go +++ b/internal/pgbackrest/util_test.go @@ -6,7 +6,7 @@ package pgbackrest import ( "io" - "math/rand" + "math/rand/v2" "strconv" "testing" @@ -80,7 +80,7 @@ func TestCalculateConfigHashes(t *testing.T) { assert.Equal(t, preCalculatedRepo3S3Hash, configHashMap["repo3"]) // call CalculateConfigHashes multiple times to ensure consistent results - for i := 0; i < 10; i++ { + for range 10 { hashMap, hash, err := CalculateConfigHashes(postgresCluster) assert.NilError(t, err) assert.Equal(t, configHash, hash) @@ -92,7 +92,7 @@ func TestCalculateConfigHashes(t *testing.T) { // shuffle the repo slice in order to ensure the same result is returned regardless of the // order of the repos slice shuffleCluster := postgresCluster.DeepCopy() - for i := 0; i < 10; i++ { + for range 10 { repos := shuffleCluster.Spec.Backups.PGBackRest.Repos rand.Shuffle(len(repos), func(i, j int) { repos[i], repos[j] = repos[j], repos[i] @@ -103,7 +103,7 @@ func TestCalculateConfigHashes(t *testing.T) { } // now modify some values in each repo and confirm we see a different result - for i := 0; i < 3; i++ { + for i := range 3 { modCluster := postgresCluster.DeepCopy() switch i { case 0: diff --git a/internal/pgbouncer/config_test.go b/internal/pgbouncer/config_test.go index f5ddef6214..f19a37d992 100644 --- a/internal/pgbouncer/config_test.go +++ b/internal/pgbouncer/config_test.go @@ -214,7 +214,7 @@ func TestReloadCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } diff --git a/internal/pgbouncer/postgres.go b/internal/pgbouncer/postgres.go index 74cc23ecd6..39bf5d7458 100644 --- a/internal/pgbouncer/postgres.go +++ b/internal/pgbouncer/postgres.go @@ -183,7 +183,7 @@ REVOKE ALL PRIVILEGES // - https://www.postgresql.org/docs/current/perm-functions.html `ALTER ROLE :"username" SET search_path TO :'namespace';`, - // Allow the PgBouncer user to to login. + // Allow the PgBouncer user to login. `ALTER ROLE :"username" LOGIN PASSWORD :'verifier';`, // Commit (finish) the transaction. 
diff --git a/internal/pki/encoding_test.go b/internal/pki/encoding_test.go index 2c63099ca4..eb2b1365b3 100644 --- a/internal/pki/encoding_test.go +++ b/internal/pki/encoding_test.go @@ -81,7 +81,7 @@ func TestCertificateTextMarshaling(t *testing.T) { assert.NilError(t, os.WriteFile(certFile, certBytes, 0o600)) // The "openssl x509" command parses X.509 certificates. - cmd := exec.Command(openssl, "x509", + cmd := exec.CommandContext(t.Context(), openssl, "x509", "-in", certFile, "-inform", "PEM", "-noout", "-text") output, err := cmd.CombinedOutput() @@ -153,7 +153,7 @@ func TestPrivateKeyTextMarshaling(t *testing.T) { assert.NilError(t, os.WriteFile(keyFile, keyBytes, 0o600)) // The "openssl pkey" command processes public and private keys. - cmd := exec.Command(openssl, "pkey", + cmd := exec.CommandContext(t.Context(), openssl, "pkey", "-in", keyFile, "-inform", "PEM", "-noout", "-text") output, err := cmd.CombinedOutput() @@ -164,12 +164,12 @@ func TestPrivateKeyTextMarshaling(t *testing.T) { "expected valid private key, got:\n%s", output) t.Run("Check", func(t *testing.T) { - output, _ := exec.Command(openssl, "pkey", "-help").CombinedOutput() + output, _ := exec.CommandContext(t.Context(), openssl, "pkey", "-help").CombinedOutput() if !strings.Contains(string(output), "-check") { t.Skip(`requires "-check" flag`) } - cmd := exec.Command(openssl, "pkey", + cmd := exec.CommandContext(t.Context(), openssl, "pkey", "-check", "-in", keyFile, "-inform", "PEM", "-noout", "-text") output, err := cmd.CombinedOutput() diff --git a/internal/pki/pki_test.go b/internal/pki/pki_test.go index 000f1a5042..fa8f290475 100644 --- a/internal/pki/pki_test.go +++ b/internal/pki/pki_test.go @@ -194,7 +194,7 @@ func TestRootIsInvalid(t *testing.T) { t.Cleanup(func() { currentTime = original }) currentTime = func() time.Time { - return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.Local) + return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC) } root, err := NewRootCertificateAuthority() @@ -395,7 +395,7 @@ func TestLeafIsInvalid(t *testing.T) { t.Cleanup(func() { currentTime = original }) currentTime = func() time.Time { - return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.Local) + return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC) } leaf, err := root.GenerateLeafCertificate("", nil) @@ -439,7 +439,7 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { verify := func(t testing.TB, args ...string) { t.Helper() // #nosec G204 -- args from this test - cmd := exec.Command(openssl, append([]string{"verify"}, args...)...) + cmd := exec.CommandContext(t.Context(), openssl, append([]string{"verify"}, args...)...) 
output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) @@ -476,7 +476,7 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { } func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { - output, _ := exec.Command(openssl, "verify", "-help").CombinedOutput() + output, _ := exec.CommandContext(t.Context(), openssl, "verify", "-help").CombinedOutput() if !strings.Contains(string(output), "-x509_strict") { t.Skip(`requires "-x509_strict" flag`) } @@ -487,7 +487,7 @@ func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { verify := func(t testing.TB, args ...string) { t.Helper() // #nosec G204 -- args from this test - cmd := exec.Command(openssl, append([]string{"verify", + cmd := exec.CommandContext(t.Context(), openssl, append([]string{"verify", // Do not use the default trusted CAs. "-no-CAfile", "-no-CApath", // Disable "non-compliant workarounds for broken certificates". diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index 0315072af6..ef24f202c3 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -53,7 +53,7 @@ func TestWALDirectory(t *testing.T) { func TestBashHalt(t *testing.T) { t.Run("NoPipeline", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; halt ab cd e`) var exit *exec.ExitError @@ -65,7 +65,7 @@ func TestBashHalt(t *testing.T) { }) t.Run("PipelineZeroStatus", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; true && halt message`) var exit *exec.ExitError @@ -77,7 +77,7 @@ func TestBashHalt(t *testing.T) { }) t.Run("PipelineNonZeroStatus", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; (exit 99) || halt $'multi\nline'`) var exit *exec.ExitError @@ -89,7 +89,7 @@ func TestBashHalt(t *testing.T) { }) t.Run("Subshell", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; (halt 'err') || echo 'after'`) stderr := new(bytes.Buffer) @@ -105,7 +105,7 @@ func TestBashHalt(t *testing.T) { func TestBashPermissions(t *testing.T) { // macOS `stat` takes different arguments than BusyBox and GNU coreutils. - if output, err := exec.Command("stat", "--help").CombinedOutput(); err != nil { + if output, err := exec.CommandContext(t.Context(), "stat", "--help").CombinedOutput(); err != nil { t.Skip(`requires "stat" executable`) } else if !strings.Contains(string(output), "%A") { t.Skip(`requires "stat" with access format sequence`) @@ -117,7 +117,7 @@ func TestBashPermissions(t *testing.T) { assert.NilError(t, os.WriteFile(filepath.Join(dir, "sub", "fn"), nil, 0o624)) // #nosec G306 OK permissions for a temp dir in a test assert.NilError(t, os.Chmod(filepath.Join(dir, "sub", "fn"), 0o624)) - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashPermissions+`; permissions "$@"`, "-", filepath.Join(dir, "sub", "fn")) @@ -132,7 +132,7 @@ func TestBashPermissions(t *testing.T) { func TestBashRecreateDirectory(t *testing.T) { // macOS `stat` takes different arguments than BusyBox and GNU coreutils. 
- if output, err := exec.Command("stat", "--help").CombinedOutput(); err != nil { + if output, err := exec.CommandContext(t.Context(), "stat", "--help").CombinedOutput(); err != nil { t.Skip(`requires "stat" executable`) } else if !strings.Contains(string(output), "%a") { t.Skip(`requires "stat" with access format sequence`) @@ -144,7 +144,7 @@ func TestBashRecreateDirectory(t *testing.T) { assert.NilError(t, os.WriteFile(filepath.Join(dir, "d", "file"), nil, 0o644)) // #nosec G306 OK permissions for a temp dir in a test stat := func(args ...string) string { - cmd := exec.Command("stat", "-c", "%i %#a %N") + cmd := exec.CommandContext(t.Context(), "stat", "-c", "%i %#a %N") cmd.Args = append(cmd.Args, args...) out, err := cmd.CombinedOutput() @@ -161,7 +161,7 @@ func TestBashRecreateDirectory(t *testing.T) { filepath.Join(dir, "d", "file"), ) - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-ceu", "--", bashRecreateDirectory+` recreate "$@"`, "-", filepath.Join(dir, "d"), "0740") @@ -200,7 +200,7 @@ func TestBashRecreateDirectory(t *testing.T) { func TestBashSafeLink(t *testing.T) { // macOS `mv` takes different arguments than GNU coreutils. - if output, err := exec.Command("mv", "--help").CombinedOutput(); err != nil { + if output, err := exec.CommandContext(t.Context(), "mv", "--help").CombinedOutput(); err != nil { t.Skip(`requires "mv" executable`) } else if !strings.Contains(string(output), "no-target-directory") { t.Skip(`requires "mv" that overwrites a directory symlink`) @@ -208,7 +208,7 @@ func TestBashSafeLink(t *testing.T) { // execute calls the bash function with args. execute := func(args ...string) (string, error) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-ceu", "--", bashSafeLink+`safelink "$@"`, "-") cmd.Args = append(cmd.Args, args...) output, err := cmd.CombinedOutput() @@ -475,7 +475,7 @@ func TestStartupCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(ctx, shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) diff --git a/internal/postgres/exec_test.go b/internal/postgres/exec_test.go index b8f5693bef..3ec94717d5 100644 --- a/internal/postgres/exec_test.go +++ b/internal/postgres/exec_test.go @@ -184,7 +184,7 @@ done <<< "${databases}" assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) diff --git a/internal/postgres/password/md5.go b/internal/postgres/password/md5.go index c99b2c0e30..55cc43f5cb 100644 --- a/internal/postgres/password/md5.go +++ b/internal/postgres/password/md5.go @@ -5,7 +5,6 @@ package password import ( - // #nosec G501 "crypto/md5" "errors" diff --git a/internal/postgres/password/scram.go b/internal/postgres/password/scram.go index bbf8dbcbe6..90eb2a54ad 100644 --- a/internal/postgres/password/scram.go +++ b/internal/postgres/password/scram.go @@ -138,7 +138,7 @@ func (s *SCRAMPassword) isASCII() bool { // iterate through each character of the plaintext password and determine if // it is ASCII. 
if it is not ASCII, exit early // per research, this loop is optimized to be fast for searching - for i := 0; i < len(s.password); i++ { + for i := range len(s.password) { if s.password[i] > unicode.MaxASCII { return false } diff --git a/internal/testing/events/recorder.go b/internal/testing/events/recorder.go index e76ef21eb3..dad5dccf83 100644 --- a/internal/testing/events/recorder.go +++ b/internal/testing/events/recorder.go @@ -89,7 +89,7 @@ func (*Recorder) AnnotatedEventf(object runtime.Object, annotations map[string]s } func (r *Recorder) Event(object runtime.Object, eventtype, reason, message string) { if r.eventf != nil { - r.eventf(object, nil, eventtype, reason, "", message) + r.eventf(object, nil, eventtype, reason, "", "%v", message) } } func (r *Recorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...any) { diff --git a/internal/testing/require/exec.go b/internal/testing/require/exec.go index 338abef584..a9e028c55e 100644 --- a/internal/testing/require/exec.go +++ b/internal/testing/require/exec.go @@ -38,7 +38,7 @@ func executable(name string, args ...string) func(testing.TB) string { t.Helper() once.Do(func() { path, err := exec.LookPath(name) - cmd := exec.Command(path, args...) // #nosec G204 -- args from init() + cmd := exec.CommandContext(t.Context(), path, args...) // #nosec G204 -- args from init() if err != nil { result = func(t testing.TB) string { diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go index fb79095ab6..d1f1421988 100644 --- a/internal/testing/validation/postgrescluster_test.go +++ b/internal/testing/validation/postgrescluster_test.go @@ -63,7 +63,6 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "cannot contain comments") - //nolint:errorlint // This is a test, and a panic is unlikely. status := err.(apierrors.APIStatus).Status() assert.Assert(t, status.Details != nil) assert.Equal(t, len(status.Details.Causes), 3) @@ -85,7 +84,6 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "cannot assign password") - //nolint:errorlint // This is a test, and a panic is unlikely. status := err.(apierrors.APIStatus).Status() assert.Assert(t, status.Details != nil) assert.Equal(t, len(status.Details.Causes), 2) @@ -106,7 +104,6 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "should match") - //nolint:errorlint // This is a test, and a panic is unlikely. 
status := err.(apierrors.APIStatus).Status() assert.Assert(t, status.Details != nil) assert.Equal(t, len(status.Details.Causes), 1) diff --git a/internal/util/secrets_test.go b/internal/util/secrets_test.go index e07a430718..ae5f7f5b05 100644 --- a/internal/util/secrets_test.go +++ b/internal/util/secrets_test.go @@ -55,7 +55,7 @@ func TestGenerateAlphaNumericPassword(t *testing.T) { } previous := sets.Set[string]{} - for i := 0; i < 10; i++ { + for range 10 { password, err := GenerateAlphaNumericPassword(5) assert.NilError(t, err) @@ -80,7 +80,7 @@ func TestGenerateASCIIPassword(t *testing.T) { } previous := sets.Set[string]{} - for i := 0; i < 10; i++ { + for range 10 { password, err := GenerateASCIIPassword(5) assert.NilError(t, err) diff --git a/internal/util/util.go b/internal/util/util.go index 301ccacb4d..a008c9b3e1 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -24,7 +24,7 @@ func SQLQuoteIdentifier(identifier string) string { identifier = identifier[:end] } - return `"` + strings.Replace(identifier, `"`, `""`, -1) + `"` + return `"` + strings.ReplaceAll(identifier, `"`, `""`) + `"` } // SQLQuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal @@ -45,14 +45,14 @@ func SQLQuoteLiteral(literal string) string { // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c // // substitute any single-quotes (') with two single-quotes ('') - literal = strings.Replace(literal, `'`, `''`, -1) + literal = strings.ReplaceAll(literal, `'`, `''`) // determine if the string has any backslashes (\) in it. // if it does, replace any backslashes (\) with two backslashes (\\) // then, we need to wrap the entire string with a PostgreSQL // C-style escape. Per how "PQEscapeStringInternal" handles this case, we // also add a space before the "E" if strings.Contains(literal, `\`) { - literal = strings.Replace(literal, `\`, `\\`, -1) + literal = strings.ReplaceAll(literal, `\`, `\\`) literal = ` E'` + literal + `'` } else { // otherwise, we can just wrap the literal with a pair of single quotes