diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 780582498e..a9767daeae 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -124,7 +124,6 @@ jobs: hack/create-kubeconfig.sh postgres-operator pgo docker run --detach --network host --read-only \ --volume "$(pwd):/mnt" --workdir '/mnt' \ - --env 'CHECK_FOR_UPGRADES=false' \ --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534' \ diff --git a/Makefile b/Makefile index 06355b4cc1..793e806d77 100644 --- a/Makefile +++ b/Makefile @@ -116,7 +116,6 @@ deploy-dev: createnamespaces QUERIES_CONFIG_DIR='$(QUERIES_CONFIG_DIR)' \ CRUNCHY_DEBUG="$${CRUNCHY_DEBUG:-true}" \ PGO_FEATURE_GATES="$${PGO_FEATURE_GATES:-AllAlpha=true,AppendCustomQueries=false}" \ - CHECK_FOR_UPGRADES="$${CHECK_FOR_UPGRADES:-false}" \ KUBECONFIG=hack/.kube/postgres-operator/pgo \ PGO_NAMESPACE='postgres-operator' \ PGO_INSTALLER='deploy-dev' \ diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 50ac74943d..48a6a25d43 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -34,9 +34,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/tracing" - "github.com/crunchydata/postgres-operator/internal/upgradecheck" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -243,10 +241,6 @@ func main() { manager := need(runtime.NewManager(config, options)) must(manager.Add(k8s)) - registrar := need(registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), stopRunning)) - must(manager.Add(registrar)) - token, _ := registrar.CheckToken() - bridgeURL := os.Getenv("PGO_BRIDGE_URL") bridgeClient := func() *bridge.Client { client := bridge.NewClient(bridgeURL, versionString) @@ -255,8 +249,8 @@ func main() { } // add all PostgreSQL Operator controllers to the runtime manager - must(pgupgrade.ManagedReconciler(manager, registrar)) - must(postgrescluster.ManagedReconciler(manager, registrar)) + must(pgupgrade.ManagedReconciler(manager)) + must(postgrescluster.ManagedReconciler(manager)) must(standalone_pgadmin.ManagedReconciler(manager)) must(crunchybridgecluster.ManagedReconciler(manager, func() bridge.ClientInterface { return bridgeClient() @@ -266,16 +260,6 @@ func main() { must(bridge.ManagedInstallationReconciler(manager, bridgeClient)) } - // Enable upgrade checking - upgradeCheckingDisabled := strings.EqualFold(os.Getenv("CHECK_FOR_UPGRADES"), "false") - if !upgradeCheckingDisabled { - log.Info("upgrade checking enabled") - url := os.Getenv("CHECK_FOR_UPGRADES_URL") - must(upgradecheck.ManagedScheduler(manager, url, versionString, token)) - } else { - log.Info("upgrade checking disabled") - } - // Enable health probes must(manager.AddHealthzCheck("health", healthz.Ping)) must(manager.AddReadyzCheck("check", healthz.Ping)) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 921d1fc48a..8a39cc717e 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ 
b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -19085,11 +19085,6 @@ spec: type: integer type: object type: object - registrationRequired: - properties: - pgoVersion: - type: string - type: object startupInstance: description: |- The instance that should be started first when bootstrapping and/or starting a @@ -19098,8 +19093,6 @@ spec: startupInstanceSet: description: The instance set associated with the startupInstance type: string - tokenRequired: - type: string userInterface: description: Current state of the PostgreSQL user interface. properties: @@ -38138,11 +38131,6 @@ spec: type: integer type: object type: object - registrationRequired: - properties: - pgoVersion: - type: string - type: object startupInstance: description: |- The instance that should be started first when bootstrapping and/or starting a @@ -38151,8 +38139,6 @@ spec: startupInstanceSet: description: The instance set associated with the startupInstance type: string - tokenRequired: - type: string userInterface: description: Current state of the PostgreSQL user interface. properties: diff --git a/go.mod b/go.mod index 74914ddeb7..e1bd404a44 100644 --- a/go.mod +++ b/go.mod @@ -5,9 +5,7 @@ go 1.24.0 require ( github.com/go-logr/logr v1.4.3 - github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/go-cmp v0.7.0 - github.com/google/uuid v1.6.0 github.com/itchyny/gojq v0.12.17 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 github.com/onsi/ginkgo/v2 v2.25.3 @@ -62,6 +60,7 @@ require ( github.com/google/cel-go v0.23.2 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect diff --git a/go.sum b/go.sum index e65172ea2e..7c812823ff 100644 --- a/go.sum +++ b/go.sum @@ -52,8 +52,6 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index 61eb39a7c8..b977ea8ba4 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -23,7 +23,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/tracing" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ 
-34,8 +33,7 @@ const ( // PGUpgradeReconciler reconciles a PGUpgrade object type PGUpgradeReconciler struct { - Recorder record.EventRecorder - Registration registration.Registration + Recorder record.EventRecorder Reader interface { Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error @@ -55,14 +53,13 @@ type PGUpgradeReconciler struct { //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // ManagedReconciler creates a [PGUpgradeReconciler] and adds it to m. -func ManagedReconciler(m ctrl.Manager, r registration.Registration) error { +func ManagedReconciler(m ctrl.Manager) error { kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPGUpgrade) recorder := m.GetEventRecorderFor(naming.ControllerPGUpgrade) reconciler := &PGUpgradeReconciler{ Reader: kubernetes, Recorder: recorder, - Registration: r, StatusWriter: kubernetes.Status(), Writer: kubernetes, } @@ -150,10 +147,6 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, upgrade *v1beta1.PG return } - if !r.UpgradeAuthorized(upgrade) { - return ctrl.Result{}, nil - } - // Set progressing condition to true if it doesn't exist already setStatusToProgressingIfReasonWas("", upgrade) diff --git a/internal/controller/pgupgrade/registration.go b/internal/controller/pgupgrade/registration.go deleted file mode 100644 index 4fbf7a7ce1..0000000000 --- a/internal/controller/pgupgrade/registration.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package pgupgrade - -import ( - "k8s.io/apimachinery/pkg/api/meta" - - "github.com/crunchydata/postgres-operator/internal/registration" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func (r *PGUpgradeReconciler) UpgradeAuthorized(upgrade *v1beta1.PGUpgrade) bool { - // Allow an upgrade in progress to complete, when the registration requirement is introduced. - // But don't allow new upgrades to be started until a valid token is applied. - progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) != nil - required := r.Registration.Required(r.Recorder, upgrade, &upgrade.Status.Conditions) - - // If a valid token has not been applied, warn the user. - if required && !progressing { - registration.SetRequiredWarning(r.Recorder, upgrade, &upgrade.Status.Conditions) - return false - } - - return true -} diff --git a/internal/controller/pgupgrade/registration_test.go b/internal/controller/pgupgrade/registration_test.go deleted file mode 100644 index 22903d8cdb..0000000000 --- a/internal/controller/pgupgrade/registration_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package pgupgrade - -import ( - "testing" - - "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/controller/runtime" - "github.com/crunchydata/postgres-operator/internal/registration" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" - "github.com/crunchydata/postgres-operator/internal/testing/events" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func TestUpgradeAuthorized(t *testing.T) { - t.Run("UpgradeAlreadyInProgress", func(t *testing.T) { - reconciler := new(PGUpgradeReconciler) - upgrade := new(v1beta1.PGUpgrade) - - for _, required := range []bool{false, true} { - reconciler.Registration = registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - return required - }) - - meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ - Type: ConditionPGUpgradeProgressing, - Status: metav1.ConditionTrue, - }) - - result := reconciler.UpgradeAuthorized(upgrade) - assert.Assert(t, result, "expected signal to proceed") - - progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) - assert.Equal(t, progressing.Status, metav1.ConditionTrue) - } - }) - - t.Run("RegistrationRequired", func(t *testing.T) { - recorder := events.NewRecorder(t, runtime.Scheme) - upgrade := new(v1beta1.PGUpgrade) - upgrade.Name = "some-upgrade" - - reconciler := PGUpgradeReconciler{ - Recorder: recorder, - Registration: registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - return true - }), - } - - meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) - - result := reconciler.UpgradeAuthorized(upgrade) - assert.Assert(t, !result, "expected signal to not proceed") - - condition := meta.FindStatusCondition(upgrade.Status.Conditions, v1beta1.Registered) - if assert.Check(t, condition != nil) { - assert.Equal(t, condition.Status, metav1.ConditionFalse) - } - - if assert.Check(t, len(recorder.Events) > 0) { - assert.Equal(t, recorder.Events[0].Type, "Warning") - assert.Equal(t, recorder.Events[0].Regarding.Kind, "PGUpgrade") - assert.Equal(t, recorder.Events[0].Regarding.Name, "some-upgrade") - assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "requires")) - } - }) - - t.Run("RegistrationCompleted", func(t *testing.T) { - reconciler := new(PGUpgradeReconciler) - upgrade := new(v1beta1.PGUpgrade) - - called := false - reconciler.Registration = registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - called = true - return false - }) - - meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) - - result := reconciler.UpgradeAuthorized(upgrade) - assert.Assert(t, result, "expected signal to proceed") - assert.Assert(t, called, "expected registration package to clear conditions") - }) -} diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 7d015c4012..781db78720 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -36,7 +36,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" 
"github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/tracing" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -64,8 +63,7 @@ type Reconciler struct { Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error } - Recorder record.EventRecorder - Registration registration.Registration + Recorder record.EventRecorder } // +kubebuilder:rbac:groups="",resources="events",verbs={create,patch} @@ -183,12 +181,6 @@ func (r *Reconciler) Reconcile( return nil } - if r.Registration != nil && r.Registration.Required(r.Recorder, cluster, &cluster.Status.Conditions) { - registration.SetAdvanceWarning(r.Recorder, cluster, &cluster.Status.Conditions) - } - cluster.Status.RegistrationRequired = nil - cluster.Status.TokenRequired = "" - // if the cluster is paused, set a condition and return if cluster.Spec.Paused != nil && *cluster.Spec.Paused { meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ @@ -444,7 +436,7 @@ func (r *Reconciler) setOwnerReference( // +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // ManagedReconciler creates a [Reconciler] and adds it to m. -func ManagedReconciler(m manager.Manager, r registration.Registration) error { +func ManagedReconciler(m manager.Manager) error { exec, err := runtime.NewPodExecutor(m.GetConfig()) kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPostgresCluster) recorder := m.GetEventRecorderFor(naming.ControllerPostgresCluster) @@ -453,7 +445,6 @@ func ManagedReconciler(m manager.Manager, r registration.Registration) error { PodExec: exec, Reader: kubernetes, Recorder: recorder, - Registration: r, StatusWriter: kubernetes.Status(), Writer: kubernetes, } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 5b6f3e4c77..bcc80c12d4 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -18,7 +18,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/record" @@ -27,7 +26,6 @@ import ( "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -86,34 +84,6 @@ func TestDeleteControlled(t *testing.T) { }) } -var olmClusterYAML = ` -metadata: - name: olm -spec: - postgresVersion: 13 - image: postgres - instances: - - name: register-now - dataVolumeClaimSpec: - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: pgbackrest - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -` - var _ = Describe("PostgresCluster Reconciler", func() { var test struct { Namespace *corev1.Namespace @@ -137,7 +107,6 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Reconciler.Reader = 
client test.Reconciler.Recorder = test.Recorder - test.Reconciler.Registration = nil test.Reconciler.StatusWriter = client.Status() test.Reconciler.Writer = client }) @@ -177,49 +146,6 @@ var _ = Describe("PostgresCluster Reconciler", func() { return result } - Context("Cluster with Registration Requirement, no token", func() { - var cluster *v1beta1.PostgresCluster - - BeforeEach(func() { - test.Reconciler.Registration = registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - return true - }) - - cluster = create(olmClusterYAML) - Expect(reconcile(cluster)).To(BeZero()) - }) - - AfterEach(func() { - ctx := context.Background() - - if cluster != nil { - Expect(client.IgnoreNotFound( - suite.Client.Delete(ctx, cluster), - )).To(Succeed()) - - // Remove finalizers, if any, so the namespace can terminate. - Expect(client.IgnoreNotFound( - suite.Client.Patch(ctx, cluster, client.RawPatch( - client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))), - )).To(Succeed()) - } - }) - - Specify("Cluster RegistrationRequired Status", func() { - existing := &v1beta1.PostgresCluster{} - Expect(suite.Client.Get( - context.Background(), client.ObjectKeyFromObject(cluster), existing, - )).To(Succeed()) - - Expect(meta.IsStatusConditionFalse(existing.Status.Conditions, v1beta1.Registered)).To(BeTrue()) - - event, ok := <-test.Recorder.Events - Expect(ok).To(BeTrue()) - Expect(event).To(ContainSubstring("Register Soon")) - }) - }) - Context("Cluster", func() { var cluster *v1beta1.PostgresCluster diff --git a/internal/naming/names.go b/internal/naming/names.go index f4ea8d2fd7..f2b600fb05 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -570,11 +570,3 @@ func StandalonePGAdmin(pgadmin *v1beta1.PGAdmin) metav1.ObjectMeta { Name: fmt.Sprintf("pgadmin-%s", pgadmin.UID), } } - -// UpgradeCheckConfigMap returns the ObjectMeta for the PGO ConfigMap -func UpgradeCheckConfigMap() metav1.ObjectMeta { - return metav1.ObjectMeta{ - Namespace: config.PGONamespace(), - Name: "pgo-upgrade-check", - } -} diff --git a/internal/registration/interface.go b/internal/registration/interface.go deleted file mode 100644 index c0d4e390ad..0000000000 --- a/internal/registration/interface.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - "fmt" - "os" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -type Registration interface { - // Required returns true when registration is required but the token is missing or invalid. - Required(record.EventRecorder, client.Object, *[]metav1.Condition) bool -} - -var URL = os.Getenv("REGISTRATION_URL") - -func SetAdvanceWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { - recorder.Eventf(object, corev1.EventTypeWarning, "Register Soon", - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Register now to be ready for your next upgrade. 
See %s for details.", URL) - - meta.SetStatusCondition(conditions, metav1.Condition{ - Type: v1beta1.Registered, - Status: metav1.ConditionFalse, - Reason: "TokenRequired", - Message: fmt.Sprintf( - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Register now to be ready for your next upgrade. See %s for details.", URL), - ObservedGeneration: object.GetGeneration(), - }) -} - -func SetRequiredWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { - recorder.Eventf(object, corev1.EventTypeWarning, "Registration Required", - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Register now to be ready for your next upgrade. See %s for details.", URL) - - meta.SetStatusCondition(conditions, metav1.Condition{ - Type: v1beta1.Registered, - Status: metav1.ConditionFalse, - Reason: "TokenRequired", - Message: fmt.Sprintf( - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Upgrade suspended. See %s for details.", URL), - ObservedGeneration: object.GetGeneration(), - }) -} - -func emitFailedWarning(recorder record.EventRecorder, object client.Object) { - recorder.Eventf(object, corev1.EventTypeWarning, "Token Authentication Failed", - "See %s for details.", URL) -} - -func emitVerifiedEvent(recorder record.EventRecorder, object client.Object) { - recorder.Event(object, corev1.EventTypeNormal, "Token Verified", - "Thank you for registering your installation of Crunchy Postgres for Kubernetes.") -} diff --git a/internal/registration/runner.go b/internal/registration/runner.go deleted file mode 100644 index b50ceeb4ed..0000000000 --- a/internal/registration/runner.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - "context" - "crypto/rsa" - "errors" - "os" - "strings" - "sync" - "time" - - "github.com/golang-jwt/jwt/v5" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -// Runner implements [Registration] by loading and validating the token at a -// fixed path. Its methods are safe to call concurrently. -type Runner struct { - changed func() - enabled bool - publicKey *rsa.PublicKey - refresh time.Duration - tokenPath string - - token struct { - sync.RWMutex - Exists bool `json:"-"` - - jwt.RegisteredClaims - Iteration int `json:"itr"` - } -} - -// Runner implements [Registration] and [manager.Runnable]. -var _ Registration = (*Runner)(nil) -var _ manager.Runnable = (*Runner)(nil) - -// NewRunner creates a [Runner] that periodically checks the validity of the -// token at tokenPath. It calls changed when the validity of the token changes. 
-func NewRunner(publicKey, tokenPath string, changed func()) (*Runner, error) { - runner := &Runner{ - changed: changed, - refresh: time.Minute, - tokenPath: tokenPath, - } - - var err error - switch { - case publicKey != "" && tokenPath != "": - if !strings.HasPrefix(strings.TrimSpace(publicKey), "-") { - publicKey = "-----BEGIN -----\n" + publicKey + "\n-----END -----" - } - - runner.enabled = true - runner.publicKey, err = jwt.ParseRSAPublicKeyFromPEM([]byte(publicKey)) - - case publicKey == "" && tokenPath != "": - err = errors.New("registration: missing public key") - - case publicKey != "" && tokenPath == "": - err = errors.New("registration: missing token path") - } - - return runner, err -} - -// CheckToken loads and verifies the configured token, returning an error when -// the file exists but cannot be verified, and -// returning the token if it can be verified. -// NOTE(upgradecheck): return the token/nil so that we can use the token -// in upgradecheck; currently a refresh of the token will cause a restart of the pod -// meaning that the token used in upgradecheck is always the current token. -// But if the restart behavior changes, we might drop the token return in main.go -// and change upgradecheck to retrieve the token itself -func (r *Runner) CheckToken() (*jwt.Token, error) { - data, errFile := os.ReadFile(r.tokenPath) - key := func(*jwt.Token) (any, error) { return r.publicKey, nil } - - // Assume [jwt] and [os] functions could do something unexpected; use defer - // to safely write to the token. - r.token.Lock() - defer r.token.Unlock() - - token, errToken := jwt.ParseWithClaims(string(data), &r.token, key, - jwt.WithExpirationRequired(), - jwt.WithValidMethods([]string{"RS256"}), - ) - - // The error from [os.ReadFile] indicates whether a token file exists. - r.token.Exists = !os.IsNotExist(errFile) - - // Reset most claims if there is any problem loading, parsing, validating, or - // verifying the token file. - if errFile != nil || errToken != nil { - r.token.RegisteredClaims = jwt.RegisteredClaims{} - } - - switch { - case !r.enabled || !r.token.Exists: - return nil, nil - case errFile != nil: - return nil, errFile - default: - return token, errToken - } -} - -func (r *Runner) state() (failed, required bool) { - // Assume [time] functions could do something unexpected; use defer to safely - // read the token. - r.token.RLock() - defer r.token.RUnlock() - - failed = r.token.Exists && r.token.ExpiresAt == nil - required = r.enabled && - (!r.token.Exists || failed || r.token.ExpiresAt.Before(time.Now())) - return -} - -// Required returns true when registration is required but the token is missing or invalid. -func (r *Runner) Required( - recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition, -) bool { - failed, required := r.state() - - if r.enabled && failed { - emitFailedWarning(recorder, object) - } - - if !required && conditions != nil { - before := len(*conditions) - meta.RemoveStatusCondition(conditions, v1beta1.Registered) - meta.RemoveStatusCondition(conditions, "RegistrationRequired") - meta.RemoveStatusCondition(conditions, "TokenRequired") - found := len(*conditions) != before - - if r.enabled && found { - emitVerifiedEvent(recorder, object) - } - } - - return required -} - -// NeedLeaderElection returns true so that r runs only on the single -// [manager.Manager] that is elected leader in the Kubernetes namespace. 
-func (r *Runner) NeedLeaderElection() bool { return true } - -// Start watches for a mounted registration token when enabled. It blocks -// until ctx is cancelled. -func (r *Runner) Start(ctx context.Context) error { - var ticks <-chan time.Time - - if r.enabled { - ticker := time.NewTicker(r.refresh) - defer ticker.Stop() - ticks = ticker.C - } - - log := logging.FromContext(ctx).WithValues("controller", "registration") - - for { - select { - case <-ticks: - _, before := r.state() - if _, err := r.CheckToken(); err != nil { - log.Error(err, "Unable to validate token") - } - if _, after := r.state(); before != after && r.changed != nil { - r.changed() - } - case <-ctx.Done(): - return ctx.Err() - } - } -} diff --git a/internal/registration/runner_test.go b/internal/registration/runner_test.go deleted file mode 100644 index 32bea6a485..0000000000 --- a/internal/registration/runner_test.go +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "gotest.tools/v3/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/controller/runtime" - "github.com/crunchydata/postgres-operator/internal/testing/events" -) - -func TestNewRunner(t *testing.T) { - t.Parallel() - - key, err := rsa.GenerateKey(rand.Reader, 2048) - assert.NilError(t, err) - - der, err := x509.MarshalPKIXPublicKey(&key.PublicKey) - assert.NilError(t, err) - - public := pem.EncodeToMemory(&pem.Block{Bytes: der}) - assert.Assert(t, len(public) != 0) - - t.Run("Disabled", func(t *testing.T) { - runner, err := NewRunner("", "", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, !runner.enabled) - }) - - t.Run("ConfiguredCorrectly", func(t *testing.T) { - runner, err := NewRunner(string(public), "any", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, runner.enabled) - - t.Run("ExtraLines", func(t *testing.T) { - input := "\n\n" + strings.ReplaceAll(string(public), "\n", "\n\n") + "\n\n" - - runner, err := NewRunner(input, "any", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, runner.enabled) - }) - - t.Run("WithoutPEMBoundaries", func(t *testing.T) { - lines := strings.Split(strings.TrimSpace(string(public)), "\n") - lines = lines[1 : len(lines)-1] - - for _, input := range []string{ - strings.Join(lines, ""), // single line - strings.Join(lines, "\n"), // multi-line - "\n\n" + strings.Join(lines, "\n\n") + "\n\n", // extra lines - } { - runner, err := NewRunner(input, "any", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, runner.enabled) - } - }) - }) - - t.Run("ConfiguredIncorrectly", func(t *testing.T) { - for _, tt := range []struct { - key, path, msg string - }{ - {msg: "public key", key: "", path: "any"}, - {msg: "token path", key: "bad", path: ""}, - {msg: "invalid key", key: "bad", path: "any"}, - {msg: "token path", key: string(public), path: ""}, - } { - _, err := NewRunner(tt.key, tt.path, nil) - assert.ErrorContains(t, err, tt.msg, "(key=%q, path=%q)", tt.key, tt.path) - } - }) -} - -func TestRunnerCheckToken(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - key, err := 
rsa.GenerateKey(rand.Reader, 2048) - assert.NilError(t, err) - - t.Run("SafeToCallDisabled", func(t *testing.T) { - r := Runner{enabled: false} - _, err := r.CheckToken() - assert.NilError(t, err) - }) - - t.Run("FileMissing", func(t *testing.T) { - r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} - _, err := r.CheckToken() - assert.NilError(t, err) - }) - - t.Run("FileUnreadable", func(t *testing.T) { - r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} - assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o200)) // Writeable - - _, err := r.CheckToken() - assert.ErrorContains(t, err, "permission") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("FileEmpty", func(t *testing.T) { - r := Runner{enabled: true, tokenPath: filepath.Join(dir, "empty")} - assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o400)) // Readable - - _, err := r.CheckToken() - assert.ErrorContains(t, err, "malformed") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("WrongAlgorithm", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "hs256"), - } - - // Maliciously treating an RSA public key as an HMAC secret. - // - https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ - public, err := x509.MarshalPKIXPublicKey(r.publicKey) - assert.NilError(t, err) - data, err := jwt.New(jwt.SigningMethodHS256).SignedString(public) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - _, err = r.CheckToken() - assert.Assert(t, err != nil, "HMAC algorithm should be rejected") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("MissingExpiration", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "no-claims"), - } - - data, err := jwt.New(jwt.SigningMethodRS256).SignedString(key) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - _, err = r.CheckToken() - assert.ErrorContains(t, err, "exp claim is required") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("ExpiredToken", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "expired"), - } - - data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": jwt.NewNumericDate(time.Date(2020, 1, 1, 1, 1, 1, 1, time.UTC)), - }).SignedString(key) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - _, err = r.CheckToken() - assert.ErrorContains(t, err, "is expired") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("ValidToken", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "valid"), - } - - expiration := jwt.NewNumericDate(time.Now().Add(time.Hour)) - data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": expiration, - }).SignedString(key) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - token, err := r.CheckToken() - assert.NilError(t, err) - assert.Assert(t, r.token.ExpiresAt != nil) - assert.Assert(t, token.Valid) - exp, err := token.Claims.GetExpirationTime() - assert.NilError(t, err) - assert.Equal(t, exp.Time, expiration.Time) - }) -} - -func TestRunnerLeaderElectionRunnable(t *testing.T) { - var runner manager.LeaderElectionRunnable = &Runner{} - - 
assert.Assert(t, runner.NeedLeaderElection()) -} - -func TestRunnerRequiredConditions(t *testing.T) { - t.Parallel() - - t.Run("RegistrationDisabled", func(t *testing.T) { - r := Runner{enabled: false} - - for _, tt := range []struct { - before, after []metav1.Condition - }{ - { - before: []metav1.Condition{}, - after: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{ - {Type: "Registered"}, - {Type: "ExistingOther"}, - {Type: "RegistrationRequired"}, - }, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "TokenRequired"}}, - after: []metav1.Condition{}, - }, - } { - for _, exists := range []bool{false, true} { - for _, expires := range []time.Time{ - time.Now().Add(time.Hour), - time.Now().Add(-time.Hour), - } { - r.token.Exists = exists - r.token.ExpiresAt = jwt.NewNumericDate(expires) - - conditions := append([]metav1.Condition{}, tt.before...) - discard := new(events.Recorder) - object := &corev1.ConfigMap{} - - result := r.Required(discard, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.DeepEqual(t, conditions, tt.after) - } - } - } - }) - - t.Run("RegistrationRequired", func(t *testing.T) { - r := Runner{enabled: true} - - for _, tt := range []struct { - exists bool - expires time.Time - before []metav1.Condition - }{ - { - exists: false, expires: time.Now().Add(time.Hour), - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - { - exists: false, expires: time.Now().Add(-time.Hour), - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - { - exists: true, expires: time.Now().Add(-time.Hour), - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - r.token.Exists = tt.exists - r.token.ExpiresAt = jwt.NewNumericDate(tt.expires) - - conditions := append([]metav1.Condition{}, tt.before...) - discard := new(events.Recorder) - object := &corev1.ConfigMap{} - - result := r.Required(discard, object, &conditions) - - assert.Equal(t, result, true, "expected registration required") - assert.DeepEqual(t, conditions, tt.before) - } - }) - - t.Run("Registered", func(t *testing.T) { - r := Runner{} - r.token.Exists = true - r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) - - for _, tt := range []struct { - before, after []metav1.Condition - }{ - { - before: []metav1.Condition{}, - after: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{ - {Type: "Registered"}, - {Type: "ExistingOther"}, - {Type: "RegistrationRequired"}, - }, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "TokenRequired"}}, - after: []metav1.Condition{}, - }, - } { - for _, enabled := range []bool{false, true} { - r.enabled = enabled - - conditions := append([]metav1.Condition{}, tt.before...) 
- discard := new(events.Recorder) - object := &corev1.ConfigMap{} - - result := r.Required(discard, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.DeepEqual(t, conditions, tt.after) - } - } - }) -} - -func TestRunnerRequiredEvents(t *testing.T) { - t.Parallel() - - t.Run("RegistrationDisabled", func(t *testing.T) { - r := Runner{enabled: false} - - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - for _, exists := range []bool{false, true} { - for _, expires := range []time.Time{ - time.Now().Add(time.Hour), - time.Now().Add(-time.Hour), - } { - r.token.Exists = exists - r.token.ExpiresAt = jwt.NewNumericDate(expires) - - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, runtime.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.Equal(t, len(recorder.Events), 0, "expected no events") - } - } - } - }) - - t.Run("RegistrationRequired", func(t *testing.T) { - r := Runner{enabled: true} - - t.Run("MissingToken", func(t *testing.T) { - r.token.Exists = false - - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, runtime.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, true, "expected registration required") - assert.Equal(t, len(recorder.Events), 0, "expected no events") - } - }) - - t.Run("InvalidToken", func(t *testing.T) { - r.token.Exists = true - r.token.ExpiresAt = nil - - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, runtime.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, true, "expected registration required") - assert.Equal(t, len(recorder.Events), 1, "expected one event") - assert.Equal(t, recorder.Events[0].Type, "Warning") - assert.Equal(t, recorder.Events[0].Reason, "Token Authentication Failed") - } - }) - }) - - t.Run("Registered", func(t *testing.T) { - r := Runner{} - r.token.Exists = true - r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) - - t.Run("AlwaysRegistered", func(t *testing.T) { - // No prior registration conditions - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - } { - for _, enabled := range []bool{false, true} { - r.enabled = enabled - - conditions := append([]metav1.Condition{}, tt.before...) 
- object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, runtime.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.Equal(t, len(recorder.Events), 0, "expected no events") - } - } - }) - - t.Run("PreviouslyUnregistered", func(t *testing.T) { - r.enabled = true - - // One or more prior registration conditions - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{ - {Type: "Registered"}, - {Type: "ExistingOther"}, - {Type: "RegistrationRequired"}, - }, - }, - { - before: []metav1.Condition{{Type: "TokenRequired"}}, - }, - } { - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, runtime.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.Equal(t, len(recorder.Events), 1, "expected one event") - assert.Equal(t, recorder.Events[0].Type, "Normal") - assert.Equal(t, recorder.Events[0].Reason, "Token Verified") - } - }) - }) -} - -func TestRunnerStart(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - key, err := rsa.GenerateKey(rand.Reader, 2048) - assert.NilError(t, err) - - token, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": jwt.NewNumericDate(time.Now().Add(time.Hour)), - }).SignedString(key) - assert.NilError(t, err) - - t.Run("DisabledDoesNothing", func(t *testing.T) { - runner := &Runner{ - enabled: false, - refresh: time.Nanosecond, - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - - assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded, - "expected it to block until context is canceled") - }) - - t.Run("WithCallback", func(t *testing.T) { - called := false - runner := &Runner{ - changed: func() { called = true }, - enabled: true, - publicKey: &key.PublicKey, - refresh: time.Second, - tokenPath: filepath.Join(dir, "token"), - } - - // Begin with an invalid token. - assert.NilError(t, os.WriteFile(runner.tokenPath, nil, 0o600)) - _, err = runner.CheckToken() - assert.Assert(t, err != nil) - - // Replace it with a valid token. - assert.NilError(t, os.WriteFile(runner.tokenPath, []byte(token), 0o600)) - - // Run with a timeout that exceeds the refresh interval. - ctx, cancel := context.WithTimeout(context.Background(), runner.refresh*3/2) - defer cancel() - - assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded) - assert.Assert(t, called, "expected a call back") - }) -} diff --git a/internal/registration/testing.go b/internal/registration/testing.go deleted file mode 100644 index 7ea0032b31..0000000000 --- a/internal/registration/testing.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// NOTE: This type can go away following https://go.dev/issue/47487. 
- -type RegistrationFunc func(record.EventRecorder, client.Object, *[]metav1.Condition) bool - -func (fn RegistrationFunc) Required(rec record.EventRecorder, obj client.Object, conds *[]metav1.Condition) bool { - return fn(rec, obj, conds) -} - -var _ Registration = RegistrationFunc(nil) diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go deleted file mode 100644 index 719105d9d3..0000000000 --- a/internal/upgradecheck/header.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "encoding/json" - "net/http" - "os" - - googleuuid "github.com/google/uuid" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/uuid" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/feature" - "github.com/crunchydata/postgres-operator/internal/kubernetes" - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -const ( - clientHeader = "X-Crunchy-Client-Metadata" -) - -var ( - // Using apimachinery's UUID package, so our deployment UUID will be a string - deploymentID string -) - -// Extensible struct for client upgrade data -type clientUpgradeData struct { - BridgeClustersTotal int `json:"bridge_clusters_total"` - BuildSource string `json:"build_source"` - DeploymentID string `json:"deployment_id"` - FeatureGatesEnabled string `json:"feature_gates_enabled"` - IsOpenShift bool `json:"is_open_shift"` - KubernetesEnv string `json:"kubernetes_env"` - PGOClustersTotal int `json:"pgo_clusters_total"` - PGOInstaller string `json:"pgo_installer"` - PGOInstallerOrigin string `json:"pgo_installer_origin"` - PGOVersion string `json:"pgo_version"` - RegistrationToken string `json:"registration_token"` -} - -// generateHeader aggregates data and returns a struct of that data -// If any errors are encountered, it logs those errors and uses the default values -func generateHeader(ctx context.Context, crClient crclient.Client, - pgoVersion string, registrationToken string) *clientUpgradeData { - - return &clientUpgradeData{ - BridgeClustersTotal: getBridgeClusters(ctx, crClient), - BuildSource: os.Getenv("BUILD_SOURCE"), - DeploymentID: ensureDeploymentID(ctx, crClient), - FeatureGatesEnabled: feature.ShowEnabled(ctx), - IsOpenShift: kubernetes.IsOpenShift(ctx), - KubernetesEnv: kubernetes.VersionString(ctx), - PGOClustersTotal: getManagedClusters(ctx, crClient), - PGOInstaller: os.Getenv("PGO_INSTALLER"), - PGOInstallerOrigin: os.Getenv("PGO_INSTALLER_ORIGIN"), - PGOVersion: pgoVersion, - RegistrationToken: registrationToken, - } -} - -// ensureDeploymentID checks if the UUID exists in memory or in a ConfigMap -// If no UUID exists, ensureDeploymentID creates one and saves it in memory/as a ConfigMap -// Any errors encountered will be logged and the ID result will be what is in memory -func ensureDeploymentID(ctx context.Context, crClient crclient.Client) string { - // If there is no deploymentID in memory, generate one for possible use - if deploymentID == "" { - deploymentID = string(uuid.NewUUID()) - } - - cm := manageUpgradeCheckConfigMap(ctx, crClient, deploymentID) - - if cm != nil && cm.Data["deployment_id"] != "" { - deploymentID = cm.Data["deployment_id"] - } - - return deploymentID 
-} - -// manageUpgradeCheckConfigMap ensures a ConfigMap exists with a UUID -// If it doesn't exist, this creates it with the in-memory ID -// If it exists and it has a valid UUID, use that to replace the in-memory ID -// If it exists but the field is blank or mangled, we update the ConfigMap with the in-memory ID -func manageUpgradeCheckConfigMap(ctx context.Context, crClient crclient.Client, - currentID string) *corev1.ConfigMap { - - log := logging.FromContext(ctx) - upgradeCheckConfigMapMetadata := naming.UpgradeCheckConfigMap() - - cm := &corev1.ConfigMap{ - ObjectMeta: upgradeCheckConfigMapMetadata, - Data: map[string]string{"deployment_id": currentID}, - } - cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - - // If no namespace is set, then log this and skip trying to set the UUID in the ConfigMap - if upgradeCheckConfigMapMetadata.GetNamespace() == "" { - log.V(1).Info("upgrade check issue: namespace not set") - return cm - } - - retrievedCM := &corev1.ConfigMap{} - err := crClient.Get(ctx, naming.AsObjectKey(upgradeCheckConfigMapMetadata), retrievedCM) - - // If we get any error besides IsNotFound, log it, skip any ConfigMap steps, - // and use the in-memory deploymentID - if err != nil && !apierrors.IsNotFound(err) { - log.V(1).Info("upgrade check issue: error retrieving configmap", - "response", err.Error()) - return cm - } - - // If we get a ConfigMap with a "deployment_id", check if that UUID is valid - if retrievedCM.Data["deployment_id"] != "" { - _, parseErr := googleuuid.Parse(retrievedCM.Data["deployment_id"]) - // No error -- the ConfigMap has a valid deploymentID, so use that - if parseErr == nil { - cm.Data["deployment_id"] = retrievedCM.Data["deployment_id"] - } - } - - err = applyConfigMap(ctx, crClient, cm, naming.ControllerPostgresCluster) - if err != nil { - log.V(1).Info("upgrade check issue: could not apply configmap", - "response", err.Error()) - } - return cm -} - -// applyConfigMap is a focused version of the Reconciler.apply method, -// meant only to work with this ConfigMap -// It sends an apply patch to the Kubernetes API, with the fieldManager set to the deployment_id -// and the force parameter set to true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -func applyConfigMap(ctx context.Context, crClient crclient.Client, - object crclient.Object, owner string) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := &corev1.ConfigMap{} - data, err := crclient.MergeFrom(zero).Data(object) - - if err == nil { - apply := crclient.RawPatch(crclient.Apply.Type(), data) - err = crClient.Patch(ctx, object, apply, - []crclient.PatchOption{crclient.ForceOwnership, crclient.FieldOwner(owner)}...) 
- } - return err -} - -// getManagedClusters returns a count of postgres clusters managed by this PGO instance -// Any errors encountered will be logged and the count result will be 0 -func getManagedClusters(ctx context.Context, crClient crclient.Client) int { - var count int - clusters := &v1beta1.PostgresClusterList{} - err := crClient.List(ctx, clusters) - if err != nil { - log := logging.FromContext(ctx) - log.V(1).Info("upgrade check issue: could not count postgres clusters", - "response", err.Error()) - } else { - count = len(clusters.Items) - } - return count -} - -// getBridgeClusters returns a count of Bridge clusters managed by this PGO instance -// Any errors encountered will be logged and the count result will be 0 -func getBridgeClusters(ctx context.Context, crClient crclient.Client) int { - var count int - clusters := &v1beta1.CrunchyBridgeClusterList{} - err := crClient.List(ctx, clusters) - if err != nil { - log := logging.FromContext(ctx) - log.V(1).Info("upgrade check issue: could not count bridge clusters", - "response", err.Error()) - } else { - count = len(clusters.Items) - } - return count -} - -func addHeader(req *http.Request, upgradeInfo *clientUpgradeData) *http.Request { - marshaled, _ := json.Marshal(upgradeInfo) - req.Header.Add(clientHeader, string(marshaled)) - return req -} diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go deleted file mode 100644 index 40c3728ca0..0000000000 --- a/internal/upgradecheck/header_test.go +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "encoding/json" - "net/http" - "strings" - "testing" - - "gotest.tools/v3/assert" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/uuid" - - "github.com/crunchydata/postgres-operator/internal/feature" - "github.com/crunchydata/postgres-operator/internal/kubernetes" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" - "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func TestGenerateHeader(t *testing.T) { - setupDeploymentID(t) - ctx := context.Background() - cfg, cc := require.Kubernetes2(t) - - discovery, err := kubernetes.NewDiscoveryRunner(cfg) - assert.NilError(t, err) - assert.NilError(t, discovery.Read(ctx)) - ctx = kubernetes.NewAPIContext(ctx, discovery) - - t.Setenv("PGO_INSTALLER", "test") - t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin") - t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name) - t.Setenv("BUILD_SOURCE", "developer") - - t.Run("error ensuring ID", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "patch error", - } - ctx, calls := setupLogCapture(ctx) - - res := generateHeader(ctx, fakeClientWithOptionalError, "1.2.3", "") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) - assert.Equal(t, discovery.IsOpenShift(), res.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - pgoList := v1beta1.PostgresClusterList{} - err := cc.List(ctx, &pgoList) - assert.NilError(t, err) - assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) - bridgeList := v1beta1.CrunchyBridgeClusterList{} - err = cc.List(ctx, &bridgeList) - assert.NilError(t, err) - assert.Equal(t, 
len(bridgeList.Items), res.BridgeClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, discovery.Version().String(), res.KubernetesEnv) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) - - t.Run("error getting cluster count", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "list error", - } - ctx, calls := setupLogCapture(ctx) - - res := generateHeader(ctx, fakeClientWithOptionalError, "1.2.3", "") - assert.Equal(t, len(*calls), 2) - // Aggregating the logs since we cannot determine which call will be first - callsAggregate := strings.Join(*calls, " ") - assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count postgres clusters`)) - assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count bridge clusters`)) - assert.Equal(t, discovery.IsOpenShift(), res.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - assert.Equal(t, 0, res.PGOClustersTotal) - assert.Equal(t, 0, res.BridgeClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, discovery.Version().String(), res.KubernetesEnv) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) - - t.Run("success", func(t *testing.T) { - ctx, calls := setupLogCapture(ctx) - gate := feature.NewGate() - assert.NilError(t, gate.SetFromMap(map[string]bool{ - feature.TablespaceVolumes: true, - })) - ctx = feature.NewContext(ctx, gate) - - res := generateHeader(ctx, cc, "1.2.3", "") - assert.Equal(t, len(*calls), 0) - assert.Equal(t, discovery.IsOpenShift(), res.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - pgoList := v1beta1.PostgresClusterList{} - err := cc.List(ctx, &pgoList) - assert.NilError(t, err) - assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, discovery.Version().String(), res.KubernetesEnv) - assert.Check(t, strings.Contains( - res.FeatureGatesEnabled, - "TablespaceVolumes=true", - )) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) -} - -func TestEnsureID(t *testing.T) { - ctx := context.Background() - cc := require.Kubernetes(t) - t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name) - - t.Run("success, no id set in mem or configmap", func(t *testing.T) { - deploymentID = "" - oldID := deploymentID - ctx, calls := setupLogCapture(ctx) - - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, newID != oldID) - assert.Assert(t, newID == deploymentID) - - cm := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cm) - assert.NilError(t, err) - assert.Equal(t, newID, cm.Data["deployment_id"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("success, id set in mem, configmap created", func(t *testing.T) { - oldID := setupDeploymentID(t) - - cm := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cm) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - ctx, calls := setupLogCapture(ctx) - - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - - err = cc.Get(ctx, 
naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cm) - assert.NilError(t, err) - assert.Assert(t, deploymentID == cm.Data["deployment_id"]) - - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("success, id set in configmap, mem overwritten", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "deployment_id": string(uuid.NewUUID()), - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - oldID := setupDeploymentID(t) - ctx, calls := setupLogCapture(ctx) - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, newID != oldID) - assert.Assert(t, newID == deploymentID) - assert.Assert(t, deploymentID == cmRetrieved.Data["deployment_id"]) - - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("configmap failed, no namespace given", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "deployment_id": string(uuid.NewUUID()), - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - oldID := setupDeploymentID(t) - ctx, calls := setupLogCapture(ctx) - t.Setenv("PGO_NAMESPACE", "") - - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: namespace not set`)) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - assert.Assert(t, deploymentID != cmRetrieved.Data["deployment_id"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("configmap failed with not NotFound error, using preexisting ID", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "get error", - } - oldID := setupDeploymentID(t) - ctx, calls := setupLogCapture(ctx) - - newID := ensureDeploymentID(ctx, fakeClientWithOptionalError) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: error retrieving configmap`)) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - - cmRetrieved := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - }) - - t.Run("configmap failed to create, using preexisting ID", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "patch error", - } - oldID := setupDeploymentID(t) - - ctx, calls := setupLogCapture(ctx) - newID := ensureDeploymentID(ctx, fakeClientWithOptionalError) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - }) -} - -func TestManageUpgradeCheckConfigMap(t *testing.T) { - ctx := context.Background() - cc := require.Kubernetes(t) - t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name) - - t.Run("no namespace given", func(t *testing.T) { - ctx, calls := setupLogCapture(ctx) - t.Setenv("PGO_NAMESPACE", "") - - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, 
cmp.Contains((*calls)[0], `upgrade check issue: namespace not set`)) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - }) - - t.Run("configmap not found, created", func(t *testing.T) { - cmRetrieved := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - - ctx, calls := setupLogCapture(ctx) - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - - assert.Equal(t, len(*calls), 0) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - err = cc.Delete(ctx, returnedCM) - assert.NilError(t, err) - }) - - t.Run("configmap failed with not NotFound error", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "get error", - } - ctx, calls := setupLogCapture(ctx) - - returnedCM := manageUpgradeCheckConfigMap(ctx, fakeClientWithOptionalError, - "current-id") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: error retrieving configmap`)) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - }) - - t.Run("no deployment id in configmap", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "wrong_field": string(uuid.NewUUID()), - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - ctx, calls := setupLogCapture(ctx) - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - assert.Equal(t, len(*calls), 0) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("mangled deployment id", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "deploymentid": string(uuid.NewUUID())[1:], - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - ctx, calls := setupLogCapture(ctx) - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - assert.Equal(t, len(*calls), 0) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("good configmap with good id", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "deployment_id": string(uuid.NewUUID()), - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - ctx, calls := setupLogCapture(ctx) - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - assert.Equal(t, len(*calls), 0) - assert.Assert(t, returnedCM.Data["deployment-id"] != "current-id") - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("configmap failed to create", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "patch error", - } - - ctx, calls := setupLogCapture(ctx) - returnedCM := manageUpgradeCheckConfigMap(ctx, fakeClientWithOptionalError, - "current-id") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, 
cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - }) -} - -func TestApplyConfigMap(t *testing.T) { - ctx := context.Background() - cc := require.Kubernetes(t) - t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name) - - t.Run("successful create", func(t *testing.T) { - cmRetrieved := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "new_field": "new_value", - }, - } - cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - err = applyConfigMap(ctx, cc, cm, "test") - assert.NilError(t, err) - cmRetrieved = &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - assert.Equal(t, cm.Data["new_value"], cmRetrieved.Data["new_value"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("successful update", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "new_field": "old_value", - }, - } - cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - err := cc.Create(ctx, cm) - assert.NilError(t, err) - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - cm2 := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "new_field": "new_value", - }, - } - cm2.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - err = applyConfigMap(ctx, cc, cm2, "test") - assert.NilError(t, err) - cmRetrieved = &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - assert.Equal(t, cm.Data["new_value"], cmRetrieved.Data["new_value"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("successful nothing changed", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "new_field": "new_value", - }, - } - cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - err := cc.Create(ctx, cm) - assert.NilError(t, err) - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - cm2 := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "new_field": "new_value", - }, - } - cm2.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - err = applyConfigMap(ctx, cc, cm2, "test") - assert.NilError(t, err) - cmRetrieved = &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - assert.Equal(t, cm.Data["new_value"], cmRetrieved.Data["new_value"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("failure", func(t *testing.T) { - cmRetrieved := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "new_field": "new_value", - }, - } - 
cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "patch error", - } - - err = applyConfigMap(ctx, fakeClientWithOptionalError, cm, "test") - assert.Error(t, err, "patch error") - }) -} - -func TestGetManagedClusters(t *testing.T) { - ctx := context.Background() - - t.Run("success", func(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, true) - ctx, calls := setupLogCapture(ctx) - count := getManagedClusters(ctx, fakeClient) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, count == 2) - }) - - t.Run("list throw error", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - setupFakeClientWithPGOScheme(t, true), "list error", - } - ctx, calls := setupLogCapture(ctx) - count := getManagedClusters(ctx, fakeClientWithOptionalError) - assert.Assert(t, len(*calls) > 0) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`)) - assert.Assert(t, count == 0) - }) -} - -func TestGetBridgeClusters(t *testing.T) { - ctx := context.Background() - - t.Run("success", func(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, true) - ctx, calls := setupLogCapture(ctx) - count := getBridgeClusters(ctx, fakeClient) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, count == 2) - }) - - t.Run("list throw error", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - setupFakeClientWithPGOScheme(t, true), "list error", - } - ctx, calls := setupLogCapture(ctx) - count := getBridgeClusters(ctx, fakeClientWithOptionalError) - assert.Assert(t, len(*calls) > 0) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count bridge clusters`)) - assert.Assert(t, count == 0) - }) -} - -func TestAddHeader(t *testing.T) { - t.Run("successful", func(t *testing.T) { - req := &http.Request{ - Header: http.Header{}, - } - versionString := "1.2.3" - upgradeInfo := &clientUpgradeData{ - PGOVersion: versionString, - } - - result := addHeader(req, upgradeInfo) - header := result.Header[clientHeader] - - passedThroughData := &clientUpgradeData{} - err := json.Unmarshal([]byte(header[0]), passedThroughData) - assert.NilError(t, err) - - assert.Equal(t, passedThroughData.PGOVersion, "1.2.3") - // Failure to list clusters results in 0 returned - assert.Equal(t, passedThroughData.PGOClustersTotal, 0) - }) -} diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go deleted file mode 100644 index 5e83cffe2a..0000000000 --- a/internal/upgradecheck/helpers_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "fmt" - "testing" - - "github.com/go-logr/logr/funcr" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - - "github.com/crunchydata/postgres-operator/internal/controller/runtime" - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -// fakeClientWithError is a controller runtime client and an error type to force -type fakeClientWithError struct { - crclient.Client - errorType string -} - -// Get returns the client.get OR an Error (`get error`) if the fakeClientWithError is set to error that way -func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object, opts ...crclient.GetOption) error { - switch f.errorType { - case "get error": - return fmt.Errorf("get error") - default: - return f.Client.Get(ctx, key, obj, opts...) - } -} - -// Patch returns the client.get OR an Error (`patch error`) if the fakeClientWithError is set to error that way -// TODO: PatchType is not supported currently by fake -// - https://github.com/kubernetes/client-go/issues/970 -// Once that gets fixed, we can test without envtest -func (f *fakeClientWithError) Patch(ctx context.Context, obj crclient.Object, - patch crclient.Patch, opts ...crclient.PatchOption) error { - switch f.errorType { - case "patch error": - return fmt.Errorf("patch error") - default: - return f.Client.Patch(ctx, obj, patch, opts...) - } -} - -// List returns the client.get OR an Error (`list error`) if the fakeClientWithError is set to error that way -func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectList, - opts ...crclient.ListOption) error { - switch f.errorType { - case "list error": - return fmt.Errorf("list error") - default: - return f.Client.List(ctx, objList, opts...) - } -} - -// setupDeploymentID returns a UUID -func setupDeploymentID(t *testing.T) string { - t.Helper() - deploymentID = string(uuid.NewUUID()) - return deploymentID -} - -// setupFakeClientWithPGOScheme returns a fake client with the PGO scheme added; -// if `includeCluster` is true, also adds some empty PostgresCluster and CrunchyBridgeCluster -// items to the client -func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Client { - t.Helper() - if includeCluster { - pc := &v1beta1.PostgresClusterList{ - Items: []v1beta1.PostgresCluster{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "hippo", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "elephant", - }, - }, - }, - } - - bcl := &v1beta1.CrunchyBridgeClusterList{ - Items: []v1beta1.CrunchyBridgeCluster{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "hippo", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "elephant", - }, - }, - }, - } - - return fake.NewClientBuilder(). - WithScheme(runtime.Scheme). - WithLists(pc, bcl). 
- Build() - } - return fake.NewClientBuilder().WithScheme(runtime.Scheme).Build() -} - -// setupLogCapture captures the logs and keeps count of the logs captured -func setupLogCapture(ctx context.Context) (context.Context, *[]string) { - calls := []string{} - testlog := funcr.NewJSON(func(object string) { - calls = append(calls, object) - }, funcr.Options{ - Verbosity: 1, - }) - return logging.NewContext(ctx, testlog), &calls -} diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go deleted file mode 100644 index acb4da386b..0000000000 --- a/internal/upgradecheck/http.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "fmt" - "io" - "net/http" - "time" - - "github.com/golang-jwt/jwt/v5" - "k8s.io/apimachinery/pkg/util/wait" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/logging" -) - -var ( - client HTTPClient - - // With these Backoff settings, wait.ExponentialBackoff will - // * use one second as the base time; - // * increase delays between calls by a power of 2 (1, 2, 4, etc.); - // * and retry four times. - // Note that there is no indeterminacy here since there is no Jitter set). - // With these parameters, the calls will occur at 0, 1, 3, and 7 seconds - // (i.e., at 1, 2, and 4 second delays for the retries). - backoff = wait.Backoff{ - Duration: 1 * time.Second, - Factor: float64(2), - Steps: 4, - } -) - -const ( - // upgradeCheckURL can be set using the CHECK_FOR_UPGRADES_URL env var - upgradeCheckURL = "https://operator-maestro.crunchydata.com/pgo-versions" -) - -type HTTPClient interface { - Do(req *http.Request) (*http.Response, error) -} - -// Creating an interface for cache with WaitForCacheSync to allow easier mocking -type CacheWithWait interface { - WaitForCacheSync(ctx context.Context) bool -} - -func init() { - // Since we create this client once during startup, - // we want each connection to be fresh, hence the non-default transport - // with DisableKeepAlives set to true - // See https://github.com/golang/go/issues/43905 and https://github.com/golang/go/issues/23427 - // for discussion of problems with long-lived connections - client = &http.Client{ - Timeout: 5 * time.Second, - Transport: &http.Transport{ - DisableKeepAlives: true, - }, - } -} - -func checkForUpgrades(ctx context.Context, url, versionString string, backoff wait.Backoff, - crclient crclient.Client, registrationToken string, -) (message string, header string, err error) { - var headerPayloadStruct *clientUpgradeData - - // Prep request - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err == nil { - // generateHeader always returns some sort of struct, using defaults/nil values - // in case some of the checks return errors - headerPayloadStruct = generateHeader(ctx, crclient, - versionString, registrationToken) - req = addHeader(req, headerPayloadStruct) - } - - // wait.ExponentialBackoff will retry the func according to the backoff object until - // (a) func returns done as true or - // (b) the backoff settings are exhausted, - // i.e., the process hits the cap for time or the number of steps - // The anonymous function here sets certain preexisting variables (bodyBytes, err, status) - // which are then used by the surrounding `checkForUpgrades` function as part of the return - var bodyBytes []byte - var status int 
- - if err == nil { - _ = wait.ExponentialBackoff( - backoff, - func() (done bool, backoffErr error) { - var res *http.Response - res, err = client.Do(req) - - if err == nil { - defer res.Body.Close() - status = res.StatusCode - - // This is a very basic check, ignoring nuances around - // certain StatusCodes that should either prevent or impact retries - if status == http.StatusOK { - bodyBytes, err = io.ReadAll(res.Body) - return true, nil - } - } - - // Return false, nil to continue checking - return false, nil - }) - } - - // We received responses, but none of them were 200 OK. - if err == nil && status != http.StatusOK { - err = fmt.Errorf("received StatusCode %d", status) - } - - // TODO: Parse response and log info for user on potential upgrades - return string(bodyBytes), req.Header.Get(clientHeader), err -} - -type CheckForUpgradesScheduler struct { - Client crclient.Client - - Refresh time.Duration - RegistrationToken string - URL, Version string -} - -// ManagedScheduler creates a [CheckForUpgradesScheduler] and adds it to m. -// NOTE(registration): This takes a token/nil parameter when the operator is started. -// Currently the operator restarts when the token is updated, -// so this token is always current; but if that restart behavior is changed, -// we will want the upgrade mechanism to instantiate its own registration runner -// or otherwise get the most recent token. -func ManagedScheduler(m manager.Manager, - url, version string, registrationToken *jwt.Token) error { - if url == "" { - url = upgradeCheckURL - } - - var token string - if registrationToken != nil { - token = registrationToken.Raw - } - - return m.Add(&CheckForUpgradesScheduler{ - Client: m.GetClient(), - Refresh: 24 * time.Hour, - RegistrationToken: token, - URL: url, - Version: version, - }) -} - -// NeedLeaderElection returns true so that s runs only on the single -// [manager.Manager] that is elected leader in the Kubernetes cluster. -func (s *CheckForUpgradesScheduler) NeedLeaderElection() bool { return true } - -// Start checks for upgrades periodically. It blocks until ctx is cancelled. -func (s *CheckForUpgradesScheduler) Start(ctx context.Context) error { - s.check(ctx) - - ticker := time.NewTicker(s.Refresh) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - s.check(ctx) - case <-ctx.Done(): - return ctx.Err() - } - } -} - -func (s *CheckForUpgradesScheduler) check(ctx context.Context) { - log := logging.FromContext(ctx) - - defer func() { - if v := recover(); v != nil { - log.V(1).Info("encountered panic in upgrade check", "response", v) - } - }() - - info, header, err := checkForUpgrades(ctx, - s.URL, s.Version, backoff, s.Client, s.RegistrationToken) - - if err != nil { - log.V(1).Info("could not complete upgrade check", "response", err.Error()) - } else { - log.Info(info, clientHeader, header) - } -} diff --git a/internal/upgradecheck/http_test.go b/internal/upgradecheck/http_test.go deleted file mode 100644 index f6a51ce759..0000000000 --- a/internal/upgradecheck/http_test.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "encoding/json" - "errors" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/go-logr/logr/funcr" - "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/feature" - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -func init() { - client = &MockClient{Timeout: 1} - // set backoff to two steps, 1 second apart for testing - backoff = wait.Backoff{ - Duration: 1 * time.Second, - Factor: float64(1), - Steps: 2, - } -} - -type MockClient struct { - Timeout time.Duration -} - -var funcFoo func() (*http.Response, error) - -// Do is the mock request that will return a mock success -func (m *MockClient) Do(req *http.Request) (*http.Response, error) { - return funcFoo() -} - -func TestCheckForUpgrades(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, true) - - ctx := logging.NewContext(context.Background(), logging.Discard()) - gate := feature.NewGate() - assert.NilError(t, gate.SetFromMap(map[string]bool{ - feature.TablespaceVolumes: true, - })) - ctx = feature.NewContext(ctx, gate) - - // Pass *testing.T to allows the correct messages from the assert package - // in the event of certain failures. - checkData := func(t *testing.T, header string) { - data := clientUpgradeData{} - err := json.Unmarshal([]byte(header), &data) - assert.NilError(t, err) - assert.Assert(t, data.DeploymentID != "") - assert.Equal(t, data.PGOVersion, "4.7.3") - assert.Equal(t, data.RegistrationToken, "speakFriend") - assert.Equal(t, data.BridgeClustersTotal, 2) - assert.Equal(t, data.PGOClustersTotal, 2) - assert.Equal(t, data.FeatureGatesEnabled, - "AutoCreateUserSchema=true,AutoGrowVolumes=true,InstanceSidecars=true,PGUpgradeCPUConcurrency=true,TablespaceVolumes=true") - } - - t.Run("success", func(t *testing.T) { - // A successful call - funcFoo = func() (*http.Response, error) { - json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` - return &http.Response{ - Body: io.NopCloser(strings.NewReader(json)), - StatusCode: http.StatusOK, - }, nil - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, "speakFriend") - assert.NilError(t, err) - assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) - checkData(t, header) - }) - - t.Run("total failure, err sending", func(t *testing.T) { - var counter int - // A call returning errors - funcFoo = func() (*http.Response, error) { - counter++ - return &http.Response{}, errors.New("whoops") - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, "speakFriend") - // Two failed calls because of env var - assert.Equal(t, counter, 2) - assert.Equal(t, res, "") - assert.Equal(t, err.Error(), `whoops`) - checkData(t, header) - }) - - t.Run("total failure, bad StatusCode", func(t *testing.T) { - var counter int - // A call returning bad StatusCode - funcFoo = func() (*http.Response, error) { - counter++ - return &http.Response{ - Body: io.NopCloser(strings.NewReader("")), - StatusCode: http.StatusBadRequest, - }, nil - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, "speakFriend") - assert.Equal(t, res, "") - // Two failed calls because of env var - 
assert.Equal(t, counter, 2) - assert.Equal(t, err.Error(), `received StatusCode 400`) - checkData(t, header) - }) - - t.Run("one failure, then success", func(t *testing.T) { - var counter int - // A call returning bad StatusCode the first time - // and a successful response the second time - funcFoo = func() (*http.Response, error) { - if counter < 1 { - counter++ - return &http.Response{ - Body: io.NopCloser(strings.NewReader("")), - StatusCode: http.StatusBadRequest, - }, nil - } - counter++ - json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` - return &http.Response{ - Body: io.NopCloser(strings.NewReader(json)), - StatusCode: http.StatusOK, - }, nil - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, "speakFriend") - assert.Equal(t, counter, 2) - assert.NilError(t, err) - assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) - checkData(t, header) - }) -} - -// TODO(benjaminjb): Replace `fake` with envtest -func TestCheckForUpgradesScheduler(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, false) - - t.Run("panic from checkForUpgrades doesn't bubble up", func(t *testing.T) { - ctx := context.Background() - - // capture logs - var calls []string - ctx = logging.NewContext(ctx, funcr.NewJSON(func(object string) { - calls = append(calls, object) - }, funcr.Options{ - Verbosity: 1, - })) - - // A panicking call - funcFoo = func() (*http.Response, error) { - panic("oh no!") - } - - s := CheckForUpgradesScheduler{ - Client: fakeClient, - } - s.check(ctx) - - assert.Equal(t, len(calls), 2) - assert.Assert(t, cmp.Contains(calls[1], `encountered panic in upgrade check`)) - }) - - t.Run("successful log each loop, ticker works", func(t *testing.T) { - ctx := context.Background() - - // capture logs - var calls []string - ctx = logging.NewContext(ctx, funcr.NewJSON(func(object string) { - calls = append(calls, object) - }, funcr.Options{ - Verbosity: 1, - })) - - // A successful call - funcFoo = func() (*http.Response, error) { - json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` - return &http.Response{ - Body: io.NopCloser(strings.NewReader(json)), - StatusCode: http.StatusOK, - }, nil - } - - // Set loop time to 1s and sleep for 2s before sending the done signal - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - s := CheckForUpgradesScheduler{ - Client: fakeClient, - Refresh: 1 * time.Second, - } - assert.ErrorIs(t, context.DeadlineExceeded, s.Start(ctx)) - - // Sleeping leads to some non-deterministic results, but we expect at least 2 executions - // plus one log for the failure to apply the configmap - assert.Assert(t, len(calls) >= 4) - - assert.Assert(t, cmp.Contains(calls[1], `{\"pgo_versions\":[{\"tag\":\"v5.0.4\"},{\"tag\":\"v5.0.3\"},{\"tag\":\"v5.0.2\"},{\"tag\":\"v5.0.1\"},{\"tag\":\"v5.0.0\"}]}`)) - assert.Assert(t, cmp.Contains(calls[3], `{\"pgo_versions\":[{\"tag\":\"v5.0.4\"},{\"tag\":\"v5.0.3\"},{\"tag\":\"v5.0.2\"},{\"tag\":\"v5.0.1\"},{\"tag\":\"v5.0.0\"}]}`)) - }) -} - -func TestCheckForUpgradesSchedulerLeaderOnly(t *testing.T) { - // CheckForUpgradesScheduler should implement this interface. 
- var s manager.LeaderElectionRunnable = new(CheckForUpgradesScheduler) - - assert.Assert(t, s.NeedLeaderElection(), - "expected to only run on the leader") -} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go index a8aaa59363..fdd53df52b 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go @@ -382,12 +382,6 @@ type PostgresClusterStatus struct { // +optional PGBackRest *v1beta1.PGBackRestStatus `json:"pgbackrest,omitempty"` - // +optional - RegistrationRequired *RegistrationRequirementStatus `json:"registrationRequired,omitempty"` - - // +optional - TokenRequired string `json:"tokenRequired,omitempty"` - // Stores the current PostgreSQL major version following a successful // major PostgreSQL upgrade. // +optional @@ -442,7 +436,6 @@ const ( PersistentVolumeResizeError = "PersistentVolumeResizeError" PostgresClusterProgressing = "Progressing" ProxyAvailable = "ProxyAvailable" - Registered = "Registered" ) type PostgresInstanceSetSpec struct { @@ -623,10 +616,6 @@ func (s *PostgresProxySpec) Default() { } } -type RegistrationRequirementStatus struct { - PGOVersion string `json:"pgoVersion,omitempty"` -} - type PostgresProxyStatus struct { // +optional PGBouncer v1beta1.PGBouncerPodStatus `json:"pgBouncer,omitzero"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go index 46d1817070..c4e2692d1d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go @@ -472,11 +472,6 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { *out = new(v1beta1.PGBackRestStatus) (*in).DeepCopyInto(*out) } - if in.RegistrationRequired != nil { - in, out := &in.RegistrationRequired, &out.RegistrationRequired - *out = new(RegistrationRequirementStatus) - **out = **in - } out.Proxy = in.Proxy if in.UserInterface != nil { in, out := &in.UserInterface, &out.UserInterface @@ -722,21 +717,6 @@ func (in *PostgresUserInterfaceStatus) DeepCopy() *PostgresUserInterfaceStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistrationRequirementStatus) DeepCopyInto(out *RegistrationRequirementStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationRequirementStatus. -func (in *RegistrationRequirementStatus) DeepCopy() *RegistrationRequirementStatus { - if in == nil { - return nil - } - out := new(RegistrationRequirementStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TablespaceVolume) DeepCopyInto(out *TablespaceVolume) { *out = *in diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index ed539341d7..1b6f3e1c77 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -369,12 +369,6 @@ type PostgresClusterStatus struct { // +optional PGBackRest *PGBackRestStatus `json:"pgbackrest,omitempty"` - // +optional - RegistrationRequired *RegistrationRequirementStatus `json:"registrationRequired,omitempty"` - - // +optional - TokenRequired string `json:"tokenRequired,omitempty"` - // Stores the current PostgreSQL major version following a successful // major PostgreSQL upgrade. // +optional @@ -429,7 +423,6 @@ const ( PersistentVolumeResizeError = "PersistentVolumeResizeError" PostgresClusterProgressing = "Progressing" ProxyAvailable = "ProxyAvailable" - Registered = "Registered" ) type PostgresInstanceSetSpec struct { @@ -626,10 +619,6 @@ func (s *PostgresProxySpec) Default() { } } -type RegistrationRequirementStatus struct { - PGOVersion string `json:"pgoVersion,omitempty"` -} - type PostgresProxyStatus struct { // +optional PGBouncer PGBouncerPodStatus `json:"pgBouncer,omitzero"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 2d1301c2df..8d674daf3f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -2337,11 +2337,6 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { *out = new(PGBackRestStatus) (*in).DeepCopyInto(*out) } - if in.RegistrationRequired != nil { - in, out := &in.RegistrationRequired, &out.RegistrationRequired - *out = new(RegistrationRequirementStatus) - **out = **in - } out.Proxy = in.Proxy if in.UserInterface != nil { in, out := &in.UserInterface, &out.UserInterface @@ -2701,21 +2696,6 @@ func (in *PostgresVolumesSpec) DeepCopy() *PostgresVolumesSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistrationRequirementStatus) DeepCopyInto(out *RegistrationRequirementStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationRequirementStatus. -func (in *RegistrationRequirementStatus) DeepCopy() *RegistrationRequirementStatus { - if in == nil { - return nil - } - out := new(RegistrationRequirementStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoAzure) DeepCopyInto(out *RepoAzure) { *out = *in diff --git a/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml b/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml index bbf5c051fd..405969c18c 100644 --- a/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml +++ b/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml @@ -31,7 +31,7 @@ commands: contains "${queries_files}" "queries.yml" && !(contains "${queries_files}" "defaultQueries.yml") } || { - echo >&2 'The /conf directory should contain the queries.yml file. 
Instead it has:' + echo >&2 'The /conf directory should only contain the queries.yml file. Instead it has:' echo "${queries_files}" exit 1 }
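
The header helper deleted from internal/upgradecheck/header.go marshals a telemetry struct to JSON and attaches it to the outgoing request under a single header key. A minimal sketch of that pattern follows; the header name, the reduced field set, and the JSON tags are illustrative stand-ins, not the original unexported constants.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// upgradeData stands in for the removed clientUpgradeData struct; only a few
// of its fields are shown and the JSON tags are guesses for illustration.
type upgradeData struct {
	DeploymentID     string `json:"deployment_id"`
	PGOVersion       string `json:"pgo_version"`
	PGOClustersTotal int    `json:"pgo_clusters_total"`
}

// clientHeaderName is a hypothetical stand-in for the unexported clientHeader
// constant used by the removed code.
const clientHeaderName = "X-Client-Metadata"

// addHeader marshals the payload and attaches it to the request, matching the
// behavior of the deleted helper.
func addHeader(req *http.Request, info *upgradeData) *http.Request {
	marshaled, _ := json.Marshal(info)
	req.Header.Add(clientHeaderName, string(marshaled))
	return req
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	req = addHeader(req, &upgradeData{DeploymentID: "abc", PGOVersion: "1.2.3"})
	fmt.Println(req.Header.Get(clientHeaderName))
}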
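The deleted header tests exercise ensureDeploymentID, which keeps a random UUID in memory and mirrors it into the "pgo-upgrade-check" ConfigMap under the "deployment_id" key so the ID survives operator restarts. The sketch below approximates that flow with a plain Get, Update, or Create sequence rather than the original apply-based helper, and it takes the namespace as an argument instead of reading PGO_NAMESPACE; treat it as an outline of the idea, not the removed implementation.

package idstore

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// EnsureDeploymentID returns the ID stored in the pgo-upgrade-check ConfigMap,
// writing a fresh UUID when none is recorded yet.
func EnsureDeploymentID(ctx context.Context, c crclient.Client, namespace string) (string, error) {
	key := crclient.ObjectKey{Namespace: namespace, Name: "pgo-upgrade-check"}

	cm := &corev1.ConfigMap{}
	err := c.Get(ctx, key, cm)
	if err != nil && !apierrors.IsNotFound(err) {
		return "", err
	}

	if err == nil {
		if id, ok := cm.Data["deployment_id"]; ok && id != "" {
			return id, nil
		}
		// The ConfigMap exists but has no usable ID; record a new one.
		id := string(uuid.NewUUID())
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["deployment_id"] = id
		return id, c.Update(ctx, cm)
	}

	// Not found: create the ConfigMap alongside the new ID.
	id := string(uuid.NewUUID())
	cm = &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: key.Name},
		Data:       map[string]string{"deployment_id": id},
	}
	return id, c.Create(ctx, cm)
}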
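helpers_test.go, also deleted here, wraps a working controller-runtime client so individual methods can be forced to fail, which is how the tests reach the "could not count postgres clusters" and "could not apply configmap" branches. A compact sketch of that seam, with illustrative names:

package fakes

import (
	"context"
	"errors"

	crclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// errInjectingClient embeds a working client and fails selected calls.
type errInjectingClient struct {
	crclient.Client
	failList bool
}

// List returns a canned error when failList is set; otherwise it delegates to
// the embedded client.
func (c *errInjectingClient) List(ctx context.Context, list crclient.ObjectList, opts ...crclient.ListOption) error {
	if c.failList {
		return errors.New("list error")
	}
	return c.Client.List(ctx, list, opts...)
}

In the removed tests the embedded client came from fake.NewClientBuilder().WithScheme(runtime.Scheme).WithLists(...).Build(), which is what made the success-path cluster counts deterministic.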
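The removed internal/upgradecheck/http.go polls its endpoint through wait.ExponentialBackoff configured with a one second base, a factor of two, and four steps, so attempts land at roughly 0, 1, 3, and 7 seconds, and it builds its HTTP client with keep-alives disabled so every check opens a fresh connection. A self-contained sketch of that retry shape, using a placeholder URL:

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fetchWithRetry issues a GET and retries transient failures with the same
// backoff shape the removed code used: 1s base, factor 2, four steps.
func fetchWithRetry(ctx context.Context, url string) (string, error) {
	backoff := wait.Backoff{Duration: 1 * time.Second, Factor: 2, Steps: 4}

	client := &http.Client{
		Timeout: 5 * time.Second,
		// Disable keep-alives so every attempt opens a fresh connection,
		// as the removed init() did.
		Transport: &http.Transport{DisableKeepAlives: true},
	}

	var body []byte
	var status int

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		req, reqErr := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if reqErr != nil {
			return false, reqErr // a malformed request will never succeed
		}
		res, doErr := client.Do(req)
		if doErr != nil {
			return false, nil // transient failure; let the backoff retry
		}
		defer res.Body.Close()
		status = res.StatusCode
		if status == http.StatusOK {
			var readErr error
			body, readErr = io.ReadAll(res.Body)
			return readErr == nil, nil
		}
		return false, nil // non-200; retry until the steps are exhausted
	})

	if err == nil && status != http.StatusOK {
		err = fmt.Errorf("received StatusCode %d", status)
	}
	return string(body), err
}

func main() {
	// Placeholder URL; the real endpoint is configured elsewhere.
	msg, err := fetchWithRetry(context.Background(), "https://example.com/versions")
	fmt.Println(msg, err)
}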
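The scheduler type deleted alongside it runs one check immediately, then once per Refresh period on a ticker, recovers from panics inside a cycle, and reports NeedLeaderElection() == true so only the elected leader performs the check. The following sketch mirrors that shape without importing controller-runtime; doWork is a placeholder for the real check.

package main

import (
	"context"
	"fmt"
	"time"
)

// periodicRunner mirrors the shape of the deleted scheduler: run once at
// startup, then on every tick, until the context is cancelled.
type periodicRunner struct {
	Refresh time.Duration
}

// NeedLeaderElection reports true so a controller-runtime manager would start
// this runner only on the elected leader, as the removed scheduler did.
func (r *periodicRunner) NeedLeaderElection() bool { return true }

// Start blocks until ctx is cancelled.
func (r *periodicRunner) Start(ctx context.Context) error {
	r.doWork(ctx)

	ticker := time.NewTicker(r.Refresh)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			r.doWork(ctx)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// doWork is a placeholder for the real check; the recover mirrors the removed
// code's guard against a panicking cycle taking down the manager.
func (r *periodicRunner) doWork(context.Context) {
	defer func() {
		if v := recover(); v != nil {
			fmt.Println("recovered from panic in periodic work:", v)
		}
	}()
	fmt.Println("tick")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_ = (&periodicRunner{Refresh: time.Second}).Start(ctx)
}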
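Finally, the deleted http_test.go swaps the package-level HTTP client for a mock that returns canned responses, which is what lets the tests drive both the success and the bad-StatusCode paths without a network. A generic version of that seam, with illustrative names, keeps the dependency behind a one-method interface.

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// doer captures the single method of *http.Client that callers need, so a
// test can substitute a stub.
type doer interface {
	Do(req *http.Request) (*http.Response, error)
}

// stubClient returns the same canned response for every request.
type stubClient struct {
	status int
	body   string
}

func (s *stubClient) Do(*http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: s.status,
		Body:       io.NopCloser(strings.NewReader(s.body)),
	}, nil
}

// fetch performs one GET through whatever doer it is given.
func fetch(c doer, url string) (string, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	res, err := c.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	b, err := io.ReadAll(res.Body)
	return string(b), err
}

func main() {
	out, _ := fetch(&stubClient{status: http.StatusOK, body: `{"pgo_versions":[]}`}, "https://example.com")
	fmt.Println(out)
}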