2 changes: 1 addition & 1 deletion internal/controller/postgrescluster/cluster.go
@@ -30,7 +30,7 @@ import (
// files (etc) that apply to the entire cluster.
func (r *Reconciler) reconcileClusterConfigMap(
ctx context.Context, cluster *v1beta1.PostgresCluster,
pgHBAs postgres.HBAs, pgParameters *postgres.ParameterSet,
pgHBAs *postgres.OrderedHBAs, pgParameters *postgres.ParameterSet,
) (*corev1.ConfigMap, error) {
clusterConfigMap := &corev1.ConfigMap{ObjectMeta: naming.ClusterConfigMap(cluster)}
clusterConfigMap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
7 changes: 1 addition & 6 deletions internal/controller/postgrescluster/controller.go
@@ -33,8 +33,6 @@ import (
"github.com/crunchydata/postgres-operator/internal/initialize"
"github.com/crunchydata/postgres-operator/internal/kubernetes"
"github.com/crunchydata/postgres-operator/internal/logging"
"github.com/crunchydata/postgres-operator/internal/pgbouncer"
"github.com/crunchydata/postgres-operator/internal/pgmonitor"
"github.com/crunchydata/postgres-operator/internal/pki"
"github.com/crunchydata/postgres-operator/internal/postgres"
"github.com/crunchydata/postgres-operator/internal/registration"
@@ -231,10 +229,7 @@ func (r *Reconciler) Reconcile(
}
}

pgHBAs := postgres.NewHBAs()
pgmonitor.PostgreSQLHBAs(ctx, cluster, &pgHBAs)
pgbouncer.PostgreSQL(cluster, &pgHBAs)

pgHBAs := r.generatePostgresHBAs(ctx, cluster)
pgParameters := r.generatePostgresParameters(ctx, cluster, backupsSpecFound)

otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters)
2 changes: 1 addition & 1 deletion internal/controller/postgrescluster/patroni.go
@@ -173,7 +173,7 @@ func (r *Reconciler) reconcilePatroniDistributedConfiguration(

func (r *Reconciler) reconcilePatroniDynamicConfiguration(
ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances,
pgHBAs postgres.HBAs, pgParameters *postgres.ParameterSet,
pgHBAs *postgres.OrderedHBAs, pgParameters *postgres.ParameterSet,
) error {
if !patroni.ClusterBootstrapped(cluster) {
// Patroni has not yet bootstrapped. Dynamic configuration happens through
30 changes: 30 additions & 0 deletions internal/controller/postgrescluster/postgres.go
@@ -33,6 +33,7 @@ import (
"github.com/crunchydata/postgres-operator/internal/patroni"
"github.com/crunchydata/postgres-operator/internal/pgaudit"
"github.com/crunchydata/postgres-operator/internal/pgbackrest"
"github.com/crunchydata/postgres-operator/internal/pgbouncer"
"github.com/crunchydata/postgres-operator/internal/pgmonitor"
"github.com/crunchydata/postgres-operator/internal/postgis"
"github.com/crunchydata/postgres-operator/internal/postgres"
@@ -41,6 +42,35 @@ import (
"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// generatePostgresHBAs produces the HBA rules for cluster that incorporate,
// from highest to lowest precedence:
// 1. mandatory rules determined by controllers
// 2. rules in cluster.spec.patroni.dynamicConfiguration
// 3. default rules, when none were in cluster.spec
func (*Reconciler) generatePostgresHBAs(
ctx context.Context, cluster *v1beta1.PostgresCluster,
) *postgres.OrderedHBAs {
builtin := postgres.NewHBAs()
pgmonitor.PostgreSQLHBAs(ctx, cluster, &builtin)
pgbouncer.PostgreSQL(cluster, &builtin)

// Postgres processes HBA rules in order. Start with mandatory rules
// so connections are matched against them first.
result := new(postgres.OrderedHBAs)
result.Append(builtin.Mandatory...)

// Append any rules specified in the Patroni section.
before := result.Length()
result.AppendUnstructured(patroni.PostgresHBAs(cluster.Spec.Patroni)...)

// When there are no specified rules, include the recommended defaults.
if result.Length() == before {
result.Append(builtin.Default...)
}

return result
}

// generatePostgresParameters produces the parameter set for cluster that
// incorporates, from highest to lowest precedence:
// 1. mandatory values determined by controllers
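To make the precedence implemented by generatePostgresHBAs concrete, here is a minimal, self-contained sketch of the same ordering using plain strings. It is an illustration only: the real code builds a postgres.OrderedHBAs via Append and AppendUnstructured, and the rule strings below are made up.

package main

import "fmt"

// buildHBA mirrors the precedence of generatePostgresHBAs: mandatory rules
// always come first, rules from spec.patroni.dynamicConfiguration follow,
// and the recommended defaults are appended only when the spec has none.
func buildHBA(mandatory, fromSpec, defaults []string) []string {
	result := append([]string{}, mandatory...)
	if len(fromSpec) > 0 {
		return append(result, fromSpec...)
	}
	return append(result, defaults...)
}

func main() {
	// Example rules only; the actual mandatory and default rules come from
	// postgres.NewHBAs(), pgmonitor, and pgbouncer.
	mandatory := []string{`local all "postgres" peer`}
	defaults := []string{"hostssl all all all scram-sha-256"}

	// No rules in the spec: mandatory rules, then the defaults.
	fmt.Println(buildHBA(mandatory, nil, defaults))

	// Rules in the spec replace the defaults but never the mandatory rules.
	fmt.Println(buildHBA(mandatory, []string{"hostssl all app 10.0.0.0/8 md5"}, defaults))
}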
36 changes: 36 additions & 0 deletions internal/controller/postgrescluster/postgres_test.go
@@ -34,6 +34,42 @@ import (
"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func TestGeneratePostgresHBAs(t *testing.T) {
ctx := context.Background()
reconciler := &Reconciler{}

builtin := reconciler.generatePostgresHBAs(ctx, v1beta1.NewPostgresCluster()).AsStrings()
assert.Assert(t, len(builtin) > 0,
"expected an empty cluster to have some builtin rules")

defaults := builtin[len(builtin)-1:]
assert.Assert(t, len(defaults) > 0,
"expected at least one default rule")

required := builtin[:len(builtin)-len(defaults)]
assert.Assert(t, len(required) > 0,
"expected at least one mandatory rule")

t.Run("Patroni", func(t *testing.T) {
cluster := v1beta1.NewPostgresCluster()
require.UnmarshalInto(t, &cluster.Spec.Patroni, `{
dynamicConfiguration: {
postgresql: { pg_hba: [ "first custom", "another" ] },
},
}`)

result := reconciler.generatePostgresHBAs(ctx, cluster).AsStrings()
assert.Assert(t, cmp.Len(result, len(required)+2),
"expected two rules from the Patroni section and no defaults")

// mandatory rules should be first
assert.DeepEqual(t, result[:len(required)], required)

// specified rules should be last and in their original order
assert.DeepEqual(t, result[len(required):], []string{`first custom`, `another`})
})
}

func TestGeneratePostgresParameters(t *testing.T) {
ctx := context.Background()
reconciler := &Reconciler{}
29 changes: 5 additions & 24 deletions internal/patroni/config.go
@@ -39,7 +39,7 @@ const (
// clusterYAML returns Patroni settings that apply to the entire cluster.
func clusterYAML(
cluster *v1beta1.PostgresCluster,
pgHBAs postgres.HBAs, parameters *postgres.ParameterSet, patroniLogStorageLimit int64,
pgHBAs *postgres.OrderedHBAs, parameters *postgres.ParameterSet, patroniLogStorageLimit int64,
) (string, error) {
root := map[string]any{
// The cluster identifier. This value cannot change during the cluster's
@@ -208,7 +208,7 @@ func clusterYAML(
// and returns a value that can be marshaled to JSON.
func DynamicConfiguration(
spec *v1beta1.PostgresClusterSpec,
pgHBAs postgres.HBAs, parameters *postgres.ParameterSet,
pgHBAs *postgres.OrderedHBAs, parameters *postgres.ParameterSet,
) map[string]any {
// Copy the entire configuration before making any changes.
root := make(map[string]any)
@@ -239,32 +239,13 @@
postgresql[k] = v
}
}
root["postgresql"] = postgresql

if m := parameters.AsMap(); m != nil {
postgresql["parameters"] = m
}

// Copy the "postgresql.pg_hba" section after any mandatory values.
hba := make([]string, 0, len(pgHBAs.Mandatory))
for i := range pgHBAs.Mandatory {
hba = append(hba, pgHBAs.Mandatory[i].String())
if pgHBAs != nil {
postgresql["pg_hba"] = pgHBAs.AsStrings()
}
if section, ok := postgresql["pg_hba"].([]any); ok {
for i := range section {
// any pg_hba values that are not strings will be skipped
if value, ok := section[i].(string); ok {
hba = append(hba, value)
}
}
}
// When the section is missing or empty, include the recommended defaults.
if len(hba) == len(pgHBAs.Mandatory) {
for i := range pgHBAs.Default {
hba = append(hba, pgHBAs.Default[i].String())
}
}
postgresql["pg_hba"] = hba
root["postgresql"] = postgresql

// Enabling `pg_rewind` allows a former primary to automatically rejoin the
// cluster even if it has commits that were not sent to a replica. In other
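The net effect in DynamicConfiguration is that the pg_hba section is no longer assembled here: filtering out non-string entries and falling back to the recommended defaults now happens upstream in generatePostgresHBAs, and this function simply emits the already-ordered rules when they are present. A rough, self-contained sketch of the new behavior, with made-up rule strings:

package main

import (
	"encoding/json"
	"fmt"
)

// applyHBA mirrors the new pg_hba handling: a non-nil, already-ordered rule
// list replaces the section wholesale, while nil leaves the map untouched.
// (The removed code instead re-filtered the spec's entries and fell back to
// the recommended defaults at this point.)
func applyHBA(postgresql map[string]any, rules []string) {
	if rules != nil {
		postgresql["pg_hba"] = rules
	}
}

func main() {
	postgresql := map[string]any{"use_slots": false}
	applyHBA(postgresql, []string{
		`local all "postgres" peer`,      // mandatory, always first
		"hostssl all app 10.0.0.0/8 md5", // from spec.patroni.dynamicConfiguration
	})

	out, _ := json.MarshalIndent(map[string]any{"postgresql": postgresql}, "", "  ")
	fmt.Println(string(out))
}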