diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 26e1d31154..6a612936d3 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -11036,6 +11036,218 @@ spec: - whenUnsatisfiable type: object type: array + volumes: + properties: + temp: + description: |- + An ephemeral volume for temporary files. + More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. 
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: missing accessModes + rule: 0 < size(self.accessModes) + - message: missing storage request + rule: has(self.resources.requests.storage) + type: object walVolumeClaimSpec: description: |- Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 5c9786459d..35498a3ea9 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1188,7 +1188,7 @@ func (r *Reconciler) reconcileInstance( ctx, cluster, spec, primaryCertificate, replicationCertSecretProjection(clusterReplicationSecret), postgresDataVolume, postgresWALVolume, tablespaceVolumes, - &instance.Spec.Template.Spec) + &instance.Spec.Template) if backupsSpecFound { addPGBackRestToInstancePodSpec( diff --git a/internal/postgres/config.go b/internal/postgres/config.go index b3102b74dc..a478c0e72b 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -58,6 +58,9 @@ safelink() ( // dataMountPath is where to mount the main data volume. tablespaceMountPath = "/tablespaces" + // tmpMountPath is where to mount the optional ephemeral volume. 
+ tmpMountPath = "/pgtmp" + // walMountPath is where to mount the optional WAL volume. walMountPath = "/pgwal" diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index fda5229792..5041140b0d 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -32,6 +32,11 @@ func TablespaceVolumeMount(tablespaceName string) corev1.VolumeMount { return corev1.VolumeMount{Name: "tablespace-" + tablespaceName, MountPath: tablespaceMountPath + "/" + tablespaceName} } +// TempVolumeMount returns the name and mount path of the ephemeral volume. +func TempVolumeMount() corev1.VolumeMount { + return corev1.VolumeMount{Name: "postgres-temp", MountPath: tmpMountPath} +} + // WALVolumeMount returns the name and mount path of the PostgreSQL WAL volume. func WALVolumeMount() corev1.VolumeMount { return corev1.VolumeMount{Name: "postgres-wal", MountPath: walMountPath} @@ -63,7 +68,7 @@ func InstancePod(ctx context.Context, inClusterCertificates, inClientCertificates *corev1.SecretProjection, inDataVolume, inWALVolume *corev1.PersistentVolumeClaim, inTablespaceVolumes []*corev1.PersistentVolumeClaim, - outInstancePod *corev1.PodSpec, + outInstancePod *corev1.PodTemplateSpec, ) { certVolumeMount := corev1.VolumeMount{ Name: naming.CertVolume, @@ -207,7 +212,7 @@ func InstancePod(ctx context.Context, VolumeMounts: []corev1.VolumeMount{certVolumeMount, dataVolumeMount}, } - outInstancePod.Volumes = []corev1.Volume{ + outInstancePod.Spec.Volumes = []corev1.Volume{ certVolume, dataVolume, downwardAPIVolume, @@ -227,7 +232,7 @@ func InstancePod(ctx context.Context, }, }, } - outInstancePod.Volumes = append(outInstancePod.Volumes, tablespaceVolume) + outInstancePod.Spec.Volumes = append(outInstancePod.Spec.Volumes, tablespaceVolume) container.VolumeMounts = append(container.VolumeMounts, tablespaceVolumeMount) startup.VolumeMounts = append(startup.VolumeMounts, tablespaceVolumeMount) } @@ -239,7 +244,7 @@ func InstancePod(ctx context.Context, Sources: append([]corev1.VolumeProjection{}, inCluster.Spec.Config.Files...), } container.VolumeMounts = append(container.VolumeMounts, additionalConfigVolumeMount) - outInstancePod.Volumes = append(outInstancePod.Volumes, additionalConfigVolume) + outInstancePod.Spec.Volumes = append(outInstancePod.Spec.Volumes, additionalConfigVolume) } // Mount the WAL PVC whenever it exists. The startup command will move WAL @@ -258,19 +263,37 @@ func InstancePod(ctx context.Context, container.VolumeMounts = append(container.VolumeMounts, walVolumeMount) startup.VolumeMounts = append(startup.VolumeMounts, walVolumeMount) - outInstancePod.Volumes = append(outInstancePod.Volumes, walVolume) + outInstancePod.Spec.Volumes = append(outInstancePod.Spec.Volumes, walVolume) + } + + // Mount an ephemeral volume, if specified. + if inInstanceSpec.Volumes != nil && inInstanceSpec.Volumes.Temp != nil { + tmpVolumeMount := TempVolumeMount() + tmpVolume := corev1.Volume{Name: tmpVolumeMount.Name} + tmpVolume.Ephemeral = &corev1.EphemeralVolumeSource{ + VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{ + Spec: inInstanceSpec.Volumes.Temp.AsPersistentVolumeClaimSpec(), + }, + } + + // Create the PVC with the same labels and annotations as the pod. 
+ tmpVolume.Ephemeral.VolumeClaimTemplate.Annotations = outInstancePod.Annotations + tmpVolume.Ephemeral.VolumeClaimTemplate.Labels = outInstancePod.Labels + + container.VolumeMounts = append(container.VolumeMounts, tmpVolumeMount) + outInstancePod.Spec.Volumes = append(outInstancePod.Spec.Volumes, tmpVolume) } - outInstancePod.Containers = []corev1.Container{container, reloader} + outInstancePod.Spec.Containers = []corev1.Container{container, reloader} // If the InstanceSidecars feature gate is enabled and instance sidecars are // defined, add the defined container to the Pod. if feature.Enabled(ctx, feature.InstanceSidecars) && inInstanceSpec.Containers != nil { - outInstancePod.Containers = append(outInstancePod.Containers, inInstanceSpec.Containers...) + outInstancePod.Spec.Containers = append(outInstancePod.Spec.Containers, inInstanceSpec.Containers...) } - outInstancePod.InitContainers = []corev1.Container{startup} + outInstancePod.Spec.InitContainers = []corev1.Container{startup} } // PodSecurityContext returns a v1.PodSecurityContext for cluster that can write diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index a36e3c5368..9903afb97c 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -115,11 +115,11 @@ func TestInstancePod(t *testing.T) { } // without WAL volume nor WAL volume spec - pod := new(corev1.PodSpec) + pod := new(corev1.PodTemplateSpec) InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Assert(t, cmp.MarshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec, ` containers: - env: - name: PGDATA @@ -384,15 +384,15 @@ volumes: walVolume := new(corev1.PersistentVolumeClaim) walVolume.Name = "walvol" - pod := new(corev1.PodSpec) + pod := new(corev1.PodTemplateSpec) InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, walVolume, nil, pod) - assert.Assert(t, len(pod.Containers) > 0) - assert.Assert(t, len(pod.InitContainers) > 0) + assert.Assert(t, len(pod.Spec.Containers) > 0) + assert.Assert(t, len(pod.Spec.InitContainers) > 0) // Container has all mountPaths, including downwardAPI - assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -402,19 +402,19 @@ volumes: name: database-containerinfo readOnly: true - mountPath: /pgwal - name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) + name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Spec.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI - assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true - mountPath: /pgdata name: postgres-data - mountPath: /pgwal - name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) + name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.Spec.InitContainers[0].Name) - assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.Volumes, ` - name: cert-volume projected: defaultMode: 384 @@ -475,7 +475,7 @@ volumes: `), "expected WAL volume") // Startup moves WAL files 
to data volume. - assert.DeepEqual(t, pod.InitContainers[0].Command[4:], + assert.DeepEqual(t, pod.Spec.InitContainers[0].Command[4:], []string{"startup", "11", "/pgdata/pg11_wal"}) }) @@ -485,16 +485,16 @@ volumes: files: [{ secret: { name: keytab } }], }`) - pod := new(corev1.PodSpec) + pod := new(corev1.PodTemplateSpec) InstancePod(ctx, clusterWithConfig, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Assert(t, len(pod.Containers) > 0) - assert.Assert(t, len(pod.InitContainers) > 0) + assert.Assert(t, len(pod.Spec.Containers) > 0) + assert.Assert(t, len(pod.Spec.InitContainers) > 0) // Container has all mountPaths, including downwardAPI, // and the postgres-config - assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -505,15 +505,15 @@ volumes: readOnly: true - mountPath: /etc/postgres name: postgres-config - readOnly: true`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) + readOnly: true`), "expected WAL and downwardAPI mounts in %q container", pod.Spec.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI and additionalConfig - assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true - mountPath: /pgdata - name: postgres-data`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) + name: postgres-data`), "expected WAL mount, no downwardAPI mount in %q container", pod.Spec.InitContainers[0].Name) }) t.Run("WithCustomSidecarContainer", func(t *testing.T) { @@ -526,7 +526,7 @@ volumes: InstancePod(ctx, cluster, sidecarInstance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Equal(t, len(pod.Containers), 2, "expected 2 containers in Pod, got %d", len(pod.Containers)) + assert.Equal(t, len(pod.Spec.Containers), 2, "expected 2 containers in Pod") }) t.Run("SidecarEnabled", func(t *testing.T) { @@ -539,11 +539,11 @@ volumes: InstancePod(ctx, cluster, sidecarInstance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Equal(t, len(pod.Containers), 3, "expected 3 containers in Pod, got %d", len(pod.Containers)) + assert.Equal(t, len(pod.Spec.Containers), 3, "expected 3 containers in Pod") var found bool - for i := range pod.Containers { - if pod.Containers[i].Name == "customsidecar1" { + for i := range pod.Spec.Containers { + if pod.Spec.Containers[i].Name == "customsidecar1" { found = true break } @@ -576,7 +576,7 @@ volumes: InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, tablespaceVolumes, pod) - assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -588,10 +588,10 @@ volumes: - mountPath: /tablespaces/castle name: tablespace-castle - mountPath: /tablespaces/trial - name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.Containers[0].Name) + name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.Spec.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI and additionalConfig - assert.Assert(t, 
cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -600,7 +600,7 @@ volumes: - mountPath: /tablespaces/castle name: tablespace-castle - mountPath: /tablespaces/trial - name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.InitContainers[0].Name) + name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.Spec.InitContainers[0].Name) }) t.Run("WithWALVolumeWithWALVolumeSpec", func(t *testing.T) { @@ -610,14 +610,14 @@ volumes: instance := new(v1beta1.PostgresInstanceSetSpec) instance.WALVolumeClaimSpec = new(v1beta1.VolumeClaimSpec) - pod := new(corev1.PodSpec) + pod := new(corev1.PodTemplateSpec) InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, walVolume, nil, pod) - assert.Assert(t, len(pod.Containers) > 0) - assert.Assert(t, len(pod.InitContainers) > 0) + assert.Assert(t, len(pod.Spec.Containers) > 0) + assert.Assert(t, len(pod.Spec.InitContainers) > 0) - assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -627,18 +627,18 @@ volumes: name: database-containerinfo readOnly: true - mountPath: /pgwal - name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) + name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Spec.Containers[0].Name) - assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true - mountPath: /pgdata name: postgres-data - mountPath: /pgwal - name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) + name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.Spec.InitContainers[0].Name) - assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.Spec.Volumes, ` - name: cert-volume projected: defaultMode: 384 @@ -699,9 +699,71 @@ volumes: `), "expected WAL volume") // Startup moves WAL files to WAL volume. 
- assert.DeepEqual(t, pod.InitContainers[0].Command[4:], + assert.DeepEqual(t, pod.Spec.InitContainers[0].Command[4:], []string{"startup", "11", "/pgwal/pg11_wal"}) }) + + t.Run("TempVolume", func(t *testing.T) { + instance := new(v1beta1.PostgresInstanceSetSpec) + require.UnmarshalInto(t, &instance, `{ + volumes: { temp: { + resources: { requests: { storage: 99Mi } }, + storageClassName: somesuch, + } }, + }`) + + pod := new(corev1.PodTemplateSpec) + InstancePod(ctx, cluster, instance, + serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) + + assert.Assert(t, len(pod.Spec.Containers) > 0) + assert.Assert(t, cmp.MarshalContains(pod.Spec.Containers[0].VolumeMounts, ` +- mountPath: /pgtmp + name: postgres-temp +`), "expected temp mount in %q container", pod.Spec.Containers[0].Name) + + // NOTE: `creationTimestamp: null` appears in the resulting pod, + // but it does not affect the PVC or reconciliation events; + // possibly https://pr.k8s.io/100032 + assert.Assert(t, cmp.MarshalContains(pod.Spec.Volumes, ` +- ephemeral: + volumeClaimTemplate: + metadata: + creationTimestamp: null + spec: + resources: + requests: + storage: 99Mi + storageClassName: somesuch + name: postgres-temp +`), "expected definition in the pod") + + t.Run("Metadata", func(t *testing.T) { + annotated := pod.DeepCopy() + annotated.Annotations = map[string]string{"n1": "etc"} + annotated.Labels = map[string]string{"gg": "asdf"} + + InstancePod(ctx, cluster, instance, + serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, annotated) + + assert.Assert(t, cmp.MarshalContains(annotated.Spec.Volumes, ` +- ephemeral: + volumeClaimTemplate: + metadata: + annotations: + n1: etc + creationTimestamp: null + labels: + gg: asdf + spec: + resources: + requests: + storage: 99Mi + storageClassName: somesuch + name: postgres-temp +`), "expected definition in the pod") + }) + }) } func TestPodSecurityContext(t *testing.T) { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 33edac4ebf..e804233a9c 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -518,6 +518,16 @@ type PostgresInstanceSetSpec struct { // +listMapKey=name // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + + Volumes *PostgresVolumesSpec `json:"volumes,omitempty"` +} + +type PostgresVolumesSpec struct { + // An ephemeral volume for temporary files. 
+ // More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes + // --- + // +optional + Temp *VolumeClaimSpec `json:"temp,omitempty"` } type TablespaceVolume struct { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index b139390346..7f08c6cf65 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -2209,6 +2209,11 @@ func (in *PostgresInstanceSetSpec) DeepCopyInto(out *PostgresInstanceSetSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = new(PostgresVolumesSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresInstanceSetSpec. @@ -2355,6 +2360,25 @@ func (in *PostgresUserSpec) DeepCopy() *PostgresUserSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresVolumesSpec) DeepCopyInto(out *PostgresVolumesSpec) { + *out = *in + if in.Temp != nil { + in, out := &in.Temp, &out.Temp + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresVolumesSpec. +func (in *PostgresVolumesSpec) DeepCopy() *PostgresVolumesSpec { + if in == nil { + return nil + } + out := new(PostgresVolumesSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistrationRequirementStatus) DeepCopyInto(out *RegistrationRequirementStatus) { *out = *in
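
---

Taken together, these changes add an optional `volumes.temp` claim to `PostgresInstanceSetSpec` and mount it as a generic ephemeral volume at `/pgtmp` in the postgres container. Below is a minimal sketch of a manifest that exercises the new field; the cluster name, instance name, postgres version, storage class, and sizes are illustrative, and `dataVolumeClaimSpec` is the pre-existing data volume field shown only for context. Note that the CRD validation added above requires the temp claim to set `accessModes` and a storage request, even though the Go unit test (which bypasses CRD validation) omits `accessModes`.

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo                 # illustrative name
spec:
  postgresVersion: 16         # illustrative; any supported version
  instances:
    - name: instance1
      dataVolumeClaimSpec:    # existing field, not part of this change
        accessModes: [ReadWriteOnce]
        resources: { requests: { storage: 1Gi } }
      volumes:
        temp:                 # new field added by this change
          accessModes: [ReadWriteOnce]
          resources: { requests: { storage: 99Mi } }
          storageClassName: somesuch   # optional; illustrative class name
```

With a spec like this, `InstancePod` appends an `ephemeral` volume named `postgres-temp` to the pod template, copying the pod's labels and annotations onto the embedded `volumeClaimTemplate`. Under Kubernetes' generic ephemeral volume behavior, a per-pod PVC (named `<pod name>-postgres-temp`) is created from that template and removed along with the pod.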