Commit 9b551e8

update the others
1 parent cbaf869 commit 9b551e8

16 files changed (+145, -149 lines)


internal/collector/naming.go

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ const PGBouncerMetrics = "metrics/pgbouncer"
 const PostgresMetrics = "metrics/postgres"
 const PatroniMetrics = "metrics/patroni"
 const ResourceDetectionProcessor = "resourcedetection"
+const MonitoringUser = "ccp_monitoring"
 
 const SqlQuery = "sqlquery"
 
internal/collector/patroni.go

Lines changed: 2 additions & 3 deletions
@@ -8,7 +8,6 @@ import (
     "context"
     "slices"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
     "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
@@ -22,7 +21,7 @@ func EnablePatroniLogging(ctx context.Context,
         spec = inCluster.Spec.Instrumentation.Logs
     }
 
-    if feature.Enabled(ctx, feature.OpenTelemetryLogs) {
+    if OpenTelemetryLogsEnabled(ctx, inCluster) {
         directory := naming.PatroniPGDataLogPath
 
         // Keep track of what log records and files have been processed.
@@ -133,7 +132,7 @@ func EnablePatroniMetrics(ctx context.Context,
     inCluster *v1beta1.PostgresCluster,
     outConfig *Config,
 ) {
-    if feature.Enabled(ctx, feature.OpenTelemetryMetrics) {
+    if OpenTelemetryMetricsEnabled(ctx, inCluster) {
         // Add Prometheus exporter
         outConfig.Exporters[Prometheus] = map[string]any{
             "endpoint": "0.0.0.0:9187",

internal/collector/pgadmin.go

Lines changed: 104 additions & 104 deletions
@@ -10,120 +10,120 @@ import (
 
     corev1 "k8s.io/api/core/v1"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
     "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 
 func EnablePgAdminLogging(ctx context.Context, spec *v1beta1.InstrumentationSpec,
     configmap *corev1.ConfigMap,
 ) error {
-    if !feature.Enabled(ctx, feature.OpenTelemetryLogs) {
-        return nil
-    }
-    otelConfig := NewConfig(spec)
-
-    otelConfig.Extensions["file_storage/pgadmin_data_logs"] = map[string]any{
-        "directory": "/var/lib/pgadmin/logs/receiver",
-        "create_directory": false,
-        "fsync": true,
-    }
-
-    otelConfig.Receivers["filelog/pgadmin"] = map[string]any{
-        "include": []string{"/var/lib/pgadmin/logs/pgadmin.log"},
-        "storage": "file_storage/pgadmin_data_logs",
-    }
-    otelConfig.Receivers["filelog/gunicorn"] = map[string]any{
-        "include": []string{"/var/lib/pgadmin/logs/gunicorn.log"},
-        "storage": "file_storage/pgadmin_data_logs",
-    }
-
-    otelConfig.Processors["resource/pgadmin"] = map[string]any{
-        "attributes": []map[string]any{
-            // Container and Namespace names need no escaping because they are DNS labels.
-            // Pod names need no escaping because they are DNS subdomains.
-            //
-            // https://kubernetes.io/docs/concepts/overview/working-with-objects/names
-            // https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/resource/k8s.md
-            // https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/general/logs.md
-            {"action": "insert", "key": "k8s.container.name", "value": naming.ContainerPGAdmin},
-            {"action": "insert", "key": "k8s.namespace.name", "value": "${env:K8S_POD_NAMESPACE}"},
-            {"action": "insert", "key": "k8s.pod.name", "value": "${env:K8S_POD_NAME}"},
-        },
-    }
-
-    otelConfig.Processors["transform/pgadmin_log"] = map[string]any{
-        "log_statements": []map[string]any{
-            {
-                "context": "log",
-                "statements": []string{
-                    // Keep the unparsed log record in a standard attribute, and replace
-                    // the log record body with the message field.
-                    //
-                    // https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/general/logs.md
-                    `set(attributes["log.record.original"], body)`,
-                    `set(cache, ParseJSON(body))`,
-                    `merge_maps(attributes, ExtractPatterns(cache["message"], "(?P<webrequest>[A-Z]{3}.*?[\\d]{3})"), "insert")`,
-                    `set(body, cache["message"])`,
-
-                    // Set instrumentation scope to the "name" from each log record.
-                    `set(instrumentation_scope.name, cache["name"])`,
-
-                    // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext
-                    `set(severity_text, cache["level"])`,
-                    `set(time_unix_nano, Int(cache["time"]*1000000000))`,
-
-                    // Map pgAdmin "logging levels" to OpenTelemetry severity levels.
-                    //
-                    // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber
-                    // https://opentelemetry.io/docs/specs/otel/logs/data-model-appendix/#appendix-b-severitynumber-example-mappings
-                    // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/-/pkg/ottl/contexts/ottllog#enums
-                    `set(severity_number, SEVERITY_NUMBER_DEBUG) where severity_text == "DEBUG"`,
-                    `set(severity_number, SEVERITY_NUMBER_INFO) where severity_text == "INFO"`,
-                    `set(severity_number, SEVERITY_NUMBER_WARN) where severity_text == "WARNING"`,
-                    `set(severity_number, SEVERITY_NUMBER_ERROR) where severity_text == "ERROR"`,
-                    `set(severity_number, SEVERITY_NUMBER_FATAL) where severity_text == "CRITICAL"`,
+    var err error
+    if OpenTelemetryLogsEnabled(ctx, spec) {
+
+        otelConfig := NewConfig(spec)
+
+        otelConfig.Extensions["file_storage/pgadmin_data_logs"] = map[string]any{
+            "directory": "/var/lib/pgadmin/logs/receiver",
+            "create_directory": false,
+            "fsync": true,
+        }
+
+        otelConfig.Receivers["filelog/pgadmin"] = map[string]any{
+            "include": []string{"/var/lib/pgadmin/logs/pgadmin.log"},
+            "storage": "file_storage/pgadmin_data_logs",
+        }
+        otelConfig.Receivers["filelog/gunicorn"] = map[string]any{
+            "include": []string{"/var/lib/pgadmin/logs/gunicorn.log"},
+            "storage": "file_storage/pgadmin_data_logs",
+        }
+
+        otelConfig.Processors["resource/pgadmin"] = map[string]any{
+            "attributes": []map[string]any{
+                // Container and Namespace names need no escaping because they are DNS labels.
+                // Pod names need no escaping because they are DNS subdomains.
+                //
+                // https://kubernetes.io/docs/concepts/overview/working-with-objects/names
+                // https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/resource/k8s.md
+                // https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/general/logs.md
+                {"action": "insert", "key": "k8s.container.name", "value": naming.ContainerPGAdmin},
+                {"action": "insert", "key": "k8s.namespace.name", "value": "${env:K8S_POD_NAMESPACE}"},
+                {"action": "insert", "key": "k8s.pod.name", "value": "${env:K8S_POD_NAME}"},
+            },
+        }
+
+        otelConfig.Processors["transform/pgadmin_log"] = map[string]any{
+            "log_statements": []map[string]any{
+                {
+                    "context": "log",
+                    "statements": []string{
+                        // Keep the unparsed log record in a standard attribute, and replace
+                        // the log record body with the message field.
+                        //
+                        // https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/general/logs.md
+                        `set(attributes["log.record.original"], body)`,
+                        `set(cache, ParseJSON(body))`,
+                        `merge_maps(attributes, ExtractPatterns(cache["message"], "(?P<webrequest>[A-Z]{3}.*?[\\d]{3})"), "insert")`,
+                        `set(body, cache["message"])`,
+
+                        // Set instrumentation scope to the "name" from each log record.
+                        `set(instrumentation_scope.name, cache["name"])`,
+
+                        // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext
+                        `set(severity_text, cache["level"])`,
+                        `set(time_unix_nano, Int(cache["time"]*1000000000))`,
+
+                        // Map pgAdmin "logging levels" to OpenTelemetry severity levels.
+                        //
+                        // https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber
+                        // https://opentelemetry.io/docs/specs/otel/logs/data-model-appendix/#appendix-b-severitynumber-example-mappings
+                        // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/-/pkg/ottl/contexts/ottllog#enums
+                        `set(severity_number, SEVERITY_NUMBER_DEBUG) where severity_text == "DEBUG"`,
+                        `set(severity_number, SEVERITY_NUMBER_INFO) where severity_text == "INFO"`,
+                        `set(severity_number, SEVERITY_NUMBER_WARN) where severity_text == "WARNING"`,
+                        `set(severity_number, SEVERITY_NUMBER_ERROR) where severity_text == "ERROR"`,
+                        `set(severity_number, SEVERITY_NUMBER_FATAL) where severity_text == "CRITICAL"`,
+                    },
                 },
             },
-        },
-    }
-
-    // If there are exporters to be added to the logs pipelines defined in
-    // the spec, add them to the pipeline. Otherwise, add the DebugExporter.
-    exporters := []ComponentID{DebugExporter}
-    if spec != nil && spec.Logs != nil && spec.Logs.Exporters != nil {
-        exporters = slices.Clone(spec.Logs.Exporters)
-    }
-
-    otelConfig.Pipelines["logs/pgadmin"] = Pipeline{
-        Extensions: []ComponentID{"file_storage/pgadmin_data_logs"},
-        Receivers: []ComponentID{"filelog/pgadmin"},
-        Processors: []ComponentID{
-            "resource/pgadmin",
-            "transform/pgadmin_log",
-            ResourceDetectionProcessor,
-            LogsBatchProcessor,
-            CompactingProcessor,
-        },
-        Exporters: exporters,
-    }
-
-    otelConfig.Pipelines["logs/gunicorn"] = Pipeline{
-        Extensions: []ComponentID{"file_storage/pgadmin_data_logs"},
-        Receivers: []ComponentID{"filelog/gunicorn"},
-        Processors: []ComponentID{
-            "resource/pgadmin",
-            "transform/pgadmin_log",
-            ResourceDetectionProcessor,
-            LogsBatchProcessor,
-            CompactingProcessor,
-        },
-        Exporters: exporters,
-    }
+        }
+
+        // If there are exporters to be added to the logs pipelines defined in
+        // the spec, add them to the pipeline. Otherwise, add the DebugExporter.
+        exporters := []ComponentID{DebugExporter}
+        if spec != nil && spec.Logs != nil && spec.Logs.Exporters != nil {
+            exporters = slices.Clone(spec.Logs.Exporters)
+        }
+
+        otelConfig.Pipelines["logs/pgadmin"] = Pipeline{
+            Extensions: []ComponentID{"file_storage/pgadmin_data_logs"},
+            Receivers: []ComponentID{"filelog/pgadmin"},
+            Processors: []ComponentID{
+                "resource/pgadmin",
+                "transform/pgadmin_log",
+                ResourceDetectionProcessor,
+                LogsBatchProcessor,
+                CompactingProcessor,
+            },
+            Exporters: exporters,
+        }
+
+        otelConfig.Pipelines["logs/gunicorn"] = Pipeline{
+            Extensions: []ComponentID{"file_storage/pgadmin_data_logs"},
+            Receivers: []ComponentID{"filelog/gunicorn"},
+            Processors: []ComponentID{
+                "resource/pgadmin",
+                "transform/pgadmin_log",
+                ResourceDetectionProcessor,
+                LogsBatchProcessor,
+                CompactingProcessor,
+            },
+            Exporters: exporters,
+        }
 
-    otelYAML, err := otelConfig.ToYAML()
-    if err == nil {
-        configmap.Data["collector.yaml"] = otelYAML
+        otelYAML, err := otelConfig.ToYAML()
+        if err == nil {
+            configmap.Data["collector.yaml"] = otelYAML
+        }
     }
     return err
 }

internal/collector/pgbackrest.go

Lines changed: 1 addition & 2 deletions
@@ -11,7 +11,6 @@ import (
     "fmt"
     "slices"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
     "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
@@ -29,7 +28,7 @@ func NewConfigForPgBackrestRepoHostPod(
 ) *Config {
     config := NewConfig(spec)
 
-    if feature.Enabled(ctx, feature.OpenTelemetryLogs) {
+    if OpenTelemetryLogsEnabled(ctx, spec) {
 
         var directory string
         for _, repo := range repos {

internal/collector/pgbouncer.go

Lines changed: 6 additions & 5 deletions
@@ -11,7 +11,6 @@ import (
     "fmt"
     "slices"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
     "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
@@ -39,7 +38,7 @@ func NewConfigForPgBouncerPod(
     config := NewConfig(cluster.Spec.Instrumentation)
 
     EnablePgBouncerLogging(ctx, cluster, config)
-    EnablePgBouncerMetrics(ctx, config, sqlQueryUsername)
+    EnablePgBouncerMetrics(ctx, cluster, config, sqlQueryUsername)
 
     return config
 }
@@ -55,7 +54,7 @@ func EnablePgBouncerLogging(ctx context.Context,
         spec = inCluster.Spec.Instrumentation.Logs
     }
 
-    if feature.Enabled(ctx, feature.OpenTelemetryLogs) {
+    if OpenTelemetryLogsEnabled(ctx, inCluster) {
         directory := naming.PGBouncerLogPath
 
         // Keep track of what log records and files have been processed.
@@ -170,8 +169,10 @@ func EnablePgBouncerLogging(ctx context.Context,
 
 // EnablePgBouncerMetrics adds necessary configuration to the collector config to scrape
 // metrics from pgBouncer when the OpenTelemetryMetrics feature flag is enabled.
-func EnablePgBouncerMetrics(ctx context.Context, config *Config, sqlQueryUsername string) {
-    if feature.Enabled(ctx, feature.OpenTelemetryMetrics) {
+func EnablePgBouncerMetrics(ctx context.Context, inCluster *v1beta1.PostgresCluster,
+    config *Config, sqlQueryUsername string) {
+
+    if OpenTelemetryMetricsEnabled(ctx, inCluster) {
         // Add Prometheus exporter
         config.Exporters[Prometheus] = map[string]any{
             "endpoint": "0.0.0.0:9187",

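Because the EnablePgBouncerMetrics signature changed above, any caller now has to supply the PostgresCluster so the enabled-check can consult the cluster's Instrumentation spec as well as the feature gate. A minimal caller sketch, mirroring the updated call site in NewConfigForPgBouncerPod (variable names as in that function, not new API):

    // cluster, config, and sqlQueryUsername come from the surrounding setup code.
    EnablePgBouncerMetrics(ctx, cluster, config, sqlQueryUsername)
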
internal/collector/postgres.go

Lines changed: 1 addition & 2 deletions
@@ -15,7 +15,6 @@ import (
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
     "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/internal/postgres"
     "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
@@ -86,7 +85,7 @@ func EnablePostgresLogging(
         spec = inCluster.Spec.Instrumentation.Logs
     }
 
-    if inCluster != nil && feature.Enabled(ctx, feature.OpenTelemetryLogs) {
+    if OpenTelemetryLogsEnabled(ctx, inCluster) {
         directory := postgres.LogDirectory()
         version := inCluster.Spec.PostgresVersion
 

internal/collector/postgres_metrics.go

Lines changed: 4 additions & 5 deletions
@@ -11,8 +11,7 @@ import (
     "fmt"
     "slices"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
-    "github.com/crunchydata/postgres-operator/internal/pgmonitor"
+    // "github.com/crunchydata/postgres-operator/internal/pgmonitor"
     "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 
@@ -37,7 +36,7 @@ var gtePG16 json.RawMessage
 var ltPG16 json.RawMessage
 
 func EnablePostgresMetrics(ctx context.Context, inCluster *v1beta1.PostgresCluster, config *Config) {
-    if feature.Enabled(ctx, feature.OpenTelemetryMetrics) {
+    if OpenTelemetryMetricsEnabled(ctx, inCluster) {
         // We must create a copy of the fiveSecondMetrics variable, otherwise we
         // will continually append to it and blow up our ConfigMap
         fiveSecondMetricsClone := slices.Clone(fiveSecondMetrics)
@@ -61,7 +60,7 @@ func EnablePostgresMetrics(ctx context.Context, inCluster *v1beta1.PostgresClust
 
         config.Receivers[FiveSecondSqlQuery] = map[string]any{
             "driver": "postgres",
-            "datasource": fmt.Sprintf(`host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, pgmonitor.MonitoringUser),
+            "datasource": fmt.Sprintf(`host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, MonitoringUser),
             "collection_interval": "5s",
             // Give Postgres time to finish setup.
             "initial_delay": "10s",
@@ -70,7 +69,7 @@ func EnablePostgresMetrics(ctx context.Context, inCluster *v1beta1.PostgresClust
 
         config.Receivers[FiveMinuteSqlQuery] = map[string]any{
             "driver": "postgres",
-            "datasource": fmt.Sprintf(`host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, pgmonitor.MonitoringUser),
+            "datasource": fmt.Sprintf(`host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, MonitoringUser),
             "collection_interval": "300s",
             // Give Postgres time to finish setup.
             "initial_delay": "10s",

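The sqlquery receiver datasource above now interpolates the collector package's own MonitoringUser constant ("ccp_monitoring", added in internal/collector/naming.go) instead of pgmonitor.MonitoringUser. A small illustration of the rendered value, not part of the diff:

    // Illustration only: what the fmt.Sprintf call above produces.
    datasource := fmt.Sprintf(
        `host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`,
        MonitoringUser,
    )
    // host=localhost dbname=postgres port=5432 user=ccp_monitoring password=${env:PGPASSWORD}
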
internal/collector/util.go

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ func OpenTelemetrySpecPresent[T CrunchyCRD](object T) bool {
 
     switch v := any(object).(type) {
     case *v1beta1.InstrumentationSpec:
-        return true
+        return v != nil
     case *v1beta1.PostgresCluster:
         return v.Spec.Instrumentation != nil
     case *v1beta1.PGAdmin:
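
The OpenTelemetryLogsEnabled and OpenTelemetryMetricsEnabled helpers called throughout this commit are not shown in the diff. A minimal sketch of how they are assumed to behave, combining the previous feature-gate checks with OpenTelemetrySpecPresent; the names and signatures are inferred from the call sites, so the real definitions may differ:

    // Assumed shape, not taken from this commit. Wherever these helpers actually
    // live, they would need the context and internal/feature imports.
    func OpenTelemetryLogsEnabled[T CrunchyCRD](ctx context.Context, object T) bool {
        return feature.Enabled(ctx, feature.OpenTelemetryLogs) && OpenTelemetrySpecPresent(object)
    }

    func OpenTelemetryMetricsEnabled[T CrunchyCRD](ctx context.Context, object T) bool {
        return feature.Enabled(ctx, feature.OpenTelemetryMetrics) && OpenTelemetrySpecPresent(object)
    }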

internal/controller/postgrescluster/cluster.go

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
 
-    "github.com/crunchydata/postgres-operator/internal/feature"
+    "github.com/crunchydata/postgres-operator/internal/collector"
     "github.com/crunchydata/postgres-operator/internal/initialize"
     "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/internal/patroni"
@@ -75,7 +75,7 @@ func (r *Reconciler) patroniLogSize(ctx context.Context, cluster *v1beta1.Postgr
             sizeInBytes = 25000000
         }
         return sizeInBytes
-    } else if feature.Enabled(ctx, feature.OpenTelemetryLogs) {
+    } else if collector.OpenTelemetryLogsEnabled(ctx, cluster) {
         return 25000000
     }
     return 0
