
Commit 6a11a1e

wip - put in the same format as postgres test
1 parent aa3e343 commit 6a11a1e

File tree: 2 files changed, +137 -140 lines changed

Lines changed: 135 additions & 139 deletions
@@ -1,153 +1,149 @@
-# Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0.
-# This product includes software developed at Datadog (https://www.datadoghq.com/).
-# Copyright 2024 Datadog, Inc.
-
-import json
-import os
 import time
 from pathlib import Path
+from typing import TYPE_CHECKING
+
+from utils import scenarios, interfaces, logger, features, context
+from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario
+
+if TYPE_CHECKING:
+    from utils._context._scenarios.otel_collector import OtelCollectorScenario
+
+
+# Load MySQL metrics specification
+# Exclude metrics that require specific configurations or sustained activity
+_EXCLUDED_MYSQL_METRICS: set[str] = {
+    # Add any metrics that need to be excluded here
+    # Example: metrics that require replication, specific storage engines, etc.
+}
+
+mysql_metrics = OtelMetricsValidator.load_metrics_from_file(
+    metrics_file=Path(__file__).parent / "mysql_metrics.json",
+    excluded_metrics=_EXCLUDED_MYSQL_METRICS,
+)
+
+# Initialize validator with MySQL metrics
+_metrics_validator = OtelMetricsValidator(mysql_metrics)
+
+
+@scenarios.otel_mysql_metrics_e2e
+@features.otel_mysql_support
+class Test_MySQLMetricsCollection:
+    def test_mysql_metrics_received_by_collector(self):
+        scenario: OtelCollectorScenario = context.scenario  # type: ignore[assignment]
+        metrics_batch = get_collector_metrics_from_scenario(scenario)
+
+        _, _, _validation_results, failed_validations = _metrics_validator.process_and_validate_metrics(metrics_batch)
 
-from utils import context, weblog, interfaces, scenarios, features, logger
+        assert len(failed_validations) == 0, (
+            f"Error: {len(failed_validations)} metrics failed the expected behavior!\n"
+            f"\n\nFailed validations:\n" + "\n".join(failed_validations)
+        )
 
 
-def load_expected_metrics() -> dict[str, dict]:
-    """Load the expected MySQL metrics from the mysql_metrics.json file."""
-    metrics_file = Path(__file__).parent / "mysql_metrics.json"
-    with open(metrics_file, "r") as f:
-        return json.load(f)
+@scenarios.otel_mysql_metrics_e2e
+@features.otel_mysql_support
+class Test_BackendValidity:
+    def test_mysql_metrics_received_by_backend(self):
+        """Test metrics were actually queried / received by the backend"""
+        metrics_to_validate = list(mysql_metrics.keys())
+        query_tags = {"rid": "otel-mysql-metrics", "host": "collector"}
+
+        time.sleep(15)
+        _validated_metrics, failed_metrics = _metrics_validator.query_backend_for_metrics(
+            metric_names=metrics_to_validate,
+            query_tags=query_tags,
+            lookback_seconds=300,
+            retries=3,
+            initial_delay_s=0.5,
+            semantic_mode="combined",
+        )
+
+        if failed_metrics:
+            logger.error(f"\n❌ Failed validations for semantic mode combined: {failed_metrics}")
+
+        # test with native mode
+        _validated_metrics, failed_metrics = _metrics_validator.query_backend_for_metrics(
+            metric_names=metrics_to_validate,
+            query_tags=query_tags,
+            lookback_seconds=300,
+            retries=3,
+            initial_delay_s=0.5,
+            semantic_mode="native",
+        )
+
+        if failed_metrics:
+            logger.error(f"\n❌ Failed validations for semantic mode native: {failed_metrics}")
 
 
 @scenarios.otel_mysql_metrics_e2e
 @features.otel_mysql_support
-class Test_OTelMySQLMetricsE2E:
-    """Validate MySQL metrics collection via OpenTelemetry instrumentation.
-
-    This test ensures that MySQL metrics are properly collected, exported via OTLP,
-    and ingested into the Datadog backend. It validates metrics through three paths:
-    1. Via Datadog Agent
-    2. Via backend OTLP intake endpoint
-    3. Via OTel Collector
+class Test_Smoke:
+    """MySQL-specific smoke test to generate database activity.
+    This test validates that basic MySQL metrics are collected after database operations.
     """
 
-    def setup_metrics_collected(self):
-        """Initialize test by triggering MySQL operations and capturing timestamp."""
-        self.start = int(time.time())
-        # Trigger MySQL operations through the weblog
-        self.r = weblog.get("/db", params={"service": "mysql", "operation": "select"}, timeout=20)
-        self.expected_metrics = load_expected_metrics()
-        logger.info(f"Loaded {len(self.expected_metrics)} expected MySQL metrics")
-
-    def test_metrics_collected(self):
-        """Verify that MySQL metrics are collected and sent to the backend."""
-        end = int(time.time())
-        rid = self.r.get_rid().lower()
-
-        # Count how many metrics we successfully found
-        metrics_found = 0
-        metrics_not_found = []
-
-        for metric_name, metric_info in self.expected_metrics.items():
-            logger.info(f"Checking metric: {metric_name} (type: {metric_info['data_type']})")
-
-            try:
-                # Try to query the metric from the backend via Agent
-                metric_data = interfaces.backend.query_timeseries(
-                    start=self.start,
-                    end=end,
-                    rid=rid,
-                    metric=metric_name,
-                    dd_api_key=os.environ.get("DD_API_KEY"),
-                    dd_app_key=os.environ.get("DD_APP_KEY", os.environ.get("DD_APPLICATION_KEY")),
-                )
-
-                if metric_data and len(metric_data.get("series", [])) > 0:
-                    metrics_found += 1
-                    logger.debug(f"✓ Found metric: {metric_name}")
-                else:
-                    metrics_not_found.append(metric_name)
-                    logger.debug(f"✗ Metric not found: {metric_name}")
-
-            except Exception as e:
-                logger.warning(f"Error querying metric {metric_name}: {e}")
-                metrics_not_found.append(metric_name)
-
-        logger.info(f"Metrics found: {metrics_found}/{len(self.expected_metrics)}")
-        if metrics_not_found:
-            logger.warning(f"Metrics not found: {', '.join(metrics_not_found[:10])}")
-            if len(metrics_not_found) > 10:
-                logger.warning(f"... and {len(metrics_not_found) - 10} more")
-
-        # Assert that at least some core metrics are present
-        # We don't require 100% because some metrics may only appear under specific conditions
-        assert metrics_found > 0, "No MySQL metrics were found in the backend"
-
-        # Check for some critical metrics that should always be present
-        critical_metrics = [
+    def setup_main(self) -> None:
+        """When the MySQL container spins up, we need some activity:
+        - create a table
+        - insert some data
+        - run queries
+        """
+        scenario: OtelCollectorScenario = context.scenario  # type: ignore[assignment]
+        container = scenario.mysql_container
+
+        # Create table
+        r = container.exec_run(
+            'mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e '
+            '"CREATE TABLE IF NOT EXISTS test_table (id INT PRIMARY KEY AUTO_INCREMENT, value VARCHAR(255));"'
+        )
+        logger.info(f"Create table output: {r.output}")
+
+        # Insert data
+        r = container.exec_run(
+            'mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e '
+            '"INSERT INTO test_table (value) VALUES (\'test1\'), (\'test2\'), (\'test3\');"'
+        )
+        logger.info(f"Insert data output: {r.output}")
+
+        # Run a SELECT query
+        r = container.exec_run(
+            'mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e '
+            '"SELECT * FROM test_table;"'
+        )
+        logger.info(f"Select query output: {r.output}")
+
+        # Run a COUNT query
+        r = container.exec_run(
+            'mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e '
+            '"SELECT COUNT(*) FROM test_table;"'
+        )
+        logger.info(f"Count query output: {r.output}")
+
+    def test_main(self) -> None:
+        observed_metrics: set[str] = set()
+
+        expected_metrics = {
+            "mysql.buffer_pool.usage",
             "mysql.connection.count",
+            "mysql.connection.errors",
             "mysql.query.count",
             "mysql.threads",
-        ]
-
-        for critical_metric in critical_metrics:
-            if critical_metric in self.expected_metrics:
-                assert critical_metric not in metrics_not_found, f"Critical metric '{critical_metric}' was not found"
-
-    def setup_metrics_via_collector(self):
-        """Initialize test by triggering MySQL operations and capturing timestamp."""
-        self.start = int(time.time())
-        # Trigger MySQL operations through the weblog
-        self.r = weblog.get("/db", params={"service": "mysql", "operation": "select"}, timeout=20)
-        self.expected_metrics = load_expected_metrics()
-        logger.info(f"Loaded {len(self.expected_metrics)} expected MySQL metrics")
-
-    def test_metrics_via_collector(self):
-        """Verify that MySQL metrics are properly sent via OTel Collector."""
-        end = int(time.time())
-        rid = self.r.get_rid().lower()
-
-        # Sample a few key metrics to validate via collector
-        sample_metrics = [
-            "mysql.connection.count",
-            "mysql.query.count",
-            "mysql.buffer_pool.usage",
-        ]
-
-        metrics_found_collector = 0
-
-        for metric_name in sample_metrics:
-            if metric_name not in self.expected_metrics:
-                continue
-
-            try:
-                metric_data = interfaces.backend.query_timeseries(
-                    start=self.start,
-                    end=end,
-                    rid=rid,
-                    metric=metric_name,
-                    dd_api_key=os.environ.get("DD_API_KEY_3"),
-                    dd_app_key=os.environ.get("DD_APP_KEY_3"),
-                )
-
-                if metric_data and len(metric_data.get("series", [])) > 0:
-                    metrics_found_collector += 1
-                    logger.debug(f"✓ Found metric via collector: {metric_name}")
-
-            except ValueError:
-                logger.warning(f"Backend does not provide metric {metric_name} via collector")
-            except Exception as e:
-                logger.warning(f"Error querying metric {metric_name} via collector: {e}")
-
-        # We expect at least some metrics to be available via collector
-        logger.info(f"Metrics found via collector: {metrics_found_collector}/{len(sample_metrics)}")
-
-    def setup_metric_data_types(self):
-        """Load expected metrics for validation."""
-        self.expected_metrics = load_expected_metrics()
-
-    def test_metric_data_types(self):
-        """Verify that metrics have the correct data types (Gauge vs Sum)."""
-        # This is a metadata validation test
-        for metric_name, metric_info in self.expected_metrics.items():
-            data_type = metric_info.get("data_type")
-            assert data_type in ["Gauge", "Sum"], f"Metric {metric_name} has invalid data_type: {data_type}"
-            logger.debug(f"Metric {metric_name}: data_type={data_type} ✓")
+        }
+
+        for data in interfaces.otel_collector.get_data("/api/v2/series"):
+            logger.info(f"In request {data['log_filename']}")
+            payload = data["request"]["content"]
+            for serie in payload["series"]:
+                metric = serie["metric"]
+                observed_metrics.add(metric)
+                logger.info(f" {metric} {serie['points']}")
+
+        all_metric_has_be_seen = True
+        for metric in expected_metrics:
+            if metric not in observed_metrics:
+                logger.error(f"Metric {metric} hasn't been observed")
+                all_metric_has_be_seen = False
+            else:
+                logger.info(f"Metric {metric} has been observed")
+
+        # assert all_metric_has_be_seen
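For context, the restructured module wires three pieces together: a metric spec loaded from mysql_metrics.json, one module-level OtelMetricsValidator, and test classes that validate either the collector payloads or the backend queries. Below is a minimal standalone sketch of how those helpers compose, reusing only the signatures visible in this diff; it assumes a running system-tests scenario, and the spec path, tag values, and query parameters are copied from the test rather than prescribed.

    from pathlib import Path

    from utils import context
    from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario

    # Load the metric spec the same way the new module does (no exclusions in this sketch).
    mysql_metrics = OtelMetricsValidator.load_metrics_from_file(
        metrics_file=Path(__file__).parent / "mysql_metrics.json",
        excluded_metrics=set(),
    )
    validator = OtelMetricsValidator(mysql_metrics)

    # Collector path: pull the exported batch from the running scenario and check it against the spec.
    metrics_batch = get_collector_metrics_from_scenario(context.scenario)
    _, _, _results, failed_validations = validator.process_and_validate_metrics(metrics_batch)
    assert not failed_validations, "\n".join(failed_validations)

    # Backend path: query each metric by name and tag, in "combined" or "native" semantic mode.
    _validated, failed_metrics = validator.query_backend_for_metrics(
        metric_names=list(mysql_metrics.keys()),
        query_tags={"rid": "otel-mysql-metrics", "host": "collector"},
        lookback_seconds=300,
        retries=3,
        initial_delay_s=0.5,
        semantic_mode="combined",
    )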

utils/_context/_scenarios/endtoend.py

Lines changed: 2 additions & 1 deletion
@@ -116,7 +116,8 @@ def __init__(
             self._supporting_containers.append(RabbitMqContainer())
 
         if include_mysql_db:
-            self._supporting_containers.append(MySqlContainer())
+            self.mysql_container = MySqlContainer()
+            self._supporting_containers.append(self.mysql_container)
 
         if include_sqlserver:
             self._supporting_containers.append(MsSqlServerContainer())
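This change is what makes scenario.mysql_container resolvable in the smoke test: the scenario keeps a handle on the MySqlContainer it registers instead of only appending it to the supporting-container list. A short sketch of the access pattern this enables, assuming only what the test diff shows, namely a scenario built with include_mysql_db=True whose container wrapper exposes exec_run and returns a result with an output attribute.

    from utils import context, logger

    # Inside a test method running under an OtelCollectorScenario with include_mysql_db=True.
    scenario = context.scenario
    container = scenario.mysql_container

    # Drive arbitrary SQL through the mysql CLI inside the container.
    result = container.exec_run(
        'mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e '
        '"SELECT COUNT(*) FROM test_table;"'
    )
    logger.info(f"Count output: {result.output}")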

0 commit comments
