Commit 337a986

vasanttejaxrmx and Riccardo Magliocchetti authored
openai-v2: Fix service tier attribute names (#3952)
* fix: Fix service tier attribute names in openai-v2 instrumentation
* polish: extracting service_tier from extra_body.
* Extract service_tier from extra_body when not in kwargs

  In OpenAI SDK 1.26.0, service_tier is passed via extra_body. Update
  get_llm_request_attributes to check both kwargs and extra_body for
  service_tier to support both ways of passing it.

* Add changelog entry for service tier attribute fix

  Fix #3920: Add changelog entry documenting the fix for service tier
  attribute names.

* wip: adding checks for service_tier.
* wip: fix linting errors.

---------

Co-authored-by: Riccardo Magliocchetti <riccardo.magliocchetti@gmail.com>
1 parent e95c19d commit 337a986
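
For context, the two calling styles the commit message describes look roughly like the sketch below (model name and tier value are illustrative; newer OpenAI SDKs accept service_tier as a regular keyword argument, while around SDK 1.26.0 it is routed through extra_body):

from openai import OpenAI

client = OpenAI()

# Newer SDKs: service_tier as a top-level keyword argument.
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "hello"}],
    service_tier="default",
)

# Older SDKs (e.g. 1.26.0): the same value passed through extra_body.
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "hello"}],
    extra_body={"service_tier": "default"},
)

The instrumentation now records either form under the request service-tier attribute, as the utils.py change below shows.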

6 files changed, +47 −34 lines changed

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## Unreleased

+- Fix service tier attribute names: use `GEN_AI_OPENAI_REQUEST_SERVICE_TIER` for request
+  attributes and `GEN_AI_OPENAI_RESPONSE_SERVICE_TIER` for response attributes.
+  ([#3920](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3920))
 - Added support for OpenAI embeddings instrumentation
   ([#3461](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3461))
 - Record prompt and completion events regardless of span sampling decision.

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py

Lines changed: 1 addition & 1 deletion
@@ -370,7 +370,7 @@ def _set_response_attributes(
     if getattr(result, "service_tier", None):
         set_span_attribute(
             span,
-            GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
+            GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
             result.service_tier,
         )
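
A minimal standalone sketch of the response-side behavior this hunk corrects, using the plain OpenTelemetry Span API instead of the package's set_span_attribute helper; the attribute key string is the assumed value of GEN_AI_OPENAI_RESPONSE_SERVICE_TIER:

from opentelemetry import trace

# Assumed string form of GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER.
RESPONSE_SERVICE_TIER = "gen_ai.openai.response.service_tier"

def record_response_service_tier(span: trace.Span, result) -> None:
    """Copy result.service_tier onto the span, if the response carries one."""
    service_tier = getattr(result, "service_tier", None)
    if service_tier:
        # After this fix the value lands under the *response* attribute,
        # not the request attribute as before.
        span.set_attribute(RESPONSE_SERVICE_TIER, service_tier)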

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 6 additions & 1 deletion
@@ -230,8 +230,13 @@ def get_llm_request_attributes(
             GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
         ] = response_format

+    # service_tier can be passed directly or in extra_body (in SDK 1.26.0 it's via extra_body)
     service_tier = kwargs.get("service_tier")
-    attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = (
+    if service_tier is None:
+        extra_body = kwargs.get("extra_body")
+        if isinstance(extra_body, Mapping):
+            service_tier = extra_body.get("service_tier")
+    attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER] = (
         service_tier if service_tier != "auto" else None
     )
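
As a standalone illustration of the request-side lookup added here (a sketch mirroring the hunk above, not the library code; the value it produces feeds GEN_AI_OPENAI_REQUEST_SERVICE_TIER):

from collections.abc import Mapping
from typing import Optional

def extract_request_service_tier(kwargs: dict) -> Optional[str]:
    """Look up service_tier in kwargs first, then in extra_body; treat "auto" as unset."""
    service_tier = kwargs.get("service_tier")
    if service_tier is None:
        extra_body = kwargs.get("extra_body")
        if isinstance(extra_body, Mapping):
            service_tier = extra_body.get("service_tier")
    return service_tier if service_tier != "auto" else None

# Directly as a kwarg, via extra_body, or "auto" (which is not recorded):
assert extract_request_service_tier({"service_tier": "default"}) == "default"
assert extract_request_service_tier({"extra_body": {"service_tier": "default"}}) == "default"
assert extract_request_service_tier({"service_tier": "auto"}) is None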

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py

Lines changed: 2 additions & 0 deletions
@@ -183,6 +183,8 @@ async def test_async_chat_completion_extra_params(
         response.model,
         response.usage.prompt_tokens,
         response.usage.completion_tokens,
+        request_service_tier="default",
+        response_service_tier=getattr(response, "service_tier", None),
     )
     assert (
         spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED] == 42

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py

Lines changed: 2 additions & 0 deletions
@@ -221,6 +221,8 @@ def test_chat_completion_extra_params(
         response.model,
         response.usage.prompt_tokens,
         response.usage.completion_tokens,
+        request_service_tier="default",
+        response_service_tier=getattr(response, "service_tier", None),
     )
     assert (
         spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED] == 42

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_utils.py

Lines changed: 33 additions & 32 deletions
@@ -25,6 +25,14 @@
 )


+def _assert_optional_attribute(span, attribute_name, expected_value):
+    """Helper to assert optional span attributes."""
+    if expected_value is not None:
+        assert expected_value == span.attributes[attribute_name]
+    else:
+        assert attribute_name not in span.attributes
+
+
 def assert_all_attributes(
     span: ReadableSpan,
     request_model: str,
@@ -35,6 +43,8 @@ def assert_all_attributes(
     operation_name: str = "chat",
     server_address: str = "api.openai.com",
     server_port: int = 443,
+    request_service_tier: Optional[str] = None,
+    response_service_tier: Optional[str] = None,
 ):
     assert span.name == f"{operation_name} {request_model}"
     assert (
@@ -49,44 +59,35 @@
         request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
     )

-    if response_model:
-        assert (
-            response_model
-            == span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL]
-        )
-    else:
-        assert GenAIAttributes.GEN_AI_RESPONSE_MODEL not in span.attributes
-
-    if response_id:
-        assert (
-            response_id == span.attributes[GenAIAttributes.GEN_AI_RESPONSE_ID]
-        )
-    else:
-        assert GenAIAttributes.GEN_AI_RESPONSE_ID not in span.attributes
-
-    if input_tokens:
-        assert (
-            input_tokens
-            == span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        )
-    else:
-        assert GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS not in span.attributes
-
-    if output_tokens:
-        assert (
-            output_tokens
-            == span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
-        )
-    else:
-        assert (
-            GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS not in span.attributes
-        )
+    _assert_optional_attribute(
+        span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, response_model
+    )
+    _assert_optional_attribute(
+        span, GenAIAttributes.GEN_AI_RESPONSE_ID, response_id
+    )
+    _assert_optional_attribute(
+        span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, input_tokens
+    )
+    _assert_optional_attribute(
+        span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens
+    )

     assert server_address == span.attributes[ServerAttributes.SERVER_ADDRESS]

     if server_port != 443 and server_port > 0:
         assert server_port == span.attributes[ServerAttributes.SERVER_PORT]

+    _assert_optional_attribute(
+        span,
+        GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
+        request_service_tier,
+    )
+    _assert_optional_attribute(
+        span,
+        GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+        response_service_tier,
+    )
+

 def assert_log_parent(log, span):
     """Assert that the log record has the correct parent span context"""
