Skip to content

Commit bdcaefb

Browse files
authored
Merge branch 'main' into semconv-celery
2 parents f8f9df5 + 8fa0c1b commit bdcaefb

File tree

137 files changed

+799
-388
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

137 files changed

+799
-388
lines changed

CHANGELOG.md

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,11 +11,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1111
1212
## Unreleased
1313

14-
- `opentelemetry-instrumentation-requests`, `opentelemetry-instrumentation-wsgi`, `opentelemetry-instrumentation-asgi` Detect synthetic sources on requests, ASGI, and WSGI.
15-
([#3674](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3674))
14+
## Version 1.39.0/0.60b0 (2025-12-03)
1615

1716
### Added
1817

18+
- `opentelemetry-instrumentation-requests`, `opentelemetry-instrumentation-wsgi`, `opentelemetry-instrumentation-asgi` Detect synthetic sources on requests, ASGI, and WSGI.
19+
([#3674](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3674))
1920
- `opentelemetry-instrumentation-aiohttp-client`: add support for url exclusions via `OTEL_PYTHON_EXCLUDED_URLS` / `OTEL_PYTHON_AIOHTTP_CLIENT_EXCLUDED_URLS`
2021
([#3850](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3850))
2122
- `opentelemetry-instrumentation-httpx`: add support for url exclusions via `OTEL_PYTHON_EXCLUDED_URLS` / `OTEL_PYTHON_HTTPX_EXCLUDED_URLS`
@@ -42,6 +43,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
4243

4344
### Fixed
4445

46+
- `opentelemetry-instrumentation-botocore`: bedrock: Add safety check for bedrock ConverseStream responses
47+
([#3990](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3990))
4548
- `opentelemetry-instrumentation-botocore`: bedrock: only decode JSON input buffer in Anthropic Claude streaming
4649
([#3875](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3875))
4750
- `opentelemetry-instrumentation-aiohttp-client`, `opentelemetry-instrumentation-aiohttp-server`: Fix readme links and text

_template/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "0.60b0.dev"
15+
__version__ = "0.61b0.dev"

eachdist.ini

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ sortfirst=
1616
ext/*
1717

1818
[stable]
19-
version=1.39.0.dev
19+
version=1.40.0.dev
2020

2121
packages=
2222
opentelemetry-sdk
@@ -35,7 +35,7 @@ packages=
3535
opentelemetry-exporter-credential-provider-gcp
3636

3737
[prerelease]
38-
version=0.60b0.dev
38+
version=0.61b0.dev
3939

4040
packages=
4141
all

exporter/opentelemetry-exporter-credential-provider-gcp/src/opentelemetry/gcp_credential_provider/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "0.60b0.dev"
15+
__version__ = "0.61b0.dev"

exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "0.60b0.dev"
15+
__version__ = "0.61b0.dev"

exporter/opentelemetry-exporter-richconsole/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ classifiers = [
2727
dependencies = [
2828
"opentelemetry-api ~= 1.12",
2929
"opentelemetry-sdk ~= 1.12",
30-
"opentelemetry-semantic-conventions == 0.60b0.dev",
30+
"opentelemetry-semantic-conventions == 0.61b0.dev",
3131
"rich>=10.0.0",
3232
]
3333

exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "0.60b0.dev"
15+
__version__ = "0.61b0.dev"

instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1010
- Ensure log event is written and completion hook is called even when model call results in exception. Put new
1111
log event (`gen_ai.client.inference.operation.details`) behind the flag `OTEL_SEMCONV_STABILITY_OPT_IN=gen_ai_latest_experimental`.
1212
Ensure same sem conv attributes are on the log and span. Fix an issue where the instrumentation would crash when a pydantic.BaseModel class was passed as the response schema ([#3905](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3905)).
13+
- Add the `GEN_AI_OUTPUT_TYPE` sem conv request attributes to events/spans generated in the stable instrumentation. This was added pre sem conv 1.36 so it should be in the stable instrumentation. Fix a bug in how system instructions were recorded in the `gen_ai.system.message` log event. It will now always be recorded as `{"content" : "text of system instructions"}`. See ([#4011](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4011)).
1314

1415
## Version 0.4b0 (2025-10-16)
1516

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 41 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,6 @@ def _to_dict(value: object):
172172

173173
def _create_request_attributes(
174174
config: Optional[GenerateContentConfigOrDict],
175-
is_experimental_mode: bool,
176175
allow_list: AllowList,
177176
) -> dict[str, Any]:
178177
if not config:
@@ -207,7 +206,7 @@ def _create_request_attributes(
207206
},
208207
)
209208
response_mime_type = config.get("response_mime_type")
210-
if response_mime_type and is_experimental_mode:
209+
if response_mime_type:
211210
if response_mime_type == "text/plain":
212211
attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = "text"
213212
elif response_mime_type == "application/json":
@@ -505,31 +504,29 @@ def _maybe_log_completion_details(
505504
def _maybe_log_system_instruction(
506505
self, config: Optional[GenerateContentConfigOrDict] = None
507506
):
508-
system_instruction = None
509-
if config is not None:
510-
if isinstance(config, dict):
511-
system_instruction = config.get("system_instruction")
512-
else:
513-
system_instruction = config.system_instruction
507+
content_union = _config_to_system_instruction(config)
508+
if not content_union:
509+
return
510+
content = transformers.t_contents(content_union)[0]
511+
if not content.parts:
512+
return
513+
# System instruction is required to be text. An error will be returned by the API if it isn't.
514+
system_instruction = " ".join(
515+
part.text for part in content.parts if part.text
516+
)
514517
if not system_instruction:
515518
return
516-
attributes = {
517-
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
518-
}
519-
# TODO: determine if "role" should be reported here or not. It is unclear
520-
# since the caller does not supply a "role" and since this comes through
521-
# a property named "system_instruction" which would seem to align with
522-
# the default "role" that is allowed to be omitted by default.
523-
#
524-
# See also: "TODOS.md"
525-
body = {}
526-
if self._content_recording_enabled:
527-
body["content"] = _to_dict(system_instruction)
528-
else:
529-
body["content"] = _CONTENT_ELIDED
530519
self._otel_wrapper.log_system_prompt(
531-
attributes=attributes,
532-
body=body,
520+
attributes={
521+
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
522+
},
523+
body={
524+
"content": (
525+
system_instruction
526+
if self._content_recording_enabled
527+
else _CONTENT_ELIDED
528+
)
529+
},
533530
)
534531

535532
def _maybe_log_user_prompt(
@@ -716,13 +713,8 @@ def instrumented_generate_content(
716713
completion_hook,
717714
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
718715
)
719-
is_experimental_mode = (
720-
helper.sem_conv_opt_in_mode
721-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
722-
)
723716
request_attributes = _create_request_attributes(
724717
config,
725-
is_experimental_mode,
726718
helper._generate_content_config_key_allowlist,
727719
)
728720
with helper.start_span_as_current_span(
@@ -739,7 +731,10 @@ def instrumented_generate_content(
739731
config=helper.wrapped_config(config),
740732
**kwargs,
741733
)
742-
if is_experimental_mode:
734+
if (
735+
helper.sem_conv_opt_in_mode
736+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
737+
):
743738
helper._update_response(response)
744739
if response.candidates:
745740
candidates += response.candidates
@@ -791,13 +786,8 @@ def instrumented_generate_content_stream(
791786
completion_hook,
792787
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
793788
)
794-
is_experimental_mode = (
795-
helper.sem_conv_opt_in_mode
796-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
797-
)
798789
request_attributes = _create_request_attributes(
799790
config,
800-
is_experimental_mode,
801791
helper._generate_content_config_key_allowlist,
802792
)
803793
with helper.start_span_as_current_span(
@@ -814,7 +804,10 @@ def instrumented_generate_content_stream(
814804
config=helper.wrapped_config(config),
815805
**kwargs,
816806
):
817-
if is_experimental_mode:
807+
if (
808+
helper.sem_conv_opt_in_mode
809+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
810+
):
818811
helper._update_response(response)
819812
if response.candidates:
820813
candidates += response.candidates
@@ -865,13 +858,8 @@ async def instrumented_generate_content(
865858
completion_hook,
866859
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
867860
)
868-
is_experimental_mode = (
869-
helper.sem_conv_opt_in_mode
870-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
871-
)
872861
request_attributes = _create_request_attributes(
873862
config,
874-
is_experimental_mode,
875863
helper._generate_content_config_key_allowlist,
876864
)
877865
candidates: list[Candidate] = []
@@ -889,7 +877,10 @@ async def instrumented_generate_content(
889877
config=helper.wrapped_config(config),
890878
**kwargs,
891879
)
892-
if is_experimental_mode:
880+
if (
881+
helper.sem_conv_opt_in_mode
882+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
883+
):
893884
helper._update_response(response)
894885
if response.candidates:
895886
candidates += response.candidates
@@ -940,13 +931,8 @@ async def instrumented_generate_content_stream(
940931
completion_hook,
941932
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
942933
)
943-
is_experimental_mode = (
944-
helper.sem_conv_opt_in_mode
945-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
946-
)
947934
request_attributes = _create_request_attributes(
948935
config,
949-
is_experimental_mode,
950936
helper._generate_content_config_key_allowlist,
951937
)
952938
with helper.start_span_as_current_span(
@@ -955,7 +941,10 @@ async def instrumented_generate_content_stream(
955941
end_on_exit=False,
956942
) as span:
957943
span.set_attributes(request_attributes)
958-
if not is_experimental_mode:
944+
if (
945+
not helper.sem_conv_opt_in_mode
946+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
947+
):
959948
helper.process_request(contents, config, span)
960949
try:
961950
response_async_generator = await wrapped_func(
@@ -986,7 +975,10 @@ async def _response_async_generator_wrapper():
986975
with trace.use_span(span, end_on_exit=True):
987976
try:
988977
async for response in response_async_generator:
989-
if is_experimental_mode:
978+
if (
979+
helper.sem_conv_opt_in_mode
980+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
981+
):
990982
helper._update_response(response)
991983
if response.candidates:
992984
candidates += response.candidates

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py

Lines changed: 52 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from unittest.mock import patch
1818

1919
import pytest
20-
from google.genai.types import GenerateContentConfig
20+
from google.genai.types import GenerateContentConfig, Part
2121
from pydantic import BaseModel, Field
2222

2323
from opentelemetry.instrumentation._semconv import (
@@ -180,6 +180,57 @@ def test_records_system_prompt_as_log(self):
180180
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
181181
self.assertEqual(event_record.body["content"], "foo")
182182

183+
@patch.dict(
184+
"os.environ",
185+
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
186+
)
187+
def test_system_prompt_passed_as_list_of_text(self):
188+
config = GenerateContentConfig(
189+
system_instruction=["help", "me please."]
190+
)
191+
self.configure_valid_response()
192+
self.generate_content(
193+
model="gemini-2.0-flash", contents="Some input", config=config
194+
)
195+
self.otel.assert_has_event_named("gen_ai.system.message")
196+
event_record = self.otel.get_event_named("gen_ai.system.message")
197+
self.assertEqual(event_record.body["content"], "help me please.")
198+
199+
@patch.dict(
200+
"os.environ",
201+
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
202+
)
203+
def test_system_prompt_passed_as_list_of_text_parts(self):
204+
config = GenerateContentConfig(
205+
system_instruction=[
206+
Part.from_text(text="help"),
207+
Part.from_text(text="me please."),
208+
]
209+
)
210+
self.configure_valid_response()
211+
self.generate_content(
212+
model="gemini-2.0-flash", contents="Some input", config=config
213+
)
214+
self.otel.assert_has_event_named("gen_ai.system.message")
215+
event_record = self.otel.get_event_named("gen_ai.system.message")
216+
self.assertEqual(event_record.body["content"], "help me please.")
217+
218+
@patch.dict(
219+
"os.environ",
220+
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
221+
)
222+
def test_system_prompt_passed_is_invalid(self):
223+
config = GenerateContentConfig(
224+
system_instruction=[
225+
Part.from_uri(file_uri="test.jpg"),
226+
]
227+
)
228+
self.configure_valid_response()
229+
self.generate_content(
230+
model="gemini-2.0-flash", contents="Some input", config=config
231+
)
232+
self.otel.assert_does_not_have_event_named("gen_ai.system.message")
233+
183234
@patch.dict(
184235
"os.environ",
185236
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"},

0 commit comments

Comments (0)