diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py index 8e61a68810..1eddfbcb3d 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py @@ -16,7 +16,7 @@ from opentelemetry.instrumentation.langchain.version import __version__ from opentelemetry.instrumentation.utils import unwrap from opentelemetry.metrics import get_meter -from opentelemetry.semconv_ai import Meters, SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY +from opentelemetry.semconv_ai import Meters, SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, SpanAttributes from opentelemetry.trace import get_tracer from opentelemetry.trace.propagation import set_span_in_context from opentelemetry.trace.propagation.tracecontext import ( @@ -37,10 +37,23 @@ def __init__( exception_logger=None, disable_trace_context_propagation=False, use_legacy_attributes: bool = True, + metadata_key_prefix: str = SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES ): + """Create a Langchain instrumentor instance. + + Args: + exception_logger: A callable that takes an Exception as input. This will be + used to log exceptions that occur during instrumentation. If None, exceptions will not be logged. + disable_trace_context_propagation: If True, disables trace context propagation to LLM providers. + use_legacy_attributes: If True, uses span attributes for Inputs/Outputs instead of events. + metadata_key_prefix: Prefix for metadata keys added to spans. Defaults to + `SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES`. + Useful for use with other backends.
+ """ super().__init__() Config.exception_logger = exception_logger Config.use_legacy_attributes = use_legacy_attributes + Config.metadata_key_prefix = metadata_key_prefix self.disable_trace_context_propagation = disable_trace_context_propagation def instrumentation_dependencies(self) -> Collection[str]: diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py index 6b2a7eda07..cd4a022ec6 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py @@ -27,6 +27,7 @@ LLMResult, ) from opentelemetry import context as context_api +from opentelemetry.instrumentation.langchain.config import Config from opentelemetry.instrumentation.langchain.event_emitter import emit_event from opentelemetry.instrumentation.langchain.event_models import ( ChoiceEvent, @@ -290,7 +291,7 @@ def _create_span( for key, value in sanitized_metadata.items(): _set_span_attribute( span, - f"{SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.{key}", + f"{Config.metadata_key_prefix}.{key}", value, ) @@ -752,7 +753,7 @@ def _handle_error( return span = self._get_span(run_id) - span.set_status(Status(StatusCode.ERROR)) + span.set_status(Status(StatusCode.ERROR, str(error))) span.record_exception(error) self._end_span(span, run_id) diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/config.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/config.py index d1492ae947..b2bdcdaa3b 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/config.py +++ 
b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/config.py @@ -1,9 +1,11 @@ from typing import Optional from opentelemetry._logs import Logger +from opentelemetry.semconv_ai import SpanAttributes class Config: exception_logger = None use_legacy_attributes = True event_logger: Optional[Logger] = None + metadata_key_prefix: str = SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py index 60eb7caa4a..e80147d968 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py @@ -346,7 +346,7 @@ def set_chat_response_usage( ) _set_span_attribute( span, - SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, + SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens, ) if record_token_usage: diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py b/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py index 16993f4b55..6bcfb94936 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py @@ -757,7 +757,7 @@ async def test_astream_with_events_with_content( assert len(chunks) == 144 logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs( @@ -802,7 +802,7 @@ async def test_astream_with_events_with_no_content( assert len(chunks) == 144 logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs(logs[0], "gen_ai.user.message", {}) diff --git 
a/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py b/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py index 443639e8fe..cbafaad10d 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py @@ -93,7 +93,7 @@ def test_sequential_chain_with_events_with_content( ] == [span.name for span in spans] logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs( @@ -107,12 +107,12 @@ def test_sequential_chain_with_events_with_content( ) # Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "unknown", - # "message": {"content": response["output_text"]}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", _choice_event) + _choice_event = { + "index": 0, + "finish_reason": "unknown", + "message": {"content": response["output_text"]}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", _choice_event) @pytest.mark.vcr @@ -139,14 +139,14 @@ def test_sequential_chain_with_events_with_no_content( ] == [span.name for span in spans] logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs(logs[0], "gen_ai.user.message", {}) # Validate AI choice Event - # choice_event = {"index": 0, "finish_reason": "unknown", "message": {}} - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) + choice_event = {"index": 0, "finish_reason": "unknown", "message": {}} + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict): diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py b/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py index 043f7e405f..ea194370df 100644 --- 
a/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py @@ -185,7 +185,7 @@ class Joke(BaseModel): assert output_parser_task_span.parent.span_id == workflow_span.context.span_id logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -198,23 +198,23 @@ class Joke(BaseModel): ) # Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "function_call", - # "message": {"content": ""}, - # "tool_calls": [ - # { - # "id": "", - # "function": { - # "name": "Joke", - # "arguments": '{"setup":"Why couldn\'t the bicycle stand up by itself?","punchline":"It was two ' - # 'tired!"}', - # }, - # "type": "function", - # } - # ], - # } - # assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) + _choice_event = { + "index": 0, + "finish_reason": "function_call", + "message": {"content": ""}, + "tool_calls": [ + { + "id": "", + "function": { + "name": "Joke", + "arguments": '{"setup":"Why couldn\'t the bicycle stand up by itself?","punchline":"It was two ' + 'tired!"}', + }, + "type": "function", + } + ], + } + assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) @pytest.mark.vcr @@ -269,7 +269,7 @@ class Joke(BaseModel): assert output_parser_task_span.parent.span_id == workflow_span.context.span_id logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -278,13 +278,13 @@ class Joke(BaseModel): assert_message_in_logs(logs[1], "gen_ai.user.message", {}) # Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "function_call", - # "message": {}, - # "tool_calls": [{"function": {"name": "Joke"}, "id": "", "type": "function"}], - # } - # assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) + 
_choice_event = { + "index": 0, + "finish_reason": "function_call", + "message": {}, + "tool_calls": [{"function": {"name": "Joke"}, "id": "", "type": "function"}], + } + assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) @pytest.mark.vcr @@ -382,7 +382,7 @@ async def test_async_lcel_with_events_with_content( assert output_parser_task_span.parent.span_id == workflow_span.context.span_id logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs( @@ -394,12 +394,12 @@ async def test_async_lcel_with_events_with_content( assert response != "" # Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "stop", - # "message": {"content": response}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", _choice_event) + _choice_event = { + "index": 0, + "finish_reason": "stop", + "message": {"content": response}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", _choice_event) @pytest.mark.vcr @@ -441,18 +441,18 @@ async def test_async_lcel_with_events_with_no_content( assert output_parser_task_span.parent.span_id == workflow_span.context.span_id logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs(logs[0], "gen_ai.user.message", {}) # Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "stop", - # "message": {}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", _choice_event) + _choice_event = { + "index": 0, + "finish_reason": "stop", + "message": {}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", _choice_event) @pytest.mark.vcr @@ -907,7 +907,7 @@ class Joke(BaseModel): ) == set([span.name for span in spans]) logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -920,23 +920,23 @@ class Joke(BaseModel): ) # 
Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "function_call", - # "message": {"content": ""}, - # "tool_calls": [ - # { - # "id": "", - # "function": { - # "name": "Joke", - # "arguments": '{"setup":"Why couldn\'t the bicycle stand up by ' - # 'itself?","punchline":"Because it was two tired!"}', - # }, - # "type": "function", - # } - # ], - # } - # assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) + _choice_event = { + "index": 0, + "finish_reason": "function_call", + "message": {"content": ""}, + "tool_calls": [ + { + "id": "", + "function": { + "name": "Joke", + "arguments": '{"setup":"Why couldn\'t the bicycle stand up by ' + 'itself?","punchline":"Because it was two tired!"}', + }, + "type": "function", + } + ], + } + assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) @pytest.mark.vcr @@ -983,7 +983,7 @@ class Joke(BaseModel): ) == set([span.name for span in spans]) logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -992,13 +992,13 @@ class Joke(BaseModel): assert_message_in_logs(logs[1], "gen_ai.user.message", {}) # Validate AI choice Event - # _choice_event = { - # "index": 0, - # "finish_reason": "function_call", - # "message": {}, - # "tool_calls": [{"function": {"name": "Joke"}, "id": "", "type": "function"}], - # } - # assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) + _choice_event = { + "index": 0, + "finish_reason": "function_call", + "message": {}, + "tool_calls": [{"function": {"name": "Joke"}, "id": "", "type": "function"}], + } + assert_message_in_logs(logs[2], "gen_ai.choice", _choice_event) def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict): diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py b/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py index 
ecda85b167..096fb176d8 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py @@ -206,7 +206,7 @@ def test_custom_llm_with_events_with_content( "finish_reason": "unknown", "message": {"content": response}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -250,7 +250,7 @@ def test_custom_llm_with_events_with_no_content( "finish_reason": "unknown", "message": {}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -338,7 +338,7 @@ def test_openai_with_events_with_content( assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 2534 logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -354,7 +354,7 @@ def test_openai_with_events_with_content( "finish_reason": "stop", "message": {"content": response.content}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -389,7 +389,7 @@ def test_openai_with_events_with_no_content( assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 2534 logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -403,7 +403,7 @@ def test_openai_with_events_with_no_content( "finish_reason": "stop", "message": {}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], 
"gen_ai.choice", choice_event) @pytest.mark.vcr @@ -536,7 +536,7 @@ class Joke(BaseModel): assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 111 logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -564,7 +564,7 @@ class Joke(BaseModel): } ], } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -609,7 +609,7 @@ class Joke(BaseModel): assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 111 logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -624,7 +624,7 @@ class Joke(BaseModel): "message": {}, "tool_calls": [{"function": {"name": "Joke"}, "id": "", "type": "function"}], } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -745,7 +745,7 @@ def test_anthropic_with_events_with_content( ) logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -763,7 +763,7 @@ def test_anthropic_with_events_with_content( "finish_reason": "unknown", "message": {"content": response.content}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -801,7 +801,7 @@ def test_anthropic_with_events_with_no_content( ) logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], 
"gen_ai.system.message", {}) @@ -815,7 +815,7 @@ def test_anthropic_with_events_with_no_content( "finish_reason": "unknown", "message": {}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -943,7 +943,7 @@ def test_bedrock_with_events_with_content( assert bedrock_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 43 logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -961,7 +961,7 @@ def test_bedrock_with_events_with_content( "finish_reason": "unknown", "message": {"content": response.content}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -1005,7 +1005,7 @@ def test_bedrock_with_events_with_no_content( assert bedrock_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 43 logs = log_exporter.get_finished_logs() - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -1019,7 +1019,7 @@ def test_bedrock_with_events_with_no_content( "finish_reason": "unknown", "message": {}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # from: https://stackoverflow.com/a/41599695/2749989 @@ -1110,7 +1110,7 @@ def test_trace_propagation_with_events_with_content( logs = log_exporter.get_finished_logs() if issubclass(LLM, ChatOpenAI): - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -1132,7 +1132,7 @@ def test_trace_propagation_with_events_with_content( "finish_reason": "length", "message": 
{"content": response.content}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) else: assert len(logs) == 2 @@ -1155,7 +1155,7 @@ def test_trace_propagation_with_events_with_content( "finish_reason": "length", "message": {"content": response}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -1186,7 +1186,7 @@ def test_trace_propagation_with_events_with_no_content( logs = log_exporter.get_finished_logs() if issubclass(LLM, ChatOpenAI): - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -1200,7 +1200,7 @@ def test_trace_propagation_with_events_with_no_content( "finish_reason": "length", "message": {}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) else: assert len(logs) == 2 @@ -1217,7 +1217,7 @@ def test_trace_propagation_with_events_with_no_content( "finish_reason": "length", "message": {}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -1305,7 +1305,7 @@ def test_trace_propagation_stream_with_events_with_content( "finish_reason": "length", "message": {"content": "".join(chunks)}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -1354,7 +1354,7 @@ def test_trace_propagation_stream_with_events_with_no_content( "finish_reason": "length", "message": {}, } - 
# assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.asyncio @@ -1419,7 +1419,7 @@ async def test_trace_propagation_async_with_events_with_content( logs = log_exporter.get_finished_logs() if issubclass(LLM, ChatOpenAI): - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs( @@ -1443,7 +1443,7 @@ async def test_trace_propagation_async_with_events_with_content( "content": response.content, }, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) else: assert len(logs) == 2 @@ -1466,7 +1466,7 @@ async def test_trace_propagation_async_with_events_with_content( "finish_reason": "length", "message": {"content": response}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.asyncio @@ -1498,7 +1498,7 @@ async def test_trace_propagation_async_with_events_with_no_content( logs = log_exporter.get_finished_logs() if issubclass(LLM, ChatOpenAI): - assert len(logs) == 2 + assert len(logs) == 3 # Validate system message Event assert_message_in_logs(logs[0], "gen_ai.system.message", {}) @@ -1512,7 +1512,7 @@ async def test_trace_propagation_async_with_events_with_no_content( "finish_reason": "length", "message": {}, } - # assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) # logs[2] does not exist + assert_message_in_logs(logs[2], "gen_ai.choice", choice_event) else: assert len(logs) == 2 @@ -1529,7 +1529,7 @@ async def test_trace_propagation_async_with_events_with_no_content( "finish_reason": "length", "message": {}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # 
logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.asyncio @@ -1621,7 +1621,7 @@ async def test_trace_propagation_stream_async_with_events_with_content( "finish_reason": "length", "message": {"content": "".join(chunks)}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.asyncio @@ -1671,7 +1671,7 @@ async def test_trace_propagation_stream_async_with_events_with_no_content( "finish_reason": "length", "message": {}, } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) # logs[1] may not exist + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict): diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py b/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py index e10fca6adb..010f8c5bb7 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py @@ -56,7 +56,7 @@ def test_structured_output_with_events_with_content( assert expected_spans.issubset(span_names) logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs(logs[0], "gen_ai.user.message", {"content": query_text}) @@ -64,12 +64,12 @@ def test_structured_output_with_events_with_content( assert _result != "" # Validate AI choice Event - # choice_event = { - # "index": 0, - # "finish_reason": "stop", - # "message": {"content": _result.model_dump_json()}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) + choice_event = { + "index": 0, + "finish_reason": "stop", + "message": {"content": 
_result.model_dump_json()}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) @pytest.mark.vcr @@ -88,18 +88,18 @@ def test_structured_output_with_events_with_no_content( assert expected_spans.issubset(span_names) logs = log_exporter.get_finished_logs() - assert len(logs) == 1 + assert len(logs) == 2 # Validate user message Event assert_message_in_logs(logs[0], "gen_ai.user.message", {}) # Validate AI choice Event - # choice_event = { - # "index": 0, - # "finish_reason": "stop", - # "message": {}, - # } - # assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) + choice_event = { + "index": 0, + "finish_reason": "stop", + "message": {}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event) def assert_message_in_logs(log: LogData, event_name: str, expected_content: dict):