from __future__ import annotations

from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, ContextManager, Iterator

from opentelemetry import context

from ...constants import ONE_SECOND_IN_NANOSECONDS

if TYPE_CHECKING:
    from ...main import Logfire, LogfireSpan
    from .types import EndpointConfig


__all__ = ('instrument_llm_provider',)


def instrument_llm_provider(
    logfire: Logfire,
    client: Any,
    suppress_otel: bool,
    scope_suffix: str,
    get_endpoint_config_fn: Callable[[Any], EndpointConfig],
    on_response_fn: Callable[[Any, LogfireSpan], Any],
    is_async_client_fn: Callable[[Any], bool],
) -> ContextManager[None]:
    """Instrument the provided `client` with `logfire`.

    The client's `_request` method is patched in place, so instrumentation is
    active as soon as this function returns. The returned context manager can
    be used to remove the instrumentation again.
    """
    logfire_llm = logfire.with_settings(custom_scope_suffix=scope_suffix.lower(), tags=['LLM'])

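    # Patch the client in place: keep a reference to the original `_request`
    # method so `uninstrument_context` below can restore it, and flag the
    # client as instrumented.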
    client._is_instrumented_by_logfire = True
    client._original_request_method = original_request_method = client._request

    is_async = is_async_client_fn(client)

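    # Returns `(message_template, span_data, kwargs)`; `message_template` is
    # `None` when the call should pass through uninstrumented (suppression is
    # active, or the endpoint isn't recognized).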
    def _instrumentation_setup(**kwargs: Any) -> Any:
        if context.get_value('suppress_instrumentation'):
            return None, None, kwargs

        options = kwargs['options']
        try:
            message_template, span_data, content_from_stream = get_endpoint_config_fn(options)
        except ValueError as exc:
            logfire_llm.warn(
                'Unable to instrument {suffix} API call: {error}', suffix=scope_suffix, error=str(exc), kwargs=kwargs
            )
            return None, None, kwargs

        span_data['async'] = is_async

        stream = kwargs['stream']

        if stream and content_from_stream:
            stream_cls = kwargs['stream_cls']
            assert stream_cls is not None, 'Expected `stream_cls` when streaming'

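            # Wrap the provider's stream class so each chunk is recorded as the
            # user consumes the stream; `record_streaming` below then logs the
            # combined content and the duration once the stream is exhausted.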
            if is_async:

                class LogfireInstrumentedAsyncStream(stream_cls):
                    async def __stream__(self) -> AsyncIterator[Any]:
                        with record_streaming(logfire_llm, span_data, content_from_stream) as record_chunk:
                            async for chunk in super().__stream__():  # type: ignore
                                record_chunk(chunk)
                                yield chunk

                kwargs['stream_cls'] = LogfireInstrumentedAsyncStream
            else:

                class LogfireInstrumentedStream(stream_cls):
                    def __stream__(self) -> Iterator[Any]:
                        with record_streaming(logfire_llm, span_data, content_from_stream) as record_chunk:
                            for chunk in super().__stream__():  # type: ignore
                                record_chunk(chunk)
                                yield chunk

                kwargs['stream_cls'] = LogfireInstrumentedStream

        return message_template, span_data, kwargs

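    # These wrappers replace `client._request`. Non-streaming calls run
    # entirely inside a Logfire span; for streaming calls the span only covers
    # the initial request, and the instrumented stream classes above record the
    # chunks as they arrive.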
    def instrumented_llm_request_sync(**kwargs: Any) -> Any:
        message_template, span_data, kwargs = _instrumentation_setup(**kwargs)
        if message_template is None:
            return original_request_method(**kwargs)
        stream = kwargs['stream']
        with logfire_llm.span(message_template, **span_data) as span:
            with maybe_suppress_instrumentation(suppress_otel):
                if stream:
                    return original_request_method(**kwargs)
                else:
                    return on_response_fn(original_request_method(**kwargs), span)

    async def instrumented_llm_request_async(**kwargs: Any) -> Any:
        message_template, span_data, kwargs = _instrumentation_setup(**kwargs)
        if message_template is None:
            return await original_request_method(**kwargs)
        stream = kwargs['stream']
        with logfire_llm.span(message_template, **span_data) as span:
            with maybe_suppress_instrumentation(suppress_otel):
                if stream:
                    return await original_request_method(**kwargs)
                else:
                    return on_response_fn(await original_request_method(**kwargs), span)

    if is_async:
        client._request = instrumented_llm_request_async
    else:
        client._request = instrumented_llm_request_sync

    @contextmanager
    def uninstrument_context():
        """Context manager to remove instrumentation from the LLM client.

        The user isn't required (or even expected) to use this context manager,
        which is why the instrumentation has already been applied above rather
        than on `__enter__`. It exists mostly for tests, and just in case users
        want it.
        """
        try:
            yield
        finally:
            client._request = client._original_request_method
            del client._original_request_method
            client._is_instrumented_by_logfire = False

    return uninstrument_context()


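# Setting the `suppress_instrumentation` context key makes `_instrumentation_setup`
# above (and any other instrumentation that checks the same key) a no-op for the
# duration of the wrapped request, presumably to avoid nested duplicate spans
# from lower-level (e.g. HTTP) instrumentation of the same call.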
@contextmanager
def maybe_suppress_instrumentation(suppress: bool) -> Iterator[None]:
    if suppress:
        new_context = context.set_value('suppress_instrumentation', True)
        token = context.attach(new_context)
        try:
            yield
        finally:
            context.detach(token)
    else:
        yield


@contextmanager
def record_streaming(
    logfire_llm: Logfire,
    span_data: dict[str, Any],
    content_from_stream: Callable[[Any], str | None],
):
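    """Yield a `record_chunk` callback that collects streamed content, then log
    the combined content and the total streaming duration once the stream is
    exhausted (or abandoned)."""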
    content: list[str] = []

    def record_chunk(chunk: Any) -> None:
        chunk_content = content_from_stream(chunk)
        if chunk_content is not None:
            content.append(chunk_content)

    timer = logfire_llm._config.ns_timestamp_generator  # type: ignore
    start = timer()
    try:
        yield record_chunk
    finally:
        duration = (timer() - start) / ONE_SECOND_IN_NANOSECONDS
        logfire_llm.info(
            'streaming response from {request_data[model]!r} took {duration:.2f}s',
            **span_data,
            duration=duration,
            response_data={'combined_chunk_content': ''.join(content), 'chunk_count': len(content)},
        )
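

# --- Usage sketch (illustrative, not part of this module) ---
# A provider integration is expected to wire this up roughly as follows; the
# helper names (`get_endpoint_config`, `on_response`, `is_async_client`) and
# the `openai_client` object are hypothetical stand-ins:
#
#     uninstrument = instrument_llm_provider(
#         logfire,
#         openai_client,
#         suppress_otel=True,
#         scope_suffix='OpenAI',
#         get_endpoint_config_fn=get_endpoint_config,
#         on_response_fn=on_response,
#         is_async_client_fn=is_async_client,
#     )
#     # Instrumentation is already active here; entering the returned context
#     # manager is only needed to undo it:
#     with uninstrument:
#         ...  # calls through `openai_client` are traced
#     # `openai_client._request` is restored on exit.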