From ce96aaf5357a170c749c8551dfc66f12fc1ca03c Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 19 Feb 2025 17:21:02 +0000
Subject: [PATCH 01/14] Refactoring code

---
 .fernignore                                   |   2 +
 .../context.py => context_variables.py}       |  30 +++--
 src/humanloop/eval_utils/run.py               | 119 +----------------
 src/humanloop/otel/exporter.py                |   2 +-
 src/humanloop/overload.py                     | 120 ++++++++++++++++++
 src/humanloop/utilities/prompt.py             |   4 +-
 6 files changed, 148 insertions(+), 129 deletions(-)
 rename src/humanloop/{eval_utils/context.py => context_variables.py} (78%)
 create mode 100644 src/humanloop/overload.py

diff --git a/.fernignore b/.fernignore
index 670c4ceb..ee7245af 100644
--- a/.fernignore
+++ b/.fernignore
@@ -3,6 +3,8 @@
 src/humanloop/eval_utils
 src/humanloop/prompt_utils.py
 src/humanloop/client.py
+src/humanloop/overload.py
+src/humanloop/context_variables.py
 
 mypy.ini
 README.md

diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/context_variables.py
similarity index 78%
rename from src/humanloop/eval_utils/context.py
rename to src/humanloop/context_variables.py
index aa048abf..9b7b1070 100644
--- a/src/humanloop/eval_utils/context.py
+++ b/src/humanloop/context_variables.py
@@ -4,6 +4,18 @@
 from opentelemetry.trace import Tracer
 
 
+class _UnsafeContextRead(RuntimeError):
+    """Raised when reading a context variable that was never set."""
+
+    message: str
+
+    def __init__(self, context_variable_name: str):
+        self.message = f"Attempting to read from a Context when variable {context_variable_name} was not set."
+        super().__init__(
+            self.message
+        )
+
+
 @dataclass
 class EvaluationContext:
     """Context Log to Humanloop.
@@ -27,9 +39,8 @@ class EvaluationContext:
     run_id: str
 
 
-_EVALUATION_CONTEXT_VAR: ContextVar[EvaluationContext] = ContextVar("__EVALUATION_CONTEXT")
-
-_UnsafeContextRead = RuntimeError("Attempting to read from thread Context when variable was not set.")
+_EVALUATION_CONTEXT_VAR_NAME = "__EVALUATION_CONTEXT"
+_EVALUATION_CONTEXT_VAR: ContextVar[EvaluationContext] = ContextVar(_EVALUATION_CONTEXT_VAR_NAME)
 
 
 def set_evaluation_context(context: EvaluationContext):
@@ -40,7 +51,7 @@ def get_evaluation_context() -> EvaluationContext:
     try:
         return _EVALUATION_CONTEXT_VAR.get()
     except LookupError:
-        raise _UnsafeContextRead
+        raise _UnsafeContextRead(_EVALUATION_CONTEXT_VAR_NAME)
 
 
 def evaluation_context_set() -> bool:
@@ -60,12 +71,12 @@ def log_belongs_to_evaluated_file(log_args: dict[str, Any]) -> bool:
         return False
 
 
-def is_evaluated_file(file_path) -> bool:
+def is_evaluated_file(file_path: str) -> bool:
     try:
         evaluation_context = _EVALUATION_CONTEXT_VAR.get()
         return evaluation_context.path == file_path
     except LookupError:
-        raise _UnsafeContextRead
+        raise _UnsafeContextRead(_EVALUATION_CONTEXT_VAR_NAME)
 
 
 @dataclass
@@ -78,7 +89,8 @@ def in_prompt_utility(self) -> bool:
         return self._in_prompt_utility > 0
 
 
-_PROMPT_UTILITY_CONTEXT_VAR: ContextVar[PromptUtilityContext] = ContextVar("__PROMPT_UTILITY_CONTEXT")
+_PROMPT_UTILITY_CONTEXT_VAR_NAME = "__PROMPT_UTILITY_CONTEXT"
+_PROMPT_UTILITY_CONTEXT_VAR: ContextVar[PromptUtilityContext] = ContextVar(_PROMPT_UTILITY_CONTEXT_VAR_NAME)
 
 
 def in_prompt_utility_context() -> bool:
@@ -108,7 +120,7 @@ def get_prompt_utility_context() -> PromptUtilityContext:
     try:
         return _PROMPT_UTILITY_CONTEXT_VAR.get()
    except LookupError:
-        raise _UnsafeContextRead
+        raise _UnsafeContextRead(_PROMPT_UTILITY_CONTEXT_VAR_NAME)
 
 
 def unset_prompt_utility_context():
@@ -120,4 +132,4 @@ def unset_prompt_utility_context():
             prompt_utility_context._in_prompt_utility -= 1
         else:
             raise ValueError("No matching 
unset_prompt_utility_context() call.") except LookupError: - raise _UnsafeContextRead + raise _UnsafeContextRead(_PROMPT_UTILITY_CONTEXT_VAR_NAME) diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py index 524768d6..f21cd985 100644 --- a/src/humanloop/eval_utils/run.py +++ b/src/humanloop/eval_utils/run.py @@ -23,26 +23,13 @@ from functools import partial from logging import INFO from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union -import warnings from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse from humanloop.core.api_error import ApiError -from humanloop.eval_utils.context import ( - EvaluationContext, - get_evaluation_context, - get_prompt_utility_context, - in_prompt_utility_context, - log_belongs_to_evaluated_file, - set_evaluation_context, -) +from humanloop.context_variables import EvaluationContext, set_evaluation_context from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File # We use TypedDicts for requests, which is consistent with the rest of the SDK -from humanloop.evaluators.client import EvaluatorsClient -from humanloop.flows.client import FlowsClient -from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME -from humanloop.otel.helpers import write_to_opentelemetry_span -from humanloop.prompts.client import PromptsClient from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator from humanloop.requests import FlowKernelRequestParams as FlowDict @@ -50,7 +37,6 @@ from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict from humanloop.requests import PromptKernelRequestParams as PromptDict from humanloop.requests import ToolKernelRequestParams as ToolDict -from humanloop.tools.client import ToolsClient from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats from humanloop.types import DatapointResponse as Datapoint from humanloop.types import EvaluationResponse, EvaluationStats @@ -60,14 +46,9 @@ from humanloop.types import NumericEvaluatorStatsResponse as NumericStats from humanloop.types import PromptKernelRequest as Prompt from humanloop.types import ToolKernelRequest as Tool -from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse -from humanloop.types.create_flow_log_response import CreateFlowLogResponse -from humanloop.types.create_prompt_log_response import CreatePromptLogResponse -from humanloop.types.create_tool_log_response import CreateToolLogResponse from humanloop.types.datapoint_response import DatapointResponse from humanloop.types.dataset_response import DatasetResponse from humanloop.types.evaluation_run_response import EvaluationRunResponse -from humanloop.types.prompt_call_response import PromptCallResponse from humanloop.types.run_stats_response import RunStatsResponse from pydantic import ValidationError @@ -97,9 +78,6 @@ RESET = "\033[0m" -CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient) - - class HumanloopUtilityError(Exception): def __init__(self, message): self.message = message @@ -108,101 +86,6 @@ def __str__(self): return self.message -def prompt_call_evaluation_aware(client: PromptsClient) -> PromptsClient: - client._call = client.call - - def _overload_call(self, **kwargs) -> PromptCallResponse: - if in_prompt_utility_context(): - kwargs = {**kwargs, 
"save": False} - - try: - response = self._call(**kwargs) - response = typing.cast(PromptCallResponse, response) - except Exception as e: - # TODO: Bug found in backend: not specifying a model 400s but creates a File - raise HumanloopUtilityError(message=str(e)) from e - - response_copy = response.dict() - prompt_utility_context = get_prompt_utility_context() - for idx, _ in enumerate(response_copy.get("logs", [])): - del response_copy["logs"][idx]["created_at"] - for idx, _ in enumerate(response_copy["prompt"].get("environments", [])): - del response_copy["prompt"]["environments"][idx]["created_at"] - del response_copy["prompt"]["last_used_at"] - del response_copy["prompt"]["updated_at"] - del response_copy["prompt"]["created_at"] - del response_copy["start_time"] - del response_copy["end_time"] - - with prompt_utility_context.tracer.start_as_current_span(HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME) as span: - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, - value=response_copy, - ) - return response - else: - return self._call(**kwargs) - - # Replace the original log method with the overloaded one - client.call = types.MethodType(_overload_call, client) - # Return the client with the overloaded log method - logger.debug("Overloaded the .log method of %s", client) - return client - - -def log_with_evaluation_context(client: CLIENT_TYPE) -> CLIENT_TYPE: - """ - Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT. - - This makes the overloaded log actions be aware of whether the created Log is - part of an Evaluation (e.g. one started by eval_utils.run_eval). - """ - # Copy the original log method in a hidden attribute - client._log = client.log - - def _overload_log( - self, **kwargs - ) -> Union[ - CreatePromptLogResponse, - CreateToolLogResponse, - CreateFlowLogResponse, - CreateEvaluatorLogResponse, - ]: - if log_belongs_to_evaluated_file(log_args=kwargs): - evaluation_context = get_evaluation_context() - for attribute in ["source_datapoint_id", "run_id"]: - if attribute not in kwargs or kwargs[attribute] is None: - kwargs[attribute] = getattr(evaluation_context, attribute) - - # Call the original .log method - logger.debug( - "Logging %s inside _overloaded_log on Thread %s", - kwargs, - evaluation_context, - threading.get_ident(), - ) - - try: - response = self._log(**kwargs) - except Exception as e: - logger.error(f"Failed to log: {e}") - raise e - - # Notify the run_eval utility about one Log being created - if log_belongs_to_evaluated_file(log_args=kwargs): - evaluation_context = get_evaluation_context() - evaluation_context.upload_callback(log_id=response.id) - - return response - - # Replace the original log method with the overloaded one - client.log = types.MethodType(_overload_log, client) - # Return the client with the overloaded log method - logger.debug("Overloaded the .call method of %s", client) - return client - - def run_eval( client: "BaseHumanloop", file: File, diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py index 00ec52e3..d5cb4231 100644 --- a/src/humanloop/otel/exporter.py +++ b/src/humanloop/otel/exporter.py @@ -11,7 +11,7 @@ from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult from humanloop.core import ApiError as HumanloopApiError -from humanloop.eval_utils.context import ( +from humanloop.context_variables import ( EvaluationContext, evaluation_context_set, get_evaluation_context, diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py 
new file mode 100644
index 00000000..c80d96e8
--- /dev/null
+++ b/src/humanloop/overload.py
@@ -0,0 +1,120 @@
+import logging
+import threading
+import types
+from typing import TypeVar, Union
+import typing
+
+from humanloop.context_variables import (
+    get_evaluation_context,
+    get_prompt_utility_context,
+    in_prompt_utility_context,
+    log_belongs_to_evaluated_file,
+)
+from humanloop.eval_utils.run import HumanloopUtilityError
+from humanloop.flows.client import FlowsClient
+from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME
+from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.prompts.client import PromptsClient
+from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
+from humanloop.types.create_flow_log_response import CreateFlowLogResponse
+from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
+from humanloop.types.create_tool_log_response import CreateToolLogResponse
+from humanloop.types.prompt_call_response import PromptCallResponse
+
+logger = logging.getLogger("humanloop.sdk")
+
+
+CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient)
+
+
+def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE:
+    """
+    Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT.
+
+    This makes the overloaded log actions aware of whether the created Log is
+    part of an Evaluation (e.g. one started by eval_utils.run_eval).
+    """
+    # Copy the original log method in a hidden attribute
+    client._log = client.log
+
+    def _overload_log(
+        self, **kwargs
+    ) -> Union[
+        CreatePromptLogResponse,
+        CreateToolLogResponse,
+        CreateFlowLogResponse,
+        CreateEvaluatorLogResponse,
+    ]:
+        if log_belongs_to_evaluated_file(log_args=kwargs):
+            evaluation_context = get_evaluation_context()
+            for attribute in ["source_datapoint_id", "run_id"]:
+                if attribute not in kwargs or kwargs[attribute] is None:
+                    kwargs[attribute] = getattr(evaluation_context, attribute)
+            logger.debug(
+                "Logging %s inside _overloaded_log with EvaluationContext %s on Thread %s",
+                kwargs,
+                evaluation_context,
+                threading.get_ident(),
+            )
+
+        # Call the original .log method
+        try:
+            response = self._log(**kwargs)
+        except Exception as e:
+            logger.error(f"Failed to log: {e}")
+            raise e
+
+        # Notify the run_eval utility about one Log being created
+        if log_belongs_to_evaluated_file(log_args=kwargs):
+            evaluation_context = get_evaluation_context()
+            evaluation_context.upload_callback(log_id=response.id)
+
+        return response
+
+    # Replace the original log method with the overloaded one
+    client.log = types.MethodType(_overload_log, client)
+    # Return the client with the overloaded log method
+    logger.debug("Overloaded the .log method of %s", client)
+    return client
+
+
+def overload_prompt_call(client: PromptsClient) -> PromptsClient:
+    client._call = client.call
+
+    def _overload_call(self, **kwargs) -> PromptCallResponse:
+        if in_prompt_utility_context():
+            try:
+                response = self._call(**kwargs)
+                response = typing.cast(PromptCallResponse, response)
+            except Exception as e:
+                # TODO: Bug found in backend: not specifying a model 400s but creates a File
+                raise HumanloopUtilityError(message=str(e)) from e
+
+            response_copy = response.dict()
+            prompt_utility_context = get_prompt_utility_context()
+            for idx, _ in enumerate(response_copy.get("logs", [])):
+                del response_copy["logs"][idx]["created_at"]
+            for idx, _ in enumerate(response_copy["prompt"].get("environments", [])):
+                del response_copy["prompt"]["environments"][idx]["created_at"]
+            del response_copy["prompt"]["last_used_at"]
+            del response_copy["prompt"]["updated_at"]
+            del response_copy["prompt"]["created_at"]
+            del response_copy["start_time"]
+            del response_copy["end_time"]
+
+            with prompt_utility_context.tracer.start_as_current_span(HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME) as span:
+                write_to_opentelemetry_span(
+                    span=span,
+                    key=HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE,
+                    value=response_copy,
+                )
+            return response
+        else:
+            return self._call(**kwargs)
+
+    # Replace the original call method with the overloaded one
+    client.call = types.MethodType(_overload_call, client)
+    # Return the client with the overloaded call method
+    logger.debug("Overloaded the .call method of %s", client)
+    return client
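Both helpers in overload.py rely on the same monkey-patching idiom: stash the original bound method on the instance under a hidden attribute, then rebind a wrapper with `types.MethodType`. A minimal, runnable sketch of that idiom, with illustrative names (`Client`, `run_id` value) that are not part of the SDK:

    import types


    class Client:
        def log(self, **kwargs) -> str:
            return f"logged {kwargs}"


    def overload_log(client: Client) -> Client:
        # Stash the original bound method so the wrapper can delegate to it
        client._log = client.log

        def _overload_log(self, **kwargs) -> str:
            # Enrich the arguments before delegating to the original method
            kwargs.setdefault("run_id", "rn_hypothetical")
            return self._log(**kwargs)

        # Rebind the wrapper as the instance's .log method
        client.log = types.MethodType(_overload_log, client)
        return client


    client = overload_log(Client())
    assert "run_id" in client.log(path="demo")

Because `types.MethodType` binds per instance, only the wrapped client object is affected; other instances of the same class keep the original method.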
response_copy["prompt"]["environments"][idx]["created_at"] + del response_copy["prompt"]["last_used_at"] + del response_copy["prompt"]["updated_at"] + del response_copy["prompt"]["created_at"] + del response_copy["start_time"] + del response_copy["end_time"] + + with prompt_utility_context.tracer.start_as_current_span(HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME) as span: + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, + value=response_copy, + ) + return response + else: + return self._call(**kwargs) + + # Replace the original log method with the overloaded one + client.call = types.MethodType(_overload_call, client) + # Return the client with the overloaded log method + logger.debug("Overloaded the .log method of %s", client) + return client diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py index b02dcde2..ab189a6e 100644 --- a/src/humanloop/utilities/prompt.py +++ b/src/humanloop/utilities/prompt.py @@ -6,7 +6,7 @@ from opentelemetry.trace import Tracer from typing_extensions import Unpack -from humanloop.eval_utils.context import set_prompt_utility_context, unset_prompt_utility_context +from humanloop.context_variables import set_prompt_utility_context, unset_prompt_utility_context from humanloop.eval_utils.run import HumanloopUtilityError from humanloop.utilities.helpers import bind_args from humanloop.utilities.types import DecoratorPromptKernelRequestParams @@ -60,6 +60,8 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: except Exception as e: logger.error(f"Error calling {func.__name__}: {e}") output = None + # TODO: output ought to be None on errors, check + # all decorators output_stringified = jsonify_if_not_string( func=func, output=output, From ab78ce27f78e18bd443dddeca00254a04c9fb5a2 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Sun, 23 Feb 2025 13:44:44 +0000 Subject: [PATCH 02/14] simplify decorators --- src/humanloop/client.py | 29 +-- src/humanloop/context.py | 61 +++++ src/humanloop/context_variables.py | 135 ---------- src/humanloop/eval_utils/run.py | 5 +- src/humanloop/otel/constants.py | 8 +- src/humanloop/otel/exporter.py | 313 +++-------------------- src/humanloop/otel/helpers.py | 4 +- src/humanloop/otel/processor.py | 230 +---------------- src/humanloop/otel/processor/__init__.py | 237 ----------------- src/humanloop/otel/processor/prompts.py | 204 --------------- src/humanloop/overload.py | 103 +++----- src/humanloop/utilities/flow.py | 54 ++-- src/humanloop/utilities/prompt.py | 100 +------- src/humanloop/utilities/tool.py | 44 ++-- 14 files changed, 242 insertions(+), 1285 deletions(-) create mode 100644 src/humanloop/context.py delete mode 100644 src/humanloop/context_variables.py delete mode 100644 src/humanloop/otel/processor/__init__.py delete mode 100644 src/humanloop/otel/processor/prompts.py diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 21fbffcc..a77876c5 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -1,7 +1,7 @@ +from contextlib import contextmanager import os import typing -from typing import List, Optional, Sequence -from typing_extensions import Unpack +from typing import Any, List, Optional, Sequence import httpx from opentelemetry.sdk.resources import Resource @@ -10,7 +10,6 @@ from humanloop.core.client_wrapper import SyncClientWrapper from humanloop.eval_utils.run import prompt_call_evaluation_aware -from humanloop.utilities.types import DecoratorPromptKernelRequestParams from humanloop.eval_utils import 
From ab78ce27f78e18bd443dddeca00254a04c9fb5a2 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sun, 23 Feb 2025 13:44:44 +0000
Subject: [PATCH 02/14] simplify decorators

---
 src/humanloop/client.py                   |  29 +--
 src/humanloop/context.py                  |  61 +++++
 src/humanloop/context_variables.py        | 135 ----------
 src/humanloop/eval_utils/run.py           |   5 +-
 src/humanloop/otel/constants.py           |   8 +-
 src/humanloop/otel/exporter.py            | 313 +++--------------------
 src/humanloop/otel/helpers.py             |   4 +-
 src/humanloop/otel/processor.py           | 230 +----------------
 src/humanloop/otel/processor/__init__.py  | 237 -----------------
 src/humanloop/otel/processor/prompts.py   | 204 ---------------
 src/humanloop/overload.py                 | 103 +++-----
 src/humanloop/utilities/flow.py           |  54 ++--
 src/humanloop/utilities/prompt.py         | 100 +-------
 src/humanloop/utilities/tool.py           |  44 ++--
 14 files changed, 242 insertions(+), 1285 deletions(-)
 create mode 100644 src/humanloop/context.py
 delete mode 100644 src/humanloop/context_variables.py
 delete mode 100644 src/humanloop/otel/processor/__init__.py
 delete mode 100644 src/humanloop/otel/processor/prompts.py

diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 21fbffcc..a77876c5 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,7 +1,7 @@
+from contextlib import contextmanager
 import os
 import typing
-from typing import List, Optional, Sequence
-from typing_extensions import Unpack
+from typing import Any, List, Optional, Sequence
 
 import httpx
 from opentelemetry.sdk.resources import Resource
@@ -10,7 +10,6 @@
 from humanloop.core.client_wrapper import SyncClientWrapper
 from humanloop.eval_utils.run import prompt_call_evaluation_aware
-from humanloop.utilities.types import DecoratorPromptKernelRequestParams
 from humanloop.eval_utils import log_with_evaluation_context, run_eval
 from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
@@ -147,11 +146,11 @@
         else:
             self._opentelemetry_tracer = opentelemetry_tracer
 
+    @contextmanager
     def prompt(
         self,
         *,
-        path: Optional[str] = None,
-        **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams],  # type: ignore
+        path: str,
     ):
         """Decorator for declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code.
@@ -226,17 +225,14 @@ def call_llm(messages):
 
         :param prompt_kernel: Attributes that define the Prompt. See `class:DecoratorPromptKernelRequestParams`
         """
-        return prompt_decorator_factory(
-            opentelemetry_tracer=self._opentelemetry_tracer,
-            path=path,
-            **prompt_kernel,
-        )
+        yield prompt_decorator_factory(path=path)
 
     def tool(
         self,
         *,
-        path: Optional[str] = None,
-        **tool_kernel: Unpack[ToolKernelRequestParams],  # type: ignore
+        path: str,
+        attributes: dict[str, Any] | None = None,
+        setup_values: dict[str, Any] | None = None,
     ):
         """Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code.
@@ -309,14 +305,15 @@ def calculator(a: int, b: Optional[int]) -> int:
         return tool_decorator_factory(
             opentelemetry_tracer=self._opentelemetry_tracer,
             path=path,
-            **tool_kernel,
+            attributes=attributes,
+            setup_values=setup_values,
         )
 
     def flow(
         self,
         *,
         path: Optional[str] = None,
-        **flow_kernel: Unpack[FlowKernelRequestParams],  # type: ignore
+        attributes: dict[str, Any] | None = None,
     ):
         """Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code.
@@ -371,7 +368,7 @@ def entrypoint():
         return flow_decorator_factory(
             opentelemetry_tracer=self._opentelemetry_tracer,
             path=path,
-            **flow_kernel,
+            attributes=attributes,
         )

diff --git a/src/humanloop/context.py b/src/humanloop/context.py
new file mode 100644
index 00000000..f71f694d
--- /dev/null
+++ b/src/humanloop/context.py
@@ -0,0 +1,60 @@
+from dataclasses import dataclass
+import threading
+from typing import Callable, Optional
+from opentelemetry import context as context_api
+
+from humanloop.otel.constants import (
+    HUMANLOOP_CONTEXT_EVALUATION,
+    HUMANLOOP_CONTEXT_PROMPT_PATH,
+    HUMANLOOP_CONTEXT_TRACE_ID,
+)
+
+
+ResetToken = object
+
+
+def get_trace_id() -> Optional[str]:
+    key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
+    return context_api.get_value(key=key)
+
+
+def set_trace_id(flow_log_id: str) -> ResetToken:
+    key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
+    return context_api.attach(context_api.set_value(key=key, value=flow_log_id))
+
+
+def reset_trace_id_context(token: ResetToken):
+    context_api.detach(token=token)
+
+
+def set_prompt_path(path: str) -> ResetToken:
+    key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident()))
+    return context_api.attach(context_api.set_value(key=key, value=path))
+
+
+def reset_prompt_path(token: ResetToken):
+    context_api.detach(token=token)
+
+
+def get_prompt_path() -> Optional[str]:
+    key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident()))
+    return context_api.get_value(key)
+
+
+@dataclass
+class EvaluationContext:
+    source_datapoint_id: str
+    run_id: str
+    callback: Callable[[str], None]
+    file_id: str
+    path: str
+
+
+def set_evaluation_context(evaluation_context: EvaluationContext):
+    key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
+    context_api.attach(context_api.set_value(key, evaluation_context))
+
+
+def get_evaluation_context() -> Optional[EvaluationContext]:
+    key = 
hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident())) + return context_api.get_value(key) diff --git a/src/humanloop/context_variables.py b/src/humanloop/context_variables.py deleted file mode 100644 index 9b7b1070..00000000 --- a/src/humanloop/context_variables.py +++ /dev/null @@ -1,135 +0,0 @@ -from contextvars import ContextVar -from dataclasses import dataclass -from typing import Any, Callable -from opentelemetry.trace import Tracer - - -_UnsafeContextRead = RuntimeError("Attempting to read from a Context when variable was not set.") - - -class _UnsafeContextRead(RuntimeError): - message: str - - def __init__(self, context_variable_name: str): - super().__init__( - message=f"Attempting to read from a Context when variable {context_variable_name} was not set." - ) - - -@dataclass -class EvaluationContext: - """Context Log to Humanloop. - - Per datapoint state that is set when an Evaluation is ran. - """ - - """Required for associating a Log with the Evaluation Run.""" - source_datapoint_id: str - - """Overloaded .log method call.""" - upload_callback: Callable[[str], None] - - """ID of the evaluated File.""" - file_id: str - - """Path of the evaluated File.""" - path: str - - """Required for associating a Log with the Evaluation Run.""" - run_id: str - - -_EVALUATION_CONTEXT_VAR_NAME = "__EVALUATION_CONTEXT" -_EVALUATION_CONTEXT_VAR: ContextVar[EvaluationContext] = ContextVar(_EVALUATION_CONTEXT_VAR_NAME) - - -def set_evaluation_context(context: EvaluationContext): - _EVALUATION_CONTEXT_VAR.set(context) - - -def get_evaluation_context() -> EvaluationContext: - try: - return _EVALUATION_CONTEXT_VAR.get() - except LookupError: - raise _UnsafeContextRead(_EVALUATION_CONTEXT_VAR_NAME) - - -def evaluation_context_set() -> bool: - try: - _EVALUATION_CONTEXT_VAR.get() - return True - except LookupError: - return False - - -def log_belongs_to_evaluated_file(log_args: dict[str, Any]) -> bool: - try: - evaluation_context: EvaluationContext = _EVALUATION_CONTEXT_VAR.get() - return evaluation_context.file_id == log_args.get("id") or evaluation_context.path == log_args.get("path") - except LookupError: - # Not in an evaluation context - return False - - -def is_evaluated_file(file_path: str) -> bool: - try: - evaluation_context = _EVALUATION_CONTEXT_VAR.get() - return evaluation_context.path == file_path - except LookupError: - raise _UnsafeContextRead(_EVALUATION_CONTEXT_VAR_NAME) - - -@dataclass -class PromptUtilityContext: - tracer: Tracer - _in_prompt_utility: int - - @property - def in_prompt_utility(self) -> bool: - return self._in_prompt_utility > 0 - - -_PROMPT_UTILITY_CONTEXT_VAR_NAME = "__PROMPT_UTILITY_CONTEXT" -_PROMPT_UTILITY_CONTEXT_VAR: ContextVar[PromptUtilityContext] = ContextVar(_PROMPT_UTILITY_CONTEXT_VAR_NAME) - - -def in_prompt_utility_context() -> bool: - try: - return _PROMPT_UTILITY_CONTEXT_VAR.get().in_prompt_utility - except LookupError: - return False - - -def set_prompt_utility_context(tracer: Tracer): - global _PROMPT_UTILITY_CONTEXT_VAR - try: - prompt_utility_context = _PROMPT_UTILITY_CONTEXT_VAR.get() - # Already set, push another context - prompt_utility_context._in_prompt_utility += 1 - _PROMPT_UTILITY_CONTEXT_VAR.set(prompt_utility_context) - except LookupError: - _PROMPT_UTILITY_CONTEXT_VAR.set( - PromptUtilityContext( - tracer=tracer, - _in_prompt_utility=1, - ) - ) - - -def get_prompt_utility_context() -> PromptUtilityContext: - try: - return _PROMPT_UTILITY_CONTEXT_VAR.get() - except LookupError: - raise 
_UnsafeContextRead(_PROMPT_UTILITY_CONTEXT_VAR_NAME) - - -def unset_prompt_utility_context(): - global _PROMPT_UTILITY_CONTEXT_VAR_TOKEN - try: - prompt_utility_context = _PROMPT_UTILITY_CONTEXT_VAR.get() - if prompt_utility_context._in_prompt_utility >= 1: - prompt_utility_context._in_prompt_utility -= 1 - else: - raise ValueError("No matching unset_prompt_utility_context() call.") - except LookupError: - raise _UnsafeContextRead(_PROMPT_UTILITY_CONTEXT_VAR_NAME) diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py index f21cd985..f455a753 100644 --- a/src/humanloop/eval_utils/run.py +++ b/src/humanloop/eval_utils/run.py @@ -16,17 +16,16 @@ import sys import threading import time -import types import typing from concurrent.futures import ThreadPoolExecutor from datetime import datetime from functools import partial from logging import INFO -from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union +from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse +from humanloop.context import EvaluationContext, set_evaluation_context from humanloop.core.api_error import ApiError -from humanloop.context_variables import EvaluationContext, set_evaluation_context from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File # We use TypedDicts for requests, which is consistent with the rest of the SDK diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py index ef6ac223..f12a0812 100644 --- a/src/humanloop/otel/constants.py +++ b/src/humanloop/otel/constants.py @@ -2,9 +2,13 @@ HUMANLOOP_FILE_KEY = "humanloop.file" # Attribute name prefix on Humanloop spans for log-related attributes HUMANLOOP_LOG_KEY = "humanloop.log" +HUMANLOOP_LOG_ID_KEY = "humanloop.log_id" HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type" HUMANLOOP_PATH_KEY = "humanloop.file.path" # Required for the exporter to know when to mark the Flow Log as complete HUMANLOOP_FLOW_PREREQUISITES_KEY = "humanloop.flow.prerequisites" -HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME = "humanloop_intercepted_hl_call" -HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE = "intercepted_call_response" +HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME = "humanloop_intercepted_hl_call" +HUMANLOOP_INTERCEPTED_PROMPT_CALL_RESPONSE = "intercepted_call_response" +HUMANLOOP_CONTEXT_PROMPT_PATH = "humanloop.context.prompt.path" +HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id" +HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation" diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py index d5cb4231..3983aaa2 100644 --- a/src/humanloop/otel/exporter.py +++ b/src/humanloop/otel/exporter.py @@ -1,32 +1,21 @@ import logging -import threading import typing from queue import Empty as EmptyQueue from queue import Queue from threading import Thread -from typing import Any, Optional, Sequence +from typing import Optional, Sequence from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult -from humanloop.core import ApiError as HumanloopApiError -from humanloop.context_variables import ( - EvaluationContext, - evaluation_context_set, - get_evaluation_context, - set_evaluation_context, -) +from humanloop.context import get_evaluation_context, EvaluationContext from humanloop.otel.constants import ( - HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, - 
HUMANLOOP_FLOW_PREREQUISITES_KEY, HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, ) -from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span -from humanloop.requests.flow_kernel_request import FlowKernelRequestParams -from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams -from humanloop.requests.tool_kernel_request import ToolKernelRequestParams +from humanloop.otel.helpers import read_from_opentelemetry_span, write_to_opentelemetry_span if typing.TYPE_CHECKING: from humanloop.client import Humanloop @@ -101,15 +90,7 @@ def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: return SpanExportResult.FAILURE for span in spans: - if not is_humanloop_span(span): - continue - - self._upload_queue.put( - ( - span, - get_evaluation_context() if evaluation_context_set() else None, - ), - ) + self._upload_queue.put((span, get_evaluation_context())) return SpanExportResult.SUCCESS @@ -128,265 +109,55 @@ def force_flush(self, timeout_millis: int = 10000) -> bool: return True def _do_work(self): - """Upload spans to Humanloop. - - Ran by worker threads. The threads use the self._shutdown flag to wait - for Spans to arrive. Setting a timeout on self._upload_queue.get() risks - shutting down the thread early as no Spans are produced e.g. while waiting - for user input into the instrumented feature or application. - - Each thread will upload a Span to Humanloop, provided the Span has all its - dependencies uploaded. The dependency happens in a Flow Trace context, where - the Trace parent must be uploaded first. The Span Processor will send in Spans - bottoms-up, while the upload of a Trace happens top-down. If a Span did not - have its span uploaded yet, it will be re-queued to be uploaded later. - """ - # Do work while the Exporter was not instructed to # wind down or the queue is not empty while self._upload_queue.qsize() > 0 or not self._shutdown: thread_args: tuple[ReadableSpan, Optional[EvaluationContext]] # type: ignore try: # Don't block or the thread will never be notified of the shutdown - thread_args = self._upload_queue.get( - block=False, - ) # type: ignore + thread_args = self._upload_queue.get(block=False) # type: ignore except EmptyQueue: # Wait for the another span to arrive continue span_to_export, evaluation_context = thread_args - if evaluation_context is not None: - # Context variables are thread scoped - # One existed in the eval_run utility thread - # so it must be copied over to the current - # exporter thread - set_evaluation_context(evaluation_context) - - if span_to_export.parent is None: - # Span cannot be part of a Flow trace - self._export_span_dispatch(span_to_export) - logger.debug( - "[HumanloopSpanExporter] _do_work on Thread %s: Dispatching span %s %s", - threading.get_ident(), - span_to_export.context.span_id, - span_to_export.name, + span_file_type = span_to_export.attributes.get(HUMANLOOP_FILE_TYPE_KEY) + if span_file_type is None: + raise ValueError("Span does not have type set") + + if span_file_type == "flow": + log_args = read_from_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_LOG_KEY, ) - - elif span_to_export.parent.span_id in self._span_to_uploaded_log_id: - # Span is part of a Flow trace and its parent has been uploaded - self._export_span_dispatch(span_to_export) - - else: - # Requeue the Span and upload after its parent - self._upload_queue.put((span_to_export, evaluation_context)) - - # Notify the shared queue that we are done - # with the current head of the task queue - 
self._upload_queue.task_done() - - def _export_span_dispatch(self, span: ReadableSpan) -> None: - """Call the appropriate BaseHumanloop.X.log based on the Span type.""" - file_type = span._attributes.get(HUMANLOOP_FILE_TYPE_KEY) # type: ignore - parent_span_id = span.parent.span_id if span.parent else None - - while parent_span_id and self._span_to_uploaded_log_id.get(parent_span_id) is None: - logger.debug( - "[HumanloopSpanExporter] _export_span_dispatch on Thread %s Span %s %s waiting for parent %s to be uploaded", - threading.get_ident(), - span.context.span_id, - span.name, - parent_span_id, - ) - - logger.debug( - "[HumanloopSpanExporter] Exporting span %s with file type %s", - span, - file_type, - ) - - if file_type == "prompt": - self._export_prompt_span(span=span) - elif file_type == "tool": - self._export_tool_span(span=span) - elif file_type == "flow": - self._export_flow_span(span=span) - else: - raise NotImplementedError(f"Unknown span type: {file_type}") - - def _export_prompt_span(self, span: ReadableSpan) -> None: - file_object: dict[str, Any] = read_from_opentelemetry_span( - span, - key=HUMANLOOP_FILE_KEY, - ) - log_object: dict[str, Any] = read_from_opentelemetry_span( - span, - key=HUMANLOOP_LOG_KEY, - ) - # NOTE: Due to OTEL conventions, attributes with value of None are removed - # on write to Span. If not present, instantiate these as empty - if "inputs" not in log_object: - log_object["inputs"] = {} - if "messages" not in log_object: - log_object["messages"] = [] - if "tools" not in file_object["prompt"]: - file_object["prompt"]["tools"] = [] - - path: str = file_object["path"] - prompt: PromptKernelRequestParams = file_object["prompt"] - - trace_parent_id = self._get_parent_in_trace(span) - - if "attributes" not in prompt or not prompt["attributes"]: - prompt["attributes"] = {} - - try: - log_response = self._client.prompts.log( - path=path, - prompt=prompt, - **log_object, - trace_parent_id=trace_parent_id, - ) - self._span_to_uploaded_log_id[span.context.span_id] = log_response.id - if trace_parent_id is not None: - self._keep_track_of_trace(log_response.id, trace_parent_id) - except HumanloopApiError: - self._span_to_uploaded_log_id[span.context.span_id] = None - self._mark_span_as_uploaded(span_id=span.context.span_id) - - def _export_tool_span(self, span: ReadableSpan) -> None: - file_object: dict[str, Any] = read_from_opentelemetry_span( - span, - key=HUMANLOOP_FILE_KEY, - ) - log_object: dict[str, Any] = read_from_opentelemetry_span( - span, - key=HUMANLOOP_LOG_KEY, - ) - - path: str = file_object["path"] - tool: ToolKernelRequestParams = file_object["tool"] - - # API expects an empty dictionary if user does not supply attributes - # NOTE: see comment in _export_prompt_span about OTEL conventions - if not tool.get("attributes"): - tool["attributes"] = {} - if not tool.get("setup_values"): - tool["setup_values"] = {} - if "parameters" in tool["function"] and "properties" not in tool["function"]["parameters"]: - tool["function"]["parameters"]["properties"] = {} - - trace_parent_id = self._get_parent_in_trace(span) - try: - log_response = self._client.tools.log( - path=path, - tool=tool, - **log_object, - trace_parent_id=trace_parent_id, - ) - self._span_to_uploaded_log_id[span.context.span_id] = log_response.id - if trace_parent_id is not None: - self._keep_track_of_trace(log_response.id, trace_parent_id) - except HumanloopApiError: - self._span_to_uploaded_log_id[span.context.span_id] = None - self._mark_span_as_uploaded(span_id=span.context.span_id) - - def 
_export_flow_span(self, span: ReadableSpan) -> None: - file_object: dict[str, Any] = read_from_opentelemetry_span( - span, - key=HUMANLOOP_FILE_KEY, - ) - log_object: dict[str, Any] = read_from_opentelemetry_span( - span, - key=HUMANLOOP_LOG_KEY, - ) - # Spans that must be uploaded before the Flow Span is completed - # We instantiate the list of prerequisites from the attribute - # passed by the Processor. Each uploaded child in the trace - # will check if it's the last one and mark the Flow Log as complete - try: - prerequisites: list[int] = read_from_opentelemetry_span( # type: ignore - span=span, - key=HUMANLOOP_FLOW_PREREQUISITES_KEY, - ) - self._spans_left_in_trace[span.context.span_id] = set(prerequisites) - except KeyError: - # OTEL will drop falsy attributes, so if a Flow has no prerequisites - # the attribute will not be present - self._spans_left_in_trace[span.context.span_id] = set() - - path: str = file_object["path"] - flow: FlowKernelRequestParams - if not file_object.get("flow"): - flow = {"attributes": {}} - else: - flow = file_object["flow"] - - trace_parent_id = self._get_parent_in_trace(span) - - if "output" not in log_object: - log_object["output"] = None - try: - log_response = self._client.flows.log( - path=path, - flow=flow, - **log_object, - trace_parent_id=trace_parent_id, - ) - if trace_parent_id is not None: - self._keep_track_of_trace( - log_id=log_response.id, - parent_log_id=trace_parent_id, + log_args = { + **log_args, + "log_status": "complete", + } + + if evaluation_context: + log_args = read_from_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_LOG_KEY, ) - # Exporting a flow log creates a new trace - self._traces.append({log_response.id}) - self._span_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id - except HumanloopApiError as e: - logger.error(str(e)) - self._span_to_uploaded_log_id[span.context.span_id] = None - self._mark_span_as_uploaded(span_id=span.context.span_id) - - def _mark_span_as_uploaded(self, span_id: int) -> None: - """Mark a Span as uploaded for Flow trace completion. - - If this Span corresponds to the last child in the Flow trace, - mark the Flow Log as complete. 
- """ - for trace_head_span_id, spans_left in self._spans_left_in_trace.items(): - if span_id in spans_left: - spans_left.remove(span_id) - self._mark_trace_complete_if_needed(trace_head_span_id=trace_head_span_id) - # Found the trace the span belongs to - # break from for loop - break - - def _mark_trace_complete_if_needed(self, trace_head_span_id: int): - spans_to_complete = self._spans_left_in_trace[trace_head_span_id] - if len(spans_to_complete) == 0: - flow_log_id = self._span_to_uploaded_log_id[trace_head_span_id] - if flow_log_id is None: - # Uploading the head of the Flow trace failed - logger.error( - "[HumanloopSpanExporter] Cannot complete Flow log %s, log ID is None", - trace_head_span_id, + span_file_path = read_from_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_PATH_KEY, ) - else: - self._client.flows.update_log(log_id=flow_log_id, trace_status="complete") + if span_file_path == evaluation_context.path: + log_args = { + **log_args, + "source_datapoint_id": evaluation_context.source_datapoint_id, + "run_id": evaluation_context.run_id, + } + write_to_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_LOG_KEY, + value=log_args, + ) + + id = self._client.export(span_to_export.to_json()) + if evaluation_context: + evaluation_context.callback(id) - def _keep_track_of_trace(self, log_id: str, parent_log_id: str): - found = False - for trace in self._traces: - if parent_log_id in trace: - trace.add(log_id) - found = True - if found: - break - - def _get_parent_in_trace(self, span: ReadableSpan) -> Optional[str]: - if span.parent is None: - return None - parent_log_id = self._span_to_uploaded_log_id[span.parent.span_id] - for trace in self._traces: - if parent_log_id in trace: - return parent_log_id - return None + self._upload_queue.task_done() diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py index 094f6e46..eacc07df 100644 --- a/src/humanloop/otel/helpers.py +++ b/src/humanloop/otel/helpers.py @@ -5,7 +5,7 @@ from opentelemetry.trace import SpanKind from opentelemetry.util.types import AttributeValue -from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME +from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME NestedDict = dict[str, Union["NestedDict", AttributeValue]] NestedList = list[Union["NestedList", NestedDict]] @@ -265,7 +265,7 @@ def is_llm_provider_call(span: ReadableSpan) -> bool: def is_intercepted_call(span: ReadableSpan) -> bool: - return span.name == HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME + return span.name == HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME def is_humanloop_span(span: ReadableSpan) -> bool: diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py index a70baf97..2f52c949 100644 --- a/src/humanloop/otel/processor.py +++ b/src/humanloop/otel/processor.py @@ -1,26 +1,15 @@ -from concurrent.futures import ThreadPoolExecutor import logging -from collections import defaultdict -import time -from typing import Any, TypedDict +from typing import TypedDict -from opentelemetry.sdk.trace import ReadableSpan + +from opentelemetry.sdk.trace import ReadableSpan, Span from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter -from pydantic import ValidationError as PydanticValidationError +from humanloop.context import get_prompt_path from humanloop.otel.constants import ( - HUMANLOOP_FILE_KEY, - HUMANLOOP_FILE_TYPE_KEY, - HUMANLOOP_FLOW_PREREQUISITES_KEY, - HUMANLOOP_LOG_KEY, -) -from humanloop.otel.helpers import ( - 
is_humanloop_span, - is_llm_provider_call, - read_from_opentelemetry_span, - write_to_opentelemetry_span, + HUMANLOOP_PATH_KEY, ) -from humanloop.types.prompt_kernel_request import PromptKernelRequest +from humanloop.otel.helpers import is_llm_provider_call logger = logging.getLogger("humanloop.sdk") @@ -49,204 +38,11 @@ class HumanloopSpanProcessor(SimpleSpanProcessor): def __init__(self, exporter: SpanExporter) -> None: super().__init__(exporter) - # span parent to span children map - self._children: dict[int, list[CompletableSpan]] = defaultdict(list) - # List of all span IDs that are contained in a Flow trace - # They are passed to the Exporter as a span attribute - # so the Exporter knows when to complete a trace - self._prerequisites: dict[int, list[int]] = {} - self._executor = ThreadPoolExecutor(max_workers=4) - - def shutdown(self): - self._executor.shutdown() - return super().shutdown() - - def on_start(self, span, parent_context=None): - span_id = span.context.span_id - parent_span_id = span.parent.span_id if span.parent else None - if span.name == "humanloop.flow": - self._prerequisites[span_id] = [] - if parent_span_id and is_humanloop_span(span): - for trace_head, all_trace_nodes in self._prerequisites.items(): - if parent_span_id == trace_head or parent_span_id in all_trace_nodes: - all_trace_nodes.append(span_id) - break - # Handle stream case: when Prompt instrumented function calls a provider with streaming: true - # The instrumentor span will end only when the ChunksResponse is consumed, which can happen - # after the span created by the Prompt utility finishes. To handle this, we register all instrumentor - # spans belonging to a Humanloop span, and their parent will wait for them to complete in onEnd before - # exporting the Humanloop span. 
- if parent_span_id and _is_instrumentor_span(span): - if parent_span_id not in self._children: - self._children[parent_span_id] = [] - self._children[parent_span_id].append( - { - "span": span, - "complete": False, - } - ) - - def on_end(self, span: ReadableSpan) -> None: - if is_humanloop_span(span=span): - # Wait for children to complete asynchronously - self._executor.submit(self._wait_for_children, span=span) - elif span.parent is not None and _is_instrumentor_span(span): - # If this is one of the children spans waited upon, update its completion status - - # Updating the child span status - self._children[span.parent.span_id] = [ - child if child["span"].context.span_id != span.context.span_id else {"span": span, "complete": True} - for child in self._children[span.parent.span_id] - ] - - # Export the instrumentor span - self.span_exporter.export([span]) - else: - # Unknown span, pass it to the Exporter - self.span_exporter.export([span]) - - def _wait_for_children(self, span: ReadableSpan): - """Wait for all children spans to complete before processing the Humanloop span.""" - span_id = span.context.span_id - while not all(child["complete"] for child in self._children[span_id]): - # TODO: This assumes that the children spans will complete - # The LLM provider might fail; address in future - logger.debug( - "[HumanloopSpanProcessor] Span %s %s waiting for children to complete: %s", - span_id, - span.name, - self._children[span_id], - ) - # All instrumentor spans have arrived, we can process the - # Humanloop parent span owning them - if span.name == "humanloop.flow": - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_FLOW_PREREQUISITES_KEY, - value=self._prerequisites[span_id], - ) - del self._prerequisites[span_id] - logger.debug("[HumanloopSpanProcessor] Dispatching span %s %s", span_id, span.name) - _process_span_dispatch(span, [child["span"] for child in self._children[span_id]]) - # Release references - del self._children[span_id] - # Pass Humanloop span to Exporter - logger.debug("[HumanloopSpanProcessor] Sending span %s %s to exporter", span_id, span.name) - self.span_exporter.export([span]) - - -def _is_instrumentor_span(span: ReadableSpan) -> bool: - """Determine if the span contains information of interest for Spans created by Humanloop decorators.""" - # At the moment we only enrich Spans created by the Prompt decorators - # As we add Instrumentors for other libraries, this function must - # be expanded - return is_llm_provider_call(span=span) - - -def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan]): - file_type = span.attributes[HUMANLOOP_FILE_TYPE_KEY] # type: ignore - - # Processing common to all Humanloop File types - if span.start_time: - span._attributes[f"{HUMANLOOP_LOG_KEY}.start_time"] = span.start_time / 1e9 # type: ignore - if span.end_time: - span._attributes[f"{HUMANLOOP_LOG_KEY}.end_time"] = span.end_time / 1e9 # type: ignore - span._attributes[f"{HUMANLOOP_LOG_KEY}.created_at"] = span.end_time / 1e9 # type: ignore - - # Processing specific to each Humanloop File type - if file_type == "prompt": - _process_prompt(prompt_span=span, children_spans=children_spans) - return - elif file_type == "tool": - pass - elif file_type == "flow": - pass - else: - logger.error( - "[HumanloopSpanProcessor] Unknown Humanloop File span %s %s", - span.context.span_id, - span.name, - ) - - -def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan]): - if len(children_spans) == 0: - return - for child_span in 
children_spans: - if is_llm_provider_call(child_span): - _enrich_prompt_kernel(prompt_span, child_span) - _enrich_prompt_log(prompt_span, child_span) - # NOTE: @prompt decorator expects a single LLM provider call - # to happen in the function. If there are more than one, we - # ignore the rest - break - - -def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan): - hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_FILE_KEY) - gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai") - llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm") - - prompt: dict[str, Any] = hl_file.get("prompt", {}) # type: ignore - - # Check if the Prompt Kernel keys were assigned default values - # via the @prompt arguments. Otherwise, use the information - # from the intercepted LLM provider call - prompt["model"] = prompt.get("model") or gen_ai_object.get("request", {}).get("model", None) - if prompt["model"] is None: - raise ValueError("Could not infer required parameter `model`. Please provide it in the @prompt decorator.") - prompt["endpoint"] = prompt.get("endpoint") or llm_object.get("request", {}).get("type") - prompt["provider"] = prompt.get("provider") or gen_ai_object.get("system", None) - if prompt["provider"]: - # Normalize provider name; Interceptors output the names with - # different capitalization e.g. OpenAI instead of openai - prompt["provider"] = prompt["provider"].lower() - prompt["temperature"] = prompt.get("temperature") or gen_ai_object.get("request", {}).get("temperature", None) - prompt["top_p"] = prompt.get("top_p") or gen_ai_object.get("request", {}).get("top_p", None) - prompt["max_tokens"] = prompt.get("max_tokens") or gen_ai_object.get("request", {}).get("max_tokens", None) - prompt["presence_penalty"] = prompt.get("presence_penalty") or llm_object.get("presence_penalty", None) - prompt["frequency_penalty"] = prompt.get("frequency_penalty") or llm_object.get("frequency_penalty", None) - prompt["tools"] = prompt.get("tools", []) - - try: - # Validate the Prompt Kernel - PromptKernelRequest.model_validate(obj=prompt) - except PydanticValidationError as e: - logger.error( - "[HumanloopSpanProcessor] Could not validate Prompt Kernel extracted from span: %s %s. 
Error: %s", - prompt_span.context.span_id, - prompt_span.name, - e, - ) - - # Write the enriched Prompt Kernel back to the span - hl_file["prompt"] = prompt - write_to_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_FILE_KEY, - # hl_file was modified in place via prompt_kernel reference - value=hl_file, - ) - - -def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan): - try: - hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_LOG_KEY) - except KeyError: - hl_log = {} - gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai") - - # TODO: Seed not added by Instrumentors in provider call - - if "output_tokens" not in hl_log: - hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens") - if len(gen_ai_object.get("completion", [])) > 0: - hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason") - hl_log["messages"] = gen_ai_object.get("prompt") - write_to_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_LOG_KEY, - # hl_log was modified in place - value=hl_log, - ) + def on_start(self, span: Span): + if is_llm_provider_call(span): + prompt_path = get_prompt_path() + if prompt_path: + span.set_attribute(HUMANLOOP_PATH_KEY, prompt_path) + else: + raise ValueError("Provider call outside @prompt context manager") diff --git a/src/humanloop/otel/processor/__init__.py b/src/humanloop/otel/processor/__init__.py deleted file mode 100644 index bc4e5940..00000000 --- a/src/humanloop/otel/processor/__init__.py +++ /dev/null @@ -1,237 +0,0 @@ -from dataclasses import dataclass -import logging -from collections import defaultdict -from typing import Optional -import typing - -from opentelemetry.sdk.trace import ReadableSpan -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter - -from humanloop.base_client import BaseHumanloop -from humanloop.otel.constants import ( - HUMANLOOP_FILE_TYPE_KEY, - HUMANLOOP_FLOW_PREREQUISITES_KEY, - HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME, - HUMANLOOP_LOG_KEY, -) -from humanloop.otel.helpers import ( - is_humanloop_span, - is_llm_provider_call, - write_to_opentelemetry_span, -) -from humanloop.otel.processor.prompts import enhance_prompt_span - -if typing.TYPE_CHECKING: - from humanloop.base_client import BaseHumanloop - - -logger = logging.getLogger("humanloop.sdk") - - -# NOTE: Source of bugs, refactor to dataclass for type safety -# Instead of accessing via "key" -@dataclass -class DependantSpan: - span: ReadableSpan - finished: bool - - -class HumanloopSpanProcessor(SimpleSpanProcessor): - """Enrich Humanloop spans with data from their children spans. - - The decorators add Instrumentors to the OpenTelemetry TracerProvider - that log interactions with common LLM libraries. These Instrumentors - produce Spans which contain information that can be used to enrich the - Humanloop File Kernels. - - For example, Instrumentors for LLM provider libraries intercept - hyperparameters used in the API call to the model to build the - Prompt File definition when using the @prompt decorator. - - Spans created that are not created by Humanloop decorators, such as - those created by the Instrumentors mentioned above, will be passed - to the Exporter as they are. 
- """ - - def __init__( - self, - exporter: SpanExporter, - client: "BaseHumanloop", - ) -> None: - super().__init__(exporter) - # span parent to span children map - self._dependencies: dict[int, list[DependantSpan]] = defaultdict(list) - self._waiting: dict[int, ReadableSpan] = {} - # List of all span IDs that are contained in a Flow trace - # They are passed to the Exporter as a span attribute - # so the Exporter knows when to complete a trace - self._spans_to_complete_flow_trace: dict[int, list[int]] = {} - self._client = client - - def shutdown(self): - return super().shutdown() - - def on_start(self, span, parent_context=None): - """Hook executed on Span creation. - - Used for two cases: - 1. Complete the Flow trace after all Logs inside have been uploaded. The Flow trace - spans are created bottoms-up. By the time the Flow span reaches the on_end hook, - all spans inside have been passed to the Exporter. We attach the list of span IDs - to the Flow span as an attribute, so the Exporter knows what spans (Logs) must - be uploaded before the Flow trace is completed - 2. Instrument streaming Prompt decorated functions. The Instrumentor span will end only - when the ChunksResponse is consumed, while the Prompt-decorated span will end when - the function returns. - """ - self._track_flow_traces(span) - self._add_dependency_to_await(span) - - def on_end(self, span: ReadableSpan) -> None: - span_id = span.context.span_id - if is_humanloop_span(span=span): - if not self._must_wait(span): - self._send_to_exporter( - span=span, - dependencies=[dependency.span for dependency in self._dependencies[span.context.span_id]], - ) - else: - # Must wait for dependencies - self._waiting[span_id] = span - return - - if self._is_dependency(span): - self._mark_dependency_arrival(span) - self._send_to_exporter(span, []) - - waiting_span = self._get_waiting_parent(span) - if waiting_span is not None: - self._send_to_exporter( - span=span, - dependencies=[dependency.span for dependency in self._dependencies[span.context.span_id]], - ) - return - - # Be unopinionated and pass all other spans to Exporter - self._send_to_exporter(span=span, dependencies=[]) - - def _must_wait(self, span: ReadableSpan) -> bool: - if span.context.span_id not in self._dependencies: - return False - if all([dependency.finished for dependency in self._dependencies[span.context.span_id]]): - return False - return True - - def _get_waiting_parent(self, span: ReadableSpan) -> Optional[ReadableSpan]: - # We know this span has a parent, need to satisfy the type checker - parent_span_id = span.parent.span_id # type: ignore - if parent_span_id in self._waiting: - if all([dependency.finished for dependency in self._dependencies[parent_span_id]]): - waiting_span = self._waiting[parent_span_id] - del self._dependencies[parent_span_id] - del self._waiting[parent_span_id] - return waiting_span - return None - - def _add_dependency_to_await(self, span: ReadableSpan): - # We know this span has a parent, need to satisfy the type checker - if self._is_dependency(span): - parent_span_id = span.parent.span_id # type: ignore - self._dependencies[parent_span_id].append(DependantSpan(span=span, finished=False)) - - def _track_flow_traces(self, span: ReadableSpan): - span_id = span.context.span_id - - if span.name == "humanloop.flow": - # Head of a trace - self._spans_to_complete_flow_trace[span_id] = [] - - parent_span_id = span.parent.span_id if span.parent else None - if parent_span_id and is_humanloop_span(span): - # Log belongs to a trace, keep track 
of it - for trace_head, all_trace_nodes in self._spans_to_complete_flow_trace.items(): - if parent_span_id == trace_head or parent_span_id in all_trace_nodes: - all_trace_nodes.append(span_id) - break - - def _mark_dependency_arrival(self, span: ReadableSpan): - span_id = span.context.span_id - # We know this span has a parent, need to satisfy type checker - parent_span_id = span.parent.span_id # type: ignore - self._dependencies[parent_span_id] = [ - dependency if dependency.span.context.span_id != span_id else DependantSpan(span=span, finished=True) - for dependency in self._dependencies[parent_span_id] - ] - - def _send_to_exporter( - self, - span: ReadableSpan, - dependencies: list[ReadableSpan], - ): - """ - Write attributes to the Humanloop spans depending on their type - """ - - if is_humanloop_span(span): - # Processing common to all Humanloop File types - self._write_start_end_times(span=span) - - # Processing specific to each Humanloop File type - file_type = span.attributes[HUMANLOOP_FILE_TYPE_KEY] # type: ignore - span_id = span.context.span_id - if file_type == "prompt": - enhance_prompt_span( - client=self._client, - prompt_span=span, - dependencies=dependencies, - ) - elif file_type == "tool": - # No extra processing needed - pass - elif file_type == "flow": - trace = self._spans_to_complete_flow_trace.get(span_id, []) - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_FLOW_PREREQUISITES_KEY, - value=trace, - ) - else: - logger.error( - "[HumanloopSpanProcessor] Unknown Humanloop File span %s %s", - span_id, - span.name, - ) - - self.span_exporter.export([span]) - - # Cleanup - span_id = span.context.span_id - if span_id in self._waiting: - del self._waiting[span_id] - if span_id in self._dependencies: - del self._dependencies[span_id] - if span_id in self._spans_to_complete_flow_trace: - del self._spans_to_complete_flow_trace[span_id] - - @classmethod - def _is_dependency(cls, span: ReadableSpan) -> bool: - """Determine if the span contains information of interest for Spans created by Humanloop decorators.""" - # At the moment we only enrich Spans created by the Prompt decorators - # As we add Instrumentors for other libraries, this function must - # be expanded - return span.parent is not None and ( - is_llm_provider_call(span=span) or span.name == HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME - ) - - @classmethod - def _write_start_end_times(cls, span: ReadableSpan): - if span.start_time: - # NOTE: write_to_otel_span and read_from_otel_span have extra behavior - # OTEL canonical way to write keys is to use the dot notation, as below - # The 2 utilities encapsulate this behavior, allowing the dev to write - # complex objects. 
- # See doc-strings in humanloop.otel.helpers for more information - span._attributes[f"{HUMANLOOP_LOG_KEY}.start_time"] = span.start_time / 1e9 # type: ignore - if span.end_time: - span._attributes[f"{HUMANLOOP_LOG_KEY}.end_time"] = span.end_time / 1e9 # type: ignore - span._attributes[f"{HUMANLOOP_LOG_KEY}.created_at"] = span.end_time / 1e9 # type: ignore diff --git a/src/humanloop/otel/processor/prompts.py b/src/humanloop/otel/processor/prompts.py deleted file mode 100644 index c56882a8..00000000 --- a/src/humanloop/otel/processor/prompts.py +++ /dev/null @@ -1,204 +0,0 @@ -import deepdiff -import logging -from typing import Any -import typing -from opentelemetry.sdk.trace import ReadableSpan -from pydantic import ValidationError as PydanticValidationError - -from humanloop.eval_utils.run import HumanloopUtilityError -from humanloop.otel.constants import ( - HUMANLOOP_FILE_KEY, - HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, - HUMANLOOP_LOG_KEY, - HUMANLOOP_PATH_KEY, -) -from humanloop.otel.helpers import ( - is_intercepted_call, - is_llm_provider_call, - read_from_opentelemetry_span, - write_to_opentelemetry_span, -) -from humanloop.types.prompt_kernel_request import PromptKernelRequest - -if typing.TYPE_CHECKING: - from humanloop.client import BaseHumanloop - -logger = logging.getLogger("humanloop.sdk") - - -def enhance_prompt_span(client: "BaseHumanloop", prompt_span: ReadableSpan, dependencies: list[ReadableSpan]): - """Add information from the LLM provider span to the Prompt span. - - We are passing a list of children spans to the Prompt span, but more than one - is undefined behavior. - """ - if len(dependencies) == 0: - return - for child_span in dependencies: - if is_llm_provider_call(child_span): - _enrich_prompt_kernel(prompt_span, child_span) - _enrich_prompt_log(prompt_span, child_span) - # NOTE: @prompt decorator expects a single LLM provider call - # to happen in the function. If there are more than one, we - # ignore the rest - break - elif is_intercepted_call(child_span): - _enrich_prompt_kernel_from_intercepted_call(client, prompt_span, child_span) - _enrich_prompt_log_from_intercepted_call(prompt_span, child_span) - break - else: - raise NotImplementedError( - f"Span {child_span.context.span_id} is not a recognized LLM provider call or intercepted call." - ) - - -def _enrich_prompt_kernel_from_intercepted_call( - client: "BaseHumanloop", - prompt_span: ReadableSpan, - intercepted_call_span: ReadableSpan, -): - intercepted_response: dict[str, Any] = read_from_opentelemetry_span( - intercepted_call_span, - key=HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, - ) - hl_file = read_from_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_FILE_KEY, - ) - hl_path = read_from_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_PATH_KEY, - ) - prompt: dict[str, Any] = hl_file.get("prompt", {}) # type: ignore - - for key, value_from_utility in {**prompt, "path": hl_path}.items(): - if key not in intercepted_response["prompt"]: - continue - - if "values_changed" in deepdiff.DeepDiff( - value_from_utility, - intercepted_response["prompt"][key], - ignore_order=True, - ): - # TODO: We want this behavior? 
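The deepdiff guard above treats any "values_changed" entry as a conflict between the kernel declared in the decorator and the arguments intercepted from prompt.call(). A toy illustration of that contract (not the SDK's code):

    import deepdiff

    declared = {"model": "gpt-4o", "temperature": 0.5}     # from the decorator
    intercepted = {"model": "gpt-4o", "temperature": 0.9}  # from prompt.call()

    diff = deepdiff.DeepDiff(declared, intercepted, ignore_order=True)
    # A non-empty "values_changed" entry means the two definitions diverged.
    assert "values_changed" in diff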
- # save=False in overloaded prompt_call will still create the File - # despite not saving the log, so we rollback the File - file_id = intercepted_response["prompt"]["id"] - client.prompts.delete(id=file_id) - raise HumanloopUtilityError( - f"The prompt.call() {key} argument does not match the one provided in the decorator" - ) - - for key in intercepted_response["prompt"].keys(): - if key not in prompt: - prompt[key] = intercepted_response["prompt"][key] - - try: - # Validate the Prompt Kernel - PromptKernelRequest.model_validate(obj=prompt) # type: ignore - except PydanticValidationError as e: - logger.error( - "[HumanloopSpanProcessor] Could not validate Prompt Kernel extracted from span: %s %s. Error: %s", - prompt_span.context.span_id, - prompt_span.name, - e, - ) - - hl_file["prompt"] = prompt - write_to_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_FILE_KEY, - value=hl_file, - ) - - -def _enrich_prompt_log_from_intercepted_call(prompt_span: ReadableSpan, intercepted_call_span: ReadableSpan): - hl_log: dict[str, Any] = read_from_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_LOG_KEY, - ) - response: dict[str, Any] = read_from_opentelemetry_span( - intercepted_call_span, - key=HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, - ) - hl_log["output_tokens"] = response["logs"][0]["output_tokens"] - hl_log["finish_reason"] = response["logs"][0]["finish_reason"] - hl_log["output_message"] = response["logs"][0]["output_message"] - - write_to_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_LOG_KEY, - # hl_log was modified in place - value=hl_log, - ) - - -def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan): - hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_FILE_KEY) - gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai") - llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm") - - prompt: dict[str, Any] = hl_file.get("prompt", {}) # type: ignore - - # Check if the Prompt Kernel keys were assigned default values - # via the @prompt arguments. Otherwise, use the information - # from the intercepted LLM provider call - prompt["model"] = prompt.get("model") or gen_ai_object.get("request", {}).get("model", None) - if prompt["model"] is None: - raise ValueError("Could not infer required parameter `model`. Please provide it in the @prompt decorator.") - prompt["endpoint"] = prompt.get("endpoint") or llm_object.get("request", {}).get("type") - prompt["provider"] = prompt.get("provider") or gen_ai_object.get("system", None) - if prompt["provider"]: - # Normalize provider name; Interceptors output the names with - # different capitalization e.g. 
OpenAI instead of openai - prompt["provider"] = prompt["provider"].lower() - prompt["temperature"] = prompt.get("temperature") or gen_ai_object.get("request", {}).get("temperature", None) - prompt["top_p"] = prompt.get("top_p") or gen_ai_object.get("request", {}).get("top_p", None) - prompt["max_tokens"] = prompt.get("max_tokens") or gen_ai_object.get("request", {}).get("max_tokens", None) - prompt["presence_penalty"] = prompt.get("presence_penalty") or llm_object.get("presence_penalty", None) - prompt["frequency_penalty"] = prompt.get("frequency_penalty") or llm_object.get("frequency_penalty", None) - prompt["tools"] = prompt.get("tools", []) - - try: - # Validate the Prompt Kernel - PromptKernelRequest.model_validate(obj=prompt) # type: ignore - except PydanticValidationError as e: - logger.error( - "[HumanloopSpanProcessor] Could not validate Prompt Kernel extracted from span: %s %s. Error: %s", - prompt_span.context.span_id, - prompt_span.name, - e, - ) - - # Write the enriched Prompt Kernel back to the span - hl_file["prompt"] = prompt - write_to_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_FILE_KEY, - # hl_file was modified in place via prompt_kernel reference - value=hl_file, - ) - - -def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan): - try: - hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_LOG_KEY) - except KeyError: - hl_log = {} - gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai") - - # TODO: Seed not added by Instrumentors in provider call - - if "output_tokens" not in hl_log: - hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens") - if len(gen_ai_object.get("completion", [])) > 0: - hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason") - hl_log["messages"] = gen_ai_object.get("prompt") - # TODO: Need to fill in output_message - - write_to_opentelemetry_span( - span=prompt_span, - key=HUMANLOOP_LOG_KEY, - # hl_log was modified in place - value=hl_log, - ) diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py index c80d96e8..96dc49ee 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -1,19 +1,12 @@ import logging -import threading import types from typing import TypeVar, Union import typing -from humanloop.context_variables import ( - get_evaluation_context, - get_prompt_utility_context, - in_prompt_utility_context, - log_belongs_to_evaluated_file, -) +from humanloop.context import get_trace_id from humanloop.eval_utils.run import HumanloopUtilityError from humanloop.flows.client import FlowsClient -from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME -from humanloop.otel.helpers import write_to_opentelemetry_span + from humanloop.prompts.client import PromptsClient from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse from humanloop.types.create_flow_log_response import CreateFlowLogResponse @@ -38,37 +31,32 @@ def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE: client._log = client.log def _overload_log( - self, **kwargs + # It's safe to only consider kwargs since the original + # log method bans positional arguments + self, + **kwargs, ) -> Union[ CreatePromptLogResponse, CreateToolLogResponse, CreateFlowLogResponse, CreateEvaluatorLogResponse, ]: - if log_belongs_to_evaluated_file(log_args=kwargs): - evaluation_context = get_evaluation_context() - for attribute in 
["source_datapoint_id", "run_id"]: - if attribute not in kwargs or kwargs[attribute] is None: - kwargs[attribute] = getattr(evaluation_context, attribute) - - # Call the original .log method - logger.debug( - "Logging %s inside _overloaded_log on Thread %s", - kwargs, - evaluation_context, - threading.get_ident(), - ) - + trace_id = get_trace_id() + if trace_id is not None: + if "trace_parent_id" in kwargs: + # TODO: revisit + logger.warning("Overriding trace_parent_id argument") + kwargs = { + **kwargs, + "trace_parent_id": trace_id, + } try: - response = self._log(**kwargs) + response = self._call(**kwargs) + response = typing.cast(PromptCallResponse, response) except Exception as e: - logger.error(f"Failed to log: {e}") - raise e - - # Notify the run_eval utility about one Log being created - if log_belongs_to_evaluated_file(log_args=kwargs): - evaluation_context = get_evaluation_context() - evaluation_context.upload_callback(log_id=response.id) + # TODO handle + # TODO: Bug found in backend: not specifying a model 400s but creates a File + raise HumanloopUtilityError(message=str(e)) from e return response @@ -83,38 +71,27 @@ def overload_prompt_call(client: PromptsClient) -> PromptsClient: client._call = client.call def _overload_call(self, **kwargs) -> PromptCallResponse: - if in_prompt_utility_context(): - try: - response = self._call(**kwargs) - response = typing.cast(PromptCallResponse, response) - except Exception as e: - # TODO: Bug found in backend: not specifying a model 400s but creates a File - raise HumanloopUtilityError(message=str(e)) from e - - response_copy = response.dict() - prompt_utility_context = get_prompt_utility_context() - for idx, _ in enumerate(response_copy.get("logs", [])): - del response_copy["logs"][idx]["created_at"] - for idx, _ in enumerate(response_copy["prompt"].get("environments", [])): - del response_copy["prompt"]["environments"][idx]["created_at"] - del response_copy["prompt"]["last_used_at"] - del response_copy["prompt"]["updated_at"] - del response_copy["prompt"]["created_at"] - del response_copy["start_time"] - del response_copy["end_time"] - - with prompt_utility_context.tracer.start_as_current_span(HUMANLOOP_INTERCEPTED_HL_CALL_SPAN_NAME) as span: - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_INTERCEPTED_HL_CALL_RESPONSE, - value=response_copy, - ) - return response - else: - return self._call(**kwargs) + # None if not logging inside a decorator + trace_id = get_trace_id() + if trace_id is not None: + if "trace_parent_id" in kwargs: + # TODO: revisit + logger.warning("Overriding trace_parent_id argument") + kwargs = { + **kwargs, + "trace_parent_id": trace_id, + } + + try: + response = self._call(**kwargs) + response = typing.cast(PromptCallResponse, response) + except Exception as e: + # TODO handle + # TODO: Bug found in backend: not specifying a model 400s but creates a File + raise HumanloopUtilityError(message=str(e)) from e + + return response # Replace the original log method with the overloaded one client.call = types.MethodType(_overload_call, client) - # Return the client with the overloaded log method - logger.debug("Overloaded the .log method of %s", client) return client diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py index 9fedfe6c..d0c21bdb 100644 --- a/src/humanloop/utilities/flow.py +++ b/src/humanloop/utilities/flow.py @@ -1,12 +1,12 @@ import logging from functools import wraps -from typing import Any, Callable, Mapping, Optional, Sequence +from typing import Any, Callable, 
Mapping, Sequence -from opentelemetry.sdk.trace import Span -from opentelemetry.trace import Tracer -from typing_extensions import Unpack +from opentelemetry.trace import Span, Tracer +from opentelemetry import context as context_api -from humanloop.eval_utils.run import HumanloopUtilityError +from humanloop.base_client import BaseHumanloop +from humanloop.context import get_trace_id, set_trace_id from humanloop.utilities.helpers import bind_args from humanloop.eval_utils.types import File from humanloop.otel.constants import ( @@ -17,32 +17,41 @@ ) from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span from humanloop.requests import FlowKernelRequestParams as FlowDict -from humanloop.requests.flow_kernel_request import FlowKernelRequestParams logger = logging.getLogger("humanloop.sdk") def flow( + client: "BaseHumanloop", opentelemetry_tracer: Tracer, - path: Optional[str] = None, - **flow_kernel: Unpack[FlowKernelRequestParams], # type: ignore + path: str, + attributes: dict[str, Any] | None = None, ): - flow_kernel["attributes"] = {k: v for k, v in flow_kernel.get("attributes", {}).items() if v is not None} + flow_kernel = {"attributes": attributes or {}} def decorator(func: Callable): + decorator_path = path or func.__name__ + file_type = "flow" + @wraps(func) def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: span: Span with opentelemetry_tracer.start_as_current_span("humanloop.flow") as span: # type: ignore - span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) - span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "flow") + trace_id = get_trace_id() + args_to_func = bind_args(func, args, kwargs) - if flow_kernel: - write_to_opentelemetry_span( - span=span, - key=f"{HUMANLOOP_FILE_KEY}.flow", - value=flow_kernel, # type: ignore - ) + # Create the trace ahead so we have a parent ID to reference + log_inputs = { + "inputs": {k: v for k, v in args_to_func.items() if k != "messages"}, + "messages": args_to_func.get("messages"), + "trace_parent_id": trace_id, + } + log_id = client.flows.log(path=path, flow=flow_kernel, **log_inputs).id + token = set_trace_id(log_id) + + span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type) + write_to_opentelemetry_span(HUMANLOOP_FILE_KEY, flow_kernel) # Call the decorated function try: @@ -52,8 +61,6 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: output=output, ) error = None - except HumanloopUtilityError as e: - raise e except Exception as e: logger.error(f"Error calling {func.__name__}: {e}") output = None @@ -65,8 +72,6 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: flow_log = { # TODO: Revisit and agree on - "inputs": {k: v for k, v in bind_args(func, args, kwargs).items() if k != "messages"}, - "messages": bind_args(func, args, kwargs).get("messages"), "output": output_stringified, "error": error, } @@ -76,15 +81,16 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: write_to_opentelemetry_span( span=span, key=HUMANLOOP_LOG_KEY, - value=flow_log, # type: ignore + value={**log_inputs}, # type: ignore ) + context_api.detach(token=token) # Return the output of the decorated function return output wrapper.file = File( # type: ignore - path=path if path else func.__name__, - type="flow", + path=decorator_path, + type=file_type, version=FlowDict(**flow_kernel), # type: ignore callable=wrapper, ) diff --git a/src/humanloop/utilities/prompt.py 
b/src/humanloop/utilities/prompt.py index ab189a6e..517b718c 100644 --- a/src/humanloop/utilities/prompt.py +++ b/src/humanloop/utilities/prompt.py @@ -1,97 +1,17 @@ import logging -from functools import wraps -from typing import Any, Callable, Mapping, Optional, Sequence -from opentelemetry.sdk.trace import Span -from opentelemetry.trace import Tracer -from typing_extensions import Unpack -from humanloop.context_variables import set_prompt_utility_context, unset_prompt_utility_context -from humanloop.eval_utils.run import HumanloopUtilityError -from humanloop.utilities.helpers import bind_args -from humanloop.utilities.types import DecoratorPromptKernelRequestParams -from humanloop.eval_utils import File -from humanloop.otel.constants import ( - HUMANLOOP_FILE_KEY, - HUMANLOOP_FILE_TYPE_KEY, - HUMANLOOP_LOG_KEY, - HUMANLOOP_PATH_KEY, -) -from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span +from contextlib import contextmanager -logger = logging.getLogger("humanloop.sdk") - - -def prompt( - opentelemetry_tracer: Tracer, - path: Optional[str] = None, - # TODO: Template can be a list of objects? - **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams], # type: ignore -): - def decorator(func: Callable): - @wraps(func) - def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: - set_prompt_utility_context(tracer=opentelemetry_tracer) - span: Span - with opentelemetry_tracer.start_as_current_span("humanloop.prompt") as span: # type: ignore - span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) - span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt") - - if prompt_kernel: - write_to_opentelemetry_span( - span=span, - key=f"{HUMANLOOP_FILE_KEY}.prompt", - value={ - **prompt_kernel, # type: ignore - "attributes": prompt_kernel.get("attributes") or None, # type: ignore - }, # type: ignore - ) - - # Call the decorated function - try: - output = func(*args, **kwargs) - output_stringified = jsonify_if_not_string( - func=func, - output=output, - ) - error = None - except HumanloopUtilityError as e: - raise e - except Exception as e: - logger.error(f"Error calling {func.__name__}: {e}") - output = None - # TODO: output ought to be None on errors, check - # all decorators - output_stringified = jsonify_if_not_string( - func=func, - output=output, - ) - error = str(e) +from humanloop.context import reset_prompt_path, set_prompt_path - prompt_log = { - "inputs": {k: v for k, v in bind_args(func, args, kwargs).items() if k != "messages"}, - "messages": bind_args(func, args, kwargs).get("messages"), - "output": output_stringified, - "error": error, - } - - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_LOG_KEY, - value=prompt_log, # type: ignore - ) - - # Return the output of the decorated function - unset_prompt_utility_context() - return output - - wrapper.file = File( # type: ignore - path=path if path else func.__name__, - type="prompt", - version={**prompt_kernel}, # type: ignore - callable=wrapper, - ) +logger = logging.getLogger("humanloop.sdk") - return wrapper - return decorator +@contextmanager +def prompt(path: str): + try: + token = set_prompt_path(path=path) + yield + finally: + reset_prompt_path(token=token) diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/utilities/tool.py index f0de6a1d..67d33fc9 100644 --- a/src/humanloop/utilities/tool.py +++ b/src/humanloop/utilities/tool.py @@ -10,8 +10,8 @@ from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union from 
opentelemetry.trace import Tracer -from typing_extensions import Unpack +from humanloop.context import get_trace_id from humanloop.eval_utils.run import HumanloopUtilityError from humanloop.utilities.helpers import bind_args from humanloop.eval_utils import File @@ -33,32 +33,35 @@ def tool( opentelemetry_tracer: Tracer, - path: Optional[str] = None, - **tool_kernel: Unpack[ToolKernelRequestParams], # type: ignore + path: str, + attributes: dict[str, Any] | None = None, + setup_values: dict[str, Any] | None = None, ): def decorator(func: Callable): - enhanced_tool_kernel = _build_tool_kernel( + decorator_path = path or func.__name__ + file_type = "tool" + + tool_kernel = _build_tool_kernel( func=func, - attributes=tool_kernel.get("attributes"), - setup_values=tool_kernel.get("setup_values"), + attributes=attributes, + setup_values=setup_values, strict=True, ) - # Mypy complains about adding attribute on function, but it's nice UX - func.json_schema = enhanced_tool_kernel["function"] # type: ignore + # Mypy complains about adding attribute on function, but it's nice DX + func.json_schema = tool_kernel["function"] # type: ignore @wraps(func) def wrapper(*args, **kwargs): with opentelemetry_tracer.start_as_current_span("humanloop.tool") as span: # Write the Tool Kernel to the Span on HL_FILE_OT_KEY - span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) - span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "tool") - if enhanced_tool_kernel: - write_to_opentelemetry_span( - span=span, - key=f"{HUMANLOOP_FILE_KEY}.tool", - value=enhanced_tool_kernel, - ) + span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type) + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_FILE_KEY, + value=tool_kernel, + ) # Call the decorated function try: @@ -68,8 +71,6 @@ def wrapper(*args, **kwargs): output=output, ) error = None - except HumanloopUtilityError as e: - raise e except Exception as e: logger.error(f"Error calling {func.__name__}: {e}") output = None @@ -84,6 +85,7 @@ def wrapper(*args, **kwargs): "inputs": bind_args(func, args, kwargs), "output": output_stringified, "error": error, + "trace_parent_id": get_trace_id(), } # Write the Tool Log to the Span on HL_LOG_OT_KEY @@ -97,9 +99,9 @@ def wrapper(*args, **kwargs): return output wrapper.file = File( # type: ignore - path=path if path else func.__name__, - type="tool", - version=enhanced_tool_kernel, + path=decorator_path, + type=file_type, + version=tool_kernel, callable=wrapper, ) From ee10e5a11dbf8b8dd61af4fb7ee12e1832b30c5a Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Mon, 24 Feb 2025 15:17:57 +0000 Subject: [PATCH 03/14] Use /otel endpoint to process endpoints --- src/humanloop/client.py | 35 ++++++++--------- src/humanloop/context.py | 24 ++++++++---- src/humanloop/eval_utils/__init__.py | 4 +- src/humanloop/eval_utils/run.py | 18 ++++----- src/humanloop/otel/constants.py | 5 +-- src/humanloop/otel/exporter.py | 16 ++++++-- src/humanloop/otel/helpers.py | 13 +------ src/humanloop/otel/processor.py | 26 ++++++++++--- src/humanloop/overload.py | 7 ++-- src/humanloop/utilities/flow.py | 57 +++++++++++++++++++--------- src/humanloop/utilities/prompt.py | 14 +++++-- src/humanloop/utilities/tool.py | 19 +++++----- 12 files changed, 142 insertions(+), 96 deletions(-) diff --git a/src/humanloop/client.py b/src/humanloop/client.py index a77876c5..1b83bfc3 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -8,15 +8,16 @@ from opentelemetry.sdk.trace import 
TracerProvider from opentelemetry.trace import Tracer +from humanloop.context import PromptContext, reset_prompt_context, set_prompt_context from humanloop.core.client_wrapper import SyncClientWrapper -from humanloop.eval_utils.run import prompt_call_evaluation_aware -from humanloop.eval_utils import log_with_evaluation_context, run_eval +from humanloop.eval_utils import run_eval from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop +from humanloop.overload import overload_call, overload_log from humanloop.utilities.flow import flow as flow_decorator_factory -from humanloop.utilities.prompt import prompt as prompt_decorator_factory +from humanloop.utilities.prompt import prompt from humanloop.utilities.tool import tool as tool_decorator_factory from humanloop.environment import HumanloopEnvironment from humanloop.evaluations.client import EvaluationsClient @@ -25,8 +26,6 @@ from humanloop.otel.processor import HumanloopSpanProcessor from humanloop.prompt_utils import populate_template from humanloop.prompts.client import PromptsClient -from humanloop.requests.flow_kernel_request import FlowKernelRequestParams -from humanloop.requests.tool_kernel_request import ToolKernelRequestParams class ExtendedEvalsClient(EvaluationsClient): @@ -119,9 +118,10 @@ def __init__( self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper) # Overload the .log method of the clients to be aware of Evaluation Context - self.prompts = log_with_evaluation_context(client=self.prompts) - self.prompts = prompt_call_evaluation_aware(client=self.prompts) - self.flows = log_with_evaluation_context(client=self.flows) + self.prompts = overload_log(client=self.prompts) + self.prompts = overload_call(client=self.prompts) + self.flows = overload_log(client=self.flows) + self.tools = overload_log(client=self.tools) if opentelemetry_tracer_provider is not None: self._tracer_provider = opentelemetry_tracer_provider @@ -135,10 +135,7 @@ def __init__( ) instrument_provider(provider=self._tracer_provider) self._tracer_provider.add_span_processor( - HumanloopSpanProcessor( - client=self, - exporter=HumanloopSpanExporter(client=self), - ), + HumanloopSpanProcessor(exporter=HumanloopSpanExporter(client=self)), ) if opentelemetry_tracer is None: @@ -151,6 +148,7 @@ def prompt( self, *, path: str, + template: Optional[str] = None, ): """Decorator for declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code. @@ -225,14 +223,16 @@ def call_llm(messages): :param prompt_kernel: Attributes that define the Prompt. See `class:DecoratorPromptKernelRequestParams` """ - return prompt_decorator_factory(path=path) + + with prompt(path=path, template=template): + yield def tool( self, *, path: str, - attributes: dict[str, Any] | None = None, - setup_values: dict[str, Any] | None = None, + attributes: Optional[dict[str, Any]] = None, + setup_values: Optional[dict[str, Any]] = None, ): """Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code. @@ -312,8 +312,8 @@ def calculator(a: int, b: Optional[int]) -> int: def flow( self, *, - path: str = None, - attributes: dict[str, Any] | None = None, + path: str, + attributes: Optional[dict[str, Any]] = None, ): """Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code. @@ -366,6 +366,7 @@ def entrypoint(): :param flow_kernel: Attributes that define the Flow. 
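The overload_log and overload_call helpers wired into the constructor above swap a bound method on the generated client while keeping a handle on the original. A self-contained sketch of the pattern (toy client and a placeholder trace id, not the SDK's exact code):

    import types

    class ToyClient:
        def log(self, **kwargs) -> dict:
            return kwargs

    def overload_log(client: ToyClient) -> ToyClient:
        client._log = client.log  # keep the original bound method

        def _overload_log(self, **kwargs) -> dict:
            # Simplified version of injecting the ambient trace id;
            # the SDK reads it from get_trace_id().
            kwargs.setdefault("trace_parent_id", "fl_123")
            return self._log(**kwargs)

        client.log = types.MethodType(_overload_log, client)
        return client

    client = overload_log(ToyClient())
    print(client.log(output="hi"))  # {'output': 'hi', 'trace_parent_id': 'fl_123'}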
See `class:ToolKernelRequestParams` """ return flow_decorator_factory( + client=self, opentelemetry_tracer=self._opentelemetry_tracer, path=path, attributes=attributes, diff --git a/src/humanloop/context.py b/src/humanloop/context.py index f71f694d..daa7bd1f 100644 --- a/src/humanloop/context.py +++ b/src/humanloop/context.py @@ -15,7 +15,6 @@ def get_trace_id() -> Optional[str]: key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident())) - context_api.get_value() return context_api.get_value(key=key) @@ -28,16 +27,27 @@ def reset_trace_id_context(token: ResetToken): context_api.detach(token=token) -def set_prompt_path(path: str) -> ResetToken: +@dataclass +class PromptContext: + path: str + template: Optional[str] + + +def set_prompt_context(prompt_context: PromptContext) -> ResetToken: key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident())) - return context_api.set_value(key=key, value=path) + return context_api.attach( + context_api.set_value( + key=key, + value=prompt_context, + ) + ) -def reset_prompt_path(token: ResetToken): +def reset_prompt_context(token: ResetToken): context_api.detach(token=token) -def get_prompt_path() -> Optional[str]: +def get_prompt_context() -> Optional[PromptContext]: key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident())) return context_api.get_value(key) @@ -51,9 +61,9 @@ class EvaluationContext: path: str -def set_evaluation_context(evaluation_context: EvaluationContext): +def set_evaluation_context(evaluation_context: EvaluationContext) -> ResetToken: key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident())) - context_api.set_value(key, evaluation_context) + return context_api.attach(context_api.set_value(key, evaluation_context)) def get_evaluation_context() -> Optional[EvaluationContext]: diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py index ac5a5eba..61f97716 100644 --- a/src/humanloop/eval_utils/__init__.py +++ b/src/humanloop/eval_utils/__init__.py @@ -1,4 +1,4 @@ -from .run import log_with_evaluation_context, run_eval +from .run import run_eval from .types import File -__all__ = ["run_eval", "log_with_evaluation_context", "File"] +__all__ = ["run_eval", "File"] diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py index f455a753..a36bfb50 100644 --- a/src/humanloop/eval_utils/run.py +++ b/src/humanloop/eval_utils/run.py @@ -166,7 +166,7 @@ def upload_callback(log_id: str): set_evaluation_context( EvaluationContext( source_datapoint_id=dp.id, - upload_callback=upload_callback, + callback=upload_callback, file_id=hl_file.id, run_id=run.id, path=hl_file.path, @@ -741,32 +741,32 @@ def _run_local_evaluators( else: log_dict = log datapoint_dict = datapoint.dict() if datapoint else None - for local_evaluator, eval_function in local_evaluators: + for local_evaluator in local_evaluators: start_time = datetime.now() try: - if local_evaluator.spec.arguments_type == "target_required": - judgement = eval_function( + if local_evaluator.hl_evaluator.spec.arguments_type == "target_required": + judgement = local_evaluator.function( log_dict, datapoint_dict, ) else: - judgement = eval_function(log_dict) + judgement = local_evaluator.function(log_dict) _ = client.evaluators.log( - version_id=local_evaluator.version_id, + version_id=local_evaluator.hl_evaluator.version_id, parent_id=log_id, judgment=judgement, - id=local_evaluator.id, + id=local_evaluator.hl_evaluator.id, start_time=start_time, end_time=datetime.now(), ) except Exception as e: _ = 
client.evaluators.log( parent_id=log_id, - id=local_evaluator.id, + id=local_evaluator.hl_evaluator.id, error=str(e), start_time=start_time, end_time=datetime.now(), ) - logger.warning(f"\nEvaluator {local_evaluator.path} failed with error {str(e)}") + logger.warning(f"\nEvaluator {local_evaluator.hl_evaluator.path} failed with error {str(e)}") progress_bar.increment() diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py index f12a0812..fa9bd4b2 100644 --- a/src/humanloop/otel/constants.py +++ b/src/humanloop/otel/constants.py @@ -5,10 +5,7 @@ HUMANLOOP_LOG_ID_KEY = "humanloop.log_id" HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type" HUMANLOOP_PATH_KEY = "humanloop.file.path" -# Required for the exporter to know when to mark the Flow Log as complete -HUMANLOOP_FLOW_PREREQUISITES_KEY = "humanloop.flow.prerequisites" -HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME = "humanloop_intercepted_hl_call" -HUMANLOOP_INTERCEPTED_PROMPT_CALL_RESPONSE = "intercepted_call_response" +# Opentelemetry context HUMANLOOP_CONTEXT_PROMPT_PATH = "humanloop.context.prompt.path" HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id" HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation" diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py index 3983aaa2..b3823d3c 100644 --- a/src/humanloop/otel/exporter.py +++ b/src/humanloop/otel/exporter.py @@ -9,6 +9,7 @@ from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult +import requests from humanloop.context import get_evaluation_context, EvaluationContext from humanloop.otel.constants import ( HUMANLOOP_FILE_TYPE_KEY, @@ -156,8 +157,17 @@ def _do_work(self): value=log_args, ) - id = self._client.export(span_to_export.to_json()) - if evaluation_context: - evaluation_context.callback(id) + response = requests.post( + f"{self._client._client_wrapper.get_base_url()}/import/otel", + headers=self._client._client_wrapper.get_headers(), + data=span_to_export.to_json().encode("ascii"), + ) + if response.status_code != 200: + # TODO: handle + pass + else: + if evaluation_context and span_file_path == evaluation_context.path: + log_id = response.json()["log_id"] + evaluation_context.callback(log_id) self._upload_queue.task_done() diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py index eacc07df..37ca8cea 100644 --- a/src/humanloop/otel/helpers.py +++ b/src/humanloop/otel/helpers.py @@ -5,7 +5,6 @@ from opentelemetry.trace import SpanKind from opentelemetry.util.types import AttributeValue -from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME NestedDict = dict[str, Union["NestedDict", AttributeValue]] NestedList = list[Union["NestedList", NestedDict]] @@ -178,12 +177,6 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic # Remove the key prefix and the first dot to_process.append((span_key, span_value)) - if not to_process: - if key == "": - # Empty span attributes - return result - raise KeyError(f"Key {key} not found in span attributes") - for span_key, span_value in to_process: # type: ignore parts = span_key.split(".") len_parts = len(parts) @@ -264,10 +257,6 @@ def is_llm_provider_call(span: ReadableSpan) -> bool: ) -def is_intercepted_call(span: ReadableSpan) -> bool: - return span.name == HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME - - def is_humanloop_span(span: ReadableSpan) -> bool: """Check if the Span was created by the Humanloop SDK.""" return 
span.name.startswith("humanloop.") @@ -285,7 +274,7 @@ def module_is_installed(module_name: str) -> bool: return True -def jsonify_if_not_string(func: Callable, output: Any) -> str: +def process_output(func: Callable, output: Any) -> str: if not isinstance(output, str): try: output = json.dumps(output) diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py index 2f52c949..23e185ea 100644 --- a/src/humanloop/otel/processor.py +++ b/src/humanloop/otel/processor.py @@ -5,8 +5,11 @@ from opentelemetry.sdk.trace import ReadableSpan, Span from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter -from humanloop.context import get_prompt_path +from humanloop.context import get_prompt_context, get_trace_id from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY, ) from humanloop.otel.helpers import is_llm_provider_call @@ -39,10 +42,21 @@ class HumanloopSpanProcessor(SimpleSpanProcessor): def __init__(self, exporter: SpanExporter) -> None: super().__init__(exporter) - def on_start(self, span: Span): + def on_start(self, span: Span, parent_context): if is_llm_provider_call(span): - prompt_path = get_prompt_path() - if prompt_path: - span.set_attribute(HUMANLOOP_PATH_KEY, prompt_path) + context = get_prompt_context() + prompt_path, prompt_template = context.path, context.template + if context: + span.set_attribute(HUMANLOOP_PATH_KEY, context.path) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt") + if prompt_template: + span.set_attribute( + f"{HUMANLOOP_FILE_KEY}.template", + prompt_template, + ) else: - raise ValueError("Provider call outside @prompt context manager") + raise ValueError(f"Provider call outside @prompt context manager: {prompt_path}") + trace_id = get_trace_id() + if trace_id: + span.set_attribute(f"{HUMANLOOP_LOG_KEY}.trace_parent_id", trace_id) + print(span) diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py index 96dc49ee..aaf6f02f 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -17,7 +17,7 @@ logger = logging.getLogger("humanloop.sdk") -CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient) +CLIENT_TYPE = TypeVar("CLIENT_TYPE") def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE: @@ -51,8 +51,7 @@ def _overload_log( "trace_parent_id": trace_id, } try: - response = self._call(**kwargs) - response = typing.cast(PromptCallResponse, response) + response = self._log(**kwargs) except Exception as e: # TODO handle # TODO: Bug found in backend: not specifying a model 400s but creates a File @@ -67,7 +66,7 @@ def _overload_log( return client -def overload_prompt_call(client: PromptsClient) -> PromptsClient: +def overload_call(client: PromptsClient) -> PromptsClient: client._call = client.call def _overload_call(self, **kwargs) -> PromptCallResponse: diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py index d0c21bdb..0d0df6a2 100644 --- a/src/humanloop/utilities/flow.py +++ b/src/humanloop/utilities/flow.py @@ -1,21 +1,21 @@ import logging from functools import wraps -from typing import Any, Callable, Mapping, Sequence +from typing import Any, Callable, Mapping, Optional, Sequence from opentelemetry.trace import Span, Tracer from opentelemetry import context as context_api +import requests from humanloop.base_client import BaseHumanloop from humanloop.context import get_trace_id, set_trace_id from humanloop.utilities.helpers import bind_args from humanloop.eval_utils.types import File from 
humanloop.otel.constants import ( - HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY, ) -from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span +from humanloop.otel.helpers import process_output, write_to_opentelemetry_span from humanloop.requests import FlowKernelRequestParams as FlowDict logger = logging.getLogger("humanloop.sdk") @@ -25,7 +25,7 @@ def flow( client: "BaseHumanloop", opentelemetry_tracer: Tracer, path: str, - attributes: dict[str, Any] | None = None, + attributes: Optional[dict[str, Any]] = None, ): flow_kernel = {"attributes": attributes or {}} @@ -46,34 +46,55 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: "messages": args_to_func.get("messages"), "trace_parent_id": trace_id, } - log_id = client.flows.log(path=path, flow=flow_kernel, **log_inputs).id - token = set_trace_id(log_id) + init_log = requests.post( + f"{client._client_wrapper.get_base_url()}/flows/log", + headers=client._client_wrapper.get_headers(), + json={ + "path": path, + "flow": flow_kernel, + "log_status": "incomplete", + **log_inputs, + }, + ).json() + # log = client.flows.log( + # path=path, + # **log_inputs, + # log_status="incomplete", + # ) + token = set_trace_id(init_log["id"]) span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path) span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type) - write_to_opentelemetry_span(HUMANLOOP_FILE_KEY, flow_kernel) # Call the decorated function try: output = func(*args, **kwargs) - output_stringified = jsonify_if_not_string( - func=func, - output=output, - ) + if ( + isinstance(output, dict) + and len(output.keys()) == 2 + and "role" in output + and "content" in output + ): + output_message = output + output = None + else: + output = process_output(func=func, output=output) + output_message = None error = None except Exception as e: logger.error(f"Error calling {func.__name__}: {e}") output = None - output_stringified = jsonify_if_not_string( - func=func, - output=None, - ) + output_message = None error = str(e) flow_log = { - # TODO: Revisit and agree on - "output": output_stringified, + "inputs": {k: v for k, v in args_to_func.items() if k != "messages"}, + "messages": args_to_func.get("messages"), + "log_status": "complete", + "output": output, "error": error, + "output_message": output_message, + "id": init_log["id"], } # Write the Flow Log to the Span on HL_LOG_OT_KEY @@ -81,7 +102,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: write_to_opentelemetry_span( span=span, key=HUMANLOOP_LOG_KEY, - value={**log_inputs}, # type: ignore + value=flow_log, # type: ignore ) context_api.detach(token=token) diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py index 517b718c..216c04ac 100644 --- a/src/humanloop/utilities/prompt.py +++ b/src/humanloop/utilities/prompt.py @@ -2,16 +2,22 @@ from contextlib import contextmanager +from typing import Optional -from humanloop.context import reset_prompt_path, set_prompt_path +from humanloop.context import PromptContext, reset_prompt_context, set_prompt_context logger = logging.getLogger("humanloop.sdk") @contextmanager -def prompt(path: str): +def prompt(path: str, template: Optional[str]): try: - token = set_prompt_path(path=path) + token = set_prompt_context( + PromptContext( + path=path, + template=template, + ) + ) yield finally: - reset_prompt_path(token=token) + reset_prompt_context(token=token) diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/utilities/tool.py 
index 67d33fc9..9f41bbf8 100644 --- a/src/humanloop/utilities/tool.py +++ b/src/humanloop/utilities/tool.py @@ -12,7 +12,6 @@ from opentelemetry.trace import Tracer from humanloop.context import get_trace_id -from humanloop.eval_utils.run import HumanloopUtilityError from humanloop.utilities.helpers import bind_args from humanloop.eval_utils import File from humanloop.otel.constants import ( @@ -21,7 +20,7 @@ HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY, ) -from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span +from humanloop.otel.helpers import process_output, write_to_opentelemetry_span from humanloop.requests.tool_function import ToolFunctionParams from humanloop.requests.tool_kernel_request import ToolKernelRequestParams @@ -34,11 +33,11 @@ def tool( opentelemetry_tracer: Tracer, path: str, - attributes: dict[str, Any] | None = None, - setup_values: dict[str, Any] | None = None, + attributes: Optional[dict[str, Any]] = None, + setup_values: Optional[dict[str, Any]] = None, ): def decorator(func: Callable): - decorator_path = path or func.__name__ + path_in_decorator = path or func.__name__ file_type = "tool" tool_kernel = _build_tool_kernel( @@ -55,18 +54,18 @@ def decorator(func: Callable): def wrapper(*args, **kwargs): with opentelemetry_tracer.start_as_current_span("humanloop.tool") as span: # Write the Tool Kernel to the Span on HL_FILE_OT_KEY - span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path) - span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type) write_to_opentelemetry_span( span=span, key=HUMANLOOP_FILE_KEY, value=tool_kernel, ) + span.set_attribute(HUMANLOOP_PATH_KEY, path_in_decorator) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type) # Call the decorated function try: output = func(*args, **kwargs) - output_stringified = jsonify_if_not_string( + output_stringified = process_output( func=func, output=output, ) @@ -74,7 +73,7 @@ def wrapper(*args, **kwargs): except Exception as e: logger.error(f"Error calling {func.__name__}: {e}") output = None - output_stringified = jsonify_if_not_string( + output_stringified = process_output( func=func, output=output, ) @@ -99,7 +98,7 @@ def wrapper(*args, **kwargs): return output wrapper.file = File( # type: ignore - path=decorator_path, + path=path_in_decorator, type=file_type, version=tool_kernel, callable=wrapper, From bf6196fca7ec1b8d6044686ff31a37631ba78c74 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Mon, 24 Feb 2025 15:49:06 +0000 Subject: [PATCH 04/14] changed name of prompt context --- src/humanloop/context.py | 6 +++--- src/humanloop/otel/constants.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/humanloop/context.py b/src/humanloop/context.py index daa7bd1f..68894156 100644 --- a/src/humanloop/context.py +++ b/src/humanloop/context.py @@ -5,7 +5,7 @@ from humanloop.otel.constants import ( HUMANLOOP_CONTEXT_EVALUATION, - HUMANLOOP_CONTEXT_PROMPT_PATH, + HUMANLOOP_CONTEXT_PROMPT, HUMANLOOP_CONTEXT_TRACE_ID, ) @@ -34,7 +34,7 @@ class PromptContext: def set_prompt_context(prompt_context: PromptContext) -> ResetToken: - key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident())) + key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident())) return context_api.attach( context_api.set_value( key=key, @@ -48,7 +48,7 @@ def reset_prompt_context(token: ResetToken): def get_prompt_context() -> Optional[PromptContext]: - key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident())) + key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident())) return 
context_api.get_value(key) diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py index fa9bd4b2..67d8b325 100644 --- a/src/humanloop/otel/constants.py +++ b/src/humanloop/otel/constants.py @@ -6,6 +6,6 @@ HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type" HUMANLOOP_PATH_KEY = "humanloop.file.path" # Opentelemetry context -HUMANLOOP_CONTEXT_PROMPT_PATH = "humanloop.context.prompt.path" +HUMANLOOP_CONTEXT_PROMPT = "humanloop.context.prompt" HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id" HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation" From bc5fc8a4f169e2c347516011450510810a7e5475 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Wed, 26 Feb 2025 14:36:41 +0000 Subject: [PATCH 05/14] refactoring --- src/humanloop/client.py | 9 ++-- src/humanloop/eval_utils/run.py | 45 ++++++++++---------- src/humanloop/otel/exporter.py | 69 +++++++------------------------ src/humanloop/otel/processor.py | 31 ++++---------- src/humanloop/overload.py | 1 - src/humanloop/utilities/flow.py | 51 +++++++++++++---------- src/humanloop/utilities/prompt.py | 31 ++++++++------ src/humanloop/utilities/tool.py | 41 ++++++++++-------- tests/utilities/test_flow.py | 8 ++-- tests/utilities/test_prompt.py | 4 +- tests/utilities/test_tool.py | 40 +++++++++--------- 11 files changed, 146 insertions(+), 184 deletions(-) diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 1b83bfc3..e4b15954 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -8,7 +8,6 @@ from opentelemetry.sdk.trace import TracerProvider from opentelemetry.trace import Tracer -from humanloop.context import PromptContext, reset_prompt_context, set_prompt_context from humanloop.core.client_wrapper import SyncClientWrapper from humanloop.eval_utils import run_eval @@ -17,8 +16,8 @@ from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop from humanloop.overload import overload_call, overload_log from humanloop.utilities.flow import flow as flow_decorator_factory -from humanloop.utilities.prompt import prompt -from humanloop.utilities.tool import tool as tool_decorator_factory +from humanloop.utilities.prompt import prompt_decorator_factory +from humanloop.utilities.tool import tool_decorator_factory as tool_decorator_factory from humanloop.environment import HumanloopEnvironment from humanloop.evaluations.client import EvaluationsClient from humanloop.otel import instrument_provider @@ -223,9 +222,7 @@ def call_llm(messages): :param prompt_kernel: Attributes that define the Prompt. See `class:DecoratorPromptKernelRequestParams` """ - - with prompt(path=path, template=template): - yield + return prompt_decorator_factory(path=path, template=template) def tool( self, diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py index a36bfb50..c55579a2 100644 --- a/src/humanloop/eval_utils/run.py +++ b/src/humanloop/eval_utils/run.py @@ -104,6 +104,10 @@ def run_eval( :param workers: the number of threads to process datapoints using your function concurrently. :return: per Evaluator checks. 
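The context helpers changed earlier in this patch key every value by hash((name, threading.get_ident())), so parallel eval workers each get an isolated slot in the OpenTelemetry context. A runnable sketch of the scheme (the constant value is copied from otel/constants.py):

    import threading
    from opentelemetry import context as context_api

    HUMANLOOP_CONTEXT_PROMPT = "humanloop.context.prompt"

    def _key(name: str) -> int:
        # Same value for the same thread, distinct across threads.
        return hash((name, threading.get_ident()))

    token = context_api.attach(
        context_api.set_value(_key(HUMANLOOP_CONTEXT_PROMPT), "my-prompt-context")
    )
    print(context_api.get_value(_key(HUMANLOOP_CONTEXT_PROMPT)))  # my-prompt-context
    context_api.detach(token)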
""" + if workers > 32: + logger.warning("Too many workers requested, capping the number to 32.") + workers = min(workers, 32) + evaluators_worker_pool = ThreadPoolExecutor(max_workers=workers) file_ = _file_or_file_inside_hl_utility(file) @@ -183,7 +187,7 @@ def upload_callback(log_id: str): start_time = datetime.now() try: output = _call_function(function_, hl_file.type, dp) - if not _callable_is_hl_utility(file): + if not _callable_is_decorated(file): # function_ is a plain callable so we need to create a Log log_func( inputs=dp.inputs, @@ -277,7 +281,7 @@ class _LocalEvaluator: function: Callable -def _callable_is_hl_utility(file: File) -> bool: +def _callable_is_decorated(file: File) -> bool: """Check if a File is a decorated function.""" return hasattr(file["callable"], "file") @@ -348,34 +352,29 @@ def _get_checks( def _file_or_file_inside_hl_utility(file: File) -> File: - if _callable_is_hl_utility(file): + if _callable_is_decorated(file): # When the decorator inside `file` is a decorated function, # we need to validate that the other parameters of `file` # match the attributes of the decorator + decorated_fn_name = file["callable"].__name__ inner_file: File = file["callable"].file - if "path" in file and inner_file["path"] != file["path"]: - raise ValueError( - "`path` attribute specified in the `file` does not match the File path of the decorated function." - ) - if "version" in file and inner_file["version"] != file["version"]: - raise ValueError( - "`version` attribute in the `file` does not match the File version of the decorated function." - ) - if "type" in file and inner_file["type"] != file["type"]: - raise ValueError( - "`type` attribute of `file` argument does not match the File type of the decorated function." - ) - if "id" in file: - raise ValueError("Do not specify an `id` attribute in `file` argument when using a decorated function.") - # file on decorated function holds at least - # or more information than the `file` argument + for argument in ["version", "path", "type", "id"]: + if argument in file: + logger.warning( + f"Argument `file.{argument}` will be ignored: " + f"callable `{decorated_fn_name}` is managed by " + "the @{inner_file['type']} decorator." + ) + + # Use the file manifest in the decorated function file_ = copy.deepcopy(inner_file) + else: + # Simple function + # Raise error if one of path or id not provided file_ = file - - # Raise error if one of path or id not provided - if not file_.get("path") and not file_.get("id"): - raise ValueError("You must provide a path or id in your `file`.") + if not file_.get("path") and not file_.get("id"): + raise ValueError("You must provide a path or id in your `file`.") return file_ diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py index b3823d3c..20c277f0 100644 --- a/src/humanloop/otel/exporter.py +++ b/src/humanloop/otel/exporter.py @@ -26,29 +26,6 @@ class HumanloopSpanExporter(SpanExporter): - """Upload Spans created by SDK decorators to Humanloop. - - Spans not created by Humanloop SDK decorators will be dropped. - - Each Humanloop Span contains information about the File to log against and - the Log to create. We are using the .log actions that pass the kernel in the - request. This allows us to create new Versions if the decorated function - is changed. - - The exporter uploads Spans top-to-bottom, where a Span is uploaded only after - its parent Span has been uploaded. 
This is necessary for Flow Traces, where - the parent Span is a Flow Log and the children are the Logs in the Trace. - - The exporter keeps an upload queue and only uploads a Span if its direct parent has - been uploaded. - """ - - # NOTE: LLM Instrumentors will only intercept calls to the provider made via the - # official libraries e.g. import openai from openai. This is 100% the reason why - # prompt call is not intercepted by the Instrumentor. The way to fix this is likely - # overriding the hl_client.prompt.call utility. @James I'll do this since it will - # involve looking at the EvaluationContext deep magic. - DEFAULT_NUMBER_THREADS = 4 def __init__( @@ -62,8 +39,6 @@ def __init__( """ super().__init__() self._client = client - # Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace - self._span_to_uploaded_log_id: dict[int, Optional[str]] = {} # Work queue for the threads uploading the spans self._upload_queue: Queue = Queue() # Worker threads to export the spans @@ -81,9 +56,6 @@ def __init__( for thread in self._threads: thread.start() logger.debug("Exporter Thread %s started", thread.ident) - # Flow Log Span ID mapping to children Spans that must be uploaded first - self._spans_left_in_trace: dict[int, set[int]] = {} - self._traces: list[set[str]] = [] def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: if self._shutdown: @@ -122,40 +94,29 @@ def _do_work(self): continue span_to_export, evaluation_context = thread_args - span_file_type = span_to_export.attributes.get(HUMANLOOP_FILE_TYPE_KEY) - if span_file_type is None: + file_type = span_to_export.attributes.get(HUMANLOOP_FILE_TYPE_KEY) + file_path = span_to_export.attributes.get(HUMANLOOP_PATH_KEY) + if file_type is None: raise ValueError("Span does not have type set") - if span_file_type == "flow": - log_args = read_from_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_LOG_KEY, - ) - log_args = { - **log_args, - "log_status": "complete", - } + log_args = read_from_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_LOG_KEY, + ) if evaluation_context: - log_args = read_from_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_LOG_KEY, - ) - span_file_path = read_from_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_PATH_KEY, - ) - if span_file_path == evaluation_context.path: + if file_path == evaluation_context.path: log_args = { **log_args, "source_datapoint_id": evaluation_context.source_datapoint_id, "run_id": evaluation_context.run_id, } - write_to_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_LOG_KEY, - value=log_args, - ) + + write_to_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_LOG_KEY, + value=log_args, + ) response = requests.post( f"{self._client._client_wrapper.get_base_url()}/import/otel", @@ -166,7 +127,7 @@ def _do_work(self): # TODO: handle pass else: - if evaluation_context and span_file_path == evaluation_context.path: + if evaluation_context and file_path == evaluation_context.path: log_id = response.json()["log_id"] evaluation_context.callback(log_id) diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py index 23e185ea..572a3612 100644 --- a/src/humanloop/otel/processor.py +++ b/src/humanloop/otel/processor.py @@ -23,39 +23,24 @@ class CompletableSpan(TypedDict): class HumanloopSpanProcessor(SimpleSpanProcessor): - """Enrich Humanloop spans with data from their children spans. 
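The reworked _do_work above now ships each span to the OTel import endpoint and feeds the returned log id back into the evaluation callback. The request shape, as a standalone sketch (base URL and headers are stand-ins, and error handling is still an open TODO in the diff):

    from typing import Optional

    import requests
    from opentelemetry.sdk.trace import ReadableSpan

    def export_span(span: ReadableSpan, base_url: str, headers: dict[str, str]) -> Optional[str]:
        response = requests.post(
            f"{base_url}/import/otel",
            headers=headers,
            data=span.to_json().encode("ascii"),
        )
        if response.status_code != 200:
            return None
        # The backend answers with the id of the Log it created from the span.
        return response.json()["log_id"]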
- - The decorators add Instrumentors to the OpenTelemetry TracerProvider - that log interactions with common LLM libraries. These Instrumentors - produce Spans which contain information that can be used to enrich the - Humanloop File Kernels. - - For example, Instrumentors for LLM provider libraries intercept - hyperparameters used in the API call to the model to build the - Prompt File definition when using the @prompt decorator. - - Spans created that are not created by Humanloop decorators, such as - those created by the Instrumentors mentioned above, will be passed - to the Exporter as they are. - """ - def __init__(self, exporter: SpanExporter) -> None: super().__init__(exporter) def on_start(self, span: Span, parent_context): if is_llm_provider_call(span): - context = get_prompt_context() - prompt_path, prompt_template = context.path, context.template - if context: - span.set_attribute(HUMANLOOP_PATH_KEY, context.path) + prompt_context = get_prompt_context() + if prompt_context: + path, template = prompt_context.path, prompt_context.template + span.set_attribute(HUMANLOOP_PATH_KEY, path) span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt") - if prompt_template: + if template: span.set_attribute( f"{HUMANLOOP_FILE_KEY}.template", - prompt_template, + template, ) else: - raise ValueError(f"Provider call outside @prompt context manager: {prompt_path}") + # TODO: handle + raise ValueError("Provider call outside @prompt context manager") trace_id = get_trace_id() if trace_id: span.set_attribute(f"{HUMANLOOP_LOG_KEY}.trace_parent_id", trace_id) diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py index aaf6f02f..1cec39b9 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -5,7 +5,6 @@ from humanloop.context import get_trace_id from humanloop.eval_utils.run import HumanloopUtilityError -from humanloop.flows.client import FlowsClient from humanloop.prompts.client import PromptsClient from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py index 0d0df6a2..fff55067 100644 --- a/src/humanloop/utilities/flow.py +++ b/src/humanloop/utilities/flow.py @@ -1,6 +1,7 @@ import logging from functools import wraps -from typing import Any, Callable, Mapping, Optional, Sequence +from typing import Any, Callable, Mapping, Optional, Sequence, TypeVar +from typing_extensions import ParamSpec from opentelemetry.trace import Span, Tracer from opentelemetry import context as context_api @@ -8,6 +9,7 @@ from humanloop.base_client import BaseHumanloop from humanloop.context import get_trace_id, set_trace_id +from humanloop.types.chat_message import ChatMessage from humanloop.utilities.helpers import bind_args from humanloop.eval_utils.types import File from humanloop.otel.constants import ( @@ -21,6 +23,10 @@ logger = logging.getLogger("humanloop.sdk") +P = ParamSpec("P") +R = TypeVar("R") + + def flow( client: "BaseHumanloop", opentelemetry_tracer: Tracer, @@ -29,19 +35,19 @@ def flow( ): flow_kernel = {"attributes": attributes or {}} - def decorator(func: Callable): + def decorator(func: Callable[P, R]) -> Callable[P, R]: decorator_path = path or func.__name__ file_type = "flow" @wraps(func) - def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: span: Span with opentelemetry_tracer.start_as_current_span("humanloop.flow") as span: # type: ignore trace_id = get_trace_id() args_to_func = 
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index aaf6f02f..1cec39b9 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -5,7 +5,6 @@

 from humanloop.context import get_trace_id
 from humanloop.eval_utils.run import HumanloopUtilityError
-from humanloop.flows.client import FlowsClient
 from humanloop.prompts.client import PromptsClient
 from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py
index 0d0df6a2..fff55067 100644
--- a/src/humanloop/utilities/flow.py
+++ b/src/humanloop/utilities/flow.py
@@ -1,6 +1,7 @@
 import logging
 from functools import wraps
-from typing import Any, Callable, Mapping, Optional, Sequence
+from typing import Any, Callable, Mapping, Optional, Sequence, TypeVar
+from typing_extensions import ParamSpec

 from opentelemetry.trace import Span, Tracer
 from opentelemetry import context as context_api
@@ -8,6 +9,7 @@

 from humanloop.base_client import BaseHumanloop
 from humanloop.context import get_trace_id, set_trace_id
+from humanloop.types.chat_message import ChatMessage
 from humanloop.utilities.helpers import bind_args
 from humanloop.eval_utils.types import File
 from humanloop.otel.constants import (
@@ -21,6 +23,10 @@
 logger = logging.getLogger("humanloop.sdk")


+P = ParamSpec("P")
+R = TypeVar("R")
+
+
 def flow(
     client: "BaseHumanloop",
     opentelemetry_tracer: Tracer,
@@ -29,19 +35,19 @@
 ):
     flow_kernel = {"attributes": attributes or {}}

-    def decorator(func: Callable):
+    def decorator(func: Callable[P, R]) -> Callable[P, R]:
         decorator_path = path or func.__name__
         file_type = "flow"

         @wraps(func)
-        def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
+        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
             span: Span
             with opentelemetry_tracer.start_as_current_span("humanloop.flow") as span:  # type: ignore
                 trace_id = get_trace_id()
                 args_to_func = bind_args(func, args, kwargs)

                 # Create the trace ahead so we have a parent ID to reference
-                log_inputs = {
+                init_log_inputs = {
                     "inputs": {k: v for k, v in args_to_func.items() if k != "messages"},
                     "messages": args_to_func.get("messages"),
                     "trace_parent_id": trace_id,
@@ -53,7 +59,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
                         "path": path,
                         "flow": flow_kernel,
                         "log_status": "incomplete",
-                        **log_inputs,
+                        **init_log_inputs,
                     },
                 ).json()
                 # log = client.flows.log(
@@ -66,34 +72,38 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
                 span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path)
                 span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)

-                # Call the decorated function
+                func_output: Optional[R]
+                log_output: Optional[str]
+                log_error: Optional[str]
+                log_output_message: Optional[ChatMessage]
                 try:
-                    output = func(*args, **kwargs)
+                    func_output = func(*args, **kwargs)
                     if (
-                        isinstance(output, dict)
-                        and len(output.keys()) == 2
-                        and "role" in output
-                        and "content" in output
+                        isinstance(func_output, dict)
+                        and len(func_output.keys()) == 2
+                        and "role" in func_output
+                        and "content" in func_output
                     ):
-                        output_message = output
-                        output = None
+                        log_output_message = ChatMessage(**func_output)
+                        log_output = None
                     else:
-                        output = process_output(func=func, output=output)
-                        output_message = None
-                    error = None
+                        log_output = process_output(func=func, output=func_output)
+                        log_output_message = None
+                    log_error = None
                 except Exception as e:
                     logger.error(f"Error calling {func.__name__}: {e}")
-                    output = None
-                    output_message = None
-                    error = str(e)
+                    func_output = None
+                    log_output = None
+                    log_output_message = None
+                    log_error = str(e)

                 flow_log = {
                     "inputs": {k: v for k, v in args_to_func.items() if k != "messages"},
                     "messages": args_to_func.get("messages"),
                     "log_status": "complete",
-                    "output": output,
-                    "error": error,
-                    "output_message": output_message,
+                    "output": log_output,
+                    "error": log_error,
+                    "output_message": log_output_message,
                     "id": init_log["id"],
                 }
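The `ParamSpec`/`TypeVar` pair introduced above lets the wrapper keep the decorated function's parameter and return types for type checkers. A minimal, self-contained sketch of the same pattern (the `traced` name is illustrative, not part of this patch):

    from functools import wraps
    from typing import Callable, Optional, TypeVar

    from typing_extensions import ParamSpec

    P = ParamSpec("P")
    R = TypeVar("R")


    def traced(func: Callable[P, R]) -> Callable[P, Optional[R]]:
        @wraps(func)
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
            try:
                return func(*args, **kwargs)
            except Exception:
                # Mirrors the decorators above: record the error, return None
                return None

        return wrapper

Note the honest return type: because the wrappers swallow exceptions and return None, the wrapper is `Callable[P, Optional[R]]` rather than `Callable[P, R]`.
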
diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py
index 216c04ac..77515ea3 100644
--- a/src/humanloop/utilities/prompt.py
+++ b/src/humanloop/utilities/prompt.py
@@ -1,23 +1,29 @@
+from functools import wraps
 import logging
-from contextlib import contextmanager
-from typing import Optional
+from typing import Callable, Optional

 from humanloop.context import PromptContext, reset_prompt_context, set_prompt_context

 logger = logging.getLogger("humanloop.sdk")


-@contextmanager
-def prompt(path: str, template: Optional[str]):
-    try:
-        token = set_prompt_context(
-            PromptContext(
-                path=path,
-                template=template,
+def prompt_decorator_factory(path: str, template: Optional[str]):
+    def decorator(func: Callable):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            token = set_prompt_context(
+                PromptContext(
+                    path=path,
+                    template=template,
+                )
             )
-        )
-        yield
-    finally:
-        reset_prompt_context(token=token)
+            try:
+                return func(*args, **kwargs)
+            finally:
+                reset_prompt_context(token=token)
+
+        return wrapper
+
+    return decorator
diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/utilities/tool.py
index 9f41bbf8..d6f65474 100644
--- a/src/humanloop/utilities/tool.py
+++ b/src/humanloop/utilities/tool.py
@@ -7,7 +7,8 @@
 from dataclasses import dataclass
 from functools import wraps
 from inspect import Parameter
-from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
+from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypeVar, TypedDict, Union
+from typing_extensions import ParamSpec

 from opentelemetry.trace import Tracer

@@ -30,14 +31,17 @@
 logger = logging.getLogger("humanloop.sdk")


-def tool(
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def tool_decorator_factory(
     opentelemetry_tracer: Tracer,
     path: str,
     attributes: Optional[dict[str, Any]] = None,
     setup_values: Optional[dict[str, Any]] = None,
 ):
-    def decorator(func: Callable):
-        path_in_decorator = path or func.__name__
+    def decorator(func: Callable[P, R]) -> Callable[P, R]:
         file_type = "tool"

         tool_kernel = _build_tool_kernel(
@@ -51,7 +55,7 @@ def decorator(func: Callable):
         func.json_schema = tool_kernel["function"]  # type: ignore

         @wraps(func)
-        def wrapper(*args, **kwargs):
+        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
             with opentelemetry_tracer.start_as_current_span("humanloop.tool") as span:
                 # Write the Tool Kernel to the Span on HL_FILE_OT_KEY
                 write_to_opentelemetry_span(
@@ -59,31 +63,34 @@ def wrapper(*args, **kwargs):
                     key=HUMANLOOP_FILE_KEY,
                     value=tool_kernel,
                 )
-                span.set_attribute(HUMANLOOP_PATH_KEY, path_in_decorator)
+                span.set_attribute(HUMANLOOP_PATH_KEY, path)
                 span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)

-                # Call the decorated function
+                func_output: Optional[R]
+                log_output: str
+                log_error: Optional[str]
+                log_inputs: dict[str, Any] = bind_args(func, args, kwargs)
                 try:
-                    output = func(*args, **kwargs)
-                    output_stringified = process_output(
+                    func_output = func(*args, **kwargs)
+                    log_output = process_output(
                         func=func,
-                        output=output,
+                        output=func_output,
                     )
-                    error = None
+                    log_error = None
                 except Exception as e:
                     logger.error(f"Error calling {func.__name__}: {e}")
-                    output = None
-                    output_stringified = process_output(
+                    func_output = None
+                    log_output = process_output(
                         func=func,
-                        output=output,
+                        output=func_output,
                     )
-                    error = str(e)
+                    log_error = str(e)

                 # Populate known Tool Log attributes
                 tool_log = {
-                    "inputs": bind_args(func, args, kwargs),
-                    "output": output_stringified,
-                    "error": error,
+                    "inputs": log_inputs,
+                    "output": log_output,
+                    "error": log_error,
                     "trace_parent_id": get_trace_id(),
                 }

@@ -98,7 +105,7 @@ def wrapper(*args, **kwargs):
-            return output
+            return func_output

         wrapper.file = File(  # type: ignore
-            path=path_in_decorator,
+            path=path,
             type=file_type,
             version=tool_kernel,
             callable=wrapper,
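A hedged usage sketch of the two renamed factories, using the signatures introduced in this patch; the `path` values and the tracer setup are illustrative, and the prompt-decorated body stands in for a real provider call:

    from opentelemetry import trace

    from humanloop.utilities.prompt import prompt_decorator_factory
    from humanloop.utilities.tool import tool_decorator_factory

    tracer = trace.get_tracer("humanloop.sdk.example")


    @tool_decorator_factory(opentelemetry_tracer=tracer, path="demo/adder")
    def add(a: float, b: float) -> float:
        """Add two numbers."""
        return a + b


    @prompt_decorator_factory(path="demo/assistant", template="Answer: {question}")
    def ask(question: str) -> str:
        # A real version would make an instrumented provider call here
        return f"Answer: {question}"


    add(1.0, 2.0)  # emits a "humanloop.tool" span carrying the Tool kernel and Log
    ask("2 + 2?")  # sets the PromptContext around the function body

The tests below exercise the same factories; the updated call sites follow.
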
diff --git a/tests/utilities/test_flow.py b/tests/utilities/test_flow.py
index 5e89c168..0d92ec1f 100644
--- a/tests/utilities/test_flow.py
+++ b/tests/utilities/test_flow.py
@@ -12,8 +12,8 @@
 from opentelemetry.sdk.trace import ReadableSpan

 from humanloop.utilities.flow import flow
-from humanloop.utilities.prompt import prompt
-from humanloop.utilities.tool import tool
+from humanloop.utilities.prompt import prompt_decorator_factory
+from humanloop.utilities.tool import tool_decorator_factory
 from humanloop.otel.constants import HUMANLOOP_FILE_KEY
 from humanloop.otel.exporter import HumanloopSpanExporter
 from humanloop.otel.helpers import read_from_opentelemetry_span
@@ -25,7 +25,7 @@ def _test_scenario(
     opentelemetry_tracer: Tracer,
 ):
-    @tool(opentelemetry_tracer=opentelemetry_tracer)
+    @tool_decorator_factory(opentelemetry_tracer=opentelemetry_tracer)
     def _random_string() -> str:
         """Return a random string."""
         return "".join(
@@ -35,7 +35,7 @@ def _random_string() -> str:
             )
         )

-    @prompt(  # type: ignore
+    @prompt_decorator_factory(  # type: ignore
         opentelemetry_tracer=opentelemetry_tracer,
         path=None,
         template="You are an assistant on the following topics: {topics}.",
diff --git a/tests/utilities/test_prompt.py b/tests/utilities/test_prompt.py
index 2429f5c2..8880784b 100644
--- a/tests/utilities/test_prompt.py
+++ b/tests/utilities/test_prompt.py
@@ -14,7 +14,7 @@ from groq import NotFoundError as GroqNotFoundError

 from humanloop.client import Humanloop
 from humanloop.eval_utils.run import HumanloopUtilityError
-from humanloop.utilities.prompt import prompt
+from humanloop.utilities.prompt import prompt_decorator_factory
 from humanloop.otel.constants import HUMANLOOP_FILE_KEY
 from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
 from humanloop.types.model_providers import ModelProviders
@@ -49,7 +49,7 @@ def _test_scenario(opentelemetry_tracer: Tracer, **kwargs):
     call this function to setup the decorated function that is tested.
     """

-    @prompt(opentelemetry_tracer=opentelemetry_tracer, **kwargs)
+    @prompt_decorator_factory(opentelemetry_tracer=opentelemetry_tracer, **kwargs)
     def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
         load_dotenv()
         if provider == "openai":
diff --git a/tests/utilities/test_tool.py b/tests/utilities/test_tool.py
index 983c93f6..e6046037 100644
--- a/tests/utilities/test_tool.py
+++ b/tests/utilities/test_tool.py
@@ -3,7 +3,7 @@
 from typing import Any, Optional, TypedDict, Union

 import pytest
-from humanloop.utilities.tool import tool
+from humanloop.utilities.tool import tool_decorator_factory
 from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY
 from humanloop.otel.helpers import read_from_opentelemetry_span
 from jsonschema.protocols import Validator
@@ -17,7 +17,7 @@ def test_calculator_decorator(
     # GIVEN a test OpenTelemetry configuration
     tracer, exporter = opentelemetry_test_configuration

-    @tool(opentelemetry_tracer=tracer)
+    @tool_decorator_factory(opentelemetry_tracer=tracer)
     def calculator(operation: str, num1: float, num2: float) -> float:
         """Do arithmetic operations on two numbers."""
         if operation == "add":
@@ -55,7 +55,7 @@ def calculator(operation: str, num1: float, num2: float) -> float:
 def test_union_type(opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter]):
     tracer, _ = opentelemetry_test_configuration

-    @tool(opentelemetry_tracer=tracer)
+    @tool_decorator_factory(opentelemetry_tracer=tracer)
     def foo(a: Union[int, float], b: float) -> float:
         return a + b

@@ -76,7 +76,7 @@ def test_not_required_parameter(
 ):
     tracer, exporter = opentelemetry_test_configuration

-    @tool(opentelemetry_tracer=tracer)
+    @tool_decorator_factory(opentelemetry_tracer=tracer)
     def test_calculator(a: Optional[float], b: float) -> float:
         if a is None:
             a = 0
@@ -98,7 +98,7 @@ def test_no_annotation_on_parameter(
     tracer, _ = opentelemetry_test_configuration

     # GIVEN a function annotated with @tool and without type hint on a parameter
-    @tool(opentelemetry_tracer=tracer)
+    @tool_decorator_factory(opentelemetry_tracer=tracer)
     def calculator(a: Optional[float], b) -> float:
         if a is None:
             a = 0
@@ -133,7 +133,7 @@ def test_dict_annotation_no_sub_types(
     tracer, _ = opentelemetry_test_configuration

     # GIVEN a function annotated with @tool and without type hint on a parameter
-    @tool(opentelemetry_tracer=tracer)
+    @tool_decorator_factory(opentelemetry_tracer=tracer)
     def calculator(a: Optional[float], b: dict) -> float:
         if a is None:
             a = 0
@@ -173,7 +173,7 @@ def test_list_annotation_no_sub_types(
     tracer, _ = opentelemetry_test_configuration

     # GIVEN a function annotated with @tool and without type hint on a parameter
-    @tool(opentelemetry_tracer=tracer)
+    @tool_decorator_factory(opentelemetry_tracer=tracer)
     def calculator(a: Optional[float], b: 
Optional[list]) -> float: if a is None: a = 0 @@ -212,7 +212,7 @@ def test_tuple_annotation_no_sub_types( tracer, _ = opentelemetry_test_configuration # GIVEN a function annotated with @tool and without type hint on a parameter - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def calculator(a: Optional[float], b: Optional[tuple]) -> float: if a is None: a = 0 @@ -252,7 +252,7 @@ def test_function_without_return_annotation( # GIVEN a function annotated with @tool and without type hint on the return value # WHEN building the Tool kernel - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def foo(a: Optional[float], b: float) -> float: """Add two numbers.""" if a is None: @@ -270,7 +270,7 @@ def test_list_annotation_parameter( tracer, exporter = opentelemetry_test_configuration # WHEN defining a tool with a list parameter - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def foo(to_join: list[str]) -> str: return " ".join(to_join) @@ -295,7 +295,7 @@ def test_list_in_list_parameter_annotation( # GIVEN a tool definition with a list of lists parameter # WHEN building the Tool Kernel - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def nested_plain_join(to_join: list[list[str]]): return " ".join([val for sub_list in to_join for val in sub_list]) @@ -320,7 +320,7 @@ def test_complex_dict_annotation( # GIVEN a tool definition with a dictionary parameter # WHEN building the Tool Kernel - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def foo(a: dict[Union[int, str], list[str]]): return a @@ -345,7 +345,7 @@ def test_tuple_annotation( # GIVEN a tool definition with a tuple parameter # WHEN building the Tool Kernel - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def foo(a: Optional[tuple[int, Optional[str], float]]): return a @@ -371,7 +371,7 @@ def test_tool_no_args( # GIVEN a tool definition without arguments # WHEN building the Tool Kernel - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def foo(): return 42 @@ -406,7 +406,7 @@ class Foo(TypedDict): # WHEN defining a tool with a parameter of that type with pytest.raises(ValueError) as exc: - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def foo_bar(foo: Foo): return foo.a + foo.b # type: ignore @@ -432,9 +432,9 @@ def calculator(operation: str, num1: float, num2: float) -> float: else: raise ValueError(f"Invalid operation: {operation}") - higher_order_fn_tool = tool(opentelemetry_tracer=tracer)(calculator) + higher_order_fn_tool = tool_decorator_factory(opentelemetry_tracer=tracer)(calculator) - @tool(opentelemetry_tracer=tracer) # type: ignore + @tool_decorator_factory(opentelemetry_tracer=tracer) # type: ignore def calculator(operation: str, num1: float, num2: float) -> float: """Do arithmetic operations on two numbers.""" if operation == "add": @@ -478,7 +478,7 @@ def test_python310_syntax( tracer, _ = opentelemetry_test_configuration # GIVEN a function annotated with @tool where a parameter uses `|` for Optional - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def calculator(a: float, b: float | None = None) -> float: # NOTE: dummy function, only testing its signature not correctness if a is None: @@ -511,7 +511,7 @@ def test_python310_union_syntax( tracer, _ = 
opentelemetry_test_configuration # GIVEN a function annotated with @tool where a parameter uses `|` for Union - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def calculator(a: float, b: float | int | str) -> float: # NOTE: dummy function, only testing its signature not correctness return a + b # type: ignore @@ -542,7 +542,7 @@ def test_python_list_ellipsis( tracer, _ = opentelemetry_test_configuration # GIVEN a function annotated with @tool where a parameter uses `...` - @tool(opentelemetry_tracer=tracer) + @tool_decorator_factory(opentelemetry_tracer=tracer) def calculator(b: ...) -> float | None: # type: ignore # NOTE: dummy function, only testing its signature not correctness if isinstance(b, list): From ce09d3db56a2cc969cc99178698b07fb6356350d Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Sun, 2 Mar 2025 13:15:17 +0000 Subject: [PATCH 06/14] wip --- poetry.lock | 1312 +++-------------------------- pyproject.toml | 2 +- src/humanloop/context.py | 54 +- src/humanloop/eval_utils/run.py | 55 +- src/humanloop/otel/exporter.py | 53 +- src/humanloop/utilities/flow.py | 98 ++- src/humanloop/utilities/prompt.py | 11 +- src/humanloop/utilities/tool.py | 9 +- 8 files changed, 305 insertions(+), 1289 deletions(-) diff --git a/poetry.lock b/poetry.lock index ea6df3bd..eaa1b5ec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,6 +6,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -17,6 +18,7 @@ version = "0.45.2" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "anthropic-0.45.2-py3-none-any.whl", hash = "sha256:ecd746f7274451dfcb7e1180571ead624c7e1195d1d46cb7c70143d2aedb4d35"}, {file = "anthropic-0.45.2.tar.gz", hash = "sha256:32a18b9ecd12c91b2be4cae6ca2ab46a06937b5aa01b21308d97a6d29794fb5e"}, @@ -41,6 +43,7 @@ version = "4.8.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, @@ -54,129 +57,28 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop 
(>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] -[[package]] -name = "asgiref" -version = "3.8.1" -description = "ASGI specs, helper code, and adapters" -optional = false -python-versions = ">=3.8" -files = [ - {file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"}, - {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4", markers = "python_version < \"3.11\""} - -[package.extras] -tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] - [[package]] name = "attrs" version = "25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] - -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - -[[package]] -name = "bcrypt" -version = "4.2.1" -description = "Modern password hashing for your 
software and your servers" -optional = false -python-versions = ">=3.7" -files = [ - {file = "bcrypt-4.2.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:1340411a0894b7d3ef562fb233e4b6ed58add185228650942bdc885362f32c17"}, - {file = "bcrypt-4.2.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ee315739bc8387aa36ff127afc99120ee452924e0df517a8f3e4c0187a0f5f"}, - {file = "bcrypt-4.2.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dbd0747208912b1e4ce730c6725cb56c07ac734b3629b60d4398f082ea718ad"}, - {file = "bcrypt-4.2.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:aaa2e285be097050dba798d537b6efd9b698aa88eef52ec98d23dcd6d7cf6fea"}, - {file = "bcrypt-4.2.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:76d3e352b32f4eeb34703370e370997065d28a561e4a18afe4fef07249cb4396"}, - {file = "bcrypt-4.2.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:b7703ede632dc945ed1172d6f24e9f30f27b1b1a067f32f68bf169c5f08d0425"}, - {file = "bcrypt-4.2.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:89df2aea2c43be1e1fa066df5f86c8ce822ab70a30e4c210968669565c0f4685"}, - {file = "bcrypt-4.2.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:04e56e3fe8308a88b77e0afd20bec516f74aecf391cdd6e374f15cbed32783d6"}, - {file = "bcrypt-4.2.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cfdf3d7530c790432046c40cda41dfee8c83e29482e6a604f8930b9930e94139"}, - {file = "bcrypt-4.2.1-cp37-abi3-win32.whl", hash = "sha256:adadd36274510a01f33e6dc08f5824b97c9580583bd4487c564fc4617b328005"}, - {file = "bcrypt-4.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:8c458cd103e6c5d1d85cf600e546a639f234964d0228909d8f8dbeebff82d526"}, - {file = "bcrypt-4.2.1-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:8ad2f4528cbf0febe80e5a3a57d7a74e6635e41af1ea5675282a33d769fba413"}, - {file = "bcrypt-4.2.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:909faa1027900f2252a9ca5dfebd25fc0ef1417943824783d1c8418dd7d6df4a"}, - {file = "bcrypt-4.2.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cde78d385d5e93ece5479a0a87f73cd6fa26b171c786a884f955e165032b262c"}, - {file = "bcrypt-4.2.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:533e7f3bcf2f07caee7ad98124fab7499cb3333ba2274f7a36cf1daee7409d99"}, - {file = "bcrypt-4.2.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:687cf30e6681eeda39548a93ce9bfbb300e48b4d445a43db4298d2474d2a1e54"}, - {file = "bcrypt-4.2.1-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:041fa0155c9004eb98a232d54da05c0b41d4b8e66b6fc3cb71b4b3f6144ba837"}, - {file = "bcrypt-4.2.1-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f85b1ffa09240c89aa2e1ae9f3b1c687104f7b2b9d2098da4e923f1b7082d331"}, - {file = "bcrypt-4.2.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c6f5fa3775966cca251848d4d5393ab016b3afed251163c1436fefdec3b02c84"}, - {file = "bcrypt-4.2.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:807261df60a8b1ccd13e6599c779014a362ae4e795f5c59747f60208daddd96d"}, - {file = "bcrypt-4.2.1-cp39-abi3-win32.whl", hash = "sha256:b588af02b89d9fad33e5f98f7838bf590d6d692df7153647724a7f20c186f6bf"}, - {file = "bcrypt-4.2.1-cp39-abi3-win_amd64.whl", hash = "sha256:e84e0e6f8e40a242b11bce56c313edc2be121cec3e0ec2d76fce01f6af33c07c"}, - {file = "bcrypt-4.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76132c176a6d9953cdc83c296aeaed65e1a708485fd55abf163e0d9f8f16ce0e"}, - {file = "bcrypt-4.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:e158009a54c4c8bc91d5e0da80920d048f918c61a581f0a63e4e93bb556d362f"}, - {file = "bcrypt-4.2.1.tar.gz", hash = "sha256:6765386e3ab87f569b276988742039baab087b2cdb01e809d74e74503c2faafe"}, -] - -[package.extras] -tests = ["pytest (>=3.2.1,!=3.3.0)"] -typecheck = ["mypy"] - -[[package]] -name = "build" -version = "1.2.2.post1" -description = "A simple, correct Python build frontend" -optional = false -python-versions = ">=3.8" -files = [ - {file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"}, - {file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "os_name == \"nt\""} -importlib-metadata = {version = ">=4.6", markers = "python_full_version < \"3.10.2\""} -packaging = ">=19.1" -pyproject_hooks = "*" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} - -[package.extras] -docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] -test = ["build[uv,virtualenv]", "filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"] -typing = ["build[uv]", "importlib-metadata (>=5.1)", "mypy (>=1.9.0,<1.10.0)", "tomli", "typing-extensions (>=3.7.4.3)"] -uv = ["uv (>=0.1.18)"] -virtualenv = ["virtualenv (>=20.0.35)"] - -[[package]] -name = "cachetools" -version = "5.5.1" -description = "Extensible memoizing collections and decorators" -optional = false -python-versions = ">=3.7" -files = [ - {file = "cachetools-5.5.1-py3-none-any.whl", hash = "sha256:b76651fdc3b24ead3c648bbdeeb940c1b04d365b38b4af66788f9ec4a81d42bb"}, - {file = "cachetools-5.5.1.tar.gz", hash = "sha256:70f238fbba50383ef62e55c6aff6d9673175fe59f7c6782c7a0b9e38f4a9df95"}, -] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "certifi" @@ -184,6 +86,7 @@ version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, @@ -195,6 +98,7 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -290,108 +194,13 @@ files = [ {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] -[[package]] -name = "chroma-hnswlib" -version = "0.7.6" -description = "Chromas fork of hnswlib" -optional = false -python-versions = "*" -files = [ - {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f35192fbbeadc8c0633f0a69c3d3e9f1a4eab3a46b65458bbcbcabdd9e895c36"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f007b608c96362b8f0c8b6b2ac94f67f83fcbabd857c378ae82007ec92f4d82"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:456fd88fa0d14e6b385358515aef69fc89b3c2191706fd9aee62087b62aad09c"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dfaae825499c2beaa3b75a12d7ec713b64226df72a5c4097203e3ed532680da"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-win_amd64.whl", hash = "sha256:2487201982241fb1581be26524145092c95902cb09fc2646ccfbc407de3328ec"}, - {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81181d54a2b1e4727369486a631f977ffc53c5533d26e3d366dda243fb0998ca"}, - {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4b4ab4e11f1083dd0a11ee4f0e0b183ca9f0f2ed63ededba1935b13ce2b3606f"}, - {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53db45cd9173d95b4b0bdccb4dbff4c54a42b51420599c32267f3abbeb795170"}, - {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c093f07a010b499c00a15bc9376036ee4800d335360570b14f7fe92badcdcf9"}, - {file = "chroma_hnswlib-0.7.6-cp311-cp311-win_amd64.whl", hash = "sha256:0540b0ac96e47d0aa39e88ea4714358ae05d64bbe6bf33c52f316c664190a6a3"}, - {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e87e9b616c281bfbe748d01705817c71211613c3b063021f7ed5e47173556cb7"}, - {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec5ca25bc7b66d2ecbf14502b5729cde25f70945d22f2aaf523c2d747ea68912"}, - {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305ae491de9d5f3c51e8bd52d84fdf2545a4a2bc7af49765cda286b7bb30b1d4"}, - {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:822ede968d25a2c88823ca078a58f92c9b5c4142e38c7c8b4c48178894a0a3c5"}, - {file = "chroma_hnswlib-0.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2fe6ea949047beed19a94b33f41fe882a691e58b70c55fdaa90274ae78be046f"}, - {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feceff971e2a2728c9ddd862a9dd6eb9f638377ad98438876c9aeac96c9482f5"}, - {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb0633b60e00a2b92314d0bf5bbc0da3d3320be72c7e3f4a9b19f4609dc2b2ab"}, - {file = "chroma_hnswlib-0.7.6-cp37-cp37m-win_amd64.whl", hash = 
"sha256:a566abe32fab42291f766d667bdbfa234a7f457dcbd2ba19948b7a978c8ca624"}, - {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6be47853d9a58dedcfa90fc846af202b071f028bbafe1d8711bf64fe5a7f6111"}, - {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a7af35bdd39a88bffa49f9bb4bf4f9040b684514a024435a1ef5cdff980579d"}, - {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a53b1f1551f2b5ad94eb610207bde1bb476245fc5097a2bec2b476c653c58bde"}, - {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3085402958dbdc9ff5626ae58d696948e715aef88c86d1e3f9285a88f1afd3bc"}, - {file = "chroma_hnswlib-0.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:77326f658a15adfb806a16543f7db7c45f06fd787d699e643642d6bde8ed49c4"}, - {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:93b056ab4e25adab861dfef21e1d2a2756b18be5bc9c292aa252fa12bb44e6ae"}, - {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fe91f018b30452c16c811fd6c8ede01f84e5a9f3c23e0758775e57f1c3778871"}, - {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c0e627476f0f4d9e153420d36042dd9c6c3671cfd1fe511c0253e38c2a1039"}, - {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e9796a4536b7de6c6d76a792ba03e08f5aaa53e97e052709568e50b4d20c04f"}, - {file = "chroma_hnswlib-0.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:d30e2db08e7ffdcc415bd072883a322de5995eb6ec28a8f8c054103bbd3ec1e0"}, - {file = "chroma_hnswlib-0.7.6.tar.gz", hash = "sha256:4dce282543039681160259d29fcde6151cc9106c6461e0485f57cdccd83059b7"}, -] - -[package.dependencies] -numpy = "*" - -[[package]] -name = "chromadb" -version = "0.6.3" -description = "Chroma." 
-optional = false -python-versions = ">=3.9" -files = [ - {file = "chromadb-0.6.3-py3-none-any.whl", hash = "sha256:4851258489a3612b558488d98d09ae0fe0a28d5cad6bd1ba64b96fdc419dc0e5"}, - {file = "chromadb-0.6.3.tar.gz", hash = "sha256:c8f34c0b704b9108b04491480a36d42e894a960429f87c6516027b5481d59ed3"}, -] - -[package.dependencies] -bcrypt = ">=4.0.1" -build = ">=1.0.3" -chroma-hnswlib = "0.7.6" -fastapi = ">=0.95.2" -grpcio = ">=1.58.0" -httpx = ">=0.27.0" -importlib-resources = "*" -kubernetes = ">=28.1.0" -mmh3 = ">=4.0.1" -numpy = ">=1.22.5" -onnxruntime = ">=1.14.1" -opentelemetry-api = ">=1.2.0" -opentelemetry-exporter-otlp-proto-grpc = ">=1.2.0" -opentelemetry-instrumentation-fastapi = ">=0.41b0" -opentelemetry-sdk = ">=1.2.0" -orjson = ">=3.9.12" -overrides = ">=7.3.1" -posthog = ">=2.4.0" -pydantic = ">=1.9" -pypika = ">=0.48.9" -PyYAML = ">=6.0.0" -rich = ">=10.11.0" -tenacity = ">=8.2.3" -tokenizers = ">=0.13.2" -tqdm = ">=4.65.0" -typer = ">=0.9.0" -typing_extensions = ">=4.5.0" -uvicorn = {version = ">=0.18.3", extras = ["standard"]} - -[[package]] -name = "click" -version = "8.1.8" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, - {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - [[package]] name = "cohere" version = "5.13.12" description = "" optional = false python-versions = "<4.0,>=3.9" +groups = ["dev"] files = [ {file = "cohere-5.13.12-py3-none-any.whl", hash = "sha256:2a043591a3e5280b47716a6b311e4c7f58e799364113a9cb81b50cd4f6c95f7e"}, {file = "cohere-5.13.12.tar.gz", hash = "sha256:97bb9ac107e580780b941acbabd3aa5e71960e6835398292c46aaa8a0a4cab88"}, @@ -414,6 +223,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\" or platform_system == \"Windows\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -425,6 +236,7 @@ version = "15.0.1" description = "Colored terminal output for Python's logging module" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] files = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, @@ -442,6 +254,7 @@ version = "8.2.0" description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "deepdiff-8.2.0-py3-none-any.whl", hash = "sha256:5091f2cdfd372b1b9f6bfd8065ba323ae31118dc4e42594371b38c8bea3fd0a4"}, {file = "deepdiff-8.2.0.tar.gz", hash = "sha256:6ec78f65031485735545ffbe7a61e716c3c2d12ca6416886d5e9291fc76c46c3"}, @@ -460,6 +273,7 @@ version = "1.2.18" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] files = [ {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, @@ -469,7 +283,7 @@ files = [ wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] [[package]] name = "distro" @@ -477,28 +291,20 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] -[[package]] -name = "durationpy" -version = "0.9" -description = "Module for converting between datetime.timedelta and Go's Duration strings." -optional = false -python-versions = "*" -files = [ - {file = "durationpy-0.9-py3-none-any.whl", hash = "sha256:e65359a7af5cedad07fb77a2dd3f390f8eb0b74cb845589fa6c057086834dd38"}, - {file = "durationpy-0.9.tar.gz", hash = "sha256:fd3feb0a69a0057d582ef643c355c40d2fa1c942191f914d12203b1a01ac722a"}, -] - [[package]] name = "exceptiongroup" version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -507,32 +313,13 @@ files = [ [package.extras] test = ["pytest (>=6)"] -[[package]] -name = "fastapi" -version = "0.115.8" -description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fastapi-0.115.8-py3-none-any.whl", hash = "sha256:753a96dd7e036b34eeef8babdfcfe3f28ff79648f86551eb36bfc1b0bf4a8cbf"}, - {file = "fastapi-0.115.8.tar.gz", hash = "sha256:0ce9111231720190473e222cdf0f07f7206ad7e53ea02beb1d2dc36e2f0741e9"}, -] - -[package.dependencies] -pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" -starlette = ">=0.40.0,<0.46.0" -typing-extensions = ">=4.8.0" - -[package.extras] -all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] 
-standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] - [[package]] name = "fastavro" version = "1.10.0" description = "Fast read/write of AVRO files" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"}, {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"}, @@ -579,6 +366,7 @@ version = "3.17.0" description = "A platform independent file lock." optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"}, {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"}, @@ -587,7 +375,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "flatbuffers" @@ -595,6 +383,7 @@ version = "25.2.10" description = "The FlatBuffers serialization format for Python" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "flatbuffers-25.2.10-py2.py3-none-any.whl", hash = "sha256:ebba5f4d5ea615af3f7fd70fc310636fbb2bbd1f566ac0a23d98dd412de50051"}, {file = "flatbuffers-25.2.10.tar.gz", hash = "sha256:97e451377a41262f8d9bd4295cc836133415cc03d8cb966410a4af92eb00d26e"}, @@ -606,6 +395,7 @@ version = "2025.2.0" description = "File-system specification" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b"}, {file = "fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd"}, @@ -639,53 +429,13 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] -[[package]] -name = "google-auth" -version = "2.38.0" -description = "Google Authentication Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, - {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" 
-rsa = ">=3.1.4,<5" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography", "pyopenssl"] -pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0.dev0)"] - -[[package]] -name = "googleapis-common-protos" -version = "1.67.0" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "googleapis_common_protos-1.67.0-py2.py3-none-any.whl", hash = "sha256:579de760800d13616f51cf8be00c876f00a9f146d3e6510e19d1f4111758b741"}, - {file = "googleapis_common_protos-1.67.0.tar.gz", hash = "sha256:21398025365f138be356d5923e9168737d94d46a72aefee4a6110a1f23463c86"}, -] - -[package.dependencies] -protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] - [[package]] name = "groq" version = "0.18.0" description = "The official Python library for the groq API" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "groq-0.18.0-py3-none-any.whl", hash = "sha256:81d5ac00057a45d8ce559d23ab5d3b3893011d1f12c35187ab35a9182d826ea6"}, {file = "groq-0.18.0.tar.gz", hash = "sha256:8e2ccfea406d68b3525af4b7c0e321fcb3d2a73fc60bb70b4156e6cd88c72f03"}, @@ -699,79 +449,13 @@ pydantic = ">=1.9.0,<3" sniffio = "*" typing-extensions = ">=4.10,<5" -[[package]] -name = "grpcio" -version = "1.70.0" -description = "HTTP/2-based RPC framework" -optional = false -python-versions = ">=3.8" -files = [ - {file = "grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851"}, - {file = "grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf"}, - {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:374d014f29f9dfdb40510b041792e0e2828a1389281eb590df066e1cc2b404e5"}, - {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2af68a6f5c8f78d56c145161544ad0febbd7479524a59c16b3e25053f39c87f"}, - {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7df14b2dcd1102a2ec32f621cc9fab6695effef516efbc6b063ad749867295"}, - {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c78b339869f4dbf89881e0b6fbf376313e4f845a42840a7bdf42ee6caed4b11f"}, - {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:58ad9ba575b39edef71f4798fdb5c7b6d02ad36d47949cd381d4392a5c9cbcd3"}, - {file = "grpcio-1.70.0-cp310-cp310-win32.whl", hash = "sha256:2b0d02e4b25a5c1f9b6c7745d4fa06efc9fd6a611af0fb38d3ba956786b95199"}, - {file = "grpcio-1.70.0-cp310-cp310-win_amd64.whl", hash = "sha256:0de706c0a5bb9d841e353f6343a9defc9fc35ec61d6eb6111802f3aa9fef29e1"}, - {file = "grpcio-1.70.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:17325b0be0c068f35770f944124e8839ea3185d6d54862800fc28cc2ffad205a"}, - {file = "grpcio-1.70.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:dbe41ad140df911e796d4463168e33ef80a24f5d21ef4d1e310553fcd2c4a386"}, - {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5ea67c72101d687d44d9c56068328da39c9ccba634cabb336075fae2eab0d04b"}, - {file = 
"grpcio-1.70.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb5277db254ab7586769e490b7b22f4ddab3876c490da0a1a9d7c695ccf0bf77"}, - {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7831a0fc1beeeb7759f737f5acd9fdcda520e955049512d68fda03d91186eea"}, - {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27cc75e22c5dba1fbaf5a66c778e36ca9b8ce850bf58a9db887754593080d839"}, - {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63764963412e22f0491d0d32833d71087288f4e24cbcddbae82476bfa1d81fd"}, - {file = "grpcio-1.70.0-cp311-cp311-win32.whl", hash = "sha256:bb491125103c800ec209d84c9b51f1c60ea456038e4734688004f377cfacc113"}, - {file = "grpcio-1.70.0-cp311-cp311-win_amd64.whl", hash = "sha256:d24035d49e026353eb042bf7b058fb831db3e06d52bee75c5f2f3ab453e71aca"}, - {file = "grpcio-1.70.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:ef4c14508299b1406c32bdbb9fb7b47612ab979b04cf2b27686ea31882387cff"}, - {file = "grpcio-1.70.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:aa47688a65643afd8b166928a1da6247d3f46a2784d301e48ca1cc394d2ffb40"}, - {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:880bfb43b1bb8905701b926274eafce5c70a105bc6b99e25f62e98ad59cb278e"}, - {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e654c4b17d07eab259d392e12b149c3a134ec52b11ecdc6a515b39aceeec898"}, - {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2394e3381071045a706ee2eeb6e08962dd87e8999b90ac15c55f56fa5a8c9597"}, - {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b3c76701428d2df01964bc6479422f20e62fcbc0a37d82ebd58050b86926ef8c"}, - {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac073fe1c4cd856ebcf49e9ed6240f4f84d7a4e6ee95baa5d66ea05d3dd0df7f"}, - {file = "grpcio-1.70.0-cp312-cp312-win32.whl", hash = "sha256:cd24d2d9d380fbbee7a5ac86afe9787813f285e684b0271599f95a51bce33528"}, - {file = "grpcio-1.70.0-cp312-cp312-win_amd64.whl", hash = "sha256:0495c86a55a04a874c7627fd33e5beaee771917d92c0e6d9d797628ac40e7655"}, - {file = "grpcio-1.70.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa573896aeb7d7ce10b1fa425ba263e8dddd83d71530d1322fd3a16f31257b4a"}, - {file = "grpcio-1.70.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:d405b005018fd516c9ac529f4b4122342f60ec1cee181788249372524e6db429"}, - {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f32090238b720eb585248654db8e3afc87b48d26ac423c8dde8334a232ff53c9"}, - {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfa089a734f24ee5f6880c83d043e4f46bf812fcea5181dcb3a572db1e79e01c"}, - {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f19375f0300b96c0117aca118d400e76fede6db6e91f3c34b7b035822e06c35f"}, - {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:7c73c42102e4a5ec76608d9b60227d917cea46dff4d11d372f64cbeb56d259d0"}, - {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:0a5c78d5198a1f0aa60006cd6eb1c912b4a1520b6a3968e677dbcba215fabb40"}, - {file = "grpcio-1.70.0-cp313-cp313-win32.whl", hash = "sha256:fe9dbd916df3b60e865258a8c72ac98f3ac9e2a9542dcb72b7a34d236242a5ce"}, - {file = "grpcio-1.70.0-cp313-cp313-win_amd64.whl", hash = "sha256:4119fed8abb7ff6c32e3d2255301e59c316c22d31ab812b3fbcbaf3d0d87cc68"}, - 
{file = "grpcio-1.70.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:8058667a755f97407fca257c844018b80004ae8035565ebc2812cc550110718d"}, - {file = "grpcio-1.70.0-cp38-cp38-macosx_10_14_universal2.whl", hash = "sha256:879a61bf52ff8ccacbedf534665bb5478ec8e86ad483e76fe4f729aaef867cab"}, - {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ba0a173f4feacf90ee618fbc1a27956bfd21260cd31ced9bc707ef551ff7dc7"}, - {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558c386ecb0148f4f99b1a65160f9d4b790ed3163e8610d11db47838d452512d"}, - {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:412faabcc787bbc826f51be261ae5fa996b21263de5368a55dc2cf824dc5090e"}, - {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3b0f01f6ed9994d7a0b27eeddea43ceac1b7e6f3f9d86aeec0f0064b8cf50fdb"}, - {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7385b1cb064734005204bc8994eed7dcb801ed6c2eda283f613ad8c6c75cf873"}, - {file = "grpcio-1.70.0-cp38-cp38-win32.whl", hash = "sha256:07269ff4940f6fb6710951116a04cd70284da86d0a4368fd5a3b552744511f5a"}, - {file = "grpcio-1.70.0-cp38-cp38-win_amd64.whl", hash = "sha256:aba19419aef9b254e15011b230a180e26e0f6864c90406fdbc255f01d83bc83c"}, - {file = "grpcio-1.70.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:4f1937f47c77392ccd555728f564a49128b6a197a05a5cd527b796d36f3387d0"}, - {file = "grpcio-1.70.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:0cd430b9215a15c10b0e7d78f51e8a39d6cf2ea819fd635a7214fae600b1da27"}, - {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:e27585831aa6b57b9250abaf147003e126cd3a6c6ca0c531a01996f31709bed1"}, - {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1af8e15b0f0fe0eac75195992a63df17579553b0c4af9f8362cc7cc99ccddf4"}, - {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbce24409beaee911c574a3d75d12ffb8c3e3dd1b813321b1d7a96bbcac46bf4"}, - {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ff4a8112a79464919bb21c18e956c54add43ec9a4850e3949da54f61c241a4a6"}, - {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5413549fdf0b14046c545e19cfc4eb1e37e9e1ebba0ca390a8d4e9963cab44d2"}, - {file = "grpcio-1.70.0-cp39-cp39-win32.whl", hash = "sha256:b745d2c41b27650095e81dea7091668c040457483c9bdb5d0d9de8f8eb25e59f"}, - {file = "grpcio-1.70.0-cp39-cp39-win_amd64.whl", hash = "sha256:a31d7e3b529c94e930a117b2175b2efd179d96eb3c7a21ccb0289a8ab05b645c"}, - {file = "grpcio-1.70.0.tar.gz", hash = "sha256:8d1584a68d5922330025881e63a6c1b54cc8117291d382e4fa69339b6d914c56"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.70.0)"] - [[package]] name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -783,6 +467,7 @@ version = "1.0.7" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -798,67 +483,13 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] trio = ["trio (>=0.22.0,<1.0)"] -[[package]] -name = "httptools" -version = "0.6.4" -description = "A collection of framework independent HTTP protocol utils." -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, - {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, - {file = "httptools-0.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deee0e3343f98ee8047e9f4c5bc7cedbf69f5734454a94c38ee829fb2d5fa3c1"}, - {file = "httptools-0.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca80b7485c76f768a3bc83ea58373f8db7b015551117375e4918e2aa77ea9b50"}, - {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:90d96a385fa941283ebd231464045187a31ad932ebfa541be8edf5b3c2328959"}, - {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:59e724f8b332319e2875efd360e61ac07f33b492889284a3e05e6d13746876f4"}, - {file = "httptools-0.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:c26f313951f6e26147833fc923f78f95604bbec812a43e5ee37f26dc9e5a686c"}, - {file = "httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069"}, - {file = "httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a"}, - {file = "httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975"}, - {file = "httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636"}, - {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721"}, - {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988"}, - {file = "httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17"}, - {file = "httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2"}, - {file = "httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44"}, - {file = "httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1"}, - {file = "httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2"}, - {file = 
"httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81"}, - {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f"}, - {file = "httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970"}, - {file = "httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660"}, - {file = "httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083"}, - {file = "httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3"}, - {file = "httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071"}, - {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5"}, - {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0"}, - {file = "httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8"}, - {file = "httptools-0.6.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d3f0d369e7ffbe59c4b6116a44d6a8eb4783aae027f2c0b366cf0aa964185dba"}, - {file = "httptools-0.6.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:94978a49b8f4569ad607cd4946b759d90b285e39c0d4640c6b36ca7a3ddf2efc"}, - {file = "httptools-0.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40dc6a8e399e15ea525305a2ddba998b0af5caa2566bcd79dcbe8948181eeaff"}, - {file = "httptools-0.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab9ba8dcf59de5181f6be44a77458e45a578fc99c31510b8c65b7d5acc3cf490"}, - {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc411e1c0a7dcd2f902c7c48cf079947a7e65b5485dea9decb82b9105ca71a43"}, - {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d54efd20338ac52ba31e7da78e4a72570cf729fac82bc31ff9199bedf1dc7440"}, - {file = "httptools-0.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:df959752a0c2748a65ab5387d08287abf6779ae9165916fe053e68ae1fbdc47f"}, - {file = "httptools-0.6.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:85797e37e8eeaa5439d33e556662cc370e474445d5fab24dcadc65a8ffb04003"}, - {file = "httptools-0.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:db353d22843cf1028f43c3651581e4bb49374d85692a85f95f7b9a130e1b2cab"}, - {file = "httptools-0.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ffd262a73d7c28424252381a5b854c19d9de5f56f075445d33919a637e3547"}, - {file = "httptools-0.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c346571fa50d2e9856a37d7cd9435a25e7fd15e236c397bf224afaa355fe9"}, - {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aafe0f1918ed07b67c1e838f950b1c1fabc683030477e60b335649b8020e1076"}, - {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:0e563e54979e97b6d13f1bbc05a96109923e76b901f786a5eae36e99c01237bd"}, - {file = "httptools-0.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:b799de31416ecc589ad79dd85a0b2657a8fe39327944998dea368c1d4c9e55e6"}, - {file = "httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c"}, -] - -[package.extras] -test = ["Cython (>=0.29.24)"] - [[package]] name = "httpx" version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -871,7 +502,7 @@ httpcore = "==1.*" idna = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -883,6 +514,7 @@ version = "0.4.0" description = "Consume Server-Sent Event (SSE) messages with HTTPX." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, @@ -894,6 +526,7 @@ version = "0.28.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7"}, {file = "huggingface_hub-0.28.1.tar.gz", hash = "sha256:893471090c98e3b6efbdfdacafe4052b20b84d59866fb6f54c33d9af18c303ae"}, @@ -928,6 +561,7 @@ version = "10.0" description = "Human friendly output for text interfaces using Python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] files = [ {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, @@ -942,6 +576,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -956,6 +591,7 @@ version = "8.4.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, @@ -967,29 +603,7 @@ zipp = ">=0.5" [package.extras] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", 
"sphinx-lint"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] - -[[package]] -name = "importlib-resources" -version = "6.5.2" -description = "Read resources from Python packages" -optional = false -python-versions = ">=3.9" -files = [ - {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, - {file = "importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] -type = ["pytest-mypy"] +test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] [[package]] name = "iniconfig" @@ -997,6 +611,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -1008,6 +623,7 @@ version = "0.8.2" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b"}, {file = "jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393"}, @@ -1093,6 +709,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -1114,6 +731,7 @@ version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, @@ -1122,74 +740,13 @@ files = [ [package.dependencies] referencing = ">=0.31.0" -[[package]] -name = "kubernetes" -version = "32.0.0" -description = "Kubernetes python client" -optional = false -python-versions = ">=3.6" -files = [ - {file = "kubernetes-32.0.0-py2.py3-none-any.whl", hash = "sha256:60fd8c29e8e43d9c553ca4811895a687426717deba9c0a66fb2dcc3f5ef96692"}, - {file = "kubernetes-32.0.0.tar.gz", hash = "sha256:319fa840345a482001ac5d6062222daeb66ec4d1bcb3087402aed685adf0aecb"}, -] - -[package.dependencies] -certifi = ">=14.05.14" -durationpy = ">=0.7" -google-auth = ">=1.0.1" -oauthlib = ">=3.2.2" -python-dateutil = ">=2.5.3" -pyyaml = ">=5.4.1" -requests = "*" -requests-oauthlib = "*" -six = ">=1.9.0" -urllib3 = ">=1.24.2" -websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" - -[package.extras] -adal = ["adal (>=1.0.2)"] - -[[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. Markdown parsing, done right!" 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - [[package]] name = "mmh3" version = "5.1.0" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"}, {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"}, @@ -1282,23 +839,13 @@ plot = ["matplotlib (==3.10.0)", "pandas (==2.2.3)"] test = ["pytest (==8.3.4)", "pytest-sugar (==1.0.0)"] type = ["mypy (==1.14.1)"] -[[package]] -name = "monotonic" -version = "1.6" -description = "An implementation of time.monotonic() for Python 2 & < 3.3" -optional = false -python-versions = "*" -files = [ - {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, - {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, -] - [[package]] name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, @@ -1307,7 +854,7 @@ files = [ [package.extras] develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] +gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""] tests = ["pytest (>=4.6)"] [[package]] @@ -1316,6 +863,7 @@ version = "1.0.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, @@ -1362,6 +910,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -1373,6 +922,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -1412,28 +962,13 @@ files = [ {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] -[[package]] -name = "oauthlib" -version = "3.2.2" -description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -optional = false -python-versions = ">=3.6" -files = [ - {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, - {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, -] - -[package.extras] -rsa = ["cryptography (>=3.0.0)"] -signals = ["blinker (>=1.4.0)"] -signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] - [[package]] name = "onnxruntime" version = "1.19.2" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "onnxruntime-1.19.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:84fa57369c06cadd3c2a538ae2a26d76d583e7c34bdecd5769d71ca5c0fc750e"}, {file = "onnxruntime-1.19.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdc471a66df0c1cdef774accef69e9f2ca168c851ab5e4f2f3341512c7ef4666"}, @@ -1476,6 +1011,7 @@ version = "1.63.2" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "openai-1.63.2-py3-none-any.whl", hash = "sha256:1f38b27b5a40814c2b7d8759ec78110df58c4a614c25f182809ca52b080ff4d4"}, {file = "openai-1.63.2.tar.gz", hash = "sha256:aeabeec984a7d2957b4928ceaa339e2ead19c61cfcf35ae62b7c363368d26360"}, @@ -1501,6 +1037,7 @@ version = "1.27.0" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, @@ -1510,46 +1047,13 @@ files = [ deprecated = ">=1.2.6" importlib-metadata = ">=6.0,<=8.4.0" -[[package]] -name = "opentelemetry-exporter-otlp-proto-common" -version = "1.27.0" -description = "OpenTelemetry Protobuf encoding" -optional = false -python-versions = ">=3.8" -files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a"}, 
- {file = "opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8"}, -] - -[package.dependencies] -opentelemetry-proto = "1.27.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-grpc" -version = "1.27.0" -description = "OpenTelemetry Collector Protobuf over gRPC Exporter" -optional = false -python-versions = ">=3.8" -files = [ - {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e"}, - {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f"}, -] - -[package.dependencies] -deprecated = ">=1.2.6" -googleapis-common-protos = ">=1.52,<2.0" -grpcio = ">=1.0.0,<2.0.0" -opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.27.0" -opentelemetry-proto = "1.27.0" -opentelemetry-sdk = ">=1.27.0,<1.28.0" - [[package]] name = "opentelemetry-instrumentation" version = "0.48b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"}, {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"}, @@ -1566,6 +1070,7 @@ version = "0.33.9" description = "OpenTelemetry Anthropic instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_anthropic-0.33.9-py3-none-any.whl", hash = "sha256:443fc46d7de9d95a86efebb4de1119672ba86f6da113cc7e1bb8129ce9978439"}, {file = "opentelemetry_instrumentation_anthropic-0.33.9.tar.gz", hash = "sha256:1866e832a777cfd407f83b3782f0788e702a9ede02eaaf7b6680d32f0c03d1e2"}, @@ -1577,33 +1082,13 @@ opentelemetry-instrumentation = ">=0.48b0,<0.49" opentelemetry-semantic-conventions = ">=0.48b0,<0.49" opentelemetry-semantic-conventions-ai = "0.4.2" -[[package]] -name = "opentelemetry-instrumentation-asgi" -version = "0.48b0" -description = "ASGI instrumentation for OpenTelemetry" -optional = false -python-versions = ">=3.8" -files = [ - {file = "opentelemetry_instrumentation_asgi-0.48b0-py3-none-any.whl", hash = "sha256:ddb1b5fc800ae66e85a4e2eca4d9ecd66367a8c7b556169d9e7b57e10676e44d"}, - {file = "opentelemetry_instrumentation_asgi-0.48b0.tar.gz", hash = "sha256:04c32174b23c7fa72ddfe192dad874954968a6a924608079af9952964ecdf785"}, -] - -[package.dependencies] -asgiref = ">=3.0,<4.0" -opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.48b0" -opentelemetry-semantic-conventions = "0.48b0" -opentelemetry-util-http = "0.48b0" - -[package.extras] -instruments = ["asgiref (>=3.0,<4.0)"] - [[package]] name = "opentelemetry-instrumentation-bedrock" version = "0.33.9" description = "OpenTelemetry Bedrock instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_bedrock-0.33.9-py3-none-any.whl", hash = "sha256:b6e1ac590b3c0c5bb1df0266feb9d6e349df396d4b3d1a0da5377cb8e6e16816"}, {file = "opentelemetry_instrumentation_bedrock-0.33.9.tar.gz", hash = "sha256:4441e5f2093edb1cbcd05298a39d180ea88d6efeb1bbe355886a97a57f6b542e"}, @@ -1622,6 +1107,7 @@ version = "0.33.9" description = "OpenTelemetry Cohere instrumentation" 
optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_cohere-0.33.9-py3-none-any.whl", hash = "sha256:a94ab72d0c438a154236f9907acee1a07f581408dbd8b06f0cb9301ef29b656b"}, {file = "opentelemetry_instrumentation_cohere-0.33.9.tar.gz", hash = "sha256:931f24768337026a933cb7dd4850530e0545772f08abaf37f4664f1e768b73db"}, @@ -1633,33 +1119,13 @@ opentelemetry-instrumentation = ">=0.48b0,<0.49" opentelemetry-semantic-conventions = ">=0.48b0,<0.49" opentelemetry-semantic-conventions-ai = "0.4.2" -[[package]] -name = "opentelemetry-instrumentation-fastapi" -version = "0.48b0" -description = "OpenTelemetry FastAPI Instrumentation" -optional = false -python-versions = ">=3.8" -files = [ - {file = "opentelemetry_instrumentation_fastapi-0.48b0-py3-none-any.whl", hash = "sha256:afeb820a59e139d3e5d96619600f11ce0187658b8ae9e3480857dd790bc024f2"}, - {file = "opentelemetry_instrumentation_fastapi-0.48b0.tar.gz", hash = "sha256:21a72563ea412c0b535815aeed75fc580240f1f02ebc72381cfab672648637a2"}, -] - -[package.dependencies] -opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.48b0" -opentelemetry-instrumentation-asgi = "0.48b0" -opentelemetry-semantic-conventions = "0.48b0" -opentelemetry-util-http = "0.48b0" - -[package.extras] -instruments = ["fastapi (>=0.58,<1.0)"] - [[package]] name = "opentelemetry-instrumentation-groq" version = "0.33.9" description = "OpenTelemetry Groq instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_groq-0.33.9-py3-none-any.whl", hash = "sha256:52256832c06f9d1ba8c11efce0854f012e7900c313e410a02c8feb85b0e35407"}, {file = "opentelemetry_instrumentation_groq-0.33.9.tar.gz", hash = "sha256:d83201c516a760fdc478413b855c6d9fb1aed48eb8d4166fa2dc7c762058f6b1"}, @@ -1677,6 +1143,7 @@ version = "0.33.9" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_openai-0.33.9-py3-none-any.whl", hash = "sha256:9a54ec31a66c212cd42b7f02701beecea4068effdf227b11c96fecfbc6544f40"}, {file = "opentelemetry_instrumentation_openai-0.33.9.tar.gz", hash = "sha256:5989a6049e63a09a6e9d699c077f7bbc932c0bda5a08f9ec0f4e88fd0c38d8b7"}, @@ -1695,6 +1162,7 @@ version = "0.33.9" description = "OpenTelemetry Replicate instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_replicate-0.33.9-py3-none-any.whl", hash = "sha256:cf2a0b83dfd150cb7a6827d405b088ed0a46beec7f652bfcc4acb5ffd3d2044a"}, {file = "opentelemetry_instrumentation_replicate-0.33.9.tar.gz", hash = "sha256:e18f2ce224ae1efc2158263aaec6c7b487d7498da9a08d1a594df484e86fce88"}, @@ -1708,17 +1176,18 @@ opentelemetry-semantic-conventions-ai = "0.4.2" [[package]] name = "opentelemetry-proto" -version = "1.27.0" +version = "1.30.0" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace"}, - {file = "opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6"}, + {file = "opentelemetry_proto-1.30.0-py3-none-any.whl", hash = "sha256:c6290958ff3ddacc826ca5abbeb377a31c2334387352a259ba0df37c243adc11"}, + {file = "opentelemetry_proto-1.30.0.tar.gz", hash = 
"sha256:afe5c9c15e8b68d7c469596e5b32e8fc085eb9febdd6fb4e20924a93a0389179"}, ] [package.dependencies] -protobuf = ">=3.19,<5.0" +protobuf = ">=5.0,<6.0" [[package]] name = "opentelemetry-sdk" @@ -1726,6 +1195,7 @@ version = "1.27.0" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, @@ -1742,6 +1212,7 @@ version = "0.48b0" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, @@ -1757,138 +1228,31 @@ version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] -[[package]] -name = "opentelemetry-util-http" -version = "0.48b0" -description = "Web util for OpenTelemetry" -optional = false -python-versions = ">=3.8" -files = [ - {file = "opentelemetry_util_http-0.48b0-py3-none-any.whl", hash = "sha256:76f598af93aab50328d2a69c786beaedc8b6a7770f7a818cc307eb353debfffb"}, - {file = "opentelemetry_util_http-0.48b0.tar.gz", hash = "sha256:60312015153580cc20f322e5cdc3d3ecad80a71743235bdb77716e742814623c"}, -] - [[package]] name = "orderly-set" version = "5.3.0" description = "Orderly set" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "orderly_set-5.3.0-py3-none-any.whl", hash = "sha256:c2c0bfe604f5d3d9b24e8262a06feb612594f37aa3845650548befd7772945d1"}, {file = "orderly_set-5.3.0.tar.gz", hash = "sha256:80b3d8fdd3d39004d9aad389eaa0eab02c71f0a0511ba3a6d54a935a6c6a0acc"}, ] -[[package]] -name = "orjson" -version = "3.10.15" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -optional = false -python-versions = ">=3.8" -files = [ - {file = "orjson-3.10.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:552c883d03ad185f720d0c09583ebde257e41b9521b74ff40e08b7dec4559c04"}, - {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:616e3e8d438d02e4854f70bfdc03a6bcdb697358dbaa6bcd19cbe24d24ece1f8"}, - {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c2c79fa308e6edb0ffab0a31fd75a7841bf2a79a20ef08a3c6e3b26814c8ca8"}, - {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cb85490aa6bf98abd20607ab5c8324c0acb48d6da7863a51be48505646c814"}, - {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763dadac05e4e9d2bc14938a45a2d0560549561287d41c465d3c58aec818b164"}, - {file = 
"orjson-3.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a330b9b4734f09a623f74a7490db713695e13b67c959713b78369f26b3dee6bf"}, - {file = "orjson-3.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a61a4622b7ff861f019974f73d8165be1bd9a0855e1cad18ee167acacabeb061"}, - {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd271247691574416b3228db667b84775c497b245fa275c6ab90dc1ffbbd2b3"}, - {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e4759b109c37f635aa5c5cc93a1b26927bfde24b254bcc0e1149a9fada253d2d"}, - {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e992fd5cfb8b9f00bfad2fd7a05a4299db2bbe92e6440d9dd2fab27655b3182"}, - {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f95fb363d79366af56c3f26b71df40b9a583b07bbaaf5b317407c4d58497852e"}, - {file = "orjson-3.10.15-cp310-cp310-win32.whl", hash = "sha256:f9875f5fea7492da8ec2444839dcc439b0ef298978f311103d0b7dfd775898ab"}, - {file = "orjson-3.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:17085a6aa91e1cd70ca8533989a18b5433e15d29c574582f76f821737c8d5806"}, - {file = "orjson-3.10.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4cc83960ab79a4031f3119cc4b1a1c627a3dc09df125b27c4201dff2af7eaa6"}, - {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddbeef2481d895ab8be5185f2432c334d6dec1f5d1933a9c83014d188e102cef"}, - {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e590a0477b23ecd5b0ac865b1b907b01b3c5535f5e8a8f6ab0e503efb896334"}, - {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6be38bd103d2fd9bdfa31c2720b23b5d47c6796bcb1d1b598e3924441b4298d"}, - {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ff4f6edb1578960ed628a3b998fa54d78d9bb3e2eb2cfc5c2a09732431c678d0"}, - {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0482b21d0462eddd67e7fce10b89e0b6ac56570424662b685a0d6fccf581e13"}, - {file = "orjson-3.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bb5cc3527036ae3d98b65e37b7986a918955f85332c1ee07f9d3f82f3a6899b5"}, - {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d569c1c462912acdd119ccbf719cf7102ea2c67dd03b99edcb1a3048651ac96b"}, - {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:1e6d33efab6b71d67f22bf2962895d3dc6f82a6273a965fab762e64fa90dc399"}, - {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c33be3795e299f565681d69852ac8c1bc5c84863c0b0030b2b3468843be90388"}, - {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eea80037b9fae5339b214f59308ef0589fc06dc870578b7cce6d71eb2096764c"}, - {file = "orjson-3.10.15-cp311-cp311-win32.whl", hash = "sha256:d5ac11b659fd798228a7adba3e37c010e0152b78b1982897020a8e019a94882e"}, - {file = "orjson-3.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:cf45e0214c593660339ef63e875f32ddd5aa3b4adc15e662cdb80dc49e194f8e"}, - {file = "orjson-3.10.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d11c0714fc85bfcf36ada1179400862da3288fc785c30e8297844c867d7505a"}, - {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dba5a1e85d554e3897fa9fe6fbcff2ed32d55008973ec9a2b992bd9a65d2352d"}, - {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7723ad949a0ea502df656948ddd8b392780a5beaa4c3b5f97e525191b102fff0"}, - {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fd9bc64421e9fe9bd88039e7ce8e58d4fead67ca88e3a4014b143cec7684fd4"}, - {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dadba0e7b6594216c214ef7894c4bd5f08d7c0135f4dd0145600be4fbcc16767"}, - {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48f59114fe318f33bbaee8ebeda696d8ccc94c9e90bc27dbe72153094e26f41"}, - {file = "orjson-3.10.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:035fb83585e0f15e076759b6fedaf0abb460d1765b6a36f48018a52858443514"}, - {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d13b7fe322d75bf84464b075eafd8e7dd9eae05649aa2a5354cfa32f43c59f17"}, - {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7066b74f9f259849629e0d04db6609db4cf5b973248f455ba5d3bd58a4daaa5b"}, - {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88dc3f65a026bd3175eb157fea994fca6ac7c4c8579fc5a86fc2114ad05705b7"}, - {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b342567e5465bd99faa559507fe45e33fc76b9fb868a63f1642c6bc0735ad02a"}, - {file = "orjson-3.10.15-cp312-cp312-win32.whl", hash = "sha256:0a4f27ea5617828e6b58922fdbec67b0aa4bb844e2d363b9244c47fa2180e665"}, - {file = "orjson-3.10.15-cp312-cp312-win_amd64.whl", hash = "sha256:ef5b87e7aa9545ddadd2309efe6824bd3dd64ac101c15dae0f2f597911d46eaa"}, - {file = "orjson-3.10.15-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bae0e6ec2b7ba6895198cd981b7cca95d1487d0147c8ed751e5632ad16f031a6"}, - {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f93ce145b2db1252dd86af37d4165b6faa83072b46e3995ecc95d4b2301b725a"}, - {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c203f6f969210128af3acae0ef9ea6aab9782939f45f6fe02d05958fe761ef9"}, - {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8918719572d662e18b8af66aef699d8c21072e54b6c82a3f8f6404c1f5ccd5e0"}, - {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f71eae9651465dff70aa80db92586ad5b92df46a9373ee55252109bb6b703307"}, - {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e117eb299a35f2634e25ed120c37c641398826c2f5a3d3cc39f5993b96171b9e"}, - {file = "orjson-3.10.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13242f12d295e83c2955756a574ddd6741c81e5b99f2bef8ed8d53e47a01e4b7"}, - {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7946922ada8f3e0b7b958cc3eb22cfcf6c0df83d1fe5521b4a100103e3fa84c8"}, - {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:b7155eb1623347f0f22c38c9abdd738b287e39b9982e1da227503387b81b34ca"}, - {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:208beedfa807c922da4e81061dafa9c8489c6328934ca2a562efa707e049e561"}, - {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:eca81f83b1b8c07449e1d6ff7074e82e3fd6777e588f1a6632127f286a968825"}, - {file = "orjson-3.10.15-cp313-cp313-win32.whl", hash = "sha256:c03cd6eea1bd3b949d0d007c8d57049aa2b39bd49f58b4b2af571a5d3833d890"}, - {file = "orjson-3.10.15-cp313-cp313-win_amd64.whl", hash = "sha256:fd56a26a04f6ba5fb2045b0acc487a63162a958ed837648c5781e1fe3316cfbf"}, - {file = "orjson-3.10.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5e8afd6200e12771467a1a44e5ad780614b86abb4b11862ec54861a82d677746"}, - {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da9a18c500f19273e9e104cca8c1f0b40a6470bcccfc33afcc088045d0bf5ea6"}, - {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb00b7bfbdf5d34a13180e4805d76b4567025da19a197645ca746fc2fb536586"}, - {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33aedc3d903378e257047fee506f11e0833146ca3e57a1a1fb0ddb789876c1e1"}, - {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd0099ae6aed5eb1fc84c9eb72b95505a3df4267e6962eb93cdd5af03be71c98"}, - {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c864a80a2d467d7786274fce0e4f93ef2a7ca4ff31f7fc5634225aaa4e9e98c"}, - {file = "orjson-3.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c25774c9e88a3e0013d7d1a6c8056926b607a61edd423b50eb5c88fd7f2823ae"}, - {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e78c211d0074e783d824ce7bb85bf459f93a233eb67a5b5003498232ddfb0e8a"}, - {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:43e17289ffdbbac8f39243916c893d2ae41a2ea1a9cbb060a56a4d75286351ae"}, - {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:781d54657063f361e89714293c095f506c533582ee40a426cb6489c48a637b81"}, - {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6875210307d36c94873f553786a808af2788e362bd0cf4c8e66d976791e7b528"}, - {file = "orjson-3.10.15-cp38-cp38-win32.whl", hash = "sha256:305b38b2b8f8083cc3d618927d7f424349afce5975b316d33075ef0f73576b60"}, - {file = "orjson-3.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:5dd9ef1639878cc3efffed349543cbf9372bdbd79f478615a1c633fe4e4180d1"}, - {file = "orjson-3.10.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ffe19f3e8d68111e8644d4f4e267a069ca427926855582ff01fc012496d19969"}, - {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d433bf32a363823863a96561a555227c18a522a8217a6f9400f00ddc70139ae2"}, - {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da03392674f59a95d03fa5fb9fe3a160b0511ad84b7a3914699ea5a1b3a38da2"}, - {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a63bb41559b05360ded9132032239e47983a39b151af1201f07ec9370715c82"}, - {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3766ac4702f8f795ff3fa067968e806b4344af257011858cc3d6d8721588b53f"}, - {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a1c73dcc8fadbd7c55802d9aa093b36878d34a3b3222c41052ce6b0fc65f8e8"}, - {file = "orjson-3.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b299383825eafe642cbab34be762ccff9fd3408d72726a6b2a4506d410a71ab3"}, - {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:abc7abecdbf67a173ef1316036ebbf54ce400ef2300b4e26a7b843bd446c2480"}, - {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:3614ea508d522a621384c1d6639016a5a2e4f027f3e4a1c93a51867615d28829"}, - {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:295c70f9dc154307777ba30fe29ff15c1bcc9dfc5c48632f37d20a607e9ba85a"}, - {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:63309e3ff924c62404923c80b9e2048c1f74ba4b615e7584584389ada50ed428"}, - {file = "orjson-3.10.15-cp39-cp39-win32.whl", hash = "sha256:a2f708c62d026fb5340788ba94a55c23df4e1869fec74be455e0b2f5363b8507"}, - {file = "orjson-3.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:efcf6c735c3d22ef60c4aa27a5238f1a477df85e9b15f2142f9d669beb2d13fd"}, - {file = "orjson-3.10.15.tar.gz", hash = "sha256:05ca7fe452a2e9d8d9d706a2984c95b9c2ebc5db417ce0b7a49b91d50642a23e"}, -] - -[[package]] -name = "overrides" -version = "7.7.0" -description = "A decorator to automatically detect mismatch when overriding a method." -optional = false -python-versions = ">=3.6" -files = [ - {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, - {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, -] - [[package]] name = "packaging" version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1900,6 +1264,7 @@ version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, @@ -1986,6 +1351,7 @@ version = "1.20.2" description = "parse() is the opposite of format()" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"}, {file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"}, @@ -1997,6 +1363,7 @@ version = "0.6.4" description = "Simplifies to build parse types based on the parse module" optional = false python-versions = "!=3.0.*,!=3.1.*,>=2.7" +groups = ["dev"] files = [ {file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"}, {file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"}, @@ -2007,9 +1374,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} six = ">=1.15" [package.extras] -develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", 
"tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"] +develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"] docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] -testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] +testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"] [[package]] name = "pluggy" @@ -2017,6 +1384,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -2026,48 +1394,25 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] -[[package]] -name = "posthog" -version = "3.13.0" -description = "Integrate PostHog into any python application." -optional = false -python-versions = "*" -files = [ - {file = "posthog-3.13.0-py2.py3-none-any.whl", hash = "sha256:0afd0132055a3da9c6b0ecf763e7f2ce2b66659ef16169883394d0835c30d501"}, - {file = "posthog-3.13.0.tar.gz", hash = "sha256:54e9de232459846b1686a0cfb58acb02b7ccda379d837e1eb1c3af62c3775915"}, -] - -[package.dependencies] -backoff = ">=1.10.0" -monotonic = ">=1.5" -python-dateutil = ">2.1" -requests = ">=2.7,<3.0" -six = ">=1.5" - -[package.extras] -dev = ["black", "django-stubs", "flake8", "flake8-print", "isort", "lxml", "mypy", "mypy-baseline", "pre-commit", "pydantic", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six"] -langchain = ["langchain (>=0.2.0)"] -sentry = ["django", "sentry-sdk"] -test = ["anthropic", "coverage", "django", "flake8", "freezegun (==1.5.1)", "langchain-anthropic (>=0.2.0)", "langchain-community (>=0.2.0)", "langchain-openai (>=0.2.0)", "langgraph", "mock (>=2.0.0)", "openai", "pydantic", "pylint", "pytest", "pytest-asyncio", "pytest-timeout"] - [[package]] name = "protobuf" -version = "4.25.6" +version = "5.29.3" description = "" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ - {file = "protobuf-4.25.6-cp310-abi3-win32.whl", hash = "sha256:61df6b5786e2b49fc0055f636c1e8f0aff263808bb724b95b164685ac1bcc13a"}, - {file = "protobuf-4.25.6-cp310-abi3-win_amd64.whl", hash = "sha256:b8f837bfb77513fe0e2f263250f423217a173b6d85135be4d81e96a4653bcd3c"}, - {file = "protobuf-4.25.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:6d4381f2417606d7e01750e2729fe6fbcda3f9883aa0c32b51d23012bded6c91"}, - {file = "protobuf-4.25.6-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:5dd800da412ba7f6f26d2c08868a5023ce624e1fdb28bccca2dc957191e81fb5"}, - {file = "protobuf-4.25.6-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:4434ff8bb5576f9e0c78f47c41cdf3a152c0b44de475784cd3fd170aef16205a"}, - {file = "protobuf-4.25.6-cp38-cp38-win32.whl", hash = "sha256:8bad0f9e8f83c1fbfcc34e573352b17dfce7d0519512df8519994168dc015d7d"}, - {file = "protobuf-4.25.6-cp38-cp38-win_amd64.whl", hash = 
"sha256:b6905b68cde3b8243a198268bb46fbec42b3455c88b6b02fb2529d2c306d18fc"}, - {file = "protobuf-4.25.6-cp39-cp39-win32.whl", hash = "sha256:3f3b0b39db04b509859361ac9bca65a265fe9342e6b9406eda58029f5b1d10b2"}, - {file = "protobuf-4.25.6-cp39-cp39-win_amd64.whl", hash = "sha256:6ef2045f89d4ad8d95fd43cd84621487832a61d15b49500e4c1350e8a0ef96be"}, - {file = "protobuf-4.25.6-py3-none-any.whl", hash = "sha256:07972021c8e30b870cfc0863409d033af940213e0e7f64e27fe017b929d2c9f7"}, - {file = "protobuf-4.25.6.tar.gz", hash = "sha256:f8cfbae7c5afd0d0eaccbe73267339bff605a2315860bb1ba08eb66670a9a91f"}, + {file = "protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888"}, + {file = "protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a"}, + {file = "protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e"}, + {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84"}, + {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f"}, + {file = "protobuf-5.29.3-cp38-cp38-win32.whl", hash = "sha256:84a57163a0ccef3f96e4b6a20516cedcf5bb3a95a657131c5c3ac62200d23252"}, + {file = "protobuf-5.29.3-cp38-cp38-win_amd64.whl", hash = "sha256:b89c115d877892a512f79a8114564fb435943b59067615894c3b13cd3e1fa107"}, + {file = "protobuf-5.29.3-cp39-cp39-win32.whl", hash = "sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7"}, + {file = "protobuf-5.29.3-cp39-cp39-win_amd64.whl", hash = "sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da"}, + {file = "protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f"}, + {file = "protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620"}, ] [[package]] @@ -2076,6 +1421,7 @@ version = "19.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pyarrow-19.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c318eda14f6627966997a7d8c374a87d084a94e4e38e9abbe97395c215830e0c"}, {file = "pyarrow-19.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:62ef8360ff256e960f57ce0299090fb86423afed5e46f18f1225f960e05aae3d"}, @@ -2124,37 +1470,13 @@ files = [ [package.extras] test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] -[[package]] -name = "pyasn1" -version = "0.6.1" -description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, - {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.1" -description = "A collection of ASN.1-based protocols modules" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, - {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, -] - 
-[package.dependencies] -pyasn1 = ">=0.4.6,<0.7.0" - [[package]] name = "pydantic" version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, @@ -2167,7 +1489,7 @@ typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -2175,6 +1497,7 @@ version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, @@ -2281,47 +1604,14 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" -[[package]] -name = "pygments" -version = "2.19.1" -description = "Pygments is a syntax highlighting package written in Python." -optional = false -python-versions = ">=3.8" -files = [ - {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, - {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, -] - -[package.extras] -windows-terminal = ["colorama (>=0.4.6)"] - -[[package]] -name = "pypika" -version = "0.48.9" -description = "A SQL query builder API for Python" -optional = false -python-versions = "*" -files = [ - {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, -] - -[[package]] -name = "pyproject-hooks" -version = "1.2.0" -description = "Wrappers to call pyproject.toml-based build backend hooks." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, - {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, -] - [[package]] name = "pyreadline3" version = "3.5.4" description = "A python implementation of GNU readline." 
optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, {file = "pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7"}, @@ -2336,6 +1626,7 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -2358,6 +1649,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -2376,6 +1668,7 @@ version = "1.7.0" description = "Adds the ability to retry flaky tests in CI environments" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"}, {file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"}, @@ -2393,6 +1686,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2407,6 +1701,7 @@ version = "1.0.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, @@ -2421,6 +1716,7 @@ version = "2025.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, @@ -2432,6 +1728,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2494,6 +1791,7 @@ version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files 
= [ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, @@ -2510,6 +1808,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -2613,6 +1912,7 @@ version = "1.0.4" description = "Python client for Replicate" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"}, {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"}, @@ -2630,6 +1930,7 @@ version = "2.32.3" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -2645,49 +1946,13 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] -[[package]] -name = "requests-oauthlib" -version = "2.0.0" -description = "OAuthlib authentication support for Requests." 
-optional = false -python-versions = ">=3.4" -files = [ - {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, - {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, -] - -[package.dependencies] -oauthlib = ">=3.0.0" -requests = ">=2.0.0" - -[package.extras] -rsa = ["oauthlib[signedtoken] (>=3.0.0)"] - -[[package]] -name = "rich" -version = "13.9.4" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, - {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, -] - -[package.dependencies] -markdown-it-py = ">=2.2.0" -pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - [[package]] name = "rpds-py" version = "0.22.3" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967"}, {file = "rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37"}, @@ -2794,26 +2059,13 @@ files = [ {file = "rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d"}, ] -[[package]] -name = "rsa" -version = "4.9" -description = "Pure-Python RSA implementation" -optional = false -python-versions = ">=3.6,<4" -files = [ - {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, - {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - [[package]] name = "ruff" version = "0.5.7" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, @@ -2841,30 +2093,20 @@ version = "75.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3"}, {file = "setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] -core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] - -[[package]] -name = "shellingham" -version = "1.5.4" -description = "Tool to Detect Surrounding Shell" -optional = false -python-versions = ">=3.7" -files = [ - {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, - {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, -] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", 
"jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "six" @@ -2872,6 +2114,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -2883,35 +2126,19 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "starlette" -version = "0.45.3" -description = "The little ASGI library that shines." -optional = false -python-versions = ">=3.9" -files = [ - {file = "starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d"}, - {file = "starlette-0.45.3.tar.gz", hash = "sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f"}, -] - -[package.dependencies] -anyio = ">=3.6.2,<5" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] - [[package]] name = "sympy" version = "1.13.3" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, @@ -2923,27 +2150,13 @@ mpmath = ">=1.1.0,<1.4" [package.extras] dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] -[[package]] -name = "tenacity" -version = "9.0.0" -description = "Retry code until it succeeds" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, - {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, -] - -[package.extras] -doc = ["reno", "sphinx"] -test = ["pytest", "tornado (>=4.5)", "typeguard"] - [[package]] name = "tiktoken" version = "0.9.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"}, {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"}, @@ -2991,6 +2204,7 @@ version = "0.21.0" description = "" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2"}, {file = "tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e"}, @@ -3023,6 +2237,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -3064,6 +2280,7 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -3079,29 +2296,13 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] -[[package]] -name = "typer" -version = "0.15.1" -description = "Typer, build great CLIs. Easy to code. Based on Python type hints." -optional = false -python-versions = ">=3.7" -files = [ - {file = "typer-0.15.1-py3-none-any.whl", hash = "sha256:7994fb7b8155b64d3402518560648446072864beefd44aa2dc36972a5972e847"}, - {file = "typer-0.15.1.tar.gz", hash = "sha256:a0588c0a7fa68a1978a069818657778f86abe6ff5ea6abf472f940a08bfe4f0a"}, -] - -[package.dependencies] -click = ">=8.0.0" -rich = ">=10.11.0" -shellingham = ">=1.3.0" -typing-extensions = ">=3.7.4.3" - [[package]] name = "types-jsonschema" version = "4.23.0.20241208" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"}, {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"}, @@ -3116,6 +2317,7 @@ version = "2.9.0.20241206" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, @@ -3127,6 +2329,7 @@ version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, @@ -3141,6 +2344,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -3152,6 +2356,7 @@ version = "2025.1" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["dev"] 
files = [ {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, @@ -3163,277 +2368,25 @@ version = "2.3.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "uvicorn" -version = "0.34.0" -description = "The lightning-fast ASGI server." -optional = false -python-versions = ">=3.9" -files = [ - {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, - {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, -] - -[package.dependencies] -click = ">=7.0" -colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} -h11 = ">=0.8" -httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standard\""} -python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} -pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} -typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} -uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} -watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} -websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} - -[package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[[package]] -name = "uvloop" -version = "0.21.0" -description = "Fast implementation of asyncio event loop on top of libuv" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, - {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, - {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f38b2e090258d051d68a5b14d1da7203a3c3677321cf32a95a6f4db4dd8b6f26"}, - {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c43e0f13022b998eb9b973b5e97200c8b90823454d4bc06ab33829e09fb9bb"}, - {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:10d66943def5fcb6e7b37310eb6b5639fd2ccbc38df1177262b0640c3ca68c1f"}, - {file = 
"uvloop-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:67dd654b8ca23aed0a8e99010b4c34aca62f4b7fce88f39d452ed7622c94845c"}, - {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8"}, - {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0"}, - {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e"}, - {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb"}, - {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6"}, - {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d"}, - {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c"}, - {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2"}, - {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d"}, - {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc"}, - {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb"}, - {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f"}, - {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281"}, - {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af"}, - {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6"}, - {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816"}, - {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc"}, - {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553"}, - {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:17df489689befc72c39a08359efac29bbee8eee5209650d4b9f34df73d22e414"}, - {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc09f0ff191e61c2d592a752423c767b4ebb2986daa9ed62908e2b1b9a9ae206"}, - {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0ce1b49560b1d2d8a2977e3ba4afb2414fb46b86a1b64056bc4ab929efdafbe"}, - {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e678ad6fe52af2c58d2ae3c73dc85524ba8abe637f134bf3564ed07f555c5e79"}, 
- {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:460def4412e473896ef179a1671b40c039c7012184b627898eea5072ef6f017a"}, - {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:10da8046cc4a8f12c91a1c39d1dd1585c41162a15caaef165c2174db9ef18bdc"}, - {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c097078b8031190c934ed0ebfee8cc5f9ba9642e6eb88322b9958b649750f72b"}, - {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46923b0b5ee7fc0020bef24afe7836cb068f5050ca04caf6b487c513dc1a20b2"}, - {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53e420a3afe22cdcf2a0f4846e377d16e718bc70103d7088a4f7623567ba5fb0"}, - {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb67cdbc0e483da00af0b2c3cdad4b7c61ceb1ee0f33fe00e09c81e3a6cb75"}, - {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:221f4f2a1f46032b403bf3be628011caf75428ee3cc204a22addf96f586b19fd"}, - {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2d1f581393673ce119355d56da84fe1dd9d2bb8b3d13ce792524e1607139feff"}, - {file = "uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3"}, -] - -[package.extras] -dev = ["Cython (>=3.0,<4.0)", "setuptools (>=60)"] -docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] -test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] - -[[package]] -name = "watchfiles" -version = "1.0.4" -description = "Simple, modern and high performance file watching and code reload in python." 
-optional = false -python-versions = ">=3.9" -files = [ - {file = "watchfiles-1.0.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08"}, - {file = "watchfiles-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899"}, - {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff"}, - {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f"}, - {file = "watchfiles-1.0.4-cp310-cp310-win32.whl", hash = "sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f"}, - {file = "watchfiles-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161"}, - {file = "watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19"}, - {file = "watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c"}, - {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1"}, - {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226"}, - {file = "watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105"}, - {file = "watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74"}, - {file = "watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3"}, - {file = "watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2"}, - {file = "watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a"}, - {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff"}, - {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e"}, - {file = "watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94"}, - {file = "watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c"}, - {file = "watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90"}, - {file = "watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9"}, - {file = "watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b"}, - {file = 
"watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902"}, - {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1"}, - {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303"}, - {file = "watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80"}, - {file = "watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc"}, - {file = "watchfiles-1.0.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21"}, - {file = "watchfiles-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf"}, - {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a"}, - {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b"}, - {file = "watchfiles-1.0.4-cp39-cp39-win32.whl", hash = "sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27"}, - {file = "watchfiles-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42"}, - {file = "watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205"}, -] - -[package.dependencies] -anyio = ">=3.0.0" - -[[package]] -name = "websocket-client" -version = "1.8.0" -description = "WebSocket client for Python with low level API options" -optional = false -python-versions = ">=3.8" -files = [ - {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, - {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, -] - -[package.extras] -docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] -optional = ["python-socks", "wsaccel"] -test = ["websockets"] - -[[package]] -name = "websockets" -version = "15.0" -description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -optional = false -python-versions = ">=3.9" -files = [ - {file = "websockets-15.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5e6ee18a53dd5743e6155b8ff7e8e477c25b29b440f87f65be8165275c87fef0"}, - {file = "websockets-15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ee06405ea2e67366a661ed313e14cf2a86e84142a3462852eb96348f7219cee3"}, - {file = "websockets-15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8711682a629bbcaf492f5e0af72d378e976ea1d127a2d47584fa1c2c080b436b"}, - {file = "websockets-15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94c4a9b01eede952442c088d415861b0cf2053cbd696b863f6d5022d4e4e2453"}, - {file = "websockets-15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:45535fead66e873f411c1d3cf0d3e175e66f4dd83c4f59d707d5b3e4c56541c4"}, - {file = "websockets-15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e389efe46ccb25a1f93d08c7a74e8123a2517f7b7458f043bd7529d1a63ffeb"}, - {file = "websockets-15.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:67a04754d121ea5ca39ddedc3f77071651fb5b0bc6b973c71c515415b44ed9c5"}, - {file = "websockets-15.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:bd66b4865c8b853b8cca7379afb692fc7f52cf898786537dfb5e5e2d64f0a47f"}, - {file = "websockets-15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a4cc73a6ae0a6751b76e69cece9d0311f054da9b22df6a12f2c53111735657c8"}, - {file = "websockets-15.0-cp310-cp310-win32.whl", hash = "sha256:89da58e4005e153b03fe8b8794330e3f6a9774ee9e1c3bd5bc52eb098c3b0c4f"}, - {file = "websockets-15.0-cp310-cp310-win_amd64.whl", hash = "sha256:4ff380aabd7a74a42a760ee76c68826a8f417ceb6ea415bd574a035a111fd133"}, - {file = "websockets-15.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:dd24c4d256558429aeeb8d6c24ebad4e982ac52c50bc3670ae8646c181263965"}, - {file = "websockets-15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:f83eca8cbfd168e424dfa3b3b5c955d6c281e8fc09feb9d870886ff8d03683c7"}, - {file = "websockets-15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4095a1f2093002c2208becf6f9a178b336b7572512ee0a1179731acb7788e8ad"}, - {file = "websockets-15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb915101dfbf318486364ce85662bb7b020840f68138014972c08331458d41f3"}, - {file = "websockets-15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:45d464622314973d78f364689d5dbb9144e559f93dca11b11af3f2480b5034e1"}, - {file = "websockets-15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace960769d60037ca9625b4c578a6f28a14301bd2a1ff13bb00e824ac9f73e55"}, - {file = "websockets-15.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7cd4b1015d2f60dfe539ee6c95bc968d5d5fad92ab01bb5501a77393da4f596"}, - {file = "websockets-15.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4f7290295794b5dec470867c7baa4a14182b9732603fd0caf2a5bf1dc3ccabf3"}, - {file = "websockets-15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3abd670ca7ce230d5a624fd3d55e055215d8d9b723adee0a348352f5d8d12ff4"}, - {file = "websockets-15.0-cp311-cp311-win32.whl", hash = "sha256:110a847085246ab8d4d119632145224d6b49e406c64f1bbeed45c6f05097b680"}, - {file = "websockets-15.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7bbbe2cd6ed80aceef2a14e9f1c1b61683194c216472ed5ff33b700e784e37"}, - {file = "websockets-15.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cccc18077acd34c8072578394ec79563664b1c205f7a86a62e94fafc7b59001f"}, - {file = "websockets-15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d4c22992e24f12de340ca5f824121a5b3e1a37ad4360b4e1aaf15e9d1c42582d"}, - {file = "websockets-15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1206432cc6c644f6fc03374b264c5ff805d980311563202ed7fef91a38906276"}, - {file = "websockets-15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d3cc75ef3e17490042c47e0523aee1bcc4eacd2482796107fd59dd1100a44bc"}, - {file = "websockets-15.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b89504227a5311610e4be16071465885a0a3d6b0e82e305ef46d9b064ce5fb72"}, - {file = "websockets-15.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56e3efe356416bc67a8e093607315951d76910f03d2b3ad49c4ade9207bf710d"}, - {file = "websockets-15.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0f2205cdb444a42a7919690238fb5979a05439b9dbb73dd47c863d39640d85ab"}, - {file = "websockets-15.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:aea01f40995fa0945c020228ab919b8dfc93fc8a9f2d3d705ab5b793f32d9e99"}, - {file = "websockets-15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9f8e33747b1332db11cf7fcf4a9512bef9748cb5eb4d3f7fbc8c30d75dc6ffc"}, - {file = "websockets-15.0-cp312-cp312-win32.whl", hash = "sha256:32e02a2d83f4954aa8c17e03fe8ec6962432c39aca4be7e8ee346b05a3476904"}, - {file = "websockets-15.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffc02b159b65c05f2ed9ec176b715b66918a674bd4daed48a9a7a590dd4be1aa"}, - {file = "websockets-15.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d2244d8ab24374bed366f9ff206e2619345f9cd7fe79aad5225f53faac28b6b1"}, - {file = "websockets-15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:3a302241fbe825a3e4fe07666a2ab513edfdc6d43ce24b79691b45115273b5e7"}, - {file = "websockets-15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:10552fed076757a70ba2c18edcbc601c7637b30cdfe8c24b65171e824c7d6081"}, - {file = "websockets-15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c53f97032b87a406044a1c33d1e9290cc38b117a8062e8a8b285175d7e2f99c9"}, - {file = "websockets-15.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1caf951110ca757b8ad9c4974f5cac7b8413004d2f29707e4d03a65d54cedf2b"}, - {file = "websockets-15.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bf1ab71f9f23b0a1d52ec1682a3907e0c208c12fef9c3e99d2b80166b17905f"}, - {file = "websockets-15.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bfcd3acc1a81f106abac6afd42327d2cf1e77ec905ae11dc1d9142a006a496b6"}, - {file = "websockets-15.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c8c5c8e1bac05ef3c23722e591ef4f688f528235e2480f157a9cfe0a19081375"}, - {file = "websockets-15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:86bfb52a9cfbcc09aba2b71388b0a20ea5c52b6517c0b2e316222435a8cdab72"}, - {file = "websockets-15.0-cp313-cp313-win32.whl", hash = "sha256:26ba70fed190708551c19a360f9d7eca8e8c0f615d19a574292b7229e0ae324c"}, - {file = "websockets-15.0-cp313-cp313-win_amd64.whl", hash = "sha256:ae721bcc8e69846af00b7a77a220614d9b2ec57d25017a6bbde3a99473e41ce8"}, - {file = "websockets-15.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c348abc5924caa02a62896300e32ea80a81521f91d6db2e853e6b1994017c9f6"}, - {file = "websockets-15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5294fcb410ed0a45d5d1cdedc4e51a60aab5b2b3193999028ea94afc2f554b05"}, - {file = "websockets-15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c24ba103ecf45861e2e1f933d40b2d93f5d52d8228870c3e7bf1299cd1cb8ff1"}, - {file = "websockets-15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc8821a03bcfb36e4e4705316f6b66af28450357af8a575dc8f4b09bf02a3dee"}, - {file = "websockets-15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc5ae23ada6515f31604f700009e2df90b091b67d463a8401c1d8a37f76c1d7"}, - {file = "websockets-15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ac67b542505186b3bbdaffbc303292e1ee9c8729e5d5df243c1f20f4bb9057e"}, - {file = "websockets-15.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c86dc2068f1c5ca2065aca34f257bbf4f78caf566eb230f692ad347da191f0a1"}, - {file = "websockets-15.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:30cff3ef329682b6182c01c568f551481774c476722020b8f7d0daacbed07a17"}, - {file = "websockets-15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:98dcf978d4c6048965d1762abd534c9d53bae981a035bfe486690ba11f49bbbb"}, - {file = "websockets-15.0-cp39-cp39-win32.whl", hash = "sha256:37d66646f929ae7c22c79bc73ec4074d6db45e6384500ee3e0d476daf55482a9"}, - {file = "websockets-15.0-cp39-cp39-win_amd64.whl", hash = "sha256:24d5333a9b2343330f0f4eb88546e2c32a7f5c280f8dd7d3cc079beb0901781b"}, - {file = "websockets-15.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b499caef4bca9cbd0bd23cd3386f5113ee7378094a3cb613a2fa543260fe9506"}, - {file = "websockets-15.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:17f2854c6bd9ee008c4b270f7010fe2da6c16eac5724a175e75010aacd905b31"}, - {file = "websockets-15.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89f72524033abbfde880ad338fd3c2c16e31ae232323ebdfbc745cbb1b3dcc03"}, - {file = "websockets-15.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1657a9eecb29d7838e3b415458cc494e6d1b194f7ac73a34aa55c6fb6c72d1f3"}, - {file = "websockets-15.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e413352a921f5ad5d66f9e2869b977e88d5103fc528b6deb8423028a2befd842"}, - {file = "websockets-15.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8561c48b0090993e3b2a54db480cab1d23eb2c5735067213bb90f402806339f5"}, - {file = "websockets-15.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:190bc6ef8690cd88232a038d1b15714c258f79653abad62f7048249b09438af3"}, - {file = "websockets-15.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:327adab7671f3726b0ba69be9e865bba23b37a605b585e65895c428f6e47e766"}, - {file = "websockets-15.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bd8ef197c87afe0a9009f7a28b5dc613bfc585d329f80b7af404e766aa9e8c7"}, - {file = "websockets-15.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:789c43bf4a10cd067c24c321238e800b8b2716c863ddb2294d2fed886fa5a689"}, - {file = "websockets-15.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7394c0b7d460569c9285fa089a429f58465db930012566c03046f9e3ab0ed181"}, - {file = "websockets-15.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ea4f210422b912ebe58ef0ad33088bc8e5c5ff9655a8822500690abc3b1232d"}, - {file = "websockets-15.0-py3-none-any.whl", hash = "sha256:51ffd53c53c4442415b613497a34ba0aa7b99ac07f1e4a62db5dcd640ae6c3c3"}, - {file = "websockets-15.0.tar.gz", hash = "sha256:ca36151289a15b39d8d683fd8b7abbe26fc50be311066c5f8dcf3cb8cee107ab"}, -] - [[package]] name = "wrapt" version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." 
 optional = false
 python-versions = ">=3.8"
+groups = ["main"]
 files = [
     {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
     {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -3522,20 +2475,21 @@ version = "3.21.0"
 description = "Backport of pathlib-compatible object wrapper for zip files"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
     {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
 ]

 [package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
 cover = ["pytest-cov"]
 doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
 enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
 type = ["pytest-mypy"]

 [metadata]
-lock-version = "2.0"
+lock-version = "2.1"
 python-versions = ">=3.9,<4"
-content-hash = "a4ea51a34af494df07d810d923e6856a0418fb138c6357ba0cf8358713440219"
+content-hash = "7a32deefe3fb6b3f77e1463856a38a80855375cbd0bc707e5713ac7bd35406bb"
diff --git a/pyproject.toml b/pyproject.toml
index 5ea42ae0..0f94f284 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,6 +51,7 @@ parse = ">=1"
 pydantic = ">= 1.9.2"
 pydantic-core = "^2.18.2"
 typing_extensions = ">= 4.0.0"
+opentelemetry-proto = "^1.30.0"

 [tool.poetry.dev-dependencies]
 mypy = "1.0.1"
@@ -59,7 +60,6 @@ pytest-asyncio = "^0.23.5"
 python-dateutil = "^2.9.0"
 types-python-dateutil = "^2.9.0.20240316"
 anthropic = ">=0.37.1"
-chromadb = "^0.6.3"
 cohere = "^5.11.2"
 groq = ">=0.11.0"
 jsonschema = "^4.23.0"
diff --git a/src/humanloop/context.py b/src/humanloop/context.py
index 68894156..f7a001ab 100644
--- a/src/humanloop/context.py
+++ b/src/humanloop/context.py
@@ -1,6 +1,7 @@
+from contextlib import contextmanager
 from dataclasses import dataclass
 import threading
-from typing import Callable, Optional
+from typing import Callable, Generator, Optional
 from opentelemetry import context as context_api

 from humanloop.otel.constants import (
@@ -10,20 +11,16 @@
 )

-ResetToken = object
-
-
 def get_trace_id() -> Optional[str]:
     key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
     return context_api.get_value(key=key)


-def set_trace_id(flow_log_id: str) -> ResetToken:
+@contextmanager
+def set_trace_id(flow_log_id: str) -> Generator[None, None, None]:
     key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
-    return context_api.attach(context_api.set_value(key=key, value=flow_log_id))
-
-
-def reset_trace_id_context(token: ResetToken):
+    token = context_api.attach(context_api.set_value(key=key, value=flow_log_id))
+    yield
     context_api.detach(token=token)
@@ -33,18 +30,17 @@ class PromptContext:
     template: Optional[str]


-def set_prompt_context(prompt_context: PromptContext) -> ResetToken:
+@contextmanager
+def set_prompt_context(prompt_context: PromptContext) -> Generator[None, None, None]:
     key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident()))
-    return context_api.attach(
+    reset_token = context_api.attach(
         context_api.set_value(
             key=key,
             value=prompt_context,
         )
     )
-
-
-def reset_prompt_context(token: ResetToken):
-    context_api.detach(token=token)
+    yield
+    context_api.detach(token=reset_token)


 def get_prompt_context() -> Optional[PromptContext]:
@@ -52,18 +48,36 @@ def get_prompt_context() -> Optional[PromptContext]:
     return context_api.get_value(key)

-@dataclass
 class EvaluationContext:
     source_datapoint_id: str
     run_id: str
     callback: Callable[[str], None]
     file_id: str
     path: str
-
-
-def set_evaluation_context(evaluation_context: EvaluationContext) -> ResetToken:
+    logging_counter: int
+
+    def __init__(
+        self,
+        source_datapoint_id: str,
+        run_id: str,
+        callback: Callable[[str], None],
+        file_id: str,
+        path: str,
+    ):
+        self.source_datapoint_id = source_datapoint_id
+        self.run_id = run_id
+        self.callback = callback
+        self.file_id = file_id
+        self.path = path
+        self.logging_counter = 0
+
+
+@contextmanager
+def set_evaluation_context(evaluation_context: EvaluationContext) -> Generator[None, None, None]:
     key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
-    return context_api.attach(context_api.set_value(key, evaluation_context))
+    reset_token = context_api.attach(context_api.set_value(key, evaluation_context))
+    yield
+    context_api.detach(token=reset_token)


 def get_evaluation_context() -> Optional[EvaluationContext]:
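One caveat with the context managers introduced above: `context_api.detach` runs only when the body of the `with` block completes normally. If the wrapped user code raises, the attached value stays on the thread's context. A minimal defensive variant, assuming the same attach/detach semantics as `set_trace_id` above (a sketch, not part of the patch):

@contextmanager
def set_trace_id(flow_log_id: str) -> Generator[None, None, None]:
    key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
    token = context_api.attach(context_api.set_value(key=key, value=flow_log_id))
    try:
        yield
    finally:
        # Detach even if the decorated function raises, so a stale trace id
        # cannot leak into later work scheduled on the same thread.
        context_api.detach(token=token)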
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index c55579a2..57ea95be 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -167,7 +167,7 @@ def upload_callback(log_id: str):
         )

         # Set the Evaluation Context for current datapoint
-        set_evaluation_context(
+        with set_evaluation_context(
             EvaluationContext(
                 source_datapoint_id=dp.id,
                 callback=upload_callback,
@@ -175,38 +175,37 @@
                 run_id=run.id,
                 path=hl_file.path,
             )
-        )
-
-        log_func = _get_log_func(
-            client=client,
-            file_type=hl_file.type,
-            file_id=hl_file.id,
-            version_id=hl_file.version_id,
-            run_id=run.id,
-        )
-        start_time = datetime.now()
-        try:
-            output = _call_function(function_, hl_file.type, dp)
-            if not _callable_is_decorated(file):
-                # function_ is a plain callable so we need to create a Log
+        ):
+            log_func = _get_log_func(
+                client=client,
+                file_type=hl_file.type,
+                file_id=hl_file.id,
+                version_id=hl_file.version_id,
+                run_id=run.id,
+            )
+            start_time = datetime.now()
+            try:
+                output = _call_function(function_, hl_file.type, dp)
+                if not _callable_is_decorated(file):
+                    # function_ is a plain callable so we need to create a Log
+                    log_func(
+                        inputs=dp.inputs,
+                        output=output,
+                        start_time=start_time,
+                        end_time=datetime.now(),
+                    )
+            except Exception as e:
                 log_func(
                     inputs=dp.inputs,
-                    output=output,
+                    error=str(e),
+                    source_datapoint_id=dp.id,
+                    run_id=run.id,
                     start_time=start_time,
                     end_time=datetime.now(),
                 )
-        except Exception as e:
-            log_func(
-                inputs=dp.inputs,
-                error=str(e),
-                source_datapoint_id=dp.id,
-                run_id=run.id,
-                start_time=start_time,
-                end_time=datetime.now(),
-            )
-            logger.warning(
-                msg=f"\nYour {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}"
-            )
+                logger.warning(
+                    msg=f"\nYour {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}"
+                )

     with ThreadPoolExecutor(max_workers=workers) as executor:
         for datapoint in hl_dataset.datapoints:
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 20c277f0..9b44a4a8 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -17,6 +17,14 @@
     HUMANLOOP_PATH_KEY,
 )
 from humanloop.otel.helpers import read_from_opentelemetry_span, write_to_opentelemetry_span
+from opentelemetry.proto.common.v1.common_pb2 import AnyValue, InstrumentationScope, KeyValue
+from opentelemetry.proto.trace.v1.trace_pb2 import (
+    TracesData,
+    ResourceSpans,
+    ScopeSpans,
+    Span as ProtoBufferSpan,
+)

 if typing.TYPE_CHECKING:
     from humanloop.client import Humanloop
@@ -118,8 +126,50 @@ def _do_work(self):
                 value=log_args,
             )

+            # Build the OTLP payload for the span. InstrumentationScope and the
+            # AnyValue wrapper live in common_pb2, links are the nested Span.Link
+            # message, and trace/span ids are raw big-endian bytes.
+            payload = TracesData(
+                resource_spans=[
+                    ResourceSpans(
+                        scope_spans=[
+                            ScopeSpans(
+                                scope=InstrumentationScope(
+                                    name="humanloop-otel",
+                                    version="0.1.0",
+                                ),
+                                spans=[
+                                    ProtoBufferSpan(
+                                        trace_id=span_to_export.context.trace_id.to_bytes(16, "big"),
+                                        span_id=span_to_export.context.span_id.to_bytes(8, "big"),
+                                        name=span_to_export.name,
+                                        kind=span_to_export.kind.value + 1,  # SDK SpanKind is offset by one from the OTLP enum
+                                        start_time_unix_nano=span_to_export.start_time,
+                                        end_time_unix_nano=span_to_export.end_time,
+                                        attributes=[
+                                            KeyValue(
+                                                key=key,
+                                                value=AnyValue(string_value=str(value)),
+                                            )
+                                            for key, value in span_to_export.attributes.items()
+                                        ],
+                                        dropped_attributes_count=span_to_export.dropped_attributes,
+                                        dropped_events_count=span_to_export.dropped_events,
+                                        dropped_links_count=span_to_export.dropped_links,
+                                        links=[
+                                            ProtoBufferSpan.Link(
+                                                trace_id=link.context.trace_id.to_bytes(16, "big"),
+                                                span_id=link.context.span_id.to_bytes(8, "big"),
+                                                attributes=[
+                                                    KeyValue(key=k, value=AnyValue(string_value=str(v)))
+                                                    for k, v in (link.attributes or {}).items()
+                                                ],
+                                            )
+                                            for link in span_to_export.links
+                                        ],
+                                        events=[],
+                                    )
+                                ],
+                            )
+                        ],
+                    )
+                ]
+            )
+
             response = requests.post(
-                f"{self._client._client_wrapper.get_base_url()}/import/otel",
+                f"{self._client._client_wrapper.get_base_url()}/import/otel/v1/traces",
                 headers=self._client._client_wrapper.get_headers(),
-                data=span_to_export.to_json().encode("ascii"),
+                data=payload.SerializeToString(),
             )
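For context on the payload the exporter builds above: `TracesData` serializes to the binary OTLP wire format via `SerializeToString()`, which is what an OTLP/HTTP traces endpoint consumes, and it round-trips losslessly through `FromString`. A self-contained sketch with dummy values (the scope name and attribute below are illustrative, not part of the SDK):

from opentelemetry.proto.common.v1.common_pb2 import AnyValue, InstrumentationScope, KeyValue
from opentelemetry.proto.trace.v1.trace_pb2 import ResourceSpans, ScopeSpans, Span, TracesData

payload = TracesData(
    resource_spans=[
        ResourceSpans(
            scope_spans=[
                ScopeSpans(
                    scope=InstrumentationScope(name="humanloop-otel", version="0.1.0"),
                    spans=[
                        Span(
                            trace_id=(1).to_bytes(16, "big"),  # proto ids are raw bytes, not ints
                            span_id=(2).to_bytes(8, "big"),
                            name="example-span",
                            attributes=[KeyValue(key="file_type", value=AnyValue(string_value="prompt"))],
                        )
                    ],
                )
            ]
        )
    ]
)

wire = payload.SerializeToString()  # binary body for the POST to the OTLP endpoint
assert TracesData.FromString(wire) == payload  # lossless round-trip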
diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py
index fff55067..c0cf0278 100644
--- a/src/humanloop/utilities/flow.py
+++ b/src/humanloop/utilities/flow.py
@@ -1,6 +1,6 @@
 import logging
 from functools import wraps
-from typing import Any, Callable, Mapping, Optional, Sequence, TypeVar
+from typing import Any, Callable, Optional, TypeVar
 from typing_extensions import ParamSpec

 from opentelemetry.trace import Span, Tracer
@@ -67,56 +67,54 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
             #     **log_inputs,
             #     log_status="incomplete",
             # )
-            token = set_trace_id(init_log["id"])
-
-            span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path)
-            span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)
-
-            func_output: Optional[R]
-            log_output: str
-            log_error: Optional[str]
-            log_output_message: ChatMessage
-            try:
-                func_output = func(*args, **kwargs)
-                if (
-                    isinstance(func_output, dict)
-                    and len(func_output.keys()) == 2
-                    and "role" in func_output
-                    and "content" in func_output
-                ):
-                    log_output_message = ChatMessage(**func_output)
-                    log_output = None
-                else:
-                    log_output = process_output(func=func, output=func_output)
-                    log_output_message = None
-                log_error = None
-            except Exception as e:
-                logger.error(f"Error calling {func.__name__}: {e}")
-                output = None
-                log_output_message = None
-                log_error = str(e)
-
-            flow_log = {
-                "inputs": {k: v for k, v in args_to_func.items() if k != "messages"},
-                "messages": args_to_func.get("messages"),
-                "log_status": "complete",
-                "output": log_output,
-                "error": log_error,
-                "output_message": log_output_message,
-                "id": init_log["id"],
-            }
-
-            # Write the Flow Log to the Span on HL_LOG_OT_KEY
-            if flow_log:
-                write_to_opentelemetry_span(
-                    span=span,
-                    key=HUMANLOOP_LOG_KEY,
-                    value=flow_log,  # type: ignore
-                )
-
-            context_api.detach(token=token)
-            # Return the output of the decorated function
-            return output
+            with set_trace_id(init_log["id"]):
+                span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path)
+                span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)
+
+                func_output: Optional[R]
+                log_output: str
+                log_error: Optional[str]
+                log_output_message: ChatMessage
+                try:
+                    func_output = func(*args, **kwargs)
+                    if (
+                        isinstance(func_output, dict)
+                        and len(func_output.keys()) == 2
+                        and "role" in func_output
+                        and "content" in func_output
+                    ):
+                        log_output_message = ChatMessage(**func_output)
+                        log_output = None
+                    else:
+                        log_output = process_output(func=func, output=func_output)
+                        log_output_message = None
+                    log_error = None
+                except Exception as e:
+                    logger.error(f"Error calling {func.__name__}: {e}")
+                    func_output = None
+                    log_output = None
+                    log_output_message = None
+                    log_error = str(e)
+
+                flow_log = {
+                    "inputs": {k: v for k, v in args_to_func.items() if k != "messages"},
+                    "messages": args_to_func.get("messages"),
+                    "log_status": "complete",
+                    "output": log_output,
+                    "error": log_error,
+                    "output_message": log_output_message,
+                    "id": init_log["id"],
+                }
+
+                # Write the Flow Log to the Span on HL_LOG_OT_KEY
+                if flow_log:
+                    write_to_opentelemetry_span(
+                        span=span,
+                        key=HUMANLOOP_LOG_KEY,
+                        value=flow_log,  # type: ignore
+                    )
+
+            # Return the output of the decorated function
+            return func_output

         wrapper.file = File(  # type: ignore
             path=decorator_path,
diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py
index 77515ea3..8b52dd2f 100644
--- a/src/humanloop/utilities/prompt.py
+++ b/src/humanloop/utilities/prompt.py
@@ -4,7 +4,7 @@

 from typing import Callable, Optional

-from humanloop.context import PromptContext, reset_prompt_context, set_prompt_context
+from humanloop.context import PromptContext, set_prompt_context

 logger = logging.getLogger("humanloop.sdk")

@@ -13,15 +13,14 @@ def prompt_decorator_factory(path: str, template: Optional[str]):
     def decorator(func: Callable):
         @wraps(func)
         def wrapper(*args, **kwargs):
-            token = set_prompt_context(
+            with set_prompt_context(
                 PromptContext(
                     path=path,
                     template=template,
                 )
-            )
-            output = func(*args, **kwargs)
-            reset_prompt_context(token=token)
-            return output
+            ):
+                output = func(*args, **kwargs)
+            return output

         return wrapper
diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/utilities/tool.py
index d6f65474..4314808f 100644
--- a/src/humanloop/utilities/tool.py
+++ b/src/humanloop/utilities/tool.py
@@ -66,10 +66,11 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
             span.set_attribute(HUMANLOOP_PATH_KEY, path)
             span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)

-            func_output: Optional[R]
-            log_output: str
-            log_error: Optional[str]
             log_inputs: dict[str, Any] = bind_args(func, args, kwargs)
+            log_error: Optional[str]
+            log_output: str
+
+            func_output: Optional[R]
             try:
                 func_output = func(*args, **kwargs)
                 log_output = process_output(
@@ -102,7 +103,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
             )

             # Return the output of the decorated function
-            return output
+            return func_output

         wrapper.file = File(  # type: ignore
             path=path,
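A note on the two `return output` fixes above: `output` is never assigned on the success path of either wrapper, so the decorated function's result was lost or a `NameError` raised; returning `func_output` restores it. The ChatMessage detection in flow.py can also be read as a small predicate; a hypothetical helper, not part of the SDK:

def looks_like_chat_message(value: object) -> bool:
    # A Flow callable may return a ChatMessage-shaped dict; exactly the keys
    # {"role", "content"} qualify, anything else goes through process_output().
    return isinstance(value, dict) and set(value.keys()) == {"role", "content"}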
wrapper.file = File( # type: ignore path=path, From 02e1ed4559c6707ec4167f27ae8deccc04cbc6c7 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Mon, 3 Mar 2025 10:38:40 +0000 Subject: [PATCH 07/14] QA refactor sdk with evals --- poetry.lock | 2 +- pyproject.toml | 3 + src/humanloop/client.py | 1 - src/humanloop/eval_utils/run.py | 274 +++++++++++++++++++++----------- src/humanloop/otel/exporter.py | 113 +++++++------ src/humanloop/otel/processor.py | 9 +- src/humanloop/utilities/flow.py | 18 +-- 7 files changed, 270 insertions(+), 150 deletions(-) diff --git a/poetry.lock b/poetry.lock index eaa1b5ec..0d9a93c7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2492,4 +2492,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4" -content-hash = "7a32deefe3fb6b3f77e1463856a38a80855375cbd0bc707e5713ac7bd35406bb" +content-hash = "8968de9bd0b7af7b55c9aa9799f974a083c39b0e3af8c352e9f8a31cbdbbefcb" diff --git a/pyproject.toml b/pyproject.toml index 0f94f284..b3b854d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,9 @@ packages = [ { include = "humanloop", from = "src"} ] +[tool.poetry.group.dev.dependencies] +protobuf = "^5.29.3" + [project.urls] Repository = 'https://github.com/humanloop/humanloop-python' diff --git a/src/humanloop/client.py b/src/humanloop/client.py index e4b15954..2cf90959 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -142,7 +142,6 @@ def __init__( else: self._opentelemetry_tracer = opentelemetry_tracer - @contextmanager def prompt( self, *, diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py index 57ea95be..20817745 100644 --- a/src/humanloop/eval_utils/run.py +++ b/src/humanloop/eval_utils/run.py @@ -13,22 +13,40 @@ import inspect import json import logging +import signal import sys import threading import time import typing -from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime from functools import partial from logging import INFO -from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union +from typing import ( + Callable, + Dict, + List, + Literal, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse -from humanloop.context import EvaluationContext, set_evaluation_context from humanloop.core.api_error import ApiError +from humanloop.context import ( + EvaluationContext, + get_evaluation_context, + set_evaluation_context, +) from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File # We use TypedDicts for requests, which is consistent with the rest of the SDK +from humanloop.evaluators.client import EvaluatorsClient +from humanloop.flows.client import FlowsClient +from humanloop.prompts.client import PromptsClient from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator from humanloop.requests import FlowKernelRequestParams as FlowDict @@ -36,6 +54,7 @@ from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict from humanloop.requests import PromptKernelRequestParams as PromptDict from humanloop.requests import ToolKernelRequestParams as ToolDict +from humanloop.tools.client import ToolsClient from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats from humanloop.types import DatapointResponse as Datapoint from 
humanloop.types import EvaluationResponse, EvaluationStats @@ -77,6 +96,9 @@ RESET = "\033[0m" +CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient) + + class HumanloopUtilityError(Exception): def __init__(self, message): self.message = message @@ -104,10 +126,6 @@ def run_eval( :param workers: the number of threads to process datapoints using your function concurrently. :return: per Evaluator checks. """ - if workers > 32: - logger.warning("Too many workers requested, capping the number to 32.") - workers = min(workers, 32) - evaluators_worker_pool = ThreadPoolExecutor(max_workers=workers) file_ = _file_or_file_inside_hl_utility(file) @@ -133,12 +151,22 @@ def run_eval( function=function_, ) - # Header of the CLI Report - logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n") - logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}") - logger.info(f"{CYAN}Run ID: {run.id}{RESET}") + def handle_exit_signal(signum, frame): + client.evaluations.update_evaluation_run( + id=evaluation.id, + run_id=run.id, + status="cancelled", + ) + evaluators_worker_pool.shutdown(wait=False) + sys.exit(signum) - _PROGRESS_BAR = _SimpleProgressBar(len(hl_dataset.datapoints)) + signal.signal(signal.SIGINT, handle_exit_signal) + signal.signal(signal.SIGTERM, handle_exit_signal) + + # Header of the CLI Report + sys.stdout.write(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n\n") + sys.stdout.write(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}\n") + sys.stdout.write(f"{CYAN}Run ID: {run.id}{RESET}\n") # This will apply apply the local callable to each datapoint # and log the results to Humanloop @@ -146,13 +174,17 @@ def run_eval( # Generate locally if a file `callable` is provided if function_ is None: # TODO: trigger run when updated API is available - logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}") + sys.stdout.write(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}\n") else: # Running the evaluation locally - logger.info( - f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} " + sys.stdout.write( + f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers...{RESET}\n\n" ) + _PROGRESS_BAR = _SimpleProgressBar(len(hl_dataset.datapoints)) + + if function_ is not None: + # Generate locally if a file `callable` is provided def _process_datapoint(dp: Datapoint): def upload_callback(log_id: str): """Logic ran after the Log has been created.""" @@ -186,13 +218,17 @@ def upload_callback(log_id: str): start_time = datetime.now() try: output = _call_function(function_, hl_file.type, dp) - if not _callable_is_decorated(file): - # function_ is a plain callable so we need to create a Log + evaluation_context = get_evaluation_context() + if not evaluation_context.logging_counter == 0: + # function_ did not Log against the source_datapoint_id/ run_id pair + # so we need to create a Log log_func( inputs=dp.inputs, output=output, start_time=start_time, end_time=datetime.now(), + source_datapoint_id=dp.id, + run_id=run.id, ) except Exception as e: log_func( @@ -203,31 +239,46 @@ def upload_callback(log_id: str): start_time=start_time, end_time=datetime.now(), ) - logger.warning( - msg=f"\nYour {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. 
\n Error: {str(e)}" - ) + error_message = str(e).replace("\n", " ") + if len(error_message) > 100: + sys.stderr.write( + f"\n{RED}Your {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. Error: {error_message[:100]}...{RESET}\n" + ) + else: + sys.stderr.write( + f"\n{RED}Your {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. Error: {error_message}{RESET}\n" + ) with ThreadPoolExecutor(max_workers=workers) as executor: + futures = [] for datapoint in hl_dataset.datapoints: - executor.submit(_process_datapoint, datapoint) + futures.append(executor.submit(_process_datapoint, datapoint)) + # Program hangs if any uncaught exceptions are not handled here + for future in as_completed(futures): + try: + future.result() + except Exception: + pass stats = _wait_for_evaluation_to_complete( client=client, evaluation=evaluation, run=run, ) - logger.info(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n") + sys.stderr.write(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n") # Print Evaluation results - logger.info(stats.report) + sys.stderr.write(stats.report) - return _get_checks( + checks = _get_checks( client=client, evaluation=evaluation, stats=stats, evaluators=evaluators, run=run, ) + evaluators_worker_pool.shutdown(wait=False) + return checks class _SimpleProgressBar: @@ -245,6 +296,9 @@ def __init__(self, total: int): def increment(self): """Increment the progress bar by one finished task.""" with self._lock: + # NOTE: There is a deadlock here that needs further investigation + if self._progress == self._total: + return self._progress += 1 if self._start_time is None: self._start_time = time.time() @@ -270,9 +324,6 @@ def increment(self): sys.stderr.write("\033[K") # Clear the line from the cursor to the end sys.stderr.write(progress_display) - if self._progress >= self._total: - sys.stderr.write("\n") - @dataclass class _LocalEvaluator: @@ -280,7 +331,7 @@ class _LocalEvaluator: function: Callable -def _callable_is_decorated(file: File) -> bool: +def _callable_is_hl_utility(file: File) -> bool: """Check if a File is a decorated function.""" return hasattr(file["callable"], "file") @@ -292,16 +343,24 @@ def _wait_for_evaluation_to_complete( ): # Wait for the Evaluation to complete then print the results complete = False + + wrote_explainer = False + while not complete: stats = client.evaluations.get_stats(id=evaluation.id) - logger.info(f"\r{stats.progress}") run_stats = next( (run_stats for run_stats in stats.run_stats if run_stats.run_id == run.id), None, ) complete = run_stats is not None and run_stats.status == "completed" if not complete: + if not wrote_explainer: + sys.stderr.write("\n\nWaiting for Evaluators on Humanloop runtime...\n\n") + wrote_explainer = True + sys.stderr.write(stats.progress + "\n") + # Move the cursor up in stderr a number of lines equal to the number of lines in stats.progress time.sleep(5) + return stats @@ -351,29 +410,34 @@ def _get_checks( def _file_or_file_inside_hl_utility(file: File) -> File: - if _callable_is_decorated(file): + if _callable_is_hl_utility(file): # When the decorator inside `file` is a decorated function, # we need to validate that the other parameters of `file` # match the attributes of the decorator - decorated_fn_name = file["callable"].__name__ inner_file: File = file["callable"].file - for argument in ["version", "path", "type", "id"]: - if argument in file: - logger.warning( - f"Argument `file.{argument}` will be ignored: " - f"callable `{decorated_fn_name}` is managed by " - "the @{inner_file['type']} 
decorator." - ) - - # Use the file manifest in the decorated function + if "path" in file and inner_file["path"] != file["path"]: + raise ValueError( + "`path` attribute specified in the `file` does not match the File path of the decorated function." + ) + if "version" in file and inner_file["version"] != file["version"]: + raise ValueError( + "`version` attribute in the `file` does not match the File version of the decorated function." + ) + if "type" in file and inner_file["type"] != file["type"]: + raise ValueError( + "`type` attribute of `file` argument does not match the File type of the decorated function." + ) + if "id" in file: + raise ValueError("Do not specify an `id` attribute in `file` argument when using a decorated function.") + # file on decorated function holds at least + # or more information than the `file` argument file_ = copy.deepcopy(inner_file) - else: - # Simple function - # Raise error if one of path or id not provided - file_ = file - if not file_.get("path") and not file_.get("id"): - raise ValueError("You must provide a path or id in your `file`.") + file_ = copy.deepcopy(file) + + # Raise error if one of path or id not provided + if not file_.get("path") and not file_.get("id"): + raise ValueError("You must provide a path or id in your `file`.") return file_ @@ -382,13 +446,14 @@ def _get_file_type(file: File) -> FileType: # Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow` try: type_ = typing.cast(FileType, file.pop("type")) - logger.info( - f"{CYAN}Evaluating your {type_} function corresponding to `{file.get('path') or file.get('id')}` on Humanloop{RESET} \n\n" + sys.stdout.write( + f"{CYAN}Evaluating your {type_} function corresponding to `{file.get('path') or file.get('id')}` on Humanloop{RESET}\n\n" ) return type_ or "flow" except KeyError as _: type_ = "flow" - logger.warning("No `file` type specified, defaulting to flow.") + sys.stdout.write(f"{CYAN}No `file` type specified, defaulting to flow.{RESET}\n") + return type_ def _get_file_callable(file: File, type_: FileType) -> Optional[Callable]: @@ -398,7 +463,9 @@ def _get_file_callable(file: File, type_: FileType) -> Optional[Callable]: if type_ == "flow": raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.") else: - logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.") + sys.stdout.write( + f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.\n" + ) return function_ @@ -422,7 +489,7 @@ def _upsert_file( try: Prompt.model_validate(version) except ValidationError as error_: - logger.error(msg="Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)") + sys.stdout.write(f"Invalid Prompt `version` in your `file` request. \n\nValidation error: \n{error_}") raise error_ try: hl_file = client.prompts.upsert(**file_dict) @@ -433,7 +500,7 @@ def _upsert_file( try: Tool.model_validate(version) except ValidationError as error_: - logger.error(msg="Invalid Tool `version` in your `file` request. \n\nValidation error: \n)") + sys.stdout.write(f"Invalid Tool `version` in your `file` request. 
\n\nValidation error: \n{error_}") raise error_ hl_file = client.tools.upsert(**file_dict) @@ -597,7 +664,7 @@ def _get_log_func( "run_id": run_id, } if file_type == "flow": - return partial(client.flows.log, **log_request, trace_status="complete") + return partial(client.flows.log, **log_request) elif file_type == "prompt": return partial(client.prompts.log, **log_request) elif file_type == "evaluator": @@ -657,12 +724,12 @@ def _check_evaluation_threshold( evaluator_stat = evaluator_stats_by_path[evaluator_path] score = _get_score_from_evaluator_stat(stat=evaluator_stat) if score >= threshold: - logger.info( + sys.stderr.write( f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" ) return True else: - logger.info( + sys.stderr.write( f"{RED}❌ Latest score [{score}] below the threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" ) return False @@ -691,7 +758,7 @@ def _check_evaluation_improvement( evaluation=evaluation, ) if len(stats.run_stats) == 1: - logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}") + sys.stderr.write(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}\n") return True, 0, 0 previous_evaluator_stats_by_path = _get_evaluator_stats_by_path( @@ -707,10 +774,10 @@ def _check_evaluation_improvement( raise ValueError(f"Could not find score for Evaluator {evaluator_path}.") diff = round(latest_score - previous_score, 2) if diff >= 0: - logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}") + sys.stderr.write(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}\n") return True, latest_score, diff else: - logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}") + sys.stderr.write(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}\n") return False, latest_score, diff else: raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") @@ -726,45 +793,68 @@ def _run_local_evaluators( ): """Run local Evaluators on the Log and send the judgments to Humanloop.""" # Need to get the full log to pass to the evaluators - log = client.logs.get(id=log_id) - if not isinstance(log, dict): - log_dict = log.dict() - else: - log_dict = log - # Wait for the Flow trace to complete before running evaluators - while file_type == "flow" and log_dict["trace_status"] != "complete": + try: log = client.logs.get(id=log_id) if not isinstance(log, dict): log_dict = log.dict() else: log_dict = log - datapoint_dict = datapoint.dict() if datapoint else None - for local_evaluator in local_evaluators: - start_time = datetime.now() - try: - if local_evaluator.hl_evaluator.spec.arguments_type == "target_required": - judgement = local_evaluator.function( - log_dict, - datapoint_dict, - ) + # Wait for the Flow trace to complete before running evaluators + while file_type == "flow" and log_dict["log_status"] != "complete": + log = client.logs.get(id=log_id) + if not isinstance(log, dict): + log_dict = log.dict() else: - judgement = local_evaluator.function(log_dict) - - _ = client.evaluators.log( - version_id=local_evaluator.hl_evaluator.version_id, - parent_id=log_id, - judgment=judgement, - id=local_evaluator.hl_evaluator.id, - start_time=start_time, - end_time=datetime.now(), + log_dict = log + datapoint_dict = datapoint.dict() if datapoint else None + + for local_evaluator in local_evaluators: + start_time = datetime.now() + try: + if local_evaluator.hl_evaluator.spec.arguments_type == "target_required": + judgement = local_evaluator.function( + 
log_dict, + datapoint_dict, + ) + else: + judgement = local_evaluator.function(log_dict) + + _ = client.evaluators.log( + version_id=local_evaluator.hl_evaluator.version_id, + parent_id=log_id, + judgment=judgement, + id=local_evaluator.hl_evaluator.id, + start_time=start_time, + end_time=datetime.now(), + ) + except Exception as e: + _ = client.evaluators.log( + parent_id=log_id, + id=local_evaluator.hl_evaluator.id, + version_id=local_evaluator.hl_evaluator.version_id, + error=str(e), + start_time=start_time, + end_time=datetime.now(), + ) + error_message = str(e).replace("\n", " ") + if len(error_message) > 100: + sys.stderr.write( + f"{RED}Evaluator {local_evaluator.hl_evaluator.path} failed with error {error_message[:100]}...{RESET}\n" + ) + else: + sys.stderr.write( + f"{RED}Evaluator {local_evaluator.hl_evaluator.path} failed with error {error_message}{RESET}\n" + ) + except Exception as e: + error_message = str(e).replace("\n", " ") + if len(error_message) > 100: + sys.stderr.write( + f"{RED}Failed to run local Evaluators for source datapoint {datapoint.dict()['id'] if datapoint else None}: {error_message[:100]}...{RESET}\n" ) - except Exception as e: - _ = client.evaluators.log( - parent_id=log_id, - id=local_evaluator.hl_evaluator.id, - error=str(e), - start_time=start_time, - end_time=datetime.now(), + else: + sys.stderr.write( + f"{RED}Failed to run local Evaluators for source datapoint {datapoint.dict()['id'] if datapoint else None}: {error_message}{RESET}\n" ) - logger.warning(f"\nEvaluator {local_evaluator.hl_evaluator.path} failed with error {str(e)}") - progress_bar.increment() + pass + finally: + progress_bar.increment() diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py index 9b44a4a8..bc612236 100644 --- a/src/humanloop/otel/exporter.py +++ b/src/humanloop/otel/exporter.py @@ -16,12 +16,12 @@ HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY, ) -from humanloop.otel.helpers import read_from_opentelemetry_span, write_to_opentelemetry_span -from opentelemetry.proto.common.v1.common_pb2 import KeyValue, Link +from google.protobuf.json_format import MessageToJson +from humanloop.otel.helpers import is_llm_provider_call, read_from_opentelemetry_span, write_to_opentelemetry_span +from opentelemetry.proto.common.v1.common_pb2 import KeyValue, AnyValue, InstrumentationScope from opentelemetry.proto.trace.v1.trace_pb2 import ( TracesData, ResourceSpans, - InstrumentationScope, ScopeSpans, Span as ProtoBufferSpan, ) @@ -102,15 +102,19 @@ def _do_work(self): continue span_to_export, evaluation_context = thread_args + file_type = span_to_export.attributes.get(HUMANLOOP_FILE_TYPE_KEY) file_path = span_to_export.attributes.get(HUMANLOOP_PATH_KEY) if file_type is None: raise ValueError("Span does not have type set") - log_args = read_from_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_LOG_KEY, - ) + try: + log_args = read_from_opentelemetry_span( + span=span_to_export, + key=HUMANLOOP_LOG_KEY, + ) + except Exception: + log_args = {} if evaluation_context: if file_path == evaluation_context.path: @@ -129,57 +133,74 @@ def _do_work(self): payload = TracesData( resource_spans=[ ResourceSpans( - scope_spans=ScopeSpans( - scope=InstrumentationScope( - name="humanloop-otel", - version="0.1.0", - ), - spans=[ - ProtoBufferSpan( - trace_id=span_to_export.context.trace_id, - span_id=span_to_export.span_id, - name=span_to_export.name, - kind=span_to_export.kind, - start_time_unix_nano=span_to_export.start_time, - end_time_unix_nano=span_to_export.end_time, - 
attributes=[ - KeyValue( - key=key, - value=value, - ) - for key, value in span_to_export.attributes.items() - ], - dropped_attributes_count=len(span_to_export.dropped_attributes), - dropped_events_count=len(span_to_export.dropped_events), - dropped_links_count=len(span_to_export.dropped_links), - links=[ - Link( - trace_id=link.trace_id, - span_id=link.span_id, - attributes=link.attributes, - ) - for link in span_to_export.links - ], - events=[], - ) - ], - ) + scope_spans=[ + ScopeSpans( + scope=InstrumentationScope( + name="humanloop.sdk.provider" + if is_llm_provider_call(span_to_export) + else "humanloop.sdk.decorator", + version="0.1.0", + ), + spans=[ + ProtoBufferSpan( + trace_id=span_to_export.context.trace_id.to_bytes(length=16, byteorder="big"), + span_id=span_to_export.context.span_id.to_bytes(length=8, byteorder="big"), + name=span_to_export.name, + kind={ + 0: ProtoBufferSpan.SpanKind.SPAN_KIND_INTERNAL, + 1: ProtoBufferSpan.SpanKind.SPAN_KIND_SERVER, + 2: ProtoBufferSpan.SpanKind.SPAN_KIND_CLIENT, + 3: ProtoBufferSpan.SpanKind.SPAN_KIND_PRODUCER, + 4: ProtoBufferSpan.SpanKind.SPAN_KIND_CONSUMER, + }[span_to_export.kind.value], + start_time_unix_nano=span_to_export.start_time, + end_time_unix_nano=span_to_export.end_time, + attributes=[ + KeyValue( + key=key, + value=AnyValue(string_value=str(value)), + ) + for key, value in span_to_export.attributes.items() + ], + dropped_attributes_count=span_to_export.dropped_attributes, + dropped_events_count=span_to_export.dropped_events, + dropped_links_count=span_to_export.dropped_links, + links=[ + ProtoBufferSpan.Link( + trace_id=link.trace_id, + span_id=link.span_id, + attributes=[ + KeyValue( + key=key, + value=AnyValue(string_value=str(value)), + ) + for key, value in link.attributes.items() + ], + ) + for link in span_to_export.links + ], + events=[], + ) + ], + ) + ] ) ] ) response = requests.post( f"{self._client._client_wrapper.get_base_url()}/import/otel/v1/traces", - headers=self._client._client_wrapper.get_headers(), - data=span_to_export.to_json().encode("ascii"), + headers={ + **self._client._client_wrapper.get_headers(), + }, + data=MessageToJson(payload), ) if response.status_code != 200: # TODO: handle pass else: - print("FOO", response.json()) if evaluation_context and file_path == evaluation_context.path: - log_id = response.json()["log_id"] + log_id = response.json()["records"][0]["log_id"] evaluation_context.callback(log_id) self._upload_queue.task_done() diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py index 572a3612..6dd539c4 100644 --- a/src/humanloop/otel/processor.py +++ b/src/humanloop/otel/processor.py @@ -44,4 +44,11 @@ def on_start(self, span: Span, parent_context): trace_id = get_trace_id() if trace_id: span.set_attribute(f"{HUMANLOOP_LOG_KEY}.trace_parent_id", trace_id) - print(span) + + def on_end(self, span: ReadableSpan): + if is_llm_provider_call(span): + prompt_context = get_prompt_context() + if prompt_context is None: + return + + self.span_exporter.export([span]) diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py index c0cf0278..fe1572a6 100644 --- a/src/humanloop/utilities/flow.py +++ b/src/humanloop/utilities/flow.py @@ -8,7 +8,7 @@ import requests from humanloop.base_client import BaseHumanloop -from humanloop.context import get_trace_id, set_trace_id +from humanloop.context import get_evaluation_context, get_trace_id, set_trace_id from humanloop.types.chat_message import ChatMessage from humanloop.utilities.helpers import bind_args 
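# --- Editor's aside on the exporter hunk above: OpenTelemetry SDK spans carry
# trace_id/span_id as Python ints, while the OTLP protobuf Span expects
# fixed-width big-endian byte strings (16 bytes for a trace id, 8 for a span
# id), hence the to_bytes(...) conversions in the payload. A self-contained
# round trip, using the example ids from the W3C traceparent specification:
trace_id_int = 0x4BF92F3577B34DA6A3CE929D0E0E4736
trace_id_bytes = trace_id_int.to_bytes(length=16, byteorder="big")
assert trace_id_bytes.hex() == "4bf92f3577b34da6a3ce929d0e0e4736"
assert int.from_bytes(trace_id_bytes, byteorder="big") == trace_id_int

span_id_int = 0x00F067AA0BA902B7
assert span_id_int.to_bytes(length=8, byteorder="big").hex() == "00f067aa0ba902b7"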
from humanloop.eval_utils.types import File @@ -83,7 +83,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: and "role" in func_output and "content" in func_output ): - log_output_message = ChatMessage(**func_output) + log_output_message = func_output log_output = None else: log_output = process_output(func=func, output=func_output) @@ -91,9 +91,10 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: log_error = None except Exception as e: logger.error(f"Error calling {func.__name__}: {e}") - output = None + log_output = None log_output_message = None log_error = str(e) + func_output = None flow_log = { "inputs": {k: v for k, v in args_to_func.items() if k != "messages"}, @@ -106,12 +107,11 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: } # Write the Flow Log to the Span on HL_LOG_OT_KEY - if flow_log: - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_LOG_KEY, - value=flow_log, # type: ignore - ) + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=flow_log, # type: ignore + ) # Return the output of the decorated function return func_output From 2455724b7930955563aa97872c987d5f18423f0b Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Thu, 6 Mar 2025 12:16:55 +0000 Subject: [PATCH 08/14] Error handling in decorators --- src/humanloop/context.py | 23 +++++------ src/humanloop/eval_utils/run.py | 64 ++++++++++++++++++++++++++++--- src/humanloop/otel/constants.py | 2 +- src/humanloop/otel/exporter.py | 2 +- src/humanloop/overload.py | 40 ++++++++++++------- src/humanloop/utilities/flow.py | 3 +- src/humanloop/utilities/prompt.py | 4 +- tests/utilities/test_prompt.py | 8 ++-- 8 files changed, 106 insertions(+), 40 deletions(-) diff --git a/src/humanloop/context.py b/src/humanloop/context.py index f7a001ab..c2563d3b 100644 --- a/src/humanloop/context.py +++ b/src/humanloop/context.py @@ -1,12 +1,12 @@ from contextlib import contextmanager from dataclasses import dataclass import threading -from typing import Callable, Generator, Optional +from typing import Any, Callable, Generator, Literal, Optional from opentelemetry import context as context_api from humanloop.otel.constants import ( HUMANLOOP_CONTEXT_EVALUATION, - HUMANLOOP_CONTEXT_PROMPT, + HUMANLOOP_CONTEXT_DECORATOR, HUMANLOOP_CONTEXT_TRACE_ID, ) @@ -25,14 +25,15 @@ def set_trace_id(flow_log_id: str) -> Generator[None, None, None]: @dataclass -class PromptContext: +class DecoratorContext: path: str - template: Optional[str] + type: Literal["prompt", "tool", "flow"] + version: dict[str, Optional[Any]] @contextmanager -def set_prompt_context(prompt_context: PromptContext) -> Generator[None, None, None]: - key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident())) +def set_decorator_context(prompt_context: DecoratorContext) -> Generator[None, None, None]: + key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident())) reset_token = context_api.attach( context_api.set_value( key=key, @@ -43,15 +44,15 @@ def set_prompt_context(prompt_context: PromptContext) -> Generator[None, None, N context_api.detach(token=reset_token) -def get_prompt_context() -> Optional[PromptContext]: - key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident())) +def get_decorator_context() -> Optional[DecoratorContext]: + key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident())) return context_api.get_value(key) class EvaluationContext: source_datapoint_id: str run_id: str - callback: Callable[[str], None] + logging_callback: Callable[[str], None] file_id: str path: str 
logging_counter: int @@ -60,13 +61,13 @@ def __init__( self, source_datapoint_id: str, run_id: str, - callback: Callable[[str], None], + logging_callback: Callable[[str], None], file_id: str, path: str, ): self.source_datapoint_id = source_datapoint_id self.run_id = run_id - self.callback = callback + self.logging_callback = logging_callback self.file_id = file_id self.path = path self.logging_counter = 0 diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py index 20817745..a385b310 100644 --- a/src/humanloop/eval_utils/run.py +++ b/src/humanloop/eval_utils/run.py @@ -67,9 +67,15 @@ from humanloop.types.datapoint_response import DatapointResponse from humanloop.types.dataset_response import DatasetResponse from humanloop.types.evaluation_run_response import EvaluationRunResponse +from humanloop.types.evaluator_log_response import EvaluatorLogResponse +from humanloop.types.flow_log_response import FlowLogResponse +from humanloop.types.log_response import LogResponse +from humanloop.types.prompt_log_response import PromptLogResponse from humanloop.types.run_stats_response import RunStatsResponse from pydantic import ValidationError +from humanloop.types.tool_log_response import ToolLogResponse + if typing.TYPE_CHECKING: from humanloop.client import BaseHumanloop @@ -99,11 +105,13 @@ CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient) -class HumanloopUtilityError(Exception): - def __init__(self, message): +class HumanloopDecoratorError(Exception): + def __init__(self, message: Optional[str] = None): self.message = message def __str__(self): + if self.message is None: + return super().__str__() return self.message @@ -202,7 +210,7 @@ def upload_callback(log_id: str): with set_evaluation_context( EvaluationContext( source_datapoint_id=dp.id, - callback=upload_callback, + logging_callback=upload_callback, file_id=hl_file.id, run_id=run.id, path=hl_file.path, @@ -219,10 +227,14 @@ def upload_callback(log_id: str): try: output = _call_function(function_, hl_file.type, dp) evaluation_context = get_evaluation_context() - if not evaluation_context.logging_counter == 0: + if evaluation_context is None: + raise HumanloopDecoratorError( + "Internal error: evaluation context is not set while processing a datapoint." + ) + if evaluation_context.logging_counter == 0: # function_ did not Log against the source_datapoint_id/ run_id pair # so we need to create a Log - log_func( + log = log_func( inputs=dp.inputs, output=output, start_time=start_time, @@ -230,6 +242,10 @@ def upload_callback(log_id: str): source_datapoint_id=dp.id, run_id=run.id, ) + evaluation_context.logging_counter += 1 + evaluation_context.logging_callback(log.id) + except HumanloopDecoratorError as e: + raise e except Exception as e: log_func( inputs=dp.inputs, @@ -648,13 +664,49 @@ def _call_function( return output +def _get_log_func( + client: "BaseHumanloop", + file_type: Literal["flow"], + file_id: str, + version_id: str, + run_id: str, +) -> Callable[..., FlowLogResponse]: ... + + +def _get_log_func( + client: "BaseHumanloop", + file_type: Literal["prompt"], + file_id: str, + version_id: str, + run_id: str, +) -> Callable[..., PromptLogResponse]: ... + + +def _get_log_func( + client: "BaseHumanloop", + file_type: Literal["tool"], + file_id: str, + version_id: str, + run_id: str, +) -> Callable[..., ToolLogResponse]: ... 
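# --- Editor's aside: the stacked _get_log_func signatures above and below read
# as typing overloads. In standard typing, such `...`-bodied stubs are declared
# with @typing.overload so a checker can narrow the return type per Literal
# argument, while a single undecorated def provides the runtime implementation;
# repeated bare defs would otherwise simply rebind the name. A minimal sketch
# with illustrative (assumed) names:
from typing import Literal, Union, overload

@overload
def pick(kind: Literal["flow"]) -> str: ...
@overload
def pick(kind: Literal["prompt"]) -> int: ...

def pick(kind: str) -> Union[str, int]:  # the one runtime implementation
    return "trace-container" if kind == "flow" else 0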
+ + +def _get_log_func( + client: "BaseHumanloop", + file_type: Literal["evaluator"], + file_id: str, + version_id: str, + run_id: str, +) -> Callable[..., EvaluatorLogResponse]: ... + + def _get_log_func( client: "BaseHumanloop", file_type: FileType, file_id: str, version_id: str, run_id: str, -) -> Callable: +) -> Callable[..., LogResponse]: """Returns the appropriate log function pre-filled with common parameters.""" log_request = { # TODO: why does the Log `id` field refer to the file ID in the API? diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py index 67d8b325..c0af0ca3 100644 --- a/src/humanloop/otel/constants.py +++ b/src/humanloop/otel/constants.py @@ -6,6 +6,6 @@ HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type" HUMANLOOP_PATH_KEY = "humanloop.file.path" # Opentelemetry context -HUMANLOOP_CONTEXT_PROMPT = "humanloop.context.prompt" +HUMANLOOP_CONTEXT_DECORATOR = "humanloop.context.decorator" HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id" HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation" diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py index bc612236..48fd7a12 100644 --- a/src/humanloop/otel/exporter.py +++ b/src/humanloop/otel/exporter.py @@ -201,6 +201,6 @@ def _do_work(self): else: if evaluation_context and file_path == evaluation_context.path: log_id = response.json()["records"][0]["log_id"] - evaluation_context.callback(log_id) + evaluation_context.logging_callback(log_id) self._upload_queue.task_done() diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py index 1cec39b9..43ab6317 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -1,12 +1,16 @@ +import inspect import logging import types from typing import TypeVar, Union import typing -from humanloop.context import get_trace_id -from humanloop.eval_utils.run import HumanloopUtilityError +from humanloop.context import get_decorator_context, get_trace_id +from humanloop.eval_utils.run import HumanloopDecoratorError +from humanloop.evaluators.client import EvaluatorsClient +from humanloop.flows.client import FlowsClient from humanloop.prompts.client import PromptsClient +from humanloop.tools.client import ToolsClient from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse from humanloop.types.create_flow_log_response import CreateFlowLogResponse from humanloop.types.create_prompt_log_response import CreatePromptLogResponse @@ -16,7 +20,7 @@ logger = logging.getLogger("humanloop.sdk") -CLIENT_TYPE = TypeVar("CLIENT_TYPE") +CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient, EvaluatorsClient, ToolsClient) def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE: @@ -41,10 +45,20 @@ def _overload_log( CreateEvaluatorLogResponse, ]: trace_id = get_trace_id() + if trace_id is not None and type(client) is FlowsClient: + context = get_decorator_context() + if context is None: + raise HumanloopDecoratorError("Internal error: trace_id context is set outside a decorator context.") + raise HumanloopDecoratorError( + f"Using flows.log() in this context is not allowed at line {inspect.currentframe().f_lineno}: " + f"Flow decorator for File {context.path} manages the tracing and trace completion." 
+ ) if trace_id is not None: if "trace_parent_id" in kwargs: - # TODO: revisit - logger.warning("Overriding trace_parent_id argument") + logger.warning( + "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.", + inspect.currentframe().f_lineno, + ) kwargs = { **kwargs, "trace_parent_id": trace_id, @@ -52,9 +66,8 @@ def _overload_log( try: response = self._log(**kwargs) except Exception as e: - # TODO handle - # TODO: Bug found in backend: not specifying a model 400s but creates a File - raise HumanloopUtilityError(message=str(e)) from e + # Re-raising as HumanloopDecoratorError so the decorators don't catch it + raise HumanloopDecoratorError from e return response @@ -73,8 +86,10 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: trace_id = get_trace_id() if trace_id is not None: if "trace_parent_id" in kwargs: - # TODO: revisit - logger.warning("Overriding trace_parent_id argument") + logger.warning( + "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.", + inspect.currentframe().f_lineno, + ) kwargs = { **kwargs, "trace_parent_id": trace_id, @@ -84,9 +99,8 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: response = self._call(**kwargs) response = typing.cast(PromptCallResponse, response) except Exception as e: - # TODO handle - # TODO: Bug found in backend: not specifying a model 400s but creates a File - raise HumanloopUtilityError(message=str(e)) from e + # Re-raising as HumanloopDecoratorError so the decorators don't catch it + raise HumanloopDecoratorError from e return response diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py index fe1572a6..c2f00b28 100644 --- a/src/humanloop/utilities/flow.py +++ b/src/humanloop/utilities/flow.py @@ -4,11 +4,10 @@ from typing_extensions import ParamSpec from opentelemetry.trace import Span, Tracer -from opentelemetry import context as context_api import requests from humanloop.base_client import BaseHumanloop -from humanloop.context import get_evaluation_context, get_trace_id, set_trace_id +from humanloop.context import get_trace_id, set_trace_id from humanloop.types.chat_message import ChatMessage from humanloop.utilities.helpers import bind_args from humanloop.eval_utils.types import File diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py index 8b52dd2f..a30294aa 100644 --- a/src/humanloop/utilities/prompt.py +++ b/src/humanloop/utilities/prompt.py @@ -4,7 +4,7 @@ from typing import Callable, Optional -from humanloop.context import PromptContext, set_prompt_context +from humanloop.context import PromptContext, set_decorator_context logger = logging.getLogger("humanloop.sdk") @@ -13,7 +13,7 @@ def prompt_decorator_factory(path: str, template: Optional[str]): def decorator(func: Callable): @wraps(func) def wrapper(*args, **kwargs): - with set_prompt_context( + with set_decorator_context( PromptContext( path=path, template=template, diff --git a/tests/utilities/test_prompt.py b/tests/utilities/test_prompt.py index 8880784b..cafda67d 100644 --- a/tests/utilities/test_prompt.py +++ b/tests/utilities/test_prompt.py @@ -13,7 +13,7 @@ from groq import Groq from groq import NotFoundError as GroqNotFoundError from humanloop.client import Humanloop -from humanloop.eval_utils.run import HumanloopUtilityError +from humanloop.eval_utils.run import HumanloopDecoratorError from humanloop.utilities.prompt import prompt_decorator_factory from humanloop.otel.constants import HUMANLOOP_FILE_KEY from humanloop.otel.helpers 
import is_humanloop_span, read_from_opentelemetry_span @@ -449,7 +449,7 @@ def call_llm_with_hl_call(): ) return response.logs[0].output_message.content # type: ignore [union-attr] - with pytest.raises(HumanloopUtilityError): + with pytest.raises(HumanloopDecoratorError): call_llm_with_hl_call() response = humanloop_client.directories.get(id=test_directory.id) @@ -481,7 +481,7 @@ def call_llm_with_hl_call(): return response.logs[0].output_message.content - with pytest.raises(HumanloopUtilityError): + with pytest.raises(HumanloopDecoratorError): call_llm_with_hl_call() response = humanloop_client.directories.get(id=test_directory.id) @@ -532,7 +532,7 @@ def call_llm_with_hl_call(): return response.logs[0].output_message.content - with pytest.raises(HumanloopUtilityError): + with pytest.raises(HumanloopDecoratorError): call_llm_with_hl_call() response = humanloop_client.directories.get(id=test_directory.id) From 2da96ae0a742ad730601b661b4bc7b89e00dbf14 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Thu, 6 Mar 2025 12:49:08 +0000 Subject: [PATCH 09/14] update lock file --- poetry.lock | 553 +--------------------------------------------------- 1 file changed, 3 insertions(+), 550 deletions(-) diff --git a/poetry.lock b/poetry.lock index 749c5dc5..e8454e18 100644 --- a/poetry.lock +++ b/poetry.lock @@ -77,119 +77,8 @@ benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hyp cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] - -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - -[[package]] -name = "bcrypt" -version = "4.3.0" -description = "Modern password hashing for your software and your servers" -optional = false -python-versions = ">=3.8" -files = [ - {file = "bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281"}, - {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb"}, - {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180"}, - 
{file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f"}, - {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09"}, - {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d"}, - {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd"}, - {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af"}, - {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231"}, - {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c"}, - {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f"}, - {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d"}, - {file = "bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4"}, - {file = "bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669"}, - {file = "bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb"}, - {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d"}, - {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f"}, - {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732"}, - {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef"}, - {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304"}, - {file = "bcrypt-4.3.0-cp38-abi3-win32.whl", hash = 
"sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51"}, - {file = "bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62"}, - {file = "bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe"}, - {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0"}, - {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f"}, - {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23"}, - {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe"}, - {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505"}, - {file = "bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a"}, - {file = "bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b"}, - {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c950d682f0952bafcceaf709761da0a32a942272fad381081b51096ffa46cea1"}, - {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:107d53b5c67e0bbc3f03ebf5b030e0403d24dda980f8e244795335ba7b4a027d"}, - {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b693dbb82b3c27a1604a3dff5bfc5418a7e6a781bb795288141e5f80cf3a3492"}, - {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:b6354d3760fcd31994a14c89659dee887f1351a06e5dac3c1142307172a79f90"}, - {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a839320bf27d474e52ef8cb16449bb2ce0ba03ca9f44daba6d93fa1d8828e48a"}, - {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bdc6a24e754a555d7316fa4774e64c6c3997d27ed2d1964d55920c7c227bc4ce"}, - {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:55a935b8e9a1d2def0626c4269db3fcd26728cbff1e84f0341465c31c4ee56d8"}, - {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57967b7a28d855313a963aaea51bf6df89f833db4320da458e5b3c5ab6d4c938"}, - {file = "bcrypt-4.3.0.tar.gz", hash = 
"sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18"}, -] - -[package.extras] -tests = ["pytest (>=3.2.1,!=3.3.0)"] -typecheck = ["mypy"] - -[[package]] -name = "build" -version = "1.2.2.post1" -description = "A simple, correct Python build frontend" -optional = false -python-versions = ">=3.8" -files = [ - {file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"}, - {file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "os_name == \"nt\""} -importlib-metadata = {version = ">=4.6", markers = "python_full_version < \"3.10.2\""} -packaging = ">=19.1" -pyproject_hooks = "*" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} - -[package.extras] -docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] -test = ["build[uv,virtualenv]", "filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"] -typing = ["build[uv]", "importlib-metadata (>=5.1)", "mypy (>=1.9.0,<1.10.0)", "tomli", "typing-extensions (>=3.7.4.3)"] -uv = ["uv (>=0.1.18)"] -virtualenv = ["virtualenv (>=20.0.35)"] - -[[package]] -name = "cachetools" -version = "5.5.2" -description = "Extensible memoizing collections and decorators" -optional = false -python-versions = ">=3.7" -files = [ - {file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"}, - {file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"}, -] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "certifi" @@ -424,26 +313,6 @@ files = [ [package.extras] test = ["pytest (>=6)"] -[[package]] -name = "fastapi" -version = "0.115.11" -description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fastapi-0.115.11-py3-none-any.whl", hash = "sha256:32e1541b7b74602e4ef4a0260ecaf3aadf9d4f19590bba3e1bf2ac4666aa2c64"}, - {file = "fastapi-0.115.11.tar.gz", hash = "sha256:cc81f03f688678b92600a65a5e618b93592c65005db37157147204d8924bf94f"}, -] - -[package.dependencies] -pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" -starlette = ">=0.40.0,<0.47.0" -typing-extensions = ">=4.8.0" - -[package.extras] -all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml 
(>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] -standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] - [[package]] name = "fastavro" version = "1.10.0" @@ -560,47 +429,6 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] -[[package]] -name = "google-auth" -version = "2.38.0" -description = "Google Authentication Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, - {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = ">=3.1.4,<5" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography", "pyopenssl"] -pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0.dev0)"] - -[[package]] -name = "googleapis-common-protos" -version = "1.69.0" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "googleapis_common_protos-1.69.0-py2.py3-none-any.whl", hash = "sha256:17835fdc4fa8da1d61cfe2d4d5d57becf7c61d4112f8d81c67eaa9d7ce43042d"}, - {file = "googleapis_common_protos-1.69.0.tar.gz", hash = "sha256:5a46d58af72846f59009b9c4710425b9af2139555c71837081706b213b298187"}, -] - -[package.dependencies] -protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] - [[package]] name = "groq" version = "0.18.0" @@ -912,68 +740,6 @@ files = [ [package.dependencies] referencing = ">=0.31.0" -[[package]] -name = "kubernetes" -version = "32.0.1" -description = "Kubernetes python client" -optional = false -python-versions = ">=3.6" -files = [ - {file = "kubernetes-32.0.1-py2.py3-none-any.whl", hash = "sha256:35282ab8493b938b08ab5526c7ce66588232df00ef5e1dbe88a419107dc10998"}, - {file = "kubernetes-32.0.1.tar.gz", hash = "sha256:42f43d49abd437ada79a79a16bd48a604d3471a117a8347e87db693f2ba0ba28"}, -] - -[package.dependencies] -certifi = ">=14.05.14" -durationpy = ">=0.7" -google-auth = ">=1.0.1" -oauthlib = ">=3.2.2" -python-dateutil = ">=2.5.3" -pyyaml = ">=5.4.1" -requests = "*" -requests-oauthlib = "*" -six = ">=1.9.0" -urllib3 = ">=1.24.2" -websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" - -[package.extras] -adal = ["adal (>=1.0.2)"] - -[[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. 
Markdown parsing, done right!" -optional = false -python-versions = ">=3.8" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - [[package]] name = "mmh3" version = "5.1.0" @@ -1628,31 +1394,6 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] -[[package]] -name = "posthog" -version = "3.18.1" -description = "Integrate PostHog into any python application." -optional = false -python-versions = "*" -files = [ - {file = "posthog-3.18.1-py2.py3-none-any.whl", hash = "sha256:6865104b7cf3a5b13949e2bc2aab9b37b5fbf5f9e045fa55b9eabe21b3850200"}, - {file = "posthog-3.18.1.tar.gz", hash = "sha256:ce115b8422f26c57cd4143499115b741f5683c93d0b5b87bab391579aaef084b"}, -] - -[package.dependencies] -backoff = ">=1.10.0" -distro = ">=1.5.0" -monotonic = ">=1.5" -python-dateutil = ">2.1" -requests = ">=2.7,<3.0" -six = ">=1.5" - -[package.extras] -dev = ["black", "django-stubs", "flake8", "flake8-print", "isort", "lxml", "mypy", "mypy-baseline", "pre-commit", "pydantic", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six"] -langchain = ["langchain (>=0.2.0)"] -sentry = ["django", "sentry-sdk"] -test = ["anthropic", "coverage", "django", "flake8", "freezegun (==1.5.1)", "langchain-anthropic (>=0.2.0)", "langchain-community (>=0.2.0)", "langchain-openai (>=0.2.0)", "langgraph", "mock (>=2.0.0)", "openai", "parameterized (>=0.8.1)", "pydantic", "pylint", "pytest", "pytest-asyncio", "pytest-timeout"] - [[package]] name = "protobuf" version = "5.29.3" @@ -2391,24 +2132,6 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "starlette" -version = "0.46.0" -description = "The little ASGI library that shines." 
-optional = false -python-versions = ">=3.9" -files = [ - {file = "starlette-0.46.0-py3-none-any.whl", hash = "sha256:913f0798bd90ba90a9156383bcf1350a17d6259451d0d8ee27fc0cf2db609038"}, - {file = "starlette-0.46.0.tar.gz", hash = "sha256:b359e4567456b28d473d0193f34c0de0ed49710d75ef183a74a5ce0499324f50"}, -] - -[package.dependencies] -anyio = ">=3.6.2,<5" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] - [[package]] name = "sympy" version = "1.13.3" @@ -2573,23 +2296,6 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] -[[package]] -name = "typer" -version = "0.15.2" -description = "Typer, build great CLIs. Easy to code. Based on Python type hints." -optional = false -python-versions = ">=3.7" -files = [ - {file = "typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc"}, - {file = "typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5"}, -] - -[package.dependencies] -click = ">=8.0.0" -rich = ">=10.11.0" -shellingham = ">=1.3.0" -typing-extensions = ">=3.7.4.3" - [[package]] name = "types-jsonschema" version = "4.23.0.20241208" @@ -2623,6 +2329,7 @@ version = "2.32.0.20250306" description = "Typing stubs for requests" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "types_requests-2.32.0.20250306-py3-none-any.whl", hash = "sha256:25f2cbb5c8710b2022f8bbee7b2b66f319ef14aeea2f35d80f18c9dbf3b60a0b"}, {file = "types_requests-2.32.0.20250306.tar.gz", hash = "sha256:0962352694ec5b2f95fda877ee60a159abdf84a0fc6fdace599f20acb41a03d1"}, @@ -2673,260 +2380,6 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "uvicorn" -version = "0.34.0" -description = "The lightning-fast ASGI server." 
-optional = false -python-versions = ">=3.9" -files = [ - {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, - {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, -] - -[package.dependencies] -click = ">=7.0" -colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} -h11 = ">=0.8" -httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standard\""} -python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} -pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} -typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} -uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} -watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} -websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} - -[package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[[package]] -name = "uvloop" -version = "0.21.0" -description = "Fast implementation of asyncio event loop on top of libuv" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, - {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, - {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f38b2e090258d051d68a5b14d1da7203a3c3677321cf32a95a6f4db4dd8b6f26"}, - {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c43e0f13022b998eb9b973b5e97200c8b90823454d4bc06ab33829e09fb9bb"}, - {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:10d66943def5fcb6e7b37310eb6b5639fd2ccbc38df1177262b0640c3ca68c1f"}, - {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:67dd654b8ca23aed0a8e99010b4c34aca62f4b7fce88f39d452ed7622c94845c"}, - {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8"}, - {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0"}, - {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e"}, - {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb"}, - {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6"}, - {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d"}, - {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c"}, - {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2"}, - {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d"}, - {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc"}, - {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb"}, - {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f"}, - {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281"}, - {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af"}, - {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6"}, - {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816"}, - {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc"}, - {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553"}, - {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:17df489689befc72c39a08359efac29bbee8eee5209650d4b9f34df73d22e414"}, - {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc09f0ff191e61c2d592a752423c767b4ebb2986daa9ed62908e2b1b9a9ae206"}, - {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0ce1b49560b1d2d8a2977e3ba4afb2414fb46b86a1b64056bc4ab929efdafbe"}, - {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e678ad6fe52af2c58d2ae3c73dc85524ba8abe637f134bf3564ed07f555c5e79"}, - {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:460def4412e473896ef179a1671b40c039c7012184b627898eea5072ef6f017a"}, - {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:10da8046cc4a8f12c91a1c39d1dd1585c41162a15caaef165c2174db9ef18bdc"}, - {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c097078b8031190c934ed0ebfee8cc5f9ba9642e6eb88322b9958b649750f72b"}, - {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46923b0b5ee7fc0020bef24afe7836cb068f5050ca04caf6b487c513dc1a20b2"}, - {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53e420a3afe22cdcf2a0f4846e377d16e718bc70103d7088a4f7623567ba5fb0"}, - {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb67cdbc0e483da00af0b2c3cdad4b7c61ceb1ee0f33fe00e09c81e3a6cb75"}, - {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:221f4f2a1f46032b403bf3be628011caf75428ee3cc204a22addf96f586b19fd"}, - {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:2d1f581393673ce119355d56da84fe1dd9d2bb8b3d13ce792524e1607139feff"}, - {file = "uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3"}, -] - -[package.extras] -dev = ["Cython (>=3.0,<4.0)", "setuptools (>=60)"] -docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] -test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] - -[[package]] -name = "watchfiles" -version = "1.0.4" -description = "Simple, modern and high performance file watching and code reload in python." -optional = false -python-versions = ">=3.9" -files = [ - {file = "watchfiles-1.0.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08"}, - {file = "watchfiles-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899"}, - {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff"}, - {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f"}, - {file = "watchfiles-1.0.4-cp310-cp310-win32.whl", hash = "sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f"}, - {file = "watchfiles-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161"}, - {file = "watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19"}, - {file = "watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c"}, - {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1"}, - {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226"}, - {file = "watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105"}, - {file = "watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74"}, - {file = "watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3"}, - {file = "watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2"}, - {file = "watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a"}, - {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff"}, - {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e"}, - {file = "watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94"}, - {file = "watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c"}, - {file = "watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90"}, - {file = "watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9"}, - {file = "watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60"}, - {file = 
"watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902"}, - {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1"}, - {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303"}, - {file = "watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80"}, - {file = "watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc"}, - {file = "watchfiles-1.0.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21"}, - {file = "watchfiles-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf"}, - {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a"}, - {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b"}, - {file = "watchfiles-1.0.4-cp39-cp39-win32.whl", hash = "sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27"}, - {file = "watchfiles-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42"}, - {file = "watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205"}, -] - -[package.dependencies] -anyio = ">=3.0.0" - -[[package]] -name = "websocket-client" -version = "1.8.0" -description = "WebSocket client for Python with low level API options" -optional = false -python-versions = ">=3.8" -files = [ - {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, - {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, -] - -[package.extras] -docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] -optional = ["python-socks", "wsaccel"] -test = ["websockets"] - -[[package]] -name = "websockets" -version = "15.0.1" -description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -optional = false -python-versions = ">=3.9" -files = [ - {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, - {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, - {file = "websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a"}, - {file = "websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e"}, - {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf"}, - {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb"}, - {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d"}, - {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9"}, - {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c"}, - {file = "websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256"}, - {file = "websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41"}, - {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431"}, - {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57"}, - {file = "websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905"}, - {file = "websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562"}, - {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792"}, - {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413"}, - {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8"}, - {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3"}, - {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf"}, - {file = "websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85"}, - {file = "websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065"}, - {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3"}, - {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665"}, - {file = "websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2"}, - {file = "websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215"}, - {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5"}, - {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65"}, - {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe"}, - {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4"}, - {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597"}, - {file = "websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9"}, - {file = "websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7"}, - {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931"}, - {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675"}, - {file = "websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151"}, - {file = "websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22"}, - {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f"}, - {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8"}, - {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375"}, - {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d"}, - {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4"}, - {file = "websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa"}, - {file = "websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561"}, - {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5"}, - {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a"}, - {file = "websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b"}, - {file = "websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770"}, - {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb"}, - {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054"}, - {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee"}, - {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed"}, - {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880"}, - {file = "websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411"}, - {file = "websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123"}, - {file = "websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f"}, - {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, -] - [[package]] name = "wrapt" version = "1.17.2" From 8556149622a02bc53d0547ffefafa49b8e5cbaf5 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Fri, 7 Mar 2025 03:44:39 +0000 Subject: [PATCH 10/14] QA pass on python --- .fernignore | 14 +- poetry.lock | 14 +- pyproject.toml | 5 +- src/humanloop/client.py | 11 +- src/humanloop/context.py | 86 ++- .../{utilities => decorators}/__init__.py | 0 src/humanloop/decorators/flow.py | 133 ++++ .../{utilities => decorators}/helpers.py | 0 src/humanloop/decorators/prompt.py | 43 ++ .../{utilities => decorators}/tool.py | 29 +- src/humanloop/error.py | 11 + .../{eval_utils => evals}/__init__.py | 0 src/humanloop/{eval_utils => 
evals}/run.py | 346 +++++------
 src/humanloop/{eval_utils => evals}/types.py | 10 +-
 src/humanloop/otel/constants.py | 6 +-
 src/humanloop/otel/exporter.py | 206 ------
 src/humanloop/otel/exporter/__init__.py | 154 +++++
 src/humanloop/otel/exporter/proto.py | 73 +++
 src/humanloop/otel/processor.py | 38 +-
 src/humanloop/overload.py | 53 +-
 src/humanloop/utilities/flow.py | 127 ----
 src/humanloop/utilities/prompt.py | 27 -
 src/humanloop/utilities/types.py | 12 -
 tests/conftest.py | 10 +-
 tests/integration/chat_agent/__init__.py | 0
 tests/integration/chat_agent/conftest.py | 177 ------
 .../integration/chat_agent/test_chat_agent.py | 67 --
 tests/integration/evaluate_medqa/__init__.py | 0
 tests/integration/evaluate_medqa/conftest.py | 202 ------
 .../evaluate_medqa/test_evaluate_medqa.py | 75 ---
 tests/otel/test_helpers.py | 9 +-
 tests/utilities/__init__.py | 0
 tests/utilities/test_flow.py | 298 ---------
 tests/utilities/test_prompt.py | 584 ------------------
 tests/utilities/test_tool.py | 567 -----------------
 35 files changed, 768 insertions(+), 2619 deletions(-)
 rename src/humanloop/{utilities => decorators}/__init__.py (100%)
 create mode 100644 src/humanloop/decorators/flow.py
 rename src/humanloop/{utilities => decorators}/helpers.py (100%)
 create mode 100644 src/humanloop/decorators/prompt.py
 rename src/humanloop/{utilities => decorators}/tool.py (94%)
 create mode 100644 src/humanloop/error.py
 rename src/humanloop/{eval_utils => evals}/__init__.py (100%)
 rename src/humanloop/{eval_utils => evals}/run.py (76%)
 rename src/humanloop/{eval_utils => evals}/types.py (94%)
 delete mode 100644 src/humanloop/otel/exporter.py
 create mode 100644 src/humanloop/otel/exporter/__init__.py
 create mode 100644 src/humanloop/otel/exporter/proto.py
 delete mode 100644 src/humanloop/utilities/flow.py
 delete mode 100644 src/humanloop/utilities/prompt.py
 delete mode 100644 src/humanloop/utilities/types.py
 delete mode 100644 tests/integration/chat_agent/__init__.py
 delete mode 100644 tests/integration/chat_agent/conftest.py
 delete mode 100644 tests/integration/chat_agent/test_chat_agent.py
 delete mode 100644 tests/integration/evaluate_medqa/__init__.py
 delete mode 100644 tests/integration/evaluate_medqa/conftest.py
 delete mode 100644 tests/integration/evaluate_medqa/test_evaluate_medqa.py
 delete mode 100644 tests/utilities/__init__.py
 delete mode 100644 tests/utilities/test_flow.py
 delete mode 100644 tests/utilities/test_prompt.py
 delete mode 100644 tests/utilities/test_tool.py

diff --git a/.fernignore b/.fernignore
index ee7245af..718b7c3a 100644
--- a/.fernignore
+++ b/.fernignore
@@ -1,19 +1,19 @@
 # Specify files that shouldn't be modified by Fern
-src/humanloop/eval_utils
+
+## Custom code
+
+src/humanloop/evals
 src/humanloop/prompt_utils.py
 src/humanloop/client.py
 src/humanloop/overload.py
-src/humanloop/context_variables.py
+src/humanloop/context.py
 mypy.ini
 README.md
-
-# Directories used by SDK decorators
-
-src/humanloop/utilities
+src/humanloop/decorators
 src/humanloop/otel
 
-# Tests
+## Tests
 
 tests/
diff --git a/poetry.lock b/poetry.lock
index e8454e18..abcd91cb 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2311,6 +2311,18 @@ files = [
 [package.dependencies]
 referencing = "*"
 
+[[package]]
+name = "types-protobuf"
+version = "5.29.1.20250208"
+description = "Typing stubs for protobuf"
+optional = false
+python-versions = ">=3.9"
+groups = ["dev"]
+files = [
+    {file = "types_protobuf-5.29.1.20250208-py3-none-any.whl", hash = "sha256:c5f8bfb4afdc1b5cbca1848f2c8b361a2090add7401f410b22b599ef647bf483"},
= "types_protobuf-5.29.1.20250208.tar.gz", hash = "sha256:c1acd6a59ab554dbe09b5d1fa7dd701e2fcfb2212937a3af1c03b736060b792a"}, +] + [[package]] name = "types-python-dateutil" version = "2.9.0.20241206" @@ -2492,4 +2504,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4" -content-hash = "8968de9bd0b7af7b55c9aa9799f974a083c39b0e3af8c352e9f8a31cbdbbefcb" +content-hash = "228369a2cf47fc8534f45277fd0a0118ab96b5ec90b0c6088de2f00f06a502d4" diff --git a/pyproject.toml b/pyproject.toml index 11c09c37..3ba977cd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,9 +30,6 @@ packages = [ { include = "humanloop", from = "src"} ] -[tool.poetry.group.dev.dependencies] -protobuf = "^5.29.3" - [project.urls] Repository = 'https://github.com/humanloop/humanloop-python' @@ -69,6 +66,8 @@ jsonschema = "^4.23.0" numpy = "<2.0.0" onnxruntime = "<=1.19.2" openai = "^1.52.2" +protobuf = "^5.29.3" +types-protobuf = "^5.29.1.20250208" pandas = "^2.2.0" parse-type = ">=0.6.4" pyarrow = "^19.0.0" diff --git a/src/humanloop/client.py b/src/humanloop/client.py index b7c976b5..cb6c9c64 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -1,4 +1,3 @@ -from contextlib import contextmanager import os import typing from typing import Any, List, Optional, Sequence @@ -10,14 +9,14 @@ from humanloop.core.client_wrapper import SyncClientWrapper -from humanloop.eval_utils import run_eval -from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File +from humanloop.evals import run_eval +from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop from humanloop.overload import overload_call, overload_log -from humanloop.utilities.flow import flow as flow_decorator_factory -from humanloop.utilities.prompt import prompt_decorator_factory -from humanloop.utilities.tool import tool_decorator_factory as tool_decorator_factory +from humanloop.decorators.flow import flow as flow_decorator_factory +from humanloop.decorators.prompt import prompt_decorator_factory +from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory from humanloop.environment import HumanloopEnvironment from humanloop.evaluations.client import EvaluationsClient from humanloop.otel import instrument_provider diff --git a/src/humanloop/context.py b/src/humanloop/context.py index c2563d3b..aa5940ce 100644 --- a/src/humanloop/context.py +++ b/src/humanloop/context.py @@ -4,6 +4,7 @@ from typing import Any, Callable, Generator, Literal, Optional from opentelemetry import context as context_api +from humanloop.error import HumanloopRuntimeError from humanloop.otel.constants import ( HUMANLOOP_CONTEXT_EVALUATION, HUMANLOOP_CONTEXT_DECORATOR, @@ -12,13 +13,13 @@ def get_trace_id() -> Optional[str]: - key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident())) - return context_api.get_value(key=key) + key = str(hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))) + return context_api.get_value(key=key) # type: ignore [return-value] @contextmanager def set_trace_id(flow_log_id: str) -> Generator[None, None, None]: - key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident())) + key = str(hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))) token = context_api.attach(context_api.set_value(key=key, value=flow_log_id)) yield context_api.detach(token=token) @@ -28,59 +29,106 @@ def set_trace_id(flow_log_id: str) -> Generator[None, None, None]: class DecoratorContext: path: str 
     type: Literal["prompt", "tool", "flow"]
-    version: dict[str, Optional[Any]]
+    version: dict[str, Any]
 
 
 @contextmanager
-def set_decorator_context(prompt_context: DecoratorContext) -> Generator[None, None, None]:
-    key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))
+def set_decorator_context(
+    decorator_context: DecoratorContext,
+) -> Generator[DecoratorContext, None, None]:
+    key = str(hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident())))
     reset_token = context_api.attach(
         context_api.set_value(
             key=key,
-            value=prompt_context,
+            value=decorator_context,
         )
     )
-    yield
+    yield decorator_context
     context_api.detach(token=reset_token)
 
 
 def get_decorator_context() -> Optional[DecoratorContext]:
-    key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))
-    return context_api.get_value(key)
+    key = str(hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident())))
+    return context_api.get_value(key)  # type: ignore [return-value]
 
 
 class EvaluationContext:
     source_datapoint_id: str
     run_id: str
-    logging_callback: Callable[[str], None]
     file_id: str
     path: str
-    logging_counter: int
+    _logged: bool
+    _callback: Callable[[str], None]
+    _context_log_belongs_eval_file: bool
+
+    @property
+    def logged(self) -> bool:
+        return self._logged
+
+    @contextmanager
+    def spy_log_args(
+        self,
+        log_args: dict[str, Any],
+        path: Optional[str] = None,
+        file_id: Optional[str] = None,
+    ) -> Generator[dict[str, Any], None, None]:
+        if path is None and file_id is None:
+            raise HumanloopRuntimeError(
+                "Internal error: Evaluation context called without providing a path or file_id"
+            )
+
+        if self.path is not None and self.path == path:
+            self._logged = True
+            self._context_log_belongs_eval_file = True
+            yield {
+                **log_args,
+                "source_datapoint_id": self.source_datapoint_id,
+                "run_id": self.run_id,
+            }
+        elif self.file_id is not None and self.file_id == file_id:
+            self._logged = True
+            self._context_log_belongs_eval_file = True
+            yield {
+                **log_args,
+                "source_datapoint_id": self.source_datapoint_id,
+                "run_id": self.run_id,
+            }
+        else:
+            yield log_args
+            self._context_log_belongs_eval_file = False
+
+    @property
+    def callback(self) -> Optional[Callable[[str], None]]:
+        if self._context_log_belongs_eval_file:
+            return self._callback
+        return None
 
     def __init__(
         self,
         source_datapoint_id: str,
         run_id: str,
-        logging_callback: Callable[[str], None],
+        eval_callback: Callable[[str], None],
        file_id: str,
         path: str,
     ):
         self.source_datapoint_id = source_datapoint_id
         self.run_id = run_id
-        self.logging_callback = logging_callback
+        self._callback = eval_callback
         self.file_id = file_id
         self.path = path
-        self.logging_counter = 0
+        self._logged = False
 
 
 @contextmanager
-def set_evaluation_context(evaluation_context: EvaluationContext) -> Generator[None, None, None]:
-    key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
+def set_evaluation_context(
+    evaluation_context: EvaluationContext,
+) -> Generator[None, None, None]:
+    key = str(hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident())))
     reset_token = context_api.attach(context_api.set_value(key, evaluation_context))
     yield
     context_api.detach(token=reset_token)
 
 
 def get_evaluation_context() -> Optional[EvaluationContext]:
-    key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
-    return context_api.get_value(key)
+    key = str(hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident())))
+    return context_api.get_value(key)  # type: ignore [return-value]
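The context helpers above replace the earlier ContextVar-based module: each value now lives in the OpenTelemetry context under a key derived from a constant plus the current thread id, so concurrent evaluation threads cannot observe each other's state. A minimal sketch of how the decorator context composes (the path and version values below are illustrative, not part of this patch):

    from humanloop.context import (
        DecoratorContext,
        get_decorator_context,
        set_decorator_context,
    )

    def current_path() -> str:
        # Read the nearest enclosing decorator's path, if any is active.
        ctx = get_decorator_context()
        return ctx.path if ctx is not None else "<no decorator active>"

    with set_decorator_context(
        DecoratorContext(path="demo/agent", type="flow", version={"attributes": {}})
    ):
        assert current_path() == "demo/agent"
    assert current_path() == "<no decorator active>"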
diff --git a/src/humanloop/utilities/__init__.py b/src/humanloop/decorators/__init__.py
similarity index 100%
rename from src/humanloop/utilities/__init__.py
rename to src/humanloop/decorators/__init__.py
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
new file mode 100644
index 00000000..b43b1b7d
--- /dev/null
+++ b/src/humanloop/decorators/flow.py
@@ -0,0 +1,133 @@
+import logging
+from functools import wraps
+from typing import Any, Callable, Optional, TypeVar
+from typing_extensions import ParamSpec
+
+from opentelemetry.trace import Span, Tracer
+
+from humanloop.base_client import BaseHumanloop
+from humanloop.context import (
+    DecoratorContext,
+    get_trace_id,
+    set_decorator_context,
+    set_trace_id,
+)
+from humanloop.evals.run import HumanloopRuntimeError
+from humanloop.types.chat_message import ChatMessage
+from humanloop.decorators.helpers import bind_args
+from humanloop.evals.types import File
+from humanloop.otel.constants import (
+    HUMANLOOP_FILE_TYPE_KEY,
+    HUMANLOOP_LOG_KEY,
+    HUMANLOOP_FILE_PATH_KEY,
+    HUMANLOOP_FLOW_SPAN_NAME,
+)
+from humanloop.otel.helpers import process_output, write_to_opentelemetry_span
+from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.types.flow_log_response import FlowLogResponse
+
+logger = logging.getLogger("humanloop.sdk")
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def flow(
+    client: "BaseHumanloop",
+    opentelemetry_tracer: Tracer,
+    path: str,
+    attributes: Optional[dict[str, Any]] = None,
+):
+    flow_kernel = {"attributes": attributes or {}}
+
+    def decorator(func: Callable[P, R]) -> Callable[P, Optional[R]]:
+        decorator_path = path or func.__name__
+        file_type = "flow"
+
+        @wraps(func)
+        def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
+            span: Span
+            with set_decorator_context(
+                DecoratorContext(
+                    path=decorator_path,
+                    type="flow",
+                    version=flow_kernel,
+                )
+            ) as decorator_context:
+                with opentelemetry_tracer.start_as_current_span(HUMANLOOP_FLOW_SPAN_NAME) as span:  # type: ignore
+                    span.set_attribute(HUMANLOOP_FILE_PATH_KEY, decorator_path)
+                    span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)
+                    trace_id = get_trace_id()
+                    func_args = bind_args(func, args, kwargs)
+
+                    # Create the trace ahead so we have a parent ID to reference
+                    init_log_inputs = {
+                        "inputs": {k: v for k, v in func_args.items() if k != "messages"},
+                        "messages": func_args.get("messages"),
+                        "trace_parent_id": trace_id,
+                    }
+                    this_flow_log: FlowLogResponse = client.flows._log(  # type: ignore [attr-defined]
+                        path=decorator_context.path,
+                        flow=decorator_context.version,
+                        log_status="incomplete",
+                        **init_log_inputs,
+                    )
+
+                    with set_trace_id(this_flow_log.id):
+                        func_output: Optional[R]
+                        log_output: Optional[str]
+                        log_error: Optional[str]
+                        log_output_message: Optional[ChatMessage]
+                        try:
+                            func_output = func(*args, **kwargs)
+                            if (
+                                isinstance(func_output, dict)
+                                and len(func_output.keys()) == 2
+                                and "role" in func_output
+                                and "content" in func_output
+                            ):
+                                log_output_message = func_output  # type: ignore [assignment]
+                                log_output = None
+                            else:
+                                log_output = process_output(func=func, output=func_output)
+                                log_output_message = None
+                            log_error = None
+                        except HumanloopRuntimeError as e:
+                            # Critical error, re-raise
+                            client.logs.delete(id=this_flow_log.id)
+                            span.record_exception(e)
+                            raise e
+                        except Exception as e:
+                            logger.error(f"Error calling {func.__name__}: {e}")
+                            log_output = None
+                            log_output_message = None
+                            log_error = str(e)
+                            func_output = None
+
+                        updated_flow_log = {
+                            "log_status": "complete",
+                            "output": log_output,
+                            "error": log_error,
+                            "output_message": log_output_message,
+                            "id": this_flow_log.id,
+                        }
+                        # Write the Flow Log to the Span on HL_LOG_OT_KEY
+                        write_to_opentelemetry_span(
+                            span=span,  # type: ignore [arg-type]
+                            key=HUMANLOOP_LOG_KEY,
+                            value=updated_flow_log,  # type: ignore
+                        )
+                        # Return the output of the decorated function
+                        return func_output  # type: ignore [return-value]
+
+        wrapper.file = File(  # type: ignore
+            path=decorator_path,
+            type=file_type,  # type: ignore [arg-type, typeddict-item]
+            version=FlowDict(**flow_kernel),  # type: ignore
+            callable=wrapper,
+        )
+
+        return wrapper
+
+    return decorator
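A short usage sketch for the flow decorator above, assuming the client binds this factory to humanloop.flow as client.py does (the path and agent body are illustrative):

    from humanloop import Humanloop

    humanloop = Humanloop(api_key="...")

    @humanloop.flow(path="demo/support-agent")
    def support_agent(question: str) -> str:
        # Prompts and tools called in here pick up this Flow's trace id
        # via get_trace_id() and are attached to its trace.
        return f"Answer to: {question}"

    support_agent("How do I rotate my API key?")

Note the changed contract: the Flow Log is created up front with log_status="incomplete" so children have a trace_parent_id, then completed at the end. Ordinary exceptions from the wrapped function are recorded on the Log and swallowed (the wrapper returns None), while a HumanloopRuntimeError deletes the incomplete Log and propagates.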
"output": log_output, + "error": log_error, + "output_message": log_output_message, + "id": this_flow_log.id, + } + # Write the Flow Log to the Span on HL_LOG_OT_KEY + write_to_opentelemetry_span( + span=span, # type: ignore [arg-type] + key=HUMANLOOP_LOG_KEY, + value=updated_flow_log, # type: ignore + ) + # Return the output of the decorated function + return func_output # type: ignore [return-value] + + wrapper.file = File( # type: ignore + path=decorator_path, + type=file_type, # type: ignore [arg-type, typeddict-item] + version=FlowDict(**flow_kernel), # type: ignore + callable=wrapper, + ) + + return wrapper + + return decorator diff --git a/src/humanloop/utilities/helpers.py b/src/humanloop/decorators/helpers.py similarity index 100% rename from src/humanloop/utilities/helpers.py rename to src/humanloop/decorators/helpers.py diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py new file mode 100644 index 00000000..468bffcd --- /dev/null +++ b/src/humanloop/decorators/prompt.py @@ -0,0 +1,43 @@ +from functools import wraps +import logging + +from typing_extensions import ParamSpec +from typing import Callable, Optional, TypeVar + +from humanloop.context import DecoratorContext, set_decorator_context +from humanloop.evals.types import File + +logger = logging.getLogger("humanloop.sdk") + +P = ParamSpec("P") +R = TypeVar("R") + + +def prompt_decorator_factory(path: str, template: Optional[str]): + def decorator(func: Callable[P, R]) -> Callable[P, R]: + @wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + with set_decorator_context( + DecoratorContext( + path=path, + type="prompt", + version={ + "template": template, + }, + ) + ): + output = func(*args, **kwargs) + return output + + wrapper.file = File( # type: ignore [attr-defined] + path=path, + type="prompt", + version={ # type: ignore [typeddict-item] + "template": template, + }, + callable=wrapper, + ) + + return wrapper + + return decorator diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/decorators/tool.py similarity index 94% rename from src/humanloop/utilities/tool.py rename to src/humanloop/decorators/tool.py index 4314808f..102de834 100644 --- a/src/humanloop/utilities/tool.py +++ b/src/humanloop/decorators/tool.py @@ -12,14 +12,15 @@ from opentelemetry.trace import Tracer -from humanloop.context import get_trace_id -from humanloop.utilities.helpers import bind_args -from humanloop.eval_utils import File +from humanloop.context import get_evaluation_context, get_trace_id +from humanloop.decorators.helpers import bind_args +from humanloop.evals import File +from humanloop.evals.run import HumanloopRuntimeError from humanloop.otel.constants import ( HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, - HUMANLOOP_PATH_KEY, + HUMANLOOP_FILE_PATH_KEY, ) from humanloop.otel.helpers import process_output, write_to_opentelemetry_span from humanloop.requests.tool_function import ToolFunctionParams @@ -41,7 +42,7 @@ def tool_decorator_factory( attributes: Optional[dict[str, Any]] = None, setup_values: Optional[dict[str, Any]] = None, ): - def decorator(func: Callable[P, R]) -> Callable[P, R]: + def decorator(func: Callable[P, R]) -> Callable[P, Optional[R]]: file_type = "tool" tool_kernel = _build_tool_kernel( @@ -56,14 +57,18 @@ def decorator(func: Callable[P, R]) -> Callable[P, R]: @wraps(func) def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: + evaluation_context = get_evaluation_context() + if evaluation_context is not None: + if 
diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/decorators/tool.py
similarity index 94%
rename from src/humanloop/utilities/tool.py
rename to src/humanloop/decorators/tool.py
index 4314808f..102de834 100644
--- a/src/humanloop/utilities/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -12,14 +12,15 @@
 
 from opentelemetry.trace import Tracer
 
-from humanloop.context import get_trace_id
-from humanloop.utilities.helpers import bind_args
-from humanloop.eval_utils import File
+from humanloop.context import get_evaluation_context, get_trace_id
+from humanloop.decorators.helpers import bind_args
+from humanloop.evals import File
+from humanloop.evals.run import HumanloopRuntimeError
 from humanloop.otel.constants import (
     HUMANLOOP_FILE_KEY,
     HUMANLOOP_FILE_TYPE_KEY,
     HUMANLOOP_LOG_KEY,
-    HUMANLOOP_PATH_KEY,
+    HUMANLOOP_FILE_PATH_KEY,
 )
 from humanloop.otel.helpers import process_output, write_to_opentelemetry_span
 from humanloop.requests.tool_function import ToolFunctionParams
@@ -41,7 +42,7 @@ def tool_decorator_factory(
     attributes: Optional[dict[str, Any]] = None,
     setup_values: Optional[dict[str, Any]] = None,
 ):
-    def decorator(func: Callable[P, R]) -> Callable[P, R]:
+    def decorator(func: Callable[P, R]) -> Callable[P, Optional[R]]:
         file_type = "tool"
 
         tool_kernel = _build_tool_kernel(
@@ -56,14 +57,18 @@ def decorator(func: Callable[P, R]) -> Callable[P, R]:
 
         @wraps(func)
         def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
+            evaluation_context = get_evaluation_context()
+            if evaluation_context is not None:
+                if evaluation_context.path == path:
+                    raise HumanloopRuntimeError("Tools cannot be evaluated with the `evaluations.run()` utility.")
             with opentelemetry_tracer.start_as_current_span("humanloop.tool") as span:
                 # Write the Tool Kernel to the Span on HL_FILE_OT_KEY
                 write_to_opentelemetry_span(
                     span=span,
                     key=HUMANLOOP_FILE_KEY,
-                    value=tool_kernel,
+                    value=tool_kernel,  # type: ignore [arg-type]
                 )
-                span.set_attribute(HUMANLOOP_PATH_KEY, path)
+                span.set_attribute(HUMANLOOP_FILE_PATH_KEY, path)
                 span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type)
 
                 log_inputs: dict[str, Any] = bind_args(func, args, kwargs)
@@ -78,6 +83,9 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
                         output=func_output,
                     )
                     log_error = None
+                except HumanloopRuntimeError as e:
+                    # Critical error, re-raise
+                    raise e
                 except Exception as e:
                     logger.error(f"Error calling {func.__name__}: {e}")
                     output = None
@@ -87,19 +95,18 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
                     )
                     log_error = str(e)
 
-                # Populate known Tool Log attributes
+                # Populate Tool Log attributes
                 tool_log = {
                     "inputs": log_inputs,
                     "output": log_output,
                     "error": log_error,
                     "trace_parent_id": get_trace_id(),
                 }
-                # Write the Tool Log to the Span on HL_LOG_OT_KEY
                 write_to_opentelemetry_span(
                     span=span,
                     key=HUMANLOOP_LOG_KEY,
-                    value=tool_log,
+                    value=tool_log,  # type: ignore [arg-type]
                 )
 
                 # Return the output of the decorated function
@@ -107,7 +114,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
 
         wrapper.file = File(  # type: ignore
             path=path,
-            type=file_type,
+            type=file_type,  # type: ignore [arg-type, typeddict-item]
             version=tool_kernel,
             callable=wrapper,
         )
diff --git a/src/humanloop/error.py b/src/humanloop/error.py
new file mode 100644
index 00000000..ed7ac177
--- /dev/null
+++ b/src/humanloop/error.py
@@ -0,0 +1,11 @@
+from typing import Optional
+
+
+class HumanloopRuntimeError(Exception):
+    def __init__(self, message: Optional[str] = None):
+        self.message = message
+
+    def __str__(self) -> str:
+        if self.message is None:
+            return super().__str__()
+        return self.message
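HumanloopRuntimeError is deliberately excluded from the per-Log error capture: both decorators re-raise it (the flow decorator additionally deletes its incomplete Log), so programmer errors surface instead of being recorded as failed Logs. Its string handling is the only logic it carries, easy to verify in isolation:

    from humanloop.error import HumanloopRuntimeError

    err = HumanloopRuntimeError("Tools cannot be evaluated with the `evaluations.run()` utility.")
    assert str(err) == "Tools cannot be evaluated with the `evaluations.run()` utility."
    assert str(HumanloopRuntimeError()) == ""  # no message: falls back to Exception.__str__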
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/evals/__init__.py
similarity index 100%
rename from src/humanloop/eval_utils/__init__.py
rename to src/humanloop/evals/__init__.py
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/evals/run.py
similarity index 76%
rename from src/humanloop/eval_utils/run.py
rename to src/humanloop/evals/run.py
index dd698159..25c2228b 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/evals/run.py
@@ -18,7 +18,7 @@
 import threading
 import time
 import typing
-from concurrent.futures import ThreadPoolExecutor, as_completed
+from concurrent.futures import Future, ThreadPoolExecutor
 from datetime import datetime
 from functools import partial
 from logging import INFO
@@ -41,7 +41,8 @@
     get_evaluation_context,
     set_evaluation_context,
 )
-from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.error import HumanloopRuntimeError
+from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File
 
 # We use TypedDicts for requests, which is consistent with the rest of the SDK
 from humanloop.evaluators.client import EvaluatorsClient
@@ -67,14 +68,11 @@
 from humanloop.types.datapoint_response import DatapointResponse
 from humanloop.types.dataset_response import DatasetResponse
 from humanloop.types.evaluation_run_response import EvaluationRunResponse
-from humanloop.types.evaluator_log_response import EvaluatorLogResponse
 from humanloop.types.flow_log_response import FlowLogResponse
 from humanloop.types.log_response import LogResponse
-from humanloop.types.prompt_log_response import PromptLogResponse
 from humanloop.types.run_stats_response import RunStatsResponse
 from pydantic import ValidationError
-from humanloop.types.tool_log_response import ToolLogResponse
 
 if typing.TYPE_CHECKING:
     from humanloop.client import BaseHumanloop
@@ -105,16 +103,6 @@
 CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient)
 
 
-class HumanloopDecoratorError(Exception):
-    def __init__(self, message: Optional[str] = None):
-        self.message = message
-
-    def __str__(self):
-        if self.message is None:
-            return super().__str__()
-        return self.message
-
-
 def run_eval(
     client: "BaseHumanloop",
     file: File,
@@ -137,39 +125,63 @@ def run_eval(
     evaluators_worker_pool = ThreadPoolExecutor(max_workers=workers)
 
     file_ = _file_or_file_inside_hl_utility(file)
-    is_decorator = _callable_is_hl_utility(file_)
     type_ = _get_file_type(file_)
     function_ = _get_file_callable(file_, type_)
+    if hasattr(function_, "file"):
+        decorator_type = function_.file["type"]  # type: ignore [attr-defined, union-attr]
+        if decorator_type != type_:
+            raise HumanloopRuntimeError(
+                "The type of the decorated function does not match the type of the file. Expected `%s`, got `%s`."
+                % (type_.capitalize(), decorator_type.capitalize())
+            )
 
-    hl_file = _upsert_file(file=file_, type=type_, client=client)
-    hl_dataset = _upsert_dataset(dataset=dataset, client=client)
-    local_evaluators = _upsert_local_evaluators(
-        evaluators=evaluators,
-        client=client,
-        function=function_,
-        type=type_,
-    )
+    try:
+        hl_file = _upsert_file(file=file_, type=type_, client=client)
+    except ValidationError as e:
+        sys.stdout.write(f"{RED}Error in your `file` argument:\n\n{e}{RESET}")
+        return []
+    except Exception as e:
+        sys.stdout.write(f"{RED}Error in your `file` argument:\n\n{e}{RESET}")
+        return []
+    try:
+        hl_dataset = _upsert_dataset(dataset=dataset, client=client)
+    except Exception as e:
+        sys.stdout.write(f"{RED}Error in your `dataset` argument:\n\n{e}{RESET}")
+        return []
+    try:
+        local_evaluators = _upsert_local_evaluators(
+            evaluators=evaluators,  # type: ignore [arg-type]
+            client=client,
+            function=function_,
+            type=type_,
+        )
+    except Exception as e:
+        sys.stdout.write(f"{RED}Error in your `evaluators` argument:\n\n{e}{RESET}")
+        return []
     _assert_dataset_evaluators_fit(hl_dataset, local_evaluators)
 
     evaluation, run = _get_new_run(
         client=client,
        evaluation_name=name,
-        evaluators=evaluators,
+        evaluators=evaluators,  # type: ignore [arg-type]
        hl_file=hl_file,
        hl_dataset=hl_dataset,
        function=function_,
    )
 
-    def handle_exit_signal(signum, frame):
-        sys.stderr.write(
-            f"\n{RED}Received signal {signum}, cancelling Evaluation and shutting down threads...{RESET}\n"
-        )
+    def _cancel_evaluation():
         client.evaluations.update_evaluation_run(
             id=evaluation.id,
             run_id=run.id,
             status="cancelled",
         )
         evaluators_worker_pool.shutdown(wait=False)
+
+    def handle_exit_signal(signum, frame):
+        sys.stderr.write(
+            f"\n{RED}Received signal {signum}, cancelling Evaluation and shutting down threads...{RESET}\n"
+        )
+        _cancel_evaluation()
         sys.exit(signum)
 
     signal.signal(signal.SIGINT, handle_exit_signal)
@@ -207,7 +219,7 @@ def upload_callback(log_id: str):
             log_id=log_id,
             datapoint=dp,
             local_evaluators=local_evaluators,
-            file_type=hl_file.type,
+            file_type=hl_file.type,  # type: ignore [arg-type]
             progress_bar=_PROGRESS_BAR,
         )
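With the try/except blocks above, run_eval now fails fast: a bad `file`, `dataset`, or `evaluators` argument prints the problem and returns an empty list of checks instead of raising halfway through setup, and Ctrl-C routes through the new _cancel_evaluation() helper. From the caller's side this looks roughly like the following, assuming run_eval is exposed as evaluations.run on the client (names illustrative):

    checks = humanloop.evaluations.run(
        name="my-eval",
        file={"path": "demo/support-agent", "callable": support_agent},
        dataset={"path": "demo/nonexistent-dataset"},
        evaluators=[],
    )
    if not checks:
        # Setup failed or the run was cancelled; the reason was already
        # written in red to stdout/stderr.
        ...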
str): with set_evaluation_context( EvaluationContext( source_datapoint_id=dp.id, - logging_callback=upload_callback, + eval_callback=upload_callback, file_id=hl_file.id, run_id=run.id, path=hl_file.path, - logged=False, ) ): log_func = _get_log_func( client=client, - file_type=hl_file.type, + file_type=hl_file.type, # type: ignore [arg-type] file_id=hl_file.id, version_id=hl_file.version_id, run_id=run.id, ) start_time = datetime.now() + evaluation_context = get_evaluation_context() + if evaluation_context is None: + raise HumanloopRuntimeError( + "Internal error: evaluation context is not set while processing a datapoint." + ) try: - output = _call_function(function_, hl_file.type, dp) - evaluation_context = get_evaluation_context() - if evaluation_context is None: - raise HumanloopDecoratorError( - "Internal error: evaluation context is not set while processing a datapoint." - ) - if evaluation_context.logging_counter == 0: + output = _call_function(function_, hl_file.type, dp) # type: ignore [arg-type] + if not evaluation_context.logged: # function_ did not Log against the source_datapoint_id/ run_id pair # so we need to create a Log log = log_func( - inputs=dp.inputs, - output=output, - start_time=start_time, - end_time=datetime.now(), - source_datapoint_id=dp.id, - run_id=run.id, + **{ + "inputs": dp.inputs, + "output": output, + "start_time": start_time, + "end_time": datetime.now(), + "source_datapoint_id": dp.id, + "run_id": run.id, + "log_status": "complete", + } ) - evaluation_context.logging_counter += 1 - evaluation_context.logging_callback(log.id) - except HumanloopDecoratorError as e: + evaluation_context._callback(log.id) + except HumanloopRuntimeError as e: raise e except Exception as e: - log_func( - inputs=dp.inputs, - error=str(e), - source_datapoint_id=dp.id, - run_id=run.id, - start_time=start_time, - end_time=datetime.now(), - source_datapoint_id=dp.id, - run_id=run.id, + log = log_func( + **{ + "inputs": dp.inputs, + "error": str(e), + "source_datapoint_id": dp.id, + "run_id": run.id, + "start_time": start_time, + "end_time": datetime.now(), + "log_status": "complete", + } + ) + evaluation_context._callback(log.id) + error_message = _get_error_message(e, length_limit=True) + sys.stderr.write( + f"\n{RED}Evaluated callable failed for Datapoint `{dp.id}`:\n{error_message}{RESET}\n" ) - error_message = str(e).replace("\n", " ") - if len(error_message) > 100: - sys.stderr.write( - f"\n{RED}Your {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. Error: {error_message[:100]}...{RESET}\n" - ) - else: - sys.stderr.write( - f"\n{RED}Your {hl_file.type}'s `callable` failed for Datapoint: {dp.id}. 
Error: {error_message}{RESET}\n" - ) + futures: list[Future] = [] with ThreadPoolExecutor(max_workers=workers) as executor: - futures = [] for datapoint in hl_dataset.datapoints: futures.append(executor.submit(_process_datapoint, datapoint)) - # Program hangs if any uncaught exceptions are not handled here - for future in as_completed(futures): - try: - future.result() - except Exception: - pass + + for future in futures: + try: + future.result() + except Exception as e: + sys.stderr.write(f"\n{RED}Error processing datapoint:\n{_get_error_message(e)}{RESET}\n") + _cancel_evaluation() + return [] stats = _wait_for_evaluation_to_complete( client=client, @@ -298,7 +310,7 @@ def upload_callback(log_id: str): client=client, evaluation=evaluation, stats=stats, - evaluators=evaluators, + evaluators=evaluators, # type: ignore [arg-type] run=run, ) evaluators_worker_pool.shutdown(wait=False) @@ -356,7 +368,7 @@ class _LocalEvaluator: def _callable_is_hl_utility(file: File) -> bool: """Check if a File is a decorated function.""" - return hasattr(file["callable"], "file") + return hasattr(file.get("callable", {}), "file") def _wait_for_evaluation_to_complete( @@ -380,9 +392,9 @@ def _wait_for_evaluation_to_complete( if not waiting_for_local_evals_message_printed: sys.stderr.write("\n\nWaiting for Evaluators on Humanloop runtime...\n") waiting_for_local_evals_message_printed = True - sys.stderr.write(stats.progress) + sys.stderr.write(stats.progress) # type: ignore [arg-type] # Move the cursor up in stderr a number of lines equal to the number of lines in stats.progress - sys.stderr.write("\033[A" * (stats.progress.count("\n"))) + # sys.stderr.write("\033[A" * (stats.progress.count("\n"))) time.sleep(5) return stats @@ -435,33 +447,48 @@ def _get_checks( def _file_or_file_inside_hl_utility(file: File) -> File: if _callable_is_hl_utility(file): - # When the decorator inside `file` is a decorated function, - # we need to validate that the other parameters of `file` - # match the attributes of the decorator - inner_file: File = file["callable"].file + inner_file: File = file["callable"].file # type: ignore [misc, attr-defined] if "path" in file and inner_file["path"] != file["path"]: - raise ValueError( - "`path` attribute specified in the `file` does not match the File path of the decorated function." + raise HumanloopRuntimeError( + "`path` attribute specified in the `file` does not match the path of the decorated function. " + f"Expected `{inner_file['path']}`, got `{file['path']}`." ) - if "version" in file and inner_file["version"] != file["version"]: - raise ValueError( - "`version` attribute in the `file` does not match the File version of the decorated function." + if "id" in file: + raise HumanloopRuntimeError( + "Do not specify an `id` attribute in `file` argument when using a decorated function." ) + if "version" in file: + if inner_file["type"] != "prompt": + raise HumanloopRuntimeError( + f"Do not specify a `version` attribute in `file` argument when using a {inner_file['type'].capitalize()} decorated function." + ) if "type" in file and inner_file["type"] != file["type"]: - raise ValueError( - "`type` attribute of `file` argument does not match the File type of the decorated function." + raise HumanloopRuntimeError( + "Attribute `type` of `file` argument does not match the file type of the decorated function. " + f"Expected `{inner_file['type']}`, got `{file['type']}`." 
)
     if "id" in file:
-        raise ValueError("Do not specify an `id` attribute in `file` argument when using a decorated function.")
+        raise HumanloopRuntimeError(
+            "Do not specify an `id` attribute in `file` argument when using a decorated function."
+        )
     # file on decorated function holds at least
     # or more information than the `file` argument
     file_ = copy.deepcopy(inner_file)
+    if file_["type"] == "prompt":
+        sys.stdout.write(
+            f"{YELLOW}"
+            "The @prompt decorator will not spy on provider calls when passed to `evaluations.run()`. "
+            "Using the `version` in `file` argument instead.\n"
+            f"{RESET}"
+        )
+        # TODO: document this
+        file_["version"] = file.get("version", file_.get("version"))
 else:
     file_ = copy.deepcopy(file)
-    # Raise error if one of path or id not provided
+    # Raise error if neither path nor id is provided
     if not file_.get("path") and not file_.get("id"):
-        raise ValueError("You must provide a path or id in your `file`.")
+        raise HumanloopRuntimeError("You must provide a path or id in your `file`.")
 return file_
 
 
@@ -469,14 +496,14 @@ def _file_or_file_inside_hl_utility(file: File) -> File:
 
 def _get_file_type(file: File) -> FileType:
     # Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow`
     try:
-        type_ = typing.cast(FileType, file.pop("type"))
+        type_ = typing.cast(FileType, file.pop("type"))  # type: ignore [arg-type, misc]
         sys.stdout.write(
             f"{CYAN}Evaluating your {type_} function corresponding to `{file.get('path') or file.get('id')}` on Humanloop{RESET}\n\n"
         )
         return type_ or "flow"
     except KeyError as _:
         type_ = "flow"
-        sys.stdout.write(f"{CYAN}No `file` type specified, defaulting to flow.{RESET}\n")
+        sys.stdout.write(f"{YELLOW}No `file` type specified, defaulting to flow.{RESET}\n")
         return type_
 
 
@@ -488,7 +515,7 @@ def _get_file_callable(file: File, type_: FileType) -> Optional[Callable]:
             raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
     else:
         sys.stdout.write(
-            f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.\n"
+            f"{CYAN}No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.\n\n{RESET}"
         )
     return function_
 
 
@@ -507,29 +534,20 @@ def _upsert_file(
         except ValidationError:
             flow_version = {"attributes": version}
         file_dict = {**file, **flow_version}
-        hl_file = client.flows.upsert(**file_dict)
+        hl_file = client.flows.upsert(**file_dict)  # type: ignore [arg-type, assignment]
     elif type == "prompt":
-        try:
-            Prompt.model_validate(version)
-        except ValidationError as error_:
-            sys.stdout.write(f"Invalid Prompt `version` in your `file` request. \n\nValidation error: \n{error_}")
-            raise error_
-        try:
-            hl_file = client.prompts.upsert(**file_dict)
-        except ApiError as error_:
-            raise error_
+        # Raises ValidationError if `version` is invalid
+        Prompt.model_validate(version)
+        hl_file = client.prompts.upsert(**file_dict)  # type: ignore [arg-type, assignment]
    elif type == "tool":
-        try:
-            Tool.model_validate(version)
-        except ValidationError as error_:
-            sys.stdout.write(f"Invalid Tool `version` in your `file` request. 
\n\nValidation error: \n{error_}") - raise error_ - hl_file = client.tools.upsert(**file_dict) + # Will throw error if version is invalid + Tool.model_validate(version) + hl_file = client.tools.upsert(**file_dict) # type: ignore [arg-type, assignment] elif type == "evaluator": - hl_file = client.evaluators.upsert(**file_dict) + hl_file = client.evaluators.upsert(**file_dict) # type: ignore [arg-type, assignment] else: raise NotImplementedError(f"Unsupported File type: {type}") @@ -581,11 +599,17 @@ def _upsert_local_evaluators( attributes={"code": inspect.getsource(eval_function)}, evaluator_type="external", ) - evaluator = client.evaluators.upsert( - id=evaluator_request.get("id"), - path=evaluator_request.get("path"), - spec=spec, - ) + try: + evaluator = client.evaluators.upsert( + id=evaluator_request.get("id"), + path=evaluator_request.get("path"), + spec=spec, + ) + except Exception as error_: + sys.stdout.write( + f"Error upserting Evaluator {evaluator_request.get('path') or evaluator_request.get('id')} on Humanloop:\n\n{error_}" + ) + raise error_ local_evaluators.append(_LocalEvaluator(hl_evaluator=evaluator, function=eval_function)) return local_evaluators @@ -602,7 +626,7 @@ def _assert_dataset_evaluators_fit( break if requires_target: missing_target = 0 - for _datapoint in hl_dataset.datapoints: + for _datapoint in hl_dataset.datapoints: # type: ignore [union-attr] if not _datapoint.target: missing_target += 1 if missing_target > 0: @@ -614,7 +638,7 @@ def _assert_dataset_evaluators_fit( def _get_new_run( client: "BaseHumanloop", - evaluation_name: str, + evaluation_name: Optional[str], evaluators: list[Evaluator], hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse], hl_dataset: DatasetResponse, @@ -633,7 +657,7 @@ def _get_new_run( if error_.status_code == 409: evals = client.evaluations.list(file_id=hl_file.id, size=50) for page in evals.iter_pages(): - evaluation = next((e for e in page.items if e.name == evaluation_name), None) + evaluation = next((e for e in page.items if e.name == evaluation_name), None) # type: ignore [union-attr] else: raise error_ if not evaluation: @@ -674,43 +698,7 @@ def _call_function( def _get_log_func( client: "BaseHumanloop", - file_type: Literal["flow"], - file_id: str, - version_id: str, - run_id: str, -) -> Callable[..., FlowLogResponse]: ... - - -def _get_log_func( - client: "BaseHumanloop", - file_type: Literal["prompt"], - file_id: str, - version_id: str, - run_id: str, -) -> Callable[..., PromptLogResponse]: ... - - -def _get_log_func( - client: "BaseHumanloop", - file_type: Literal["tool"], - file_id: str, - version_id: str, - run_id: str, -) -> Callable[..., ToolLogResponse]: ... - - -def _get_log_func( - client: "BaseHumanloop", - file_type: Literal["evaluator"], - file_id: str, - version_id: str, - run_id: str, -) -> Callable[..., EvaluatorLogResponse]: ... 
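# The stacked `_get_log_func` definitions deleted here carried no
# `@typing.overload` decorators, so each redefinition shadowed the previous
# one; the patch collapses them into the single implementation that follows.
# A minimal sketch of the resulting pattern, assuming a hypothetical `client`
# whose sub-clients expose the stashed private `_log` methods (names below
# are illustrative, not the SDK's):
from functools import partial
from typing import Any, Callable, Literal


def make_log_func(client: Any, file_type: Literal["flow", "prompt"], **shared: Any) -> Callable[..., Any]:
    # Bind once the kwargs every Log must carry (file id, version id, run id, ...)
    if file_type == "flow":
        return partial(client.flows._log, **shared)
    if file_type == "prompt":
        return partial(client.prompts._log, **shared)
    raise NotImplementedError(f"Unsupported File type: {file_type}")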
- - -def _get_log_func( - client: "BaseHumanloop", - file_type: FileType, + file_type: Literal["flow", "prompt"], file_id: str, version_id: str, run_id: str, @@ -724,13 +712,9 @@ def _get_log_func( "run_id": run_id, } if file_type == "flow": - return partial(client.flows.log, **log_request) + return partial(client.flows._log, **log_request) # type: ignore [attr-defined] elif file_type == "prompt": - return partial(client.prompts.log, **log_request) - elif file_type == "evaluator": - return partial(client.evaluators.log, **log_request) - elif file_type == "tool": - return partial(client.tools.log, **log_request) + return partial(client.prompts._log, **log_request) # type: ignore [attr-defined] else: raise NotImplementedError(f"Unsupported File version: {file_type}") @@ -744,7 +728,7 @@ def _get_score_from_evaluator_stat( if stat.total_logs: score = round(stat.num_true / stat.total_logs, 2) elif isinstance(stat, NumericStats): - score = round(stat.mean, 2) + score = round(stat.mean, 2) # type: ignore [arg-type] else: raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}") return score @@ -761,7 +745,7 @@ def _get_evaluator_stats_by_path( evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat for evaluator_stat in stat.evaluator_stats } - return evaluator_stats_by_path + return evaluator_stats_by_path # type: ignore [return-value] def _check_evaluation_threshold( @@ -776,14 +760,14 @@ def _check_evaluation_threshold( evaluator_stats_by_path = _get_evaluator_stats_by_path( stat=next( (stat for stat in stats.run_stats if stat.run_id == run_id), - None, + None, # type: ignore [arg-type] ), evaluation=evaluation, ) if evaluator_path in evaluator_stats_by_path: evaluator_stat = evaluator_stats_by_path[evaluator_path] score = _get_score_from_evaluator_stat(stat=evaluator_stat) - if score >= threshold: + if score >= threshold: # type: ignore [operator] sys.stderr.write( f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" ) @@ -813,7 +797,7 @@ def _check_evaluation_improvement( latest_evaluator_stats_by_path = _get_evaluator_stats_by_path( stat=next( (stat for stat in stats.run_stats if stat.run_id == run_id), - None, + None, # type: ignore [arg-type] ), evaluation=evaluation, ) @@ -843,6 +827,20 @@ def _check_evaluation_improvement( raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") +def _get_error_message(e: Exception, length_limit: bool = False) -> str: + import traceback + + # Get the full traceback + trace_info = traceback.format_exc() + + # Extract the last 200 characters of the traceback + last_trace_part = ( + (trace_info[-200:] + "..." 
if len(trace_info) > 200 else trace_info) if length_limit else trace_info + ) + + return f"\n{last_trace_part}" + + def _run_local_evaluators( client: "BaseHumanloop", log_id: str, @@ -897,18 +895,16 @@ def _run_local_evaluators( _ = client.evaluators.log( parent_id=log_id, id=local_evaluator.id, - error=str(e), + error=_get_error_message(e, length_limit=True), start_time=start_time, end_time=datetime.now(), ) - error_message = str(e).replace("\n", " ") - sys.stderr.write( - f"{RED}Evaluator {local_evaluator.path} failed with error {error_message[:100]}...{RESET}\n" - ) + error_message = _get_error_message(e, length_limit=True) + sys.stderr.write(f"{RED}Evaluator `{local_evaluator.path}` failed: {error_message}{RESET}\n") except Exception as e: - error_message = str(e).replace("\n", " ") + error_message = _get_error_message(e, length_limit=True) sys.stderr.write( - f"{RED}Failed to run local Evaluators for source datapoint {datapoint.dict()['id'] if datapoint else None}: {error_message[:100]}...{RESET}\n" + f"{RED}Failed to run local Evaluators for source datapoint `{datapoint.dict()['id'] if datapoint else None}`:\n{error_message}{RESET}\n" ) pass finally: diff --git a/src/humanloop/eval_utils/types.py b/src/humanloop/evals/types.py similarity index 94% rename from src/humanloop/eval_utils/types.py rename to src/humanloop/evals/types.py index 845a8542..86c3dc64 100644 --- a/src/humanloop/eval_utils/types.py +++ b/src/humanloop/evals/types.py @@ -19,9 +19,13 @@ ) # Responses are Pydantic models and we leverage them for improved request validation -from humanloop.types import UpdateDatesetAction as UpdateDatasetAction # TODO: fix original type typo +from humanloop.types import ( + UpdateDatesetAction as UpdateDatasetAction, +) # TODO: fix original type typo -EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator] +EvaluatorDict = Union[ + CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator +] Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict] FileType = Literal["flow", "prompt", "tool", "evaluator"] @@ -38,7 +42,7 @@ class Identifiers(TypedDict): class File(Identifiers): """A File on Humanloop (Flow, Prompt, Tool, Evaluator).""" - type: NotRequired[FileType] + type: Literal["flow", "prompt"] """The type of File this callable relates to on Humanloop.""" version: NotRequired[Version] """The contents uniquely define the version of the File on Humanloop.""" diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py index c0af0ca3..8a28572f 100644 --- a/src/humanloop/otel/constants.py +++ b/src/humanloop/otel/constants.py @@ -4,8 +4,12 @@ HUMANLOOP_LOG_KEY = "humanloop.log" HUMANLOOP_LOG_ID_KEY = "humanloop.log_id" HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type" -HUMANLOOP_PATH_KEY = "humanloop.file.path" +HUMANLOOP_FILE_PATH_KEY = "humanloop.file.path" # Opentelemetry context HUMANLOOP_CONTEXT_DECORATOR = "humanloop.context.decorator" HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id" HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation" + +HUMANLOOP_FLOW_SPAN_NAME = "humanloop.flow" +HUMANLOOP_PROMPT_SPAN_NAME = "humanloop.prompt" +HUMANLOOP_TOOL_SPAN_NAME = "humanloop.tool" diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py deleted file mode 100644 index ddc03eb1..00000000 --- a/src/humanloop/otel/exporter.py +++ /dev/null @@ -1,206 +0,0 @@ -import logging - -import typing -from queue import Empty as EmptyQueue -from queue import Queue -from threading 
import Thread -from typing import Optional, Sequence - -from opentelemetry.sdk.trace import ReadableSpan -from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult - -import requests -from humanloop.context import get_evaluation_context, EvaluationContext -from humanloop.otel.constants import ( - HUMANLOOP_FILE_TYPE_KEY, - HUMANLOOP_LOG_KEY, - HUMANLOOP_PATH_KEY, -) -from google.protobuf.json_format import MessageToJson -from humanloop.otel.helpers import is_llm_provider_call, read_from_opentelemetry_span, write_to_opentelemetry_span -from opentelemetry.proto.common.v1.common_pb2 import KeyValue, AnyValue, InstrumentationScope -from opentelemetry.proto.trace.v1.trace_pb2 import ( - TracesData, - ResourceSpans, - ScopeSpans, - Span as ProtoBufferSpan, -) - -if typing.TYPE_CHECKING: - from humanloop.client import Humanloop - - -logger = logging.getLogger("humanloop.sdk") - - -class HumanloopSpanExporter(SpanExporter): - DEFAULT_NUMBER_THREADS = 4 - - def __init__( - self, - client: "Humanloop", - worker_threads: Optional[int] = None, - ) -> None: - """Upload Spans created by SDK decorators to Humanloop. - - Spans not created by Humanloop SDK decorators will be ignored. - """ - super().__init__() - self._client = client - # Work queue for the threads uploading the spans - self._upload_queue: Queue = Queue() - # Worker threads to export the spans - self._threads: list[Thread] = [ - Thread( - target=self._do_work, - daemon=True, - ) - for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS) - ] - # Signals threads no more work will arrive and - # they should wind down after they empty the queue - self._shutdown: bool = False - # Init the upload threads - for thread in self._threads: - thread.start() - logger.debug("Exporter Thread %s started", thread.ident) - - def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: - if self._shutdown: - logger.warning("[HumanloopSpanExporter] Shutting down, not accepting new spans") - return SpanExportResult.FAILURE - - for span in spans: - self._upload_queue.put((span, get_evaluation_context())) - - return SpanExportResult.SUCCESS - - def shutdown(self) -> None: - self._shutdown = True - for thread in self._threads: - thread.join(timeout=5) - logger.debug("[HumanloopSpanExporter] Exporter Thread %s joined", thread.ident) - - def force_flush(self, timeout_millis: int = 5000) -> bool: - self._shutdown = True - for thread in self._threads: - thread.join(timeout=timeout_millis / 1000) - self._upload_queue.join() - - return True - - def _do_work(self): - # Do work while the Exporter was not instructed to - # wind down or the queue is not empty - while self._upload_queue.qsize() > 0 or not self._shutdown: - thread_args: tuple[ReadableSpan, Optional[EvaluationContext]] # type: ignore - try: - # Don't block or the thread will never be notified of the shutdown - thread_args = self._upload_queue.get(block=False) # type: ignore - except EmptyQueue: - # Wait for the another span to arrive - continue - - span_to_export, evaluation_context = thread_args - - file_type = span_to_export.attributes.get(HUMANLOOP_FILE_TYPE_KEY) - file_path = span_to_export.attributes.get(HUMANLOOP_PATH_KEY) - if file_type is None: - raise ValueError("Span does not have type set") - - try: - log_args = read_from_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_LOG_KEY, - ) - except Exception: - log_args = {} - - if evaluation_context: - if file_path == evaluation_context.path: - log_args = { - **log_args, - "source_datapoint_id": 
evaluation_context.source_datapoint_id, - "run_id": evaluation_context.run_id, - } - - write_to_opentelemetry_span( - span=span_to_export, - key=HUMANLOOP_LOG_KEY, - value=log_args, - ) - - payload = TracesData( - resource_spans=[ - ResourceSpans( - scope_spans=[ - ScopeSpans( - scope=InstrumentationScope( - name="humanloop.sdk.provider" - if is_llm_provider_call(span_to_export) - else "humanloop.sdk.decorator", - version="0.1.0", - ), - spans=[ - ProtoBufferSpan( - trace_id=span_to_export.context.trace_id.to_bytes(length=16, byteorder="big"), - span_id=span_to_export.context.span_id.to_bytes(length=8, byteorder="big"), - name=span_to_export.name, - kind={ - 0: ProtoBufferSpan.SpanKind.SPAN_KIND_INTERNAL, - 1: ProtoBufferSpan.SpanKind.SPAN_KIND_SERVER, - 2: ProtoBufferSpan.SpanKind.SPAN_KIND_CLIENT, - 3: ProtoBufferSpan.SpanKind.SPAN_KIND_PRODUCER, - 4: ProtoBufferSpan.SpanKind.SPAN_KIND_CONSUMER, - }[span_to_export.kind.value], - start_time_unix_nano=span_to_export.start_time, - end_time_unix_nano=span_to_export.end_time, - attributes=[ - KeyValue( - key=key, - value=AnyValue(string_value=str(value)), - ) - for key, value in span_to_export.attributes.items() - ], - dropped_attributes_count=span_to_export.dropped_attributes, - dropped_events_count=span_to_export.dropped_events, - dropped_links_count=span_to_export.dropped_links, - links=[ - ProtoBufferSpan.Link( - trace_id=link.trace_id, - span_id=link.span_id, - attributes=[ - KeyValue( - key=key, - value=AnyValue(string_value=str(value)), - ) - for key, value in link.attributes.items() - ], - ) - for link in span_to_export.links - ], - events=[], - ) - ], - ) - ] - ) - ] - ) - - response = requests.post( - f"{self._client._client_wrapper.get_base_url()}/import/otel/v1/traces", - headers={ - **self._client._client_wrapper.get_headers(), - }, - data=MessageToJson(payload), - ) - if response.status_code != 200: - # TODO: handle - pass - else: - if evaluation_context and file_path == evaluation_context.path: - log_id = response.json()["records"][0]["log_id"] - evaluation_context.logging_callback(log_id) - - self._upload_queue.task_done() diff --git a/src/humanloop/otel/exporter/__init__.py b/src/humanloop/otel/exporter/__init__.py new file mode 100644 index 00000000..ab997d87 --- /dev/null +++ b/src/humanloop/otel/exporter/__init__.py @@ -0,0 +1,154 @@ +import logging + +import typing +from queue import Empty as EmptyQueue +from queue import Queue +from threading import Thread +from typing import Optional, Sequence + +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult + +import requests +from typing import Callable +from humanloop.context import get_evaluation_context +from humanloop.evals.run import HumanloopRuntimeError +from humanloop.otel.constants import ( + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_FILE_PATH_KEY, +) +from humanloop.otel.exporter.proto import serialize_span +from humanloop.otel.helpers import ( + read_from_opentelemetry_span, + write_to_opentelemetry_span, +) + + +if typing.TYPE_CHECKING: + from humanloop.client import Humanloop + + +logger = logging.getLogger("humanloop.sdk") + + +class HumanloopSpanExporter(SpanExporter): + DEFAULT_NUMBER_THREADS = 1 + + def __init__( + self, + client: "Humanloop", + worker_threads: Optional[int] = None, + ) -> None: + """Upload Spans created by SDK decorators to Humanloop. + + Spans not created by Humanloop SDK decorators will be ignored. 
+ """ + super().__init__() + self._client = client + # Work queue for the threads uploading the spans + self._upload_queue: Queue = Queue() + # Worker threads to export the spans + self._threads: list[Thread] = [ + Thread( + target=self._do_work, + daemon=True, + ) + for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS) + ] + # Signals threads no more work will arrive and + # they should wind down after they empty the queue + self._shutdown: bool = False + # Init the upload threads + for thread in self._threads: + thread.start() + logger.debug("Exporter Thread %s started", thread.ident) + + def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: + if self._shutdown: + logger.warning("[HumanloopSpanExporter] Shutting down, not accepting new spans") + return SpanExportResult.FAILURE + + for span in spans: + file_type = span.attributes.get(HUMANLOOP_FILE_TYPE_KEY) # type: ignore [union-attr] + if file_type is None: + raise HumanloopRuntimeError("Internal error: Span does not have type set") + + try: + log_args = read_from_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + ) + path = read_from_opentelemetry_span( + span=span, + key=HUMANLOOP_FILE_PATH_KEY, + ) + evaluation_context = get_evaluation_context() + if evaluation_context is not None: + with evaluation_context.spy_log_args( + path=path, # type: ignore [arg-type] + log_args=log_args, # type: ignore [arg-type] + ) as log_args: + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=log_args, + ) + eval_context_callback = evaluation_context.callback + else: + eval_context_callback = None + except HumanloopRuntimeError as e: + raise e + except Exception as e: + # No log args, no callback + eval_context_callback = None + + self._upload_queue.put((span, eval_context_callback)) + + return SpanExportResult.SUCCESS + + def shutdown(self) -> None: + self._shutdown = True + for thread in self._threads: + thread.join(timeout=5) + logger.debug("[HumanloopSpanExporter] Exporter Thread %s joined", thread.ident) + + def force_flush(self, timeout_millis: int = 5000) -> bool: + self._shutdown = True + for thread in self._threads: + thread.join(timeout=timeout_millis / 1000) + self._upload_queue.join() + + return True + + def _do_work(self): + # Do work while the Exporter was not instructed to + # wind down or the queue is not empty + while self._upload_queue.qsize() > 0 or not self._shutdown: + thread_args: tuple[ReadableSpan, Optional[Callable[[str], None]]] # type: ignore + try: + # Don't block or the thread will never be notified of the shutdown + thread_args = self._upload_queue.get(block=False) # type: ignore + except EmptyQueue: + # Wait for the another span to arrive + continue + + span_to_export, eval_context_callback = thread_args + + response = requests.post( + f"{self._client._client_wrapper.get_base_url()}/import/otel/v1/traces", + headers={ + **self._client._client_wrapper.get_headers(), + }, + data=serialize_span(span_to_export), + ) + print("RECV", span_to_export.attributes, response.json(), response.status_code) + if response.status_code != 200: + pass + else: + if eval_context_callback: + print("HELLO") + log_id = response.json()["records"][0] + eval_context_callback(log_id) + + self._upload_queue.task_done() diff --git a/src/humanloop/otel/exporter/proto.py b/src/humanloop/otel/exporter/proto.py new file mode 100644 index 00000000..437ffe86 --- /dev/null +++ b/src/humanloop/otel/exporter/proto.py @@ -0,0 +1,73 @@ +from opentelemetry.proto.common.v1.common_pb2 import KeyValue, 
AnyValue, InstrumentationScope +from opentelemetry.proto.trace.v1.trace_pb2 import ( + TracesData, + ResourceSpans, + ScopeSpans, + Span as ProtoBufferSpan, +) +from google.protobuf.json_format import MessageToJson + +from opentelemetry.sdk.trace import ReadableSpan +from humanloop.otel.helpers import is_llm_provider_call + + +def serialize_span(span_to_export: ReadableSpan) -> str: + payload = TracesData( + resource_spans=[ + ResourceSpans( + scope_spans=[ + ScopeSpans( + scope=InstrumentationScope( + name="humanloop.sdk.provider" + if is_llm_provider_call(span_to_export) + else "humanloop.sdk.decorator", + version="0.1.0", + ), + spans=[ + ProtoBufferSpan( + trace_id=span_to_export.context.trace_id.to_bytes(length=16, byteorder="big"), + span_id=span_to_export.context.span_id.to_bytes(length=8, byteorder="big"), + name=span_to_export.name, + kind={ + 0: ProtoBufferSpan.SpanKind.SPAN_KIND_INTERNAL, + 1: ProtoBufferSpan.SpanKind.SPAN_KIND_SERVER, + 2: ProtoBufferSpan.SpanKind.SPAN_KIND_CLIENT, + 3: ProtoBufferSpan.SpanKind.SPAN_KIND_PRODUCER, + 4: ProtoBufferSpan.SpanKind.SPAN_KIND_CONSUMER, + }[span_to_export.kind.value], + start_time_unix_nano=span_to_export.start_time, # type: ignore [attr-defined, arg-type] + end_time_unix_nano=span_to_export.end_time, # type: ignore [attr-defined, arg-type] + attributes=[ + KeyValue( + key=key, + value=AnyValue(string_value=str(value)), + ) + for key, value in span_to_export.attributes.items() # type: ignore [union-attr] + ], + dropped_attributes_count=span_to_export.dropped_attributes, + dropped_events_count=span_to_export.dropped_events, + dropped_links_count=span_to_export.dropped_links, + links=[ + ProtoBufferSpan.Link( + trace_id=link.context.trace_id.to_bytes(length=16, byteorder="big"), + span_id=link.context.span_id.to_bytes(length=8, byteorder="big"), + attributes=[ + KeyValue( + key=key, + value=AnyValue(string_value=str(value)), + ) + for key, value in link.attributes.items() # type: ignore [union-attr] + ], + ) + for link in span_to_export.links + ], + events=[], + ) + ], + ) + ] + ) + ] + ) + + return MessageToJson(payload) diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py index 6dd539c4..cf253562 100644 --- a/src/humanloop/otel/processor.py +++ b/src/humanloop/otel/processor.py @@ -1,54 +1,54 @@ import logging -from typing import TypedDict +from typing import Optional +from opentelemetry import context as context_api from opentelemetry.sdk.trace import ReadableSpan, Span from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter -from humanloop.context import get_prompt_context, get_trace_id +from humanloop.context import get_decorator_context, get_evaluation_context, get_trace_id from humanloop.otel.constants import ( HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, - HUMANLOOP_PATH_KEY, + HUMANLOOP_FILE_PATH_KEY, ) from humanloop.otel.helpers import is_llm_provider_call logger = logging.getLogger("humanloop.sdk") -class CompletableSpan(TypedDict): - span: ReadableSpan - complete: bool - - class HumanloopSpanProcessor(SimpleSpanProcessor): def __init__(self, exporter: SpanExporter) -> None: super().__init__(exporter) - def on_start(self, span: Span, parent_context): + def on_start(self, span: Span, parent_context=...): if is_llm_provider_call(span): - prompt_context = get_prompt_context() - if prompt_context: - path, template = prompt_context.path, prompt_context.template - span.set_attribute(HUMANLOOP_PATH_KEY, path) + decorator_context = get_decorator_context() + if 
decorator_context and decorator_context.type == "prompt":
+                path, template = (
+                    decorator_context.path,
+                    decorator_context.version["template"],
+                )
+                span.set_attribute(HUMANLOOP_FILE_PATH_KEY, path)
                 span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt")
                 if template:
                     span.set_attribute(
                         f"{HUMANLOOP_FILE_KEY}.template",
                         template,
                     )
-            else:
-                # TODO: handle
-                raise ValueError("Provider call outside @prompt context manager")
         trace_id = get_trace_id()
         if trace_id:
             span.set_attribute(f"{HUMANLOOP_LOG_KEY}.trace_parent_id", trace_id)
 
     def on_end(self, span: ReadableSpan):
         if is_llm_provider_call(span):
-            prompt_context = get_prompt_context()
-            if prompt_context is None:
+            decorator_context = get_decorator_context()
+            if decorator_context is None or decorator_context.type != "prompt":
+                # User made a provider call outside a @prompt context, ignore the span
+                return
+            evaluation_context = get_evaluation_context()
+            if evaluation_context is not None and evaluation_context.path == decorator_context.path:
+                # The Prompt is under evaluation; the evaluation utility creates this Log, so skip the span
                 return
-
         self.span_exporter.export([span])
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 43ab6317..bff1a610 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -2,10 +2,13 @@
 import logging
 import types
 from typing import TypeVar, Union
-import typing
 
-from humanloop.context import get_decorator_context, get_trace_id
-from humanloop.eval_utils.run import HumanloopDecoratorError
+from humanloop.context import (
+    get_decorator_context,
+    get_evaluation_context,
+    get_trace_id,
+)
+from humanloop.evals.run import HumanloopRuntimeError
 from humanloop.evaluators.client import EvaluatorsClient
 from humanloop.flows.client import FlowsClient
@@ -31,7 +34,7 @@ def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE:
     part of an Evaluation (e.g. one started by eval_utils.run_eval).
     """
     # Copy the original log method in a hidden attribute
-    client._log = client.log
+    client._log = client.log  # type: ignore [attr-defined]
 
     def _overload_log(
         # It's safe to only consider kwargs since the original
@@ -48,38 +51,49 @@ def _overload_log(
         if trace_id is not None and type(client) is FlowsClient:
             context = get_decorator_context()
             if context is None:
-                raise HumanloopDecoratorError("Internal error: trace_id context is set outside a decorator context.")
-            raise HumanloopDecoratorError(
-                f"Using flows.log() in this context is not allowed at line {inspect.currentframe().f_lineno}: "
-                f"Flow decorator for File {context.path} manages the tracing and trace completion."
+                raise HumanloopRuntimeError("Internal error: trace_id context is set outside a decorator context.")
+            raise HumanloopRuntimeError(
+                f"Using `flows.log()` is not allowed: Flow decorator "
+                f"for File {context.path} manages the tracing and trace completion."
) if trace_id is not None: if "trace_parent_id" in kwargs: logger.warning( "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.", - inspect.currentframe().f_lineno, + inspect.currentframe().f_lineno, # type: ignore [union-attr] ) kwargs = { **kwargs, "trace_parent_id": trace_id, } - try: - response = self._log(**kwargs) - except Exception as e: - # Re-raising as HumanloopDecoratorError so the decorators don't catch it - raise HumanloopDecoratorError from e + evaluation_context = get_evaluation_context() + if evaluation_context is not None: + with evaluation_context.spy_log_args(path=kwargs.get("path"), log_args=kwargs) as kwargs: + try: + response = self._log(**kwargs) + except Exception as e: + # Re-raising as HumanloopDecoratorError so the decorators don't catch it + raise HumanloopRuntimeError from e + if evaluation_context.callback is not None: + evaluation_context.callback(response.id) + else: + try: + response = self._log(**kwargs) + except Exception as e: + # Re-raising as HumanloopDecoratorError so the decorators don't catch it + raise HumanloopRuntimeError from e return response # Replace the original log method with the overloaded one - client.log = types.MethodType(_overload_log, client) + client.log = types.MethodType(_overload_log, client) # type: ignore [assignment] # Return the client with the overloaded log method logger.debug("Overloaded the .call method of %s", client) return client def overload_call(client: PromptsClient) -> PromptsClient: - client._call = client.call + client._call = client.call # type: ignore [attr-defined] def _overload_call(self, **kwargs) -> PromptCallResponse: # None if not logging inside a decorator @@ -88,7 +102,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: if "trace_parent_id" in kwargs: logger.warning( "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.", - inspect.currentframe().f_lineno, + inspect.currentframe().f_lineno, # type: ignore [union-attr] ) kwargs = { **kwargs, @@ -97,13 +111,12 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: try: response = self._call(**kwargs) - response = typing.cast(PromptCallResponse, response) except Exception as e: # Re-raising as HumanloopDecoratorError so the decorators don't catch it - raise HumanloopDecoratorError from e + raise HumanloopRuntimeError from e return response # Replace the original log method with the overloaded one - client.call = types.MethodType(_overload_call, client) + client.call = types.MethodType(_overload_call, client) # type: ignore [assignment] return client diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py deleted file mode 100644 index c2f00b28..00000000 --- a/src/humanloop/utilities/flow.py +++ /dev/null @@ -1,127 +0,0 @@ -import logging -from functools import wraps -from typing import Any, Callable, Optional, TypeVar -from typing_extensions import ParamSpec - -from opentelemetry.trace import Span, Tracer -import requests - -from humanloop.base_client import BaseHumanloop -from humanloop.context import get_trace_id, set_trace_id -from humanloop.types.chat_message import ChatMessage -from humanloop.utilities.helpers import bind_args -from humanloop.eval_utils.types import File -from humanloop.otel.constants import ( - HUMANLOOP_FILE_TYPE_KEY, - HUMANLOOP_LOG_KEY, - HUMANLOOP_PATH_KEY, -) -from humanloop.otel.helpers import process_output, write_to_opentelemetry_span -from humanloop.requests import FlowKernelRequestParams as FlowDict - -logger = 
logging.getLogger("humanloop.sdk") - - -P = ParamSpec("P") -R = TypeVar("R") - - -def flow( - client: "BaseHumanloop", - opentelemetry_tracer: Tracer, - path: str, - attributes: Optional[dict[str, Any]] = None, -): - flow_kernel = {"attributes": attributes or {}} - - def decorator(func: Callable[P, R]) -> Callable[P, R]: - decorator_path = path or func.__name__ - file_type = "flow" - - @wraps(func) - def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: - span: Span - with opentelemetry_tracer.start_as_current_span("humanloop.flow") as span: # type: ignore - trace_id = get_trace_id() - args_to_func = bind_args(func, args, kwargs) - - # Create the trace ahead so we have a parent ID to reference - init_log_inputs = { - "inputs": {k: v for k, v in args_to_func.items() if k != "messages"}, - "messages": args_to_func.get("messages"), - "trace_parent_id": trace_id, - } - init_log = requests.post( - f"{client._client_wrapper.get_base_url()}/flows/log", - headers=client._client_wrapper.get_headers(), - json={ - "path": path, - "flow": flow_kernel, - "log_status": "incomplete", - **init_log_inputs, - }, - ).json() - # log = client.flows.log( - # path=path, - # **log_inputs, - # log_status="incomplete", - # ) - with set_trace_id(init_log["id"]): - span.set_attribute(HUMANLOOP_PATH_KEY, decorator_path) - span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, file_type) - - func_output: Optional[R] - log_output: str - log_error: Optional[str] - log_output_message: ChatMessage - try: - func_output = func(*args, **kwargs) - if ( - isinstance(func_output, dict) - and len(func_output.keys()) == 2 - and "role" in func_output - and "content" in func_output - ): - log_output_message = func_output - log_output = None - else: - log_output = process_output(func=func, output=func_output) - log_output_message = None - log_error = None - except Exception as e: - logger.error(f"Error calling {func.__name__}: {e}") - log_output = None - log_output_message = None - log_error = str(e) - func_output = None - - flow_log = { - "inputs": {k: v for k, v in args_to_func.items() if k != "messages"}, - "messages": args_to_func.get("messages"), - "log_status": "complete", - "output": log_output, - "error": log_error, - "output_message": log_output_message, - "id": init_log["id"], - } - - # Write the Flow Log to the Span on HL_LOG_OT_KEY - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_LOG_KEY, - value=flow_log, # type: ignore - ) - - # Return the output of the decorated function - return func_output - - wrapper.file = File( # type: ignore - path=decorator_path, - type=file_type, - version=FlowDict(**flow_kernel), # type: ignore - callable=wrapper, - ) - - return wrapper - - return decorator diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py deleted file mode 100644 index a30294aa..00000000 --- a/src/humanloop/utilities/prompt.py +++ /dev/null @@ -1,27 +0,0 @@ -from functools import wraps -import logging - - -from typing import Callable, Optional - -from humanloop.context import PromptContext, set_decorator_context - -logger = logging.getLogger("humanloop.sdk") - - -def prompt_decorator_factory(path: str, template: Optional[str]): - def decorator(func: Callable): - @wraps(func) - def wrapper(*args, **kwargs): - with set_decorator_context( - PromptContext( - path=path, - template=template, - ) - ): - output = func(*args, **kwargs) - return output - - return wrapper - - return decorator diff --git a/src/humanloop/utilities/types.py b/src/humanloop/utilities/types.py deleted file mode 
100644 index f52f0178..00000000 --- a/src/humanloop/utilities/types.py +++ /dev/null @@ -1,12 +0,0 @@ -from typing_extensions import NotRequired - -from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams - - -class DecoratorPromptKernelRequestParams(PromptKernelRequestParams): - """See :class:`PromptKernelRequestParams` for more information. - - Allows the `model` field to be optional for Prompt decorator. - """ - - model: NotRequired[str] # type: ignore diff --git a/tests/conftest.py b/tests/conftest.py index 033a2d09..80e3b336 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -98,10 +98,7 @@ def opentelemetry_hl_test_configuration( Humanloop Spans. """ exporter = InMemorySpanExporter() - processor = HumanloopSpanProcessor( - exporter=exporter, - client=humanloop_client, - ) + processor = HumanloopSpanProcessor(exporter=exporter) opentelemetry_test_provider.add_span_processor(processor) instrumentors: list[BaseInstrumentor] = [ OpenAIInstrumentor(), @@ -142,10 +139,7 @@ def opentelemetry_hl_with_exporter_test_configuration( """Configure OTel backend with HumanloopSpanProcessor and a HumanloopSpanExporter where HTTP calls are mocked. """ - processor = HumanloopSpanProcessor( - exporter=hl_test_exporter, - client=humanloop_client, # type: ignore [arg-type] - ) + processor = HumanloopSpanProcessor(exporter=hl_test_exporter) opentelemetry_test_provider.add_span_processor(processor) instrumentor = OpenAIInstrumentor() instrumentor.instrument(tracer_provider=opentelemetry_test_provider) diff --git a/tests/integration/chat_agent/__init__.py b/tests/integration/chat_agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/integration/chat_agent/conftest.py b/tests/integration/chat_agent/conftest.py deleted file mode 100644 index 2ca3c92e..00000000 --- a/tests/integration/chat_agent/conftest.py +++ /dev/null @@ -1,177 +0,0 @@ -from dataclasses import dataclass -from typing import Any, Callable - -import pytest - -from humanloop.client import Humanloop -from tests.conftest import APIKeys - - -@dataclass -class SurferAgentScenario: - calculator: Callable[[str, int, int], str] - pick_random_number: Callable[[], int] - call_agent: Callable[[list[str]], str] - agent_chat_workflow: Callable[[], str] - - -@pytest.fixture() -def surfer_agent_scenario_factory( - humanloop_client: Humanloop, - get_test_path: Callable[[str], str], - api_keys: APIKeys, -) -> Callable[[bool], SurferAgentScenario]: - def factory(use_overload_call: bool) -> SurferAgentScenario: - import json - import random - from openai import OpenAI - - TOPICS = ["math", "science"] - TONE = "groovy 80s surfer dude" - LLM_HYPERPARAMETERS = { - "temperature": 0.7, - "max_tokens": 200, - "top_p": 1, - "stop": "\n\n\n", - "presence_penalty": 0.5, - "frequency_penalty": 0.5, - "seed": 42, - } - PROMPT_TEMPLATE = ( - "You are a helpful assistant knowledgeable on the following topics: {topics}. 
" - "When you reply you should use the following tone of voice: {tone}" - ) - - client = OpenAI(api_key=api_keys.openai) - - @humanloop_client.tool(path=get_test_path("Calculator")) - def calculator(operation: str, num1: int, num2: int) -> float: - """Do arithmetic operations on two numbers.""" - if operation == "add": - return num1 + num2 - elif operation == "subtract": - return num1 - num2 - elif operation == "multiply": - return num1 * num2 - elif operation == "divide": - return num1 / num2 - else: - raise NotImplementedError("Invalid operation") - - @humanloop_client.tool(path=get_test_path("Random Number")) - def pick_random_number(): - """Pick a random number between 1 and 100.""" - return random.randint(1, 100) - - @humanloop_client.prompt( - path=get_test_path("Agent Prompt"), - template=PROMPT_TEMPLATE, - tools=[ - pick_random_number.json_schema, - calculator.json_schema, - ], - ) - def call_agent(messages: list[dict[str, Any]]) -> str: # type: ignore [call-arg] - if use_overload_call: - output = humanloop_client.prompts.call( - path=get_test_path("Agent Prompt"), - messages=messages, # type: ignore [arg-type] - prompt={ # type: ignore [arg-type] - "model": "gpt-4o-mini", - "tools": [ - # TODO: json_schema property should specify - # an OpenAI/ sHL format or be aware of the context - # it's called from; note the difference between - # prompts.call and openai.chat.completions.create - calculator.json_schema, - pick_random_number.json_schema, - ], - **LLM_HYPERPARAMETERS, # type: ignore - }, - ) - - if output.logs[0].output_message.tool_calls: # type: ignore [union-attr] - for tool_call in output.logs[0].output_message.tool_calls: # type: ignore [union-attr] - arguments = json.loads(tool_call.function.arguments) # type: ignore [arg-type] - if tool_call.function.name == "calculator": - result = calculator(**arguments) - - elif tool_call.function.name == "pick_random_number": - result = pick_random_number(**arguments) - - else: - raise NotImplementedError("Invalid tool call") - - return f"[TOOL CALL: {tool_call.function.name}] {result}" - - return output.logs[0].output_message.content # type: ignore - - else: - output = client.chat.completions.create( # type: ignore [call-overload] - model="gpt-4o-mini", - messages=messages, - # Use .json_schema property on decorated functions to easily access - # the definition for function calls - tools=[ - { - "type": "function", - "function": calculator.json_schema, - }, - { - "type": "function", - "function": pick_random_number.json_schema, - }, - ], - **LLM_HYPERPARAMETERS, - ) - - # Check if tool calls are present in the output - if output.choices[0].message.tool_calls: # type: ignore [attr-defined] - for tool_call in output.choices[0].message.tool_calls: # type: ignore [attr-defined] - arguments = json.loads(tool_call.function.arguments) - if tool_call.function.name == "calculator": - result = calculator(**arguments) - - elif tool_call.function.name == "pick_random_number": - result = pick_random_number(**arguments) - - else: - raise NotImplementedError("Invalid tool call") - - return f"[TOOL CALL: {tool_call.function.name}] {result}" - - return output.choices[0].message.content # type: ignore [attr-defined] - - @humanloop_client.flow(path=get_test_path("Agent Workflow")) - def agent_chat_workflow(): - messages = [ - { - "role": "system", - "content": PROMPT_TEMPLATE.format( - topics=" ".join(TOPICS), - tone=TONE, - ), - }, - ] - input_output_pairs = [] - while True: - user_input = input("You: ") - input_output = [user_input] - if user_input 
== "exit": - break - messages.append({"role": "user", "content": user_input}) - response = call_agent(messages=messages) - messages.append({"role": "assistant", "content": str(response)}) - input_output.append(str(response)) - print(f"Agent: {response}") - input_output_pairs.append(input_output) - return json.dumps(input_output_pairs) - - return SurferAgentScenario( - calculator=calculator, - pick_random_number=pick_random_number, - call_agent=call_agent, - agent_chat_workflow=agent_chat_workflow, - ) - - return factory diff --git a/tests/integration/chat_agent/test_chat_agent.py b/tests/integration/chat_agent/test_chat_agent.py deleted file mode 100644 index f65538c3..00000000 --- a/tests/integration/chat_agent/test_chat_agent.py +++ /dev/null @@ -1,67 +0,0 @@ -# """This script demonstrates instrumenting a simple conversational agent with function calling. - -# The example uses the Humanloop SDK to declare Files in code. - -# Type 'exit' to end the conversation. -# """ - -import time -from typing import Callable, ContextManager, TextIO -from unittest.mock import MagicMock, patch - -import pytest -from humanloop import Humanloop -from tests.conftest import DirectoryIdentifiers -from tests.integration.chat_agent.conftest import SurferAgentScenario - - -@pytest.mark.skip("skip for demo") -@pytest.mark.parametrize("use_call", [False]) -@patch("builtins.input") -def test_scenario_runs( - mocked_input: MagicMock, - surfer_agent_scenario_factory: Callable[[bool], SurferAgentScenario], - capture_stdout: ContextManager[TextIO], - humanloop_client: Humanloop, - test_directory: DirectoryIdentifiers, - use_call: bool, -): - surfer_agent_scenario = surfer_agent_scenario_factory(use_call) - scenario_io = [ - "How are you?", - "Tubular", - "exit", - ] - mocked_input.side_effect = scenario_io - with capture_stdout() as console_output: # type: ignore [operator] - surfer_agent_scenario.agent_chat_workflow() - - # Wait for the HL workspace to be updated - time.sleep(5) - - lines = console_output.getvalue().splitlines() - assert len(lines) == 2 - assert "Agent" in lines[0] - - response = humanloop_client.directories.get(test_directory.id) - assert len(response.files) == 2 - flow_file = [file for file in response.files if file.type == "flow"][0] - flow_logs = humanloop_client.logs.list(file_id=flow_file.id) - assert flow_logs.items and len(flow_logs.items) == 1 - flow_log = flow_logs.items[0] - assert flow_log.trace_status == "complete" # type: ignore - # List will not pass the children to the trace_children attribute - assert len(flow_log.trace_children) == 0 # type: ignore - response = humanloop_client.logs.get(flow_log.id) # type: ignore [assignment] - if not isinstance(response, dict): - response = response.dict() # type: ignore [assignment] - assert response["trace_status"] == "complete" # type: ignore - assert len(response["trace_children"]) == 2 # type: ignore [index] - messages = response["trace_children"][1]["messages"] # type: ignore [index] - assert len(messages) == 4 - # Messages are in reverse order - if not use_call: - # TODO: Some special characters are dropped when - # using prompt.call inside - assert messages[2]["content"] == scenario_io[0] - assert messages[0]["content"] == scenario_io[1] diff --git a/tests/integration/evaluate_medqa/__init__.py b/tests/integration/evaluate_medqa/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/integration/evaluate_medqa/conftest.py b/tests/integration/evaluate_medqa/conftest.py deleted file mode 100644 index 
18f52d81..00000000 --- a/tests/integration/evaluate_medqa/conftest.py +++ /dev/null @@ -1,202 +0,0 @@ -from dataclasses import dataclass -import json -from typing import Callable -import pytest - -import os -import requests -from humanloop.client import Humanloop - -from tests.assets import levenshtein, exact_match -from tests.conftest import APIKeys - - -@pytest.fixture(scope="session", autouse=True) -def medqa_knowledge_base_path() -> str: - file_path = "tests/assets/medqa.parquet" - if not os.path.exists(file_path): - os.makedirs("tests/assets", exist_ok=True) - url = "https://github.com/humanloop/humanloop-cookbook/raw/refs/heads/main/assets/sources/textbooks.parquet" - response = requests.get(url) - with open(file_path, "wb+") as file: - file.write(response.content) - return file_path - - -@pytest.fixture(scope="session", autouse=True) -def medqa_dataset_path() -> str: - file_path = "tests/assets/datapoints.jsonl" - if not os.path.exists(file_path): - os.makedirs("tests/assets", exist_ok=True) - url = "https://raw.githubusercontent.com/humanloop/humanloop-cookbook/refs/heads/main/assets/datapoints.jsonl" - response = requests.get(url) - with open(file_path, "wb+") as file: - file.write(response.content) - return file_path - - -@dataclass -class MedQAScenario: - retrieval_tool: tuple[str, Callable[[str], str]] - call_model: tuple[str, Callable[..., str]] - ask_question: tuple[str, Callable[..., str]] - medqa_dataset_path: tuple[str, list[dict]] - levenshtein_path: str - exact_match_path: str - - -@pytest.fixture() -def evaluate_medqa_scenario_factory( - humanloop_client: "Humanloop", - get_test_path: Callable[[str], str], - api_keys: APIKeys, - medqa_knowledge_base_path: str, - medqa_dataset_path: str, -) -> Callable[[bool], MedQAScenario]: - def factory(use_overload_call: bool) -> MedQAScenario: - import inspect - - from chromadb import chromadb # type: ignore - from openai import OpenAI - import pandas as pd # type: ignore - - chroma = chromadb.Client() - collection = chroma.get_or_create_collection(name="MedQA") - knowledge_base = pd.read_parquet(medqa_knowledge_base_path) - knowledge_base = knowledge_base.sample(10, random_state=42) - collection.add( - documents=knowledge_base["contents"].to_list(), - ids=knowledge_base["id"].to_list(), - ) - - openai = OpenAI(api_key=api_keys.openai) - - MODEL = "gpt-4o-mini" - TEMPLATE = [ - { - "role": "system", - "content": """Answer the following question factually. - - Question: {{question}} - - Options: - - {{option_A}} - - {{option_B}} - - {{option_C}} - - {{option_D}} - - {{option_E}} - - --- - - Here is some retrieved information that might be helpful. - Retrieved data: - {{retrieved_data}} - - --- - - Give you answer in 3 sections using the following format. Do not include the quotes or the brackets. Do include the "---" separators. 
- ``` - - --- - - --- - - ``` - """, - } - ] - - @humanloop_client.tool(path=get_test_path("Retrieval")) - def retrieval_tool(question: str) -> str: - """Retrieve most relevant document from the vector db (Chroma) for the question.""" - response = collection.query(query_texts=[question], n_results=1) - retrieved_doc = response["documents"][0][0] # type: ignore [index] - return retrieved_doc - - @humanloop_client.prompt(path=get_test_path("Call Model")) - def call_model(**inputs): - """Populate the Prompt template.""" - messages = humanloop_client.prompts.populate_template(TEMPLATE, inputs) - - if use_overload_call: - chat_completion = humanloop_client.prompts.call( - path=get_test_path("Call Model"), - prompt={ - "model": MODEL, - "temperature": 0, - "presence_penalty": 0, - "frequency_penalty": 0, - }, - messages=messages, - ) - - return chat_completion.logs[0].output_message.content - else: - # Call OpenAI to get response - chat_completion = openai.chat.completions.create( - model=MODEL, - temperature=0, - presence_penalty=0, - frequency_penalty=0, - messages=messages, - ) - return chat_completion.choices[0].message.content - - @humanloop_client.flow( - path=get_test_path("Pipeline"), - attributes={ - "prompt": { - "template": [ - { - "role": "system", - "content": 'Answer the following question factually.\n\nQuestion: {{question}}\n\nOptions:\n- {{option_A}}\n- {{option_B}}\n- {{option_C}}\n- {{option_D}}\n- {{option_E}}\n\n---\n\nHere is some retrieved information that might be helpful.\nRetrieved data:\n{{retrieved_data}}\n\n---\n\nGive you answer in 3 sections using the following format. Do not include the quotes or the brackets. Do include the "---" separators.\n```\n\n---\n\n---\n\n```\n', - } - ], - "model_name": "gpt-4o", - "temperature": 0, - }, - "tool": { - "name": "retrieval_tool_v3", - "description": "Retrieval tool for MedQA.", - "source_code": inspect.getsource(retrieval_tool), - }, - }, - ) - def ask_question(**inputs) -> str: - """Ask a question and get an answer using a simple RAG pipeline""" - - # Retrieve context - retrieved_data = retrieval_tool(inputs["question"]) - inputs = {**inputs, "retrieved_data": retrieved_data} - - # Call LLM - return call_model(**inputs) - - with open(medqa_dataset_path, "r") as file: - datapoints = [json.loads(line) for line in file.readlines()] - - for path, code, return_type in [ - (get_test_path("Levenshtein Distance"), levenshtein, "number"), - (get_test_path("Exact Match"), exact_match, "boolean"), - ]: - humanloop_client.evaluators.upsert( - path=path, - # TODO: spec comes up as Any - spec={ - "arguments_type": "target_required", - "return_type": return_type, - "evaluator_type": "python", - "code": inspect.getsource(code), - }, - ) - - return MedQAScenario( - retrieval_tool=(get_test_path("Retrieval"), retrieval_tool), - call_model=(get_test_path("Call Model"), call_model), - ask_question=(get_test_path("Pipeline"), ask_question), - medqa_dataset_path=(get_test_path("MedQA Dataset"), datapoints), - levenshtein_path=get_test_path("Levenshtein Distance"), - exact_match_path=get_test_path("Exact Match"), - ) - - return factory diff --git a/tests/integration/evaluate_medqa/test_evaluate_medqa.py b/tests/integration/evaluate_medqa/test_evaluate_medqa.py deleted file mode 100644 index 637dd6b3..00000000 --- a/tests/integration/evaluate_medqa/test_evaluate_medqa.py +++ /dev/null @@ -1,75 +0,0 @@ -import time -from typing import Callable - -import pytest -from humanloop.types.evaluation_response import EvaluationResponse -from tests.conftest 
import DirectoryIdentifiers -from tests.integration.evaluate_medqa.conftest import MedQAScenario -from humanloop import Humanloop - - -@pytest.mark.skip("skip for demo") -@pytest.mark.parametrize("use_call", [True]) -def test_scenario( - evaluate_medqa_scenario_factory: Callable[[bool], MedQAScenario], - humanloop_client: Humanloop, - test_directory: DirectoryIdentifiers, - use_call: bool, -): - evaluate_medqa_scenario = evaluate_medqa_scenario_factory(use_call) - ask_question_path, ask_question = evaluate_medqa_scenario.ask_question - medqa_dataset_path, medqa_dataset = evaluate_medqa_scenario.medqa_dataset_path - levenshtein_path = evaluate_medqa_scenario.levenshtein_path - exact_match_path = evaluate_medqa_scenario.exact_match_path - - humanloop_client.evaluations.run( # type: ignore [attr-defined] - name="Test", - file={ - "path": ask_question_path, - "callable": ask_question, - }, - dataset={ - "path": medqa_dataset_path, - "datapoints": medqa_dataset[:1], - }, - evaluators=[ - {"path": levenshtein_path}, - {"path": exact_match_path}, - ], - ) - - time.sleep(3) - - response = humanloop_client.directories.get(test_directory.id) - flow = [file for file in response.files if file.type == "flow"][0] - logs_page = humanloop_client.logs.list(file_id=flow.id) - assert len(logs_page.items) == 1 # type: ignore [arg-type] - - flow_log_id = logs_page.items[0].id # type: ignore [index] - flow_log = humanloop_client.logs.get(flow_log_id) - if not isinstance(flow_log, dict): - flow_log = flow_log.dict() # type: ignore [assignment] - assert flow_log["trace_status"] == "complete" # type: ignore [index] - assert len(flow_log["trace_children"]) == 2 # type: ignore [index] - - levenshtein = [file for file in response.files if file.path == levenshtein_path][0] - levenshtein_logs_page = humanloop_client.logs.list(file_id=levenshtein.id) - assert len(levenshtein_logs_page.items) == 1 # type: ignore [arg-type] - assert levenshtein_logs_page.items[0].parent_id == flow_log_id # type: ignore - assert levenshtein_logs_page.items[0].error is None # type: ignore [index] - - exact_match = [file for file in response.files if file.path == exact_match_path][0] - exact_match_logs_page = humanloop_client.logs.list(file_id=exact_match.id) - assert len(exact_match_logs_page.items) == 1 # type: ignore [arg-type] - assert exact_match_logs_page.items[0].parent_id == flow_log_id # type: ignore - assert exact_match_logs_page.items[0].error is None # type: ignore [index] - - response = humanloop_client.evaluations.list(file_id=flow.id) # type: ignore [assignment] - assert len(response.items) == 1 # type: ignore [attr-defined] - evaluation: EvaluationResponse = response.items[0] # type: ignore [attr-defined] - assert evaluation.status == "completed" # type: ignore [attr-defined] - assert evaluation.name == "Test" - assert evaluation.runs_count == 1 - assert evaluation.file_id == flow.id - for evaluator in evaluation.evaluators: - assert evaluator.orchestrated is True diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py index c409640e..3bd5ce45 100644 --- a/tests/otel/test_helpers.py +++ b/tests/otel/test_helpers.py @@ -4,11 +4,12 @@ def test_read_empty(test_span: Span): - assert read_from_opentelemetry_span(test_span) == {} + with pytest.raises(TypeError): + assert read_from_opentelemetry_span(test_span) == {} def test_read_non_existent_key(test_span: Span): - with pytest.raises(KeyError): + with pytest.raises(TypeError): assert read_from_opentelemetry_span(test_span, "key") == {} 
write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, key="key") # NOTE: attributes cannot be None at this point @@ -16,7 +17,7 @@ def test_read_non_existent_key(test_span: Span): "key.x": 7, "key.y": "foo", } - with pytest.raises(KeyError): + with pytest.raises(TypeError): assert read_from_opentelemetry_span(test_span, "key.z") is None @@ -158,7 +159,7 @@ def test_write_drops_dict_all_null_values(test_span: Span): # WHEN reading the value from the span # THEN the value is not present in the span attributes assert "key" not in test_span.attributes # type: ignore - with pytest.raises(KeyError): + with pytest.raises(TypeError): assert read_from_opentelemetry_span(test_span, "key") == {} diff --git a/tests/utilities/__init__.py b/tests/utilities/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/utilities/test_flow.py b/tests/utilities/test_flow.py deleted file mode 100644 index 0d92ec1f..00000000 --- a/tests/utilities/test_flow.py +++ /dev/null @@ -1,298 +0,0 @@ -import os -import random -import string -import time - -from unittest.mock import patch -import pytest -from openai import OpenAI -from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam -from opentelemetry.sdk.trace import Tracer -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter -from opentelemetry.sdk.trace import ReadableSpan - -from humanloop.utilities.flow import flow -from humanloop.utilities.prompt import prompt_decorator_factory -from humanloop.utilities.tool import tool_decorator_factory -from humanloop.otel.constants import HUMANLOOP_FILE_KEY -from humanloop.otel.exporter import HumanloopSpanExporter -from humanloop.otel.helpers import read_from_opentelemetry_span - - -pytest.skip("skip for demo", allow_module_level=True) - - -def _test_scenario( - opentelemetry_tracer: Tracer, -): - @tool_decorator_factory(opentelemetry_tracer=opentelemetry_tracer) - def _random_string() -> str: - """Return a random string.""" - return "".join( - random.choices( - string.ascii_letters + string.digits, - k=10, - ) - ) - - @prompt_decorator_factory( # type: ignore - opentelemetry_tracer=opentelemetry_tracer, - path=None, - template="You are an assistant on the following topics: {topics}.", - ) - def _call_llm(messages: list[ChatCompletionMessageParam]) -> str: - client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) - return ( - client.chat.completions.create( - model="gpt-4o", - messages=messages, - temperature=0.8, - ) - .choices[0] - .message.content - ) + _random_string() - - @flow( - opentelemetry_tracer=opentelemetry_tracer, - attributes={"foo": "bar", "baz": 7}, - ) - def _agent_call(messages: list[dict]) -> str: - return _call_llm(messages=messages) - - @flow( # type: ignore - opentelemetry_tracer=opentelemetry_tracer, - ) - def _flow_over_flow(messages: list[dict]) -> str: - return _agent_call(messages=messages) - - return _random_string, _call_llm, _agent_call, _flow_over_flow - - -@pytest.mark.skip("skip for demo") -@pytest.mark.flaky(retries=3, delay=30) -def test_decorators_without_flow( - opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - tracer, exporter = opentelemetry_hl_test_configuration - - _call_llm = _test_scenario(tracer)[1] - - # GIVEN a call to @prompt annotated function that calls a @tool - _call_llm( - [ - { - "role": "system", - "content": "You are an assistant on the following topics: greetings in foreign languages.", - }, - { - "role": "user", - "content": "Hello, how are you?", 
-            },
-        ]
-    )
-    # WHEN exporting the spans
-    # Wait for the prompt span to be exported; it was waiting
-    # on the OpenAI call span to finish first
-    time.sleep(10)
-    spans = exporter.get_finished_spans()
-
-    # THEN 3 spans arrive at the exporter
-    assert len(spans) == 3
-
-    for i in range(3):
-        if spans[i].name == "humanloop.tool":
-            tool_span = spans[i]
-        elif spans[i].name == "humanloop.prompt":
-            prompt_span = spans[i]
-
-    assert read_from_opentelemetry_span(
-        span=tool_span,
-        key=HUMANLOOP_FILE_KEY,
-    )["tool"]
-    assert read_from_opentelemetry_span(
-        span=prompt_span,
-        key=HUMANLOOP_FILE_KEY,
-    )["prompt"]
-
-
-@pytest.mark.flaky(retries=3, delay=30)
-def test_decorators_with_flow_decorator(
-    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
-):
-    # GIVEN a @flow entrypoint to an instrumented application
-    tracer, exporter = opentelemetry_hl_test_configuration
-
-    _agent_call = _test_scenario(tracer)[2]
-
-    # WHEN calling the Flow
-    _agent_call(
-        [
-            {
-                "role": "system",
-                "content": "You are an assistant on the following topics: greetings in foreign languages.",
-            },
-            {
-                "role": "user",
-                "content": "Hello, how are you?",
-            },
-        ]
-    )
-
-    time.sleep(10)
-
-    # THEN 4 spans arrive at the exporter
-    spans = exporter.get_finished_spans()
-    assert len(spans) == 4
-
-    for i in range(4):
-        if spans[i].name == "humanloop.flow":
-            flow_span = spans[i]
-        elif spans[i].name == "humanloop.prompt":
-            prompt_span = spans[i]
-        elif spans[i].name == "humanloop.tool":
-            tool_span = spans[i]
-
-    # THEN the spans are returned bottom to top
-    assert read_from_opentelemetry_span(span=tool_span, key=HUMANLOOP_FILE_KEY)["tool"]
-    assert read_from_opentelemetry_span(span=prompt_span, key=HUMANLOOP_FILE_KEY)["prompt"]
-    assert read_from_opentelemetry_span(span=flow_span, key=HUMANLOOP_FILE_KEY)["flow"]
-
-
-@pytest.mark.flaky(retries=3, delay=30)
-def test_flow_decorator_flow_in_flow(
-    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
-    call_llm_messages: list[dict],
-):
-    # GIVEN a configured OpenTelemetry tracer and exporter
-    tracer, exporter = opentelemetry_hl_test_configuration
-
-    _flow_over_flow = _test_scenario(tracer)[3]
-
-    # WHEN calling the _flow_over_flow function with specific messages
-    _flow_over_flow(call_llm_messages)
-
-    # Wait for the Prompt span to be exported; it was asynchronously waiting
-    # on the OpenAI call span to finish first
-    time.sleep(1)
-
-    # THEN 5 spans arrive at the exporter
-    spans = exporter.get_finished_spans()
-    assert len(spans) == 5
-
-    for i in range(5):
-        if spans[i].name == "humanloop.flow" and spans[i].parent is None:
-            flow_span = spans[i]
-        elif spans[i].name == "humanloop.flow" and spans[i].parent:
-            nested_flow_span = spans[i]
-        elif spans[i].name == "humanloop.prompt":
-            prompt_span = spans[i]
-        elif spans[i].name == "humanloop.tool":
-            tool_span = spans[i]
-
-    assert read_from_opentelemetry_span(span=tool_span, key=HUMANLOOP_FILE_KEY)["tool"]
-    assert read_from_opentelemetry_span(span=prompt_span, key=HUMANLOOP_FILE_KEY)["prompt"]
-    assert read_from_opentelemetry_span(span=nested_flow_span, key=HUMANLOOP_FILE_KEY)["flow"] != {}
-    with pytest.raises(KeyError):
-        read_from_opentelemetry_span(span=flow_span, key=HUMANLOOP_FILE_KEY)["flow"] != {}
-
-
-@pytest.mark.flaky(retries=3, delay=30)
-def test_flow_decorator_with_hl_exporter(
-    call_llm_messages: list[dict],
-    opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
-):
-    # NOTE: type ignore comments are caused by
the MagicMock used to mock _client
-    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
-    tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
-
-    _agent_call = _test_scenario(tracer)[2]
-
-    with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
-        # WHEN calling the @flow decorated function
-        _agent_call(call_llm_messages)
-
-        # Exporter is threaded; wait for its threads to shut down
-        time.sleep(3)
-
-        assert len(mock_export_method.call_args_list) == 4
-
-        for i in range(4):
-            span = mock_export_method.call_args_list[i][0][0][0]
-            if span.name == "humanloop.flow":
-                flow_span = span
-            elif span.name == "humanloop.prompt":
-                prompt_span = span
-            elif span.name == "humanloop.tool":
-                tool_span = span
-
-        assert read_from_opentelemetry_span(
-            span=flow_span,
-            key=HUMANLOOP_FILE_KEY,
-        )["flow"]["attributes"] == {  # type: ignore[index,call-overload]
-            "foo": "bar",
-            "baz": 7,
-        }
-        # THEN the second uploaded span is the Prompt
-        assert "prompt" in read_from_opentelemetry_span(
-            span=prompt_span,
-            key=HUMANLOOP_FILE_KEY,
-        )
-        # THEN the first uploaded span is the Tool
-        assert "tool" in read_from_opentelemetry_span(
-            span=tool_span,
-            key=HUMANLOOP_FILE_KEY,
-        )
-
-        # THEN the first Log uploaded is the Flow
-        first_log = exporter._client.flows.log.call_args_list[0][1]  # type: ignore
-        assert "flow" in first_log
-        exporter._client.flows.log.assert_called_once()  # type: ignore
-        flow_log_call_args = exporter._client.flows.log.call_args_list[0]  # type: ignore
-        assert flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
-        flow_log_id = exporter._client.flows.log.return_value.id  # type: ignore
-
-        # THEN the second Log uploaded is the Prompt
-        exporter._client.prompts.log.assert_called_once()  # type: ignore
-        prompt_log_call_args = exporter._client.prompts.log.call_args_list[0]  # type: ignore
-        assert prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
-        assert prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
-        prompt_log_id = exporter._client.prompts.log.return_value.id  # type: ignore
-
-        # THEN the final Log uploaded is the Tool
-        exporter._client.tools.log.assert_called_once()  # type: ignore
-        tool_log_call_args = exporter._client.tools.log.call_args_list[0]  # type: ignore
-        assert tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
-
-
-@pytest.mark.flaky(retries=3, delay=30)
-def test_flow_decorator_hl_exporter_flow_inside_flow(
-    call_llm_messages: list[dict],
-    opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
-):
-    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
-    tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
-
-    _flow_over_flow = _test_scenario(tracer)[3]
-
-    with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
-        # WHEN calling the @flow decorated function
-        _flow_over_flow(call_llm_messages)
-
-        # Exporter is threaded; wait for its threads to shut down
-        time.sleep(3)
-
-        # THEN 5 spans arrive at the exporter
-        assert len(mock_export_method.call_args_list) == 5
-
-        # THEN one of the flows is nested inside the other
-        spans: list[ReadableSpan] = [mock_export_method.call_args_list[i][0][0][0] for i in range(1, 5)]
-        counter = 0
-        for span in spans:
-            if span.name == "humanloop.flow":
-                counter += 1
-                if span.parent:
-                    nested_flow_span = span
-                else:
-                    flow_span = span
-        # We are certain span_id exists for these 2 spans
-        assert
nested_flow_span.parent.span_id == flow_span.context.span_id  # type: ignore
diff --git a/tests/utilities/test_prompt.py b/tests/utilities/test_prompt.py
deleted file mode 100644
index cafda67d..00000000
--- a/tests/utilities/test_prompt.py
+++ /dev/null
@@ -1,584 +0,0 @@
-import os
-import time
-from typing import Any, Callable, Optional
-
-import cohere
-import pytest
-
-# replicate has no typing stubs
-import replicate  # type: ignore
-from anthropic import Anthropic
-from anthropic.types.message_param import MessageParam
-from dotenv import load_dotenv
-from groq import Groq
-from groq import NotFoundError as GroqNotFoundError
-from humanloop.client import Humanloop
-from humanloop.eval_utils.run import HumanloopDecoratorError
-from humanloop.utilities.prompt import prompt_decorator_factory
-from humanloop.otel.constants import HUMANLOOP_FILE_KEY
-from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
-from humanloop.types.model_providers import ModelProviders
-from humanloop.types.prompt_kernel_request import PromptKernelRequest
-from openai import OpenAI
-from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
-from opentelemetry.sdk.trace import Tracer
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-
-from replicate.exceptions import ModelError as ReplicateModelError  # type: ignore [import]
-
-from tests.conftest import DirectoryIdentifiers  # type: ignore [import]
-
-
-pytest.skip("skip for demo", allow_module_level=True)
-
-_PROVIDER_AND_MODEL = [
-    ("openai", "gpt-4o-mini"),
-    ("groq", "llama3-8b-8192"),
-    ("cohere", "command"),
-    ("replicate", "meta/meta-llama-3-8b-instruct"),
-    ("anthropic", "claude-3-haiku-20240307"),
-]
-
-
-def _test_scenario(opentelemetry_tracer: Tracer, **kwargs):
-    """
-    Set up the function decorated with @prompt.
-
-    Normally the opentelemetry_tracer would be passed in by the Humanloop client.
-    In a test environment, the Tracer is obtained from a fixture and the test
-    calls this function to set up the decorated function under test.
-    """
-
-    @prompt_decorator_factory(opentelemetry_tracer=opentelemetry_tracer, **kwargs)
-    def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
-        load_dotenv()
-        if provider == "openai":
-            # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
-            # provider calls. Could not find a way to intercept them coming from a Mock.
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # type: ignore - return ( - client.chat.completions.create( - model=model, - messages=messages, # type: ignore - temperature=0.8, - ) - .choices[0] - .message.content - ) - if provider == "anthropic": - client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) # type: ignore - messages_anthropic_format = [ - MessageParam( - content=message["content"], - role="user" if message["role"] in ("user", "system") else "assistant", - ) - for message in messages - ] - return ( - client.messages.create( # type: ignore - model=model, - messages=messages_anthropic_format, - max_tokens=200, - temperature=0.8, - ) - .content[0] - .text - ) - if provider == "groq": - try: - client = Groq( # type: ignore - # This is the default and can be omitted - api_key=os.environ.get("GROQ_API_KEY"), - ) - return ( - client.chat.completions.create( - messages=messages, # type: ignore - model=model, - temperature=0.8, - ) - .choices[0] - .message.content - ) - except GroqNotFoundError: - # NOTE: Tests in this file are integration tests that rely on live LLM provider - # clients. If a test fails, it might be flaky. If this happens, consider adding - # a skip mechanism similar to Groq - pytest.skip("GROQ not available") - if provider == "cohere": - client = cohere.Client(api_key=os.getenv("COHERE_API_KEY")) # type: ignore - messages_cohere_format: list[cohere.Message] = [] - for message in messages: - if message["role"] == "system": - messages_cohere_format.append(cohere.SystemMessage(message=message["content"])) - elif message["role"] == "user": - messages_cohere_format.append(cohere.UserMessage(message=message["content"])) - elif message["role"] == "assistant": - messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"])) - return client.chat( # type: ignore - chat_history=messages_cohere_format, - model=model, - max_tokens=200, - message=messages[-1]["content"], - temperature=0.8, - ).text - if provider == "replicate": - # TODO: Instrumentor only picks up methods on module-level, not client level - # This should be documented somewhere or changed - replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY") - try: - output = "" - for event in replicate.run( - model, - input={ - "prompt": messages[0]["content"] + " " + messages[-1]["content"], - "temperature": 0.8, - }, - ): - output += str(event) - except ReplicateModelError: - pytest.skip("Replicate not available") - if not output: - pytest.skip("Replicate not available") - return output - raise ValueError(f"Unknown provider: {provider}") - - return _call_llm_base - - -# LLM provider might not be available, retry the test -@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL) -def test_prompt_decorator( - provider_model: tuple[str, str], - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], - call_llm_messages: list[ChatCompletionMessageParam], -): - provider, model = provider_model - # GIVEN an OpenTelemetry configuration without HumanloopSpanProcessor - tracer, exporter = opentelemetry_test_configuration - # WHEN using the Prompt decorator - - call_llm = _test_scenario(tracer) - - call_llm( - provider=provider, - model=model, - messages=call_llm_messages, - ) - - # Wait for the Prompt span to be exported, it is waiting - # asynchronously for the LLM provider call span to finish - time.sleep(10) - - # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt - spans = exporter.get_finished_spans() - assert len(spans) == 2 - 
assert not is_humanloop_span(span=spans[0]) - assert is_humanloop_span(span=spans[1]) - # THEN the Prompt span is not enhanced with information from the LLM provider - assert is_humanloop_span(spans[1]) - # THEN no information is added to the Prompt span without the HumanloopSpanProcessor - assert spans[1].attributes.get("prompt") is None # type: ignore - - -# LLM provider might not be available, retry the test -@pytest.mark.flaky(retries=3, delay=60) -@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL) -def test_prompt_decorator_with_hl_processor( - provider_model: tuple[str, str], - opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], - call_llm_messages: list[ChatCompletionMessageParam], -): - provider, model = provider_model - # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor - tracer, exporter = opentelemetry_hl_test_configuration - # WHEN using the Prompt decorator - - call_llm = _test_scenario(opentelemetry_tracer=tracer) - - call_llm( - provider=provider, - model=model, - messages=call_llm_messages, - ) - - # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt - - # Wait for the Prompt span to be exported, it is waiting - # asynchronously for the LLM provider call span to finish - time.sleep(10) - - spans = exporter.get_finished_spans() - assert len(spans) == 2 - assert not is_humanloop_span(span=spans[0]) - assert is_humanloop_span(span=spans[1]) - # THEN the Prompt span is enhanced with information and forms a correct PromptKernel - prompt_kernel = PromptKernelRequest.model_validate( - read_from_opentelemetry_span( - span=spans[1], - key=HUMANLOOP_FILE_KEY, - )["prompt"] # type: ignore - ) - # THEN temperature is intercepted from LLM provider call - assert prompt_kernel.temperature == 0.8 - # THEN the provider intercepted from LLM provider call - assert prompt_kernel.provider == provider - # THEN model is intercepted from LLM provider call - assert prompt_kernel.model == model - # THEN top_p is not present since it's not present in the LLM provider call - assert prompt_kernel.top_p is None - - -# LLM provider might not be available, retry the test -@pytest.mark.flaky(retries=3, delay=60) -@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL) -def test_prompt_decorator_with_defaults( - provider_model: tuple[str, str], - opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], - call_llm_messages: list[ChatCompletionMessageParam], -): - provider, model = provider_model - # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor - tracer, exporter = opentelemetry_hl_test_configuration - # WHEN using the Prompt decorator with default values - - call_llm = _test_scenario( - opentelemetry_tracer=tracer, - temperature=0.9, - top_p=0.1, - template="You are an assistant on the following topics: {topics}.", - path=None, - ) - - call_llm( - provider=provider, - model=model, - messages=call_llm_messages, - ) - - # Wait for the Prompt span to be exported, it is waiting - # asynchronously for the LLM provider call span to finish - time.sleep(10) - - spans = exporter.get_finished_spans() - # THEN the Prompt span is enhanced with information and forms a correct PromptKernel - prompt = PromptKernelRequest.model_validate( - read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["prompt"] # type: ignore - ) - # THEN temperature intercepted from LLM provider call is overridden by default value - assert prompt.temperature == 0.9 - # THEN top_p is taken from decorator 
default value - assert prompt.top_p == 0.1 - # THEN the provider intercepted from LLM provider call - assert prompt.model == model - - -# LLM provider might not be available, retry the test -@pytest.mark.flaky(retries=3, delay=60) -@pytest.mark.parametrize( - "attributes_test_expected", - [ - ( - {"foo": "bar"}, - {"foo": "bar"}, - ), - ( - {}, - None, - ), - ( - None, - None, - ), - ], -) -def test_prompt_attributes( - attributes_test_expected: tuple[dict[str, str], dict[str, str]], - call_llm_messages: list[ChatCompletionMessageParam], - opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - test_attributes, expected_attributes = attributes_test_expected - tracer, exporter = opentelemetry_hl_test_configuration - - call_llm = _test_scenario( - opentelemetry_tracer=tracer, - path=None, - attributes=test_attributes, - ) - - call_llm( - provider="openai", - model="gpt-4o", - messages=call_llm_messages, - ) - - # Wait for the Prompt span to be exported, it is waiting - # asynchronously for the LLM provider call span to finish - time.sleep(10) - - assert len(exporter.get_finished_spans()) == 2 - - prompt_kernel = PromptKernelRequest.model_validate( - read_from_opentelemetry_span( - span=exporter.get_finished_spans()[1], - key=HUMANLOOP_FILE_KEY, - )["prompt"] # type: ignore - ) - assert prompt_kernel.attributes == expected_attributes - - -def test_prompt_decorator_with_hl_call( - humanloop_client: Humanloop, - test_directory: DirectoryIdentifiers, - get_test_path: Callable[[str], str], -): - file_path = get_test_path("Test Prompt") - - @humanloop_client.prompt(path=file_path) - def call_llm_with_hl_call(messages: list[dict]): - response = humanloop_client.prompts.call( # type: ignore [call-args] - path=file_path, - messages=messages, # type: ignore [arg-type] - prompt={ - "model": "gpt-4o-mini", - "temperature": 0.8, - }, - ) - return response.logs[0].output_message.content # type: ignore [union-attr] - - output = call_llm_with_hl_call( - messages=[ - { - "role": "user", - "content": "Hi!", - }, - ] - ) - - assert output is not None - response = humanloop_client.directories.get(id=test_directory.id) - prompt = [file for file in response.files if file.path == file_path][0] - assert prompt.path == file_path - response = humanloop_client.logs.list(file_id=prompt.id) # type: ignore [assignment] - assert len(response.items) == 1 # type: ignore - - -@pytest.mark.skip("prompt.call() unhandled behavior") -def test_overridden_call_with_prompt_in_prompt( - humanloop_client: Humanloop, - test_directory: DirectoryIdentifiers, - get_test_path: Callable[[str], str], -): - inner_file_path = get_test_path("Test Prompt") - outer_file_path = get_test_path("Outer Test Prompt") - - @humanloop_client.prompt(path=inner_file_path) - def call_llm_with_hl_call(messages: list[dict]): - response = humanloop_client.prompts.call( # type: ignore [call-args] - path=inner_file_path, - messages=messages, # type: ignore [arg-type] - prompt={ - "model": "gpt-4o-mini", - "temperature": 0.8, - }, - ) - return response.logs[0].output_message.content # type: ignore [union-attr] - - @humanloop_client.prompt(path=outer_file_path) - def outer_call_llm_with_hl_call(messages: list[dict]): - output = call_llm_with_hl_call(messages) - response = humanloop_client.prompts.call( # type: ignore [call-args] - path=outer_file_path, - messages=[ - { - "role": "user", - "content": f"Give a clever response to this {output}", - } - ], - prompt={ - "model": "gpt-4o-mini", - "temperature": 0.8, - }, - ) - return 
response.logs[0].output_message.content # type: ignore [union-attr] - - output = outer_call_llm_with_hl_call( - messages=[ - { - "role": "user", - "content": "Hi!", - }, - ] - ) - - # Wait for the workspace to be updated - time.sleep(10) - - assert output is not None - response = humanloop_client.directories.get(id=test_directory.id) - outer_prompt = [file for file in response.files if file.path == outer_file_path][0] - inner_prompt = [file for file in response.files if file.path == inner_file_path][0] - - assert outer_prompt.path == outer_file_path - response = humanloop_client.logs.list(file_id=outer_prompt.id) # type: ignore [assignment] - assert len(response.items) == 1 # type: ignore [attr-defined] - - assert inner_prompt.path == inner_file_path - response = humanloop_client.logs.list(file_id=inner_prompt.id) # type: ignore [assignment] - assert len(response.items) == 1 # type: ignore [attr-defined] - - -def test_overridden_call_fails_obviously( - humanloop_client: Humanloop, - test_directory: DirectoryIdentifiers, - get_test_path: Callable[[str], str], -): - file_path = get_test_path("Test Prompt") - - @humanloop_client.prompt(path=file_path) - def call_llm_with_hl_call(): - response = humanloop_client.prompts.call( # type: ignore [call-args] - path=file_path, - messages={ - "role": "system", - "content": "This fails because messages should be a list", - }, # type: ignore [arg-type] - prompt={ - "model": "gpt-4o-mini", - "temperature": 0.8, - }, - ) - return response.logs[0].output_message.content # type: ignore [union-attr] - - with pytest.raises(HumanloopDecoratorError): - call_llm_with_hl_call() - - response = humanloop_client.directories.get(id=test_directory.id) - assert not any(file.path == file_path for file in response.files) - - -def test_overridden_call_must_match_utility_path( - humanloop_client: Humanloop, - test_directory: DirectoryIdentifiers, - get_test_path: Callable[[str], str], -): - @humanloop_client.prompt( - path=get_test_path("Test Prompt"), - temperature=0.7, - ) - def call_llm_with_hl_call(): - response = humanloop_client.prompts.call( - path=get_test_path("Test Prompt 2"), - prompt={ - "model": "gpt-4o-mini", - }, - messages=[ - { - "role": "user", - "content": "How are you?", - } - ], - ) - - return response.logs[0].output_message.content - - with pytest.raises(HumanloopDecoratorError): - call_llm_with_hl_call() - - response = humanloop_client.directories.get(id=test_directory.id) - assert not any(file.path == get_test_path("Test Prompt") for file in response.files) - - -@pytest.mark.parametrize( - "key,utility_value,call_value", - [ - # TODO: Bug found in backend: not specifying a model 400s but creates a File - # ("provider", "openai", "anthropic"), - ("temperature", 0.8, 0.5), - ("top_p", 0.5, 0.3), - ("stop", "foo", "bar"), - ("presence_penalty", 0.7, 0.5), - ("frequency_penalty", 1.5, 1), - ("other", "foo", "bar"), - ("seed", 42, 43), - # TODO: Bug found in backend: not specifying a model 400s but creates a File - # ("response_format", {"type": "json_object"}, {"type": "json_schema"}), - ], -) -def test_overridden_call_must_match_utility( - key: str, - utility_value: Any, - call_value: Any, - humanloop_client: Humanloop, - get_test_path: Callable[[str], str], - test_directory: DirectoryIdentifiers, -): - path = get_test_path("Test Prompt") - - @humanloop_client.prompt(path=path, **{key: utility_value}) - def call_llm_with_hl_call(): - response = humanloop_client.prompts.call( - path=path, - prompt={ - "model": "gpt-4o-mini", - **{key: call_value}, - }, 
- messages=[ - { - "role": "user", - "content": "How are you?", - } - ], - ) - - return response.logs[0].output_message.content - - with pytest.raises(HumanloopDecoratorError): - call_llm_with_hl_call() - - response = humanloop_client.directories.get(id=test_directory.id) - assert not any(file.path == path for file in response.files) - - -@pytest.mark.parametrize( - "key,prompt_call_value", - [ - ("temperature", 0.5), - ("top_p", 0.3), - ("stop", "bar"), - ("presence_penalty", 0.5), - ("frequency_penalty", 1), - ("seed", 42), - ], -) -def test_values_specified_cal_override_utility( - key: str, - prompt_call_value: Any, - humanloop_client: Humanloop, - get_test_path: Callable[[str], str], - test_directory: DirectoryIdentifiers, -): - path = get_test_path("Test Prompt") - - @humanloop_client.prompt(path=path) - def call_llm_with_hl_call(): - response = humanloop_client.prompts.call( - path=path, - prompt={ - "model": "gpt-4o-mini", - **{key: prompt_call_value}, - }, - messages=[ - { - "role": "user", - "content": "How are you?", - } - ], - ) - - return response.logs[0].output_message.content - - call_llm_with_hl_call() - - response = humanloop_client.directories.get(id=test_directory.id) - prompt = [file for file in response.files if file.path == path][0] - assert getattr(prompt, key) == prompt_call_value diff --git a/tests/utilities/test_tool.py b/tests/utilities/test_tool.py deleted file mode 100644 index e6046037..00000000 --- a/tests/utilities/test_tool.py +++ /dev/null @@ -1,567 +0,0 @@ -import sys -import time -from typing import Any, Optional, TypedDict, Union - -import pytest -from humanloop.utilities.tool import tool_decorator_factory -from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY -from humanloop.otel.helpers import read_from_opentelemetry_span -from jsonschema.protocols import Validator -from opentelemetry.sdk.trace import Tracer -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter - - -def test_calculator_decorator( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN a test OpenTelemetry configuration - tracer, exporter = opentelemetry_test_configuration - - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(operation: str, num1: float, num2: float) -> float: - """Do arithmetic operations on two numbers.""" - if operation == "add": - return num1 + num2 - elif operation == "subtract": - return num1 - num2 - elif operation == "multiply": - return num1 * num2 - elif operation == "divide": - return num1 / num2 - else: - raise ValueError(f"Invalid operation: {operation}") - - # WHEN calling the @tool decorated function - result = calculator(operation="add", num1=1, num2=2) - assert result == 3 - # THEN a single span is created and the log and file attributes are correctly set - spans = exporter.get_finished_spans() - assert len(spans) == 1 - hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_FILE_KEY) - hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_LOG_KEY) - assert hl_log["output"] == str(result) == "3" - assert hl_log["inputs"] == { - "operation": "add", - "num1": 1, - "num2": 2, - } - assert hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers." 
- # TODO: pydantic is inconsistent by dumping either tuple or list - assert calculator.json_schema == hl_file["tool"]["function"] - - Validator.check_schema(calculator.json_schema) - - -def test_union_type(opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter]): - tracer, _ = opentelemetry_test_configuration - - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo(a: Union[int, float], b: float) -> float: - return a + b - - assert foo.json_schema["parameters"]["properties"]["a"] == { - "anyOf": [ - {"type": "integer"}, - {"type": "number"}, - ] - } - assert foo.json_schema["parameters"]["properties"]["b"] == {"type": "number"} - assert foo.json_schema["parameters"]["required"] == ("a", "b") - - Validator.check_schema(foo.json_schema) - - -def test_not_required_parameter( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - tracer, exporter = opentelemetry_test_configuration - - @tool_decorator_factory(opentelemetry_tracer=tracer) - def test_calculator(a: Optional[float], b: float) -> float: - if a is None: - a = 0 - return a + b - - assert test_calculator(3, 4) == 7 - assert len(exporter.get_finished_spans()) == 1 - assert test_calculator.json_schema["parameters"]["properties"]["a"] == { - "type": ["number", "null"], - } - - Validator.check_schema(test_calculator.json_schema) - - -def test_no_annotation_on_parameter( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool and without type hint on a parameter - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(a: Optional[float], b) -> float: - if a is None: - a = 0 - return a + b - - # WHEN building the Tool kernel - # THEN the JSON schema is correctly built and `b` is of `any` type - # NOTE: JSONSchema dropped support for 'any' type, we include all types - # as a workaround - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - "a": {"type": ["number", "null"]}, - "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, - }, - "required": ("b",), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - Validator.check_schema(calculator.json_schema) - - -def test_dict_annotation_no_sub_types( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool and without type hint on a parameter - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(a: Optional[float], b: dict) -> float: - if a is None: - a = 0 - return a + b["c"] - - # WHEN building the Tool kernel - # THEN the JSON schema is correctly built and `b` accepts any type - # on both keys and values - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - "a": {"type": ["number", "null"]}, - "b": { - "type": "object", - "properties": { - "key": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, - "value": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, - }, - }, - }, - "required": ("b",), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - Validator.check_schema(calculator.json_schema) - - -def test_list_annotation_no_sub_types( - 
opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool and without type hint on a parameter - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(a: Optional[float], b: Optional[list]) -> float: - if a is None: - a = 0 - sum = a - if b is None: - return sum - for val in b: - sum += val - return sum - - # WHEN building the Tool kernel - # THEN the JSON schema is correctly built and `b` accepts any type - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - "a": {"type": ["number", "null"]}, - "b": { - "type": ["array", "null"], - "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, - }, - }, - "required": (), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - -def test_tuple_annotation_no_sub_types( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool and without type hint on a parameter - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(a: Optional[float], b: Optional[tuple]) -> float: - if a is None: - a = 0 - sum = a - if b is None: - return sum - for val in b: - sum += val - return sum - - # WHEN building the Tool kernel - # THEN the JSON schema is correctly built and `b` accepts any type - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - "a": {"type": ["number", "null"]}, - "b": { - "type": ["array", "null"], - "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, - }, - }, - "required": (), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - -def test_function_without_return_annotation( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool and without type hint on the return value - # WHEN building the Tool kernel - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo(a: Optional[float], b: float) -> float: - """Add two numbers.""" - if a is None: - a = 0 - return a + b - - # THEN the JSONSchema is valid - Validator.check_schema(foo.json_schema) - - -def test_list_annotation_parameter( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, exporter = opentelemetry_test_configuration - - # WHEN defining a tool with a list parameter - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo(to_join: list[str]) -> str: - return " ".join(to_join) - - assert "a b c" == foo(to_join=["a", "b", "c"]) - - # THEN the function call results in a Span - assert len(exporter.get_finished_spans()) == 1 - # THEN the argument is correctly described in the JSON schema - assert foo.json_schema["parameters"]["properties"]["to_join"] == { # type: ignore - "type": "array", - "items": {"type": "string"}, - } - # THEN the JSONSchema is valid - Validator.check_schema(foo.json_schema) - - -def test_list_in_list_parameter_annotation( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a 
tool definition with a list of lists parameter - # WHEN building the Tool Kernel - @tool_decorator_factory(opentelemetry_tracer=tracer) - def nested_plain_join(to_join: list[list[str]]): - return " ".join([val for sub_list in to_join for val in sub_list]) - - # THEN the JSON schema is correctly built and parameter is correctly described - assert nested_plain_join.json_schema["parameters"]["properties"]["to_join"] == { - "type": "array", - "items": { - "type": "array", - "items": {"type": "string"}, - }, - } - - # THEN the JSONSchema is valid - Validator.check_schema(nested_plain_join.json_schema) - - -def test_complex_dict_annotation( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a tool definition with a dictionary parameter - # WHEN building the Tool Kernel - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo(a: dict[Union[int, str], list[str]]): - return a - - # THEN the parameter is correctly described - assert foo.json_schema["parameters"]["properties"]["a"] == { - "type": "object", - "properties": { - "key": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, - "value": {"type": "array", "items": {"type": "string"}}, - }, - } - - # THEN the JSONSchema is valid - Validator.check_schema(foo.json_schema) - - -def test_tuple_annotation( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a tool definition with a tuple parameter - # WHEN building the Tool Kernel - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo(a: Optional[tuple[int, Optional[str], float]]): - return a - - # THEN the parameter is correctly described - assert foo.json_schema["parameters"]["properties"]["a"] == { - "type": ["array", "null"], - "items": [ - {"type": "integer"}, - {"type": ["string", "null"]}, - {"type": "number"}, - ], - } - - # THEN the JSONSchema is valid - Validator.check_schema(foo.json_schema) - - -def test_tool_no_args( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a tool definition without arguments - # WHEN building the Tool Kernel - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo(): - return 42 - - # THEN the JSON schema is correctly built - assert foo.json_schema == { - "description": "", - "name": "foo", - "parameters": { - "properties": {}, - "required": [], - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - # THEN the JSONSchema is valid - Validator.check_schema(foo.json_schema) - - -def test_custom_types_throws( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a user-defined type - class Foo(TypedDict): - a: int # type: ignore - b: int # type: ignore - - # WHEN defining a tool with a parameter of that type - with pytest.raises(ValueError) as exc: - - @tool_decorator_factory(opentelemetry_tracer=tracer) - def foo_bar(foo: Foo): - return foo.a + foo.b # type: ignore - - # THEN a ValueError is raised - assert exc.value.args[0].startswith("Error parsing signature of @tool annotated function foo_bar") - - -def test_tool_as_higher_order_function( - opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], -): - tracer, exporter = 
opentelemetry_hl_test_configuration - - def calculator(operation: str, num1: float, num2: float) -> float: - """Do arithmetic operations on two numbers.""" - if operation == "add": - return num1 + num2 - elif operation == "subtract": - return num1 - num2 - elif operation == "multiply": - return num1 * num2 - elif operation == "divide": - return num1 / num2 - else: - raise ValueError(f"Invalid operation: {operation}") - - higher_order_fn_tool = tool_decorator_factory(opentelemetry_tracer=tracer)(calculator) - - @tool_decorator_factory(opentelemetry_tracer=tracer) # type: ignore - def calculator(operation: str, num1: float, num2: float) -> float: - """Do arithmetic operations on two numbers.""" - if operation == "add": - return num1 + num2 - elif operation == "subtract": - return num1 - num2 - elif operation == "multiply": - return num1 * num2 - elif operation == "divide": - return num1 / num2 - else: - raise ValueError(f"Invalid operation: {operation}") - - higher_order_fn_tool(operation="add", num1=1, num2=2) - calculator(operation="add", num1=1, num2=2) - - # Processor handles HL spans asynchronously, wait for them - time.sleep(1) - - assert len(spans := exporter.get_finished_spans()) == 2 - - hl_file_higher_order_fn = read_from_opentelemetry_span( - span=spans[0], - key=HUMANLOOP_FILE_KEY, - ) - hl_file_decorated_fn = read_from_opentelemetry_span( - span=spans[1], - key=HUMANLOOP_FILE_KEY, - ) - assert hl_file_higher_order_fn["tool"]["source_code"] == hl_file_decorated_fn["tool"]["source_code"] # type: ignore - - -if sys.version_info >= (3, 10): - # Testing that function parsing for Tool decorator - # works with Python 3.10 and above syntax - - def test_python310_syntax( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], - ): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool where a parameter uses `|` for Optional - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(a: float, b: float | None = None) -> float: - # NOTE: dummy function, only testing its signature not correctness - if a is None: - a = 0 - return a + b # type: ignore - - # WHEN building the Tool kernel - # THEN the JSON schema is correct - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - "a": {"type": "number"}, - "b": {"type": ["number", "null"]}, - }, - "required": ("a",), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - Validator.check_schema(calculator.json_schema) - - def test_python310_union_syntax( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], - ): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool where a parameter uses `|` for Union - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(a: float, b: float | int | str) -> float: - # NOTE: dummy function, only testing its signature not correctness - return a + b # type: ignore - - # WHEN building the Tool kernel - # THEN the JSON schema is correct - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - "a": {"type": "number"}, - "b": {"anyOf": [{"type": "number"}, {"type": "integer"}, {"type": "string"}]}, - }, - "required": ("a", "b"), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } - - Validator.check_schema(calculator.json_schema) - - def 
test_python_list_ellipsis( - opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], - ): - # GIVEN an OTel configuration - tracer, _ = opentelemetry_test_configuration - - # GIVEN a function annotated with @tool where a parameter uses `...` - @tool_decorator_factory(opentelemetry_tracer=tracer) - def calculator(b: ...) -> float | None: # type: ignore - # NOTE: dummy function, only testing its signature not correctness - if isinstance(b, list): - return sum(b) - return None - - # WHEN building the Tool kernel - # THEN the JSON schema is correct - assert calculator.json_schema == { - "description": "", - "name": "calculator", - "parameters": { - "properties": { - # THEN b is of any type - "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, - }, - "required": ("b",), - "type": "object", - "additionalProperties": False, - }, - "strict": True, - } From 246c728f07bf2581bdb3ffed609c7b14a04b3063 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Fri, 7 Mar 2025 14:45:39 +0000 Subject: [PATCH 11/14] Docstrings, small refactors --- src/humanloop/client.py | 191 +++++++++++------------- src/humanloop/context.py | 105 +++++++++---- src/humanloop/decorators/prompt.py | 9 +- src/humanloop/error.py | 7 + src/humanloop/otel/exporter/__init__.py | 25 ++-- src/humanloop/otel/exporter/proto.py | 3 + src/humanloop/otel/processor.py | 4 +- src/humanloop/overload.py | 18 ++- 8 files changed, 196 insertions(+), 166 deletions(-) diff --git a/src/humanloop/client.py b/src/humanloop/client.py index cb6c9c64..dbdb898b 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -27,6 +27,10 @@ class ExtendedEvalsClient(EvaluationsClient): + """ + Provides high-level utilities for running Evaluations on the local runtime. + """ + client: BaseHumanloop def __init__( @@ -50,7 +54,7 @@ def run( :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File. :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation. :param evaluators: define how judgments are provided for this Evaluation. - :param workers: the number of threads to process datapoints using your function concurrently. + :param workers: Number of concurrent threads for processing datapoints. :return: per Evaluator checks. """ if self.client is None: @@ -67,6 +71,10 @@ def run( class ExtendedPromptsClient(PromptsClient): + """ + Adds utility for populating Prompt template inputs. + """ + populate_template = staticmethod(populate_template) # type: ignore [assignment] @@ -90,16 +98,14 @@ def __init__( opentelemetry_tracer_provider: Optional[TracerProvider] = None, opentelemetry_tracer: Optional[Tracer] = None, ): - """See docstring of :func:`BaseHumanloop.__init__(...)` - - This class extends the base client with custom evaluation utilities - and decorators for declaring Files in code. + """ + Extends the base client with custom evaluation utilities and + decorators for declaring Files in code. - The Humanloop SDK File decorators use OpenTelemetry internally. You can provide a - TracerProvider and a Tracer if you'd like to integrate them with your existing - telemetry system. Otherwise, an internal TracerProvider will be used. - If you provide only the `TraceProvider`, the SDK will log under a Tracer - named `humanloop.sdk`. + The Humanloop SDK File decorators use OpenTelemetry internally. + You can provide a TracerProvider and a Tracer to integrate + with your existing telemetry system. 
If not provided,
+        an internal TracerProvider will be used.
         """
         super().__init__(
             base_url=base_url,
@@ -116,6 +122,7 @@ def __init__(
         self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)

         # Overload the .log method of the clients to be aware of Evaluation Context
+        # and the @flow decorator providing the trace_id
         self.prompts = overload_log(client=self.prompts)
         self.prompts = overload_call(client=self.prompts)
         self.flows = overload_log(client=self.flows)
@@ -145,22 +152,12 @@ def prompt(
         self,
         *,
         path: str,
-        template: Optional[str] = None,
     ):
-        """Decorator for declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code.
-
-        The decorator intercepts calls to LLM provider APIs and creates
-        a new Prompt file based on the hyperparameters used in the call.
-        If a hyperparameter is specified in the `@prompt` decorator, then
-        they override any value intercepted from the LLM provider call.
-
-        If the [Prompt](https://humanloop.com/docs/explanation/prompts) already exists
-        on the specified path, a new version will be upserted when any of the above change.
-
-        Here's an example of declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code:
+        """Auto-instrument calls to LLM providers and create
+        [Prompt](https://humanloop.com/docs/explanation/prompts) Logs on Humanloop from them.

        ```python
-        @prompt(template="You are an assistant on the following topics: {{topics}}.")
+        @prompt(path="My Prompt")
        def call_llm(messages):
            client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
            return client.chat.completions.create(
@@ -170,49 +167,55 @@ def call_llm(messages):
                max_tokens=200,
                messages=messages,
            ).choices[0].message.content
-        ```
-
-        This will create a [Prompt](https://humanloop.com/docs/explanation/prompts] with the following attributes:
-
-        ```python
+        Calling the function above creates a new Log on Humanloop
+        against this Prompt version:
        {
+            provider: "openai",
            model: "gpt-4o",
            endpoint: "chat",
-            template: "You are an assistant on the following topics: {{topics}}.",
-            provider: "openai",
            max_tokens: 200,
            temperature: 0.8,
            frequency_penalty: 0.5,
        }
+        ```

-        Every call to the decorated function will create a Log against the Prompt. For example:
-
-        ```python
-        call_llm(messages=[
-            {"role": "system", "content": "You are an assistant on the following topics: finance."}
-            {"role": "user", "content": "What can you do?"}
-        ])
+        If a different model, endpoint, or hyperparameter is used, a new
+        Prompt version is created. For example:
        ```
+        @humanloop_client.prompt(path="My Prompt")
+        def call_llm(messages):
+            client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+            client.chat.completions.create(
+                model="gpt-4o-mini",
+                temperature=0.5,
+            ).choices[0].message.content

-        The Prompt Log will be created with the following inputs:
-        ```python
+            client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
+            client.messages.create(
+                model="claude-3-5-sonnet-20240620",
+                temperature=0.5,
+            ).content
+
+        Calling this function will create two versions of the same Prompt:
        {
-            "inputs": {
-                "topics": "finance"
-            },
-            messages: [
-                {"role": "system", "content": "You are an assistant on the following topics: finance."}
-                {"role": "user", "content": "What can you do?"}
-            ]
-            "output": "Hello, I'm an assistant that can help you with anything related to finance."
+ provider: "openai", + model: "gpt-4o-mini", + endpoint: "chat", + max_tokens: 200, + temperature: 0.5, + frequency_penalty: 0.5, } - ``` - The decorated function should return a string or the output should be JSON serializable. If - the output cannot be serialized, TypeError will be raised. + { + provider: "anthropic", + model: "claude-3-5-sonnet-20240620", + endpoint: "messages", + temperature: 0.5, + } - If the function raises an exception, the log created by the function will have the output - field set to None and the error field set to the string representation of the exception. + And one Log will be added to each version of the Prompt. + ``` :param path: The path where the Prompt is created. If not provided, the function name is used as the path and the File @@ -220,7 +223,7 @@ def call_llm(messages): :param prompt_kernel: Attributes that define the Prompt. See `class:DecoratorPromptKernelRequestParams` """ - return prompt_decorator_factory(path=path, template=template) + return prompt_decorator_factory(path=path) def tool( self, @@ -229,27 +232,22 @@ def tool( attributes: Optional[dict[str, Any]] = None, setup_values: Optional[dict[str, Any]] = None, ): - """Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code. + """Manage [Tool](https://humanloop.com/docs/explanation/tools) Files through code. - The decorator inspects the wrapped function's source code, name, - argument type hints and docstring to infer the values that define - the [Tool](https://humanloop.com/docs/explanation/tools). + The decorator inspects the wrapped function's source code to infer the Tool's + JSON Schema. If the function declaration changes, a new Tool version + is upserted with an updated JSON Schema. - If the [Tool](https://humanloop.com/docs/explanation/tools) already exists - on the specified path, a new version will be upserted when any of the - above change. - - Here's an example of declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code: + For example: ```python - @tool + # Adding @tool on this function + @humanloop_client.tool(path="calculator") def calculator(a: int, b: Optional[int]) -> int: \"\"\"Add two numbers together.\"\"\" return a + b - ``` - This will create a [Tool](https://humanloop.com/docs/explanation/tools) with the following attributes: - ```python + # Creates a Tool with this JSON Schema: { strict: True, function: { @@ -267,35 +265,16 @@ def calculator(a: int, b: Optional[int]) -> int: } ``` - Every call to the decorated function will create a Log against the Tool. For example: + The return value of the decorated function must be JSON serializable. - ```python - calculator(a=1, b=2) - ``` + If the function raises an exception, the created Log will have `output` + set to null, and the `error` field populated. - Will create the following Log: + :param path: The path of the File in the Humanloop workspace. - ```python - { - "inputs": { - a: 1, - b: 2 - }, - "output": 3 - } - ``` + :param setup_values: Values needed to setup the Tool, defined in [JSON Schema](https://json-schema.org/) - The decorated function should return a string or the output should be JSON serializable. If - the output cannot be serialized, TypeError will be raised. - - If the function raises an exception, the log created by the function will have the output - field set to None and the error field set to the string representation of the exception. - - :param path: The path to the Tool. 
If not provided, the function name - will be used as the path and the File will be created in the root - of your organization's workspace. - - :param tool_kernel: Attributes that define the Tool. See `class:ToolKernelRequestParams` + :param attributes: Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. """ return tool_decorator_factory( opentelemetry_tracer=self._opentelemetry_tracer, @@ -310,13 +289,13 @@ def flow( path: str, attributes: Optional[dict[str, Any]] = None, ): - """Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code. + """Trace SDK logging calls through [Flows](https://humanloop.com/docs/explanation/flows). + + Use it as the entrypoint of your LLM feature. Logging calls like `prompts.call(...)`, + `tools.call(...)`, or other Humanloop decorators will be automatically added to the trace. - A [Flow](https://humanloop.com/docs/explanation/flows) wrapped callable should - be used as the entrypoint of your LLM feature. Call other functions wrapped with - Humanloop decorators to create a trace of Logs on Humanloop. + For example: - Here's an example of declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code: ```python @prompt(template="You are an assistant on the following topics: {{topics}}.") def call_llm(messages): @@ -330,7 +309,7 @@ def call_llm(messages): ).choices[0].message.content @flow(attributes={"version": "v1"}) - def entrypoint(): + def agent(): while True: messages = [] user_input = input("You: ") @@ -342,23 +321,23 @@ def entrypoint(): print(f"Assistant: {response}") ``` - In this example, the Flow instruments a conversational agent where the - Prompt defined in `call_llm` is called multiple times in a loop. Calling - `entrypoint` will create a Flow Trace under which multiple Prompt Logs - will be nested, allowing you to track the whole conversation session - between the user and the assistant. + Each call to agent will create a trace corresponding to the conversation + session. Multiple Prompt Logs will be created as the LLM is called. They + will be added to the trace, allowing you to see the whole conversation + in the UI. - The decorated function should return a string or the output should be JSON serializable. If - the output cannot be serialized, TypeError will be raised. + If the function returns a ChatMessage-like object, the Log will + populate the `output_message` field. Otherwise, it will serialize + the return value and populate the `output` field. - If the function raises an exception, the log created by the function will have the output - field set to None and the error field set to the string representation of the exception. + If an exception is raised, the output fields will be set to None + and the error message will be set in the Log's `error` field. :param path: The path to the Flow. If not provided, the function name will be used as the path and the File will be created in the root of your organization workspace. - :param flow_kernel: Attributes that define the Flow. See `class:ToolKernelRequestParams` + :param attributes: Additional fields to describe the Flow. Helpful to separate Flow versions from each other with details on how they were created or used. 
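To make the `output_message` behavior described above concrete, here is a minimal sketch of a Flow entrypoint that returns a ChatMessage-like dict. This is illustrative only: it assumes an instantiated `humanloop_client`, the OpenAI SDK, and an `OPENAI_API_KEY` in the environment; the path, model, and function name are not part of the patch.

```python
from openai import OpenAI

openai_client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment


@humanloop_client.flow(path="examples/chat-agent", attributes={"version": "v1"})
def agent_turn(user_input: str) -> dict:
    completion = openai_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": user_input}],
    )
    # Per the docstring above, a ChatMessage-like return value should
    # populate the Log's `output_message` field rather than the
    # serialized `output` field.
    return {"role": "assistant", "content": completion.choices[0].message.content}
```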
""" return flow_decorator_factory( client=self, diff --git a/src/humanloop/context.py b/src/humanloop/context.py index aa5940ce..d3b96f33 100644 --- a/src/humanloop/context.py +++ b/src/humanloop/context.py @@ -13,12 +13,14 @@ def get_trace_id() -> Optional[str]: + # Use threading.get_ident() to ensure the context is unique to the current thread key = str(hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))) return context_api.get_value(key=key) # type: ignore [return-value] @contextmanager def set_trace_id(flow_log_id: str) -> Generator[None, None, None]: + # Use threading.get_ident() to ensure the context is unique to the current thread key = str(hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))) token = context_api.attach(context_api.set_value(key=key, value=flow_log_id)) yield @@ -36,6 +38,7 @@ class DecoratorContext: def set_decorator_context( decorator_context: DecoratorContext, ) -> Generator[DecoratorContext, None, None]: + # Use threading.get_ident() to ensure the context is unique to the current thread key = str(hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))) reset_token = context_api.attach( context_api.set_value( @@ -48,11 +51,27 @@ def set_decorator_context( def get_decorator_context() -> Optional[DecoratorContext]: + # Use threading.get_ident() to ensure the context is unique to the current thread key = str(hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))) return context_api.get_value(key) # type: ignore [return-value] class EvaluationContext: + """ + Represents the context for evaluating a specific data point within a run. + + This class integrates with the OpenTelemetry (OTEL) runtime context API to distribute data points across threads. + Each thread processes one data point by invoking a callable and subsequently logs the results against a run. + + Attributes: + source_datapoint_id (str): The unique identifier of the source data point. + run_id (str): The unique identifier of the evaluation run. + file_id (str): The identifier of the file associated with the evaluation. + path (str): The file path associated with the evaluation. + _logged (bool): Tracks whether logging has already occurred in this context to ensure only the first log is counted. + _callback (Callable[[str], None]): A callback function to be executed when logging occurs. + """ + source_datapoint_id: str run_id: str file_id: str @@ -61,68 +80,87 @@ class EvaluationContext: _callback: Callable[[str], None] _context_log_belongs_eval_file: bool + def __init__( + self, + source_datapoint_id: str, + run_id: str, + eval_callback: Callable[[str], None], + file_id: str, + path: str, + ): + self.source_datapoint_id = source_datapoint_id + self.run_id = run_id + self._callback = eval_callback + self.file_id = file_id + self.path = path + self._logged = False + self._context_log_belongs_eval_file = False + @property def logged(self) -> bool: + """ + Return true if the current datapoint has been evaluated already. + """ return self._logged - @contextmanager - def spy_log_args( + def log_args_with_context( self, log_args: dict[str, Any], path: Optional[str] = None, file_id: Optional[str] = None, - ) -> Generator[dict[str, Any], None, None]: + ) -> tuple[dict[str, Any], Optional[Callable[[str], None]]]: + """ + Logs arguments within the evaluation context if the path or file ID matches. + + This method ensures that if multiple logs are made against the same file, only the first one + is considered toward the evaluation run. 
If a log has already been made, subsequent calls + will return the log arguments without adding evaluation-specific metadata. + + Args: + log_args (dict[str, Any]): The log arguments to be recorded. + path (Optional[str]): The file path for logging (if applicable). + file_id (Optional[str]): The file ID for logging (if applicable). + + Returns: + tuple[dict[str, Any], Optional[Callable[[str], None]]]: + - Updated log arguments with additional context information if applicable. + - A callback function if logging belongs to the evaluation file, otherwise None. + + Raises: + HumanloopRuntimeError: If neither `path` nor `file_id` is provided. + """ if path is None and file_id is None: - raise HumanloopRuntimeError( - "Internal error: Evaluation context called without providing a path of file_id" - ) + raise HumanloopRuntimeError("Internal error: Evaluation context called without providing a path or file_id") + + # Ensure only the first log against the same file is considered + if self._logged: + return log_args, None if self.path is not None and self.path == path: self._logged = True self._context_log_belongs_eval_file = True - yield { + return { **log_args, "source_datapoint_id": self.source_datapoint_id, "run_id": self.run_id, - } + }, self._callback elif self.file_id is not None and self.file_id == file_id: self._logged = True self._context_log_belongs_eval_file = True - yield { + return { **log_args, "source_datapoint_id": self.source_datapoint_id, "run_id": self.run_id, - } + }, self._callback else: - yield log_args - self._context_log_belongs_eval_file = False - - @property - def callback(self) -> Optional[Callable[[str], None]]: - if self._context_log_belongs_eval_file: - return self._callback - return None - - def __init__( - self, - source_datapoint_id: str, - run_id: str, - eval_callback: Callable[[str], None], - file_id: str, - path: str, - ): - self.source_datapoint_id = source_datapoint_id - self.run_id = run_id - self._callback = eval_callback - self.file_id = file_id - self.path = path - self._logged = False + return log_args, None @contextmanager def set_evaluation_context( evaluation_context: EvaluationContext, ) -> Generator[None, None, None]: + # Use threading.get_ident() to ensure the context is unique to the current thread key = str(hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))) reset_token = context_api.attach(context_api.set_value(key, evaluation_context)) yield @@ -130,5 +168,6 @@ def set_evaluation_context( def get_evaluation_context() -> Optional[EvaluationContext]: + # Use threading.get_ident() to ensure the context is unique to the current thread key = str(hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))) return context_api.get_value(key) # type: ignore [return-value] diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py index 468bffcd..5e99cab0 100644 --- a/src/humanloop/decorators/prompt.py +++ b/src/humanloop/decorators/prompt.py @@ -2,7 +2,7 @@ import logging from typing_extensions import ParamSpec -from typing import Callable, Optional, TypeVar +from typing import Callable, TypeVar from humanloop.context import DecoratorContext, set_decorator_context from humanloop.evals.types import File @@ -13,7 +13,7 @@ R = TypeVar("R") -def prompt_decorator_factory(path: str, template: Optional[str]): +def prompt_decorator_factory(path: str): def decorator(func: Callable[P, R]) -> Callable[P, R]: @wraps(func) def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: @@ -22,7 +22,8 @@ def wrapper(*args: P.args, **kwargs: 
P.kwargs) -> R: path=path, type="prompt", version={ - "template": template, + # TODO: Implement a reverse-lookup of the template + "template": None, }, ) ): @@ -33,7 +34,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: path=path, type="prompt", version={ # type: ignore [typeddict-item] - "template": template, + "template": None, }, callable=wrapper, ) diff --git a/src/humanloop/error.py b/src/humanloop/error.py index ed7ac177..beff349c 100644 --- a/src/humanloop/error.py +++ b/src/humanloop/error.py @@ -2,6 +2,13 @@ class HumanloopRuntimeError(Exception): + """ + SDK custom code handles exceptions by populating Logs' `error` field. + + This exception signals an error severe enough to crash the execution, + e.g. illegal use of decorators. + """ + def __init__(self, message: Optional[str] = None): self.message = message diff --git a/src/humanloop/otel/exporter/__init__.py b/src/humanloop/otel/exporter/__init__.py index ab997d87..b3220488 100644 --- a/src/humanloop/otel/exporter/__init__.py +++ b/src/humanloop/otel/exporter/__init__.py @@ -85,25 +85,24 @@ def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: ) evaluation_context = get_evaluation_context() if evaluation_context is not None: - with evaluation_context.spy_log_args( + kwargs_eval, eval_callback = evaluation_context.log_args_with_context( path=path, # type: ignore [arg-type] log_args=log_args, # type: ignore [arg-type] - ) as log_args: - write_to_opentelemetry_span( - span=span, - key=HUMANLOOP_LOG_KEY, - value=log_args, - ) - eval_context_callback = evaluation_context.callback + ) + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=kwargs_eval, + ) else: - eval_context_callback = None + eval_callback = None except HumanloopRuntimeError as e: raise e - except Exception as e: - # No log args, no callback - eval_context_callback = None + except Exception: + # No log args in the span + eval_callback = None - self._upload_queue.put((span, eval_context_callback)) + self._upload_queue.put((span, eval_callback)) return SpanExportResult.SUCCESS diff --git a/src/humanloop/otel/exporter/proto.py b/src/humanloop/otel/exporter/proto.py index 437ffe86..85dde94b 100644 --- a/src/humanloop/otel/exporter/proto.py +++ b/src/humanloop/otel/exporter/proto.py @@ -12,6 +12,9 @@ def serialize_span(span_to_export: ReadableSpan) -> str: + """ + Serialize a span into a format compatible with the /otel backend endpoint.
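The contract spelled out in `HumanloopRuntimeError`'s docstring above, and applied in the `overload.py` hunk below, amounts to a simple handling rule. A sketch under assumed names (`run_user_code` and the returned dict shape are illustrative, not SDK API; only the exception class comes from the patch):

```python
from humanloop.error import HumanloopRuntimeError


def run_user_code(fn):
    """Sketch of how decorator code treats exceptions from wrapped functions."""
    try:
        return {"output": fn(), "error": None}
    except HumanloopRuntimeError:
        # Severe misuse (e.g. illegal use of decorators) must crash the
        # execution, so it propagates instead of being logged.
        raise
    except Exception as e:
        # Ordinary failures are captured on the Log's `error` field,
        # with `output` left unset.
        return {"output": None, "error": str(e)}
```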
+ """ payload = TracesData( resource_spans=[ ResourceSpans( diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py index cf253562..c04922ba 100644 --- a/src/humanloop/otel/processor.py +++ b/src/humanloop/otel/processor.py @@ -1,6 +1,4 @@ import logging -from typing import Optional -from opentelemetry import context as context_api from opentelemetry.sdk.trace import ReadableSpan, Span @@ -23,6 +21,7 @@ def __init__(self, exporter: SpanExporter) -> None: super().__init__(exporter) def on_start(self, span: Span, parent_context=...): + """Called when a Span is started.""" if is_llm_provider_call(span): decorator_context = get_decorator_context() if decorator_context and decorator_context.type == "prompt": @@ -42,6 +41,7 @@ def on_start(self, span: Span, parent_context=...): span.set_attribute(f"{HUMANLOOP_LOG_KEY}.trace_parent_id", trace_id) def on_end(self, span: ReadableSpan): + """Called when a Span finishes recording.""" if is_llm_provider_call(span): decorator_context = get_decorator_context() if decorator_context is None or decorator_context.type != "prompt": diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py index bff1a610..b0c83215 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -68,14 +68,16 @@ def _overload_log( } evaluation_context = get_evaluation_context() if evaluation_context is not None: - with evaluation_context.spy_log_args(path=kwargs.get("path"), log_args=kwargs) as kwargs: - try: - response = self._log(**kwargs) - except Exception as e: - # Re-raising as HumanloopDecoratorError so the decorators don't catch it - raise HumanloopRuntimeError from e - if evaluation_context.callback is not None: - evaluation_context.callback(response.id) + kwargs_eval, eval_callback = evaluation_context.log_args_with_context( + path=kwargs.get("path"), log_args=kwargs + ) + try: + response = self._log(**kwargs_eval) + except Exception as e: + # Re-raising as HumanloopDecoratorError so the decorators don't catch it + raise HumanloopRuntimeError from e + if eval_callback is not None: + eval_callback(response.id) else: try: response = self._log(**kwargs) From 10012274a83e027f966ed78b15419dcb52463374 Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Fri, 7 Mar 2025 14:49:12 +0000 Subject: [PATCH 12/14] mypy nit --- src/humanloop/decorators/tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py index 102de834..1fddc3d6 100644 --- a/src/humanloop/decorators/tool.py +++ b/src/humanloop/decorators/tool.py @@ -64,7 +64,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: with opentelemetry_tracer.start_as_current_span("humanloop.tool") as span: # Write the Tool Kernel to the Span on HL_FILE_OT_KEY write_to_opentelemetry_span( - span=span, + span=span, # type: ignore [arg-type] key=HUMANLOOP_FILE_KEY, value=tool_kernel, # type: ignore [arg-type] ) @@ -104,7 +104,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: } # Write the Tool Log to the Span on HL_LOG_OT_KEY write_to_opentelemetry_span( - span=span, + span=span, # type: ignore [arg-type] key=HUMANLOOP_LOG_KEY, value=tool_log, # type: ignore [arg-type] ) From 201fb95202cfce08298d1a332150e6861ad19ead Mon Sep 17 00:00:00 2001 From: Andrei Bratu Date: Fri, 7 Mar 2025 15:11:43 +0000 Subject: [PATCH 13/14] checking dependencies --- poetry.lock | 33 +-------------------------------- pyproject.toml | 7 +++---- 2 files changed, 4 insertions(+), 36 deletions(-) diff --git 
a/poetry.lock b/poetry.lock index abcd91cb..010258ef 100644 --- a/poetry.lock +++ b/poetry.lock @@ -248,25 +248,6 @@ humanfriendly = ">=9.1" [package.extras] cron = ["capturer (>=2.4)"] -[[package]] -name = "deepdiff" -version = "8.3.0" -description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "deepdiff-8.3.0-py3-none-any.whl", hash = "sha256:838acf1b17d228f4155bcb69bb265c41cbb5b2aba2575f07efa67ad9b9b7a0b5"}, - {file = "deepdiff-8.3.0.tar.gz", hash = "sha256:92a8d7c75a4b26b385ec0372269de258e20082307ccf74a4314341add3d88391"}, -] - -[package.dependencies] -orderly-set = ">=5.3.0,<6" - -[package.extras] -cli = ["click (==8.1.8)", "pyyaml (==6.0.2)"] -optimize = ["orjson"] - [[package]] name = "deprecated" version = "1.2.18" @@ -1234,18 +1215,6 @@ files = [ {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] -[[package]] -name = "orderly-set" -version = "5.3.0" -description = "Orderly set" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "orderly_set-5.3.0-py3-none-any.whl", hash = "sha256:c2c0bfe604f5d3d9b24e8262a06feb612594f37aa3845650548befd7772945d1"}, - {file = "orderly_set-5.3.0.tar.gz", hash = "sha256:80b3d8fdd3d39004d9aad389eaa0eab02c71f0a0511ba3a6d54a935a6c6a0acc"}, -] - [[package]] name = "packaging" version = "24.2" @@ -2504,4 +2473,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4" -content-hash = "228369a2cf47fc8534f45277fd0a0118ab96b5ec90b0c6088de2f00f06a502d4" +content-hash = "8841da1852d6bbeddd7c87b36fe7d3d64e1aacf550a1e6cfdfaa9a1bc3f76301" diff --git a/pyproject.toml b/pyproject.toml index 3ba977cd..93b6e192 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,6 @@ Repository = 'https://github.com/humanloop/humanloop-python' [tool.poetry.dependencies] python = ">=3.9,<4" -deepdiff = "^8.2.0" httpx = ">=0.21.2" httpx-sse = "0.4.0" mmh3 = "^5.1.0" @@ -52,10 +51,12 @@ pydantic = ">= 1.9.2" pydantic-core = "^2.18.2" typing_extensions = ">= 4.0.0" opentelemetry-proto = "^1.30.0" +protobuf = "^5.29.3" [tool.poetry.dev-dependencies] mypy = "1.0.1" pytest = "^7.4.0" +types-protobuf = "^5.29.1.20250208" pytest-asyncio = "^0.23.5" python-dateutil = "^2.9.0" types-python-dateutil = "^2.9.0.20240316" @@ -66,8 +67,6 @@ jsonschema = "^4.23.0" numpy = "<2.0.0" onnxruntime = "<=1.19.2" openai = "^1.52.2" -protobuf = "^5.29.3" -types-protobuf = "^5.29.1.20250208" pandas = "^2.2.0" parse-type = ">=0.6.4" pyarrow = "^19.0.0" @@ -90,4 +89,4 @@ line-length = 120 [build-system] requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +build-backend = "poetry.core.masonry.api" \ No newline at end of file From e5d052536365248ce151f8fbfcf658be645dd84a Mon Sep 17 00:00:00 2001 From: James Baskerville Date: Fri, 7 Mar 2025 18:23:44 +0000 Subject: [PATCH 14/14] fix: Add sleep to OTEL exporters (#51) Infinite polling the export queue without a sleep was causing threads to take up a full core of CPU. Simple sleep fixes that. 
--- src/humanloop/otel/exporter/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/humanloop/otel/exporter/__init__.py b/src/humanloop/otel/exporter/__init__.py index b3220488..681deb76 100644 --- a/src/humanloop/otel/exporter/__init__.py +++ b/src/humanloop/otel/exporter/__init__.py @@ -1,5 +1,6 @@ import logging +import time import typing from queue import Empty as EmptyQueue from queue import Queue @@ -129,7 +130,8 @@ def _do_work(self): # Don't block or the thread will never be notified of the shutdown thread_args = self._upload_queue.get(block=False) # type: ignore except EmptyQueue: - # Wait for the another span to arrive + # Wait for another span to arrive + time.sleep(0.1) continue span_to_export, eval_context_callback = thread_args
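Pieced together, the fixed worker loop follows a standard non-blocking consumer pattern. A condensed sketch of the idea (the `is_shutdown` flag, `export_one` helper, and `task_done` call are assumptions for illustration, not the SDK's exact code; the tuple shape and the 100ms sleep come from the hunks above):

```python
import time
from queue import Empty, Queue
from typing import Callable


def do_work(upload_queue: Queue, is_shutdown: Callable[[], bool], export_one: Callable) -> None:
    # Sketch of the exporter's worker thread after the fix.
    while not is_shutdown():
        try:
            # Non-blocking get, so a shutdown signal is noticed promptly.
            item = upload_queue.get(block=False)
        except Empty:
            # Without this sleep the loop spins at 100% CPU while idle;
            # a 100ms pause keeps the exporter thread effectively free.
            time.sleep(0.1)
            continue
        span_to_export, eval_context_callback = item
        export_one(span_to_export, eval_context_callback)
        upload_queue.task_done()
```

An alternative with the same effect would be a blocking `upload_queue.get(timeout=0.1)`, which folds the pause into the wait itself; the explicit sleep keeps the original control flow unchanged.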